1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
5 Free Software Foundation, Inc.
7 This file is part of GAS, the GNU Assembler.
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GAS; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
02110-1301, USA.  */
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
32 #include "safe-ctype.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
/* Compile-time configuration knobs.
   NOTE(review): the matching #endif lines for the two #ifndef guards
   below are missing from this extract -- restore them when reconciling
   with the upstream file.  */
39 #ifndef REGISTER_WARNINGS
40 #define REGISTER_WARNINGS 1
43 #ifndef INFER_ADDR_PREFIX
44 #define INFER_ADDR_PREFIX 1
/* Architecture assumed when none is selected on the command line.  */
48 #define DEFAULT_ARCH "i386"
53 #define INLINE __inline__
59 /* Prefixes will be emitted in the order defined below.
60 WAIT_PREFIX must be the first prefix since FWAIT really is an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
/* HLE (XACQUIRE/XRELEASE) shares the REP prefix slot -- at most one of
   the two can appear on a single insn.  */
69 #define HLE_PREFIX REP_PREFIX
71 #define REX_PREFIX 6 /* must come last. */
72 #define MAX_PREFIXES 7 /* max prefixes per opcode */
74 /* we define the syntax here (modulo base,index,scale syntax) */
75 #define REGISTER_PREFIX '%'
76 #define IMMEDIATE_PREFIX '$'
77 #define ABSOLUTE_PREFIX '*'
79 /* these are the instruction mnemonic suffixes in AT&T syntax or
80 memory operand size in Intel syntax. */
81 #define WORD_MNEM_SUFFIX 'w'
82 #define BYTE_MNEM_SUFFIX 'b'
83 #define SHORT_MNEM_SUFFIX 's'
84 #define LONG_MNEM_SUFFIX 'l'
85 #define QWORD_MNEM_SUFFIX 'q'
86 #define XMMWORD_MNEM_SUFFIX 'x'
87 #define YMMWORD_MNEM_SUFFIX 'y'
88 /* Intel Syntax.  Use a non-ascii letter since it never appears in
   source text.  */
90 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
/* Sentinel marking the end of the instruction string being parsed.  */
92 #define END_OF_INSN '\0'
95 'templates' is for grouping together 'template' structures for opcodes
96 of the same name. This is only used for storing the insns in the grand
97 ole hash table of insns.
98 The templates themselves start at START and range up to (but not including)
103 const insn_template *start;
104 const insn_template *end;
108 /* 386 operand encoding bytes: see 386 book for details of this. */
111 unsigned int regmem; /* codes register or memory operand */
112 unsigned int reg; /* codes register operand (or extended opcode) */
113 unsigned int mode; /* how to interpret regmem & reg */
117 /* x86-64 extension prefix. */
118 typedef int rex_byte;
120 /* 386 opcode byte to code indirect addressing. */
129 /* x86 arch names, types and features */
132 const char *name; /* arch name */
133 unsigned int len; /* arch string length */
134 enum processor_type type; /* arch type */
135 i386_cpu_flags flags; /* cpu feature flags */
136 unsigned int skip; /* show_arch should skip this. */
137 unsigned int negated; /* turn off indicated flags. */
/* Forward declarations for the helpers that make up md_assemble()'s
   parse -> match -> encode -> output pipeline, plus the pseudo-op
   handlers registered in md_pseudo_table below.
   NOTE(review): the prototypes for i386_finalize_immediate and
   i386_finalize_displacement were split across lines upstream and their
   continuation lines are missing from this extract.  */
141 static void update_code_flag (int, int);
142 static void set_code_flag (int);
143 static void set_16bit_gcc_code_flag (int);
144 static void set_intel_syntax (int);
145 static void set_intel_mnemonic (int);
146 static void set_allow_index_reg (int);
147 static void set_check (int);
148 static void set_cpu_arch (int);
150 static void pe_directive_secrel (int);
152 static void signed_cons (int);
153 static char *output_invalid (int c);
154 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
156 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
158 static int i386_att_operand (char *);
159 static int i386_intel_operand (char *, int);
160 static int i386_intel_simplify (expressionS *);
161 static int i386_intel_parse_name (const char *, expressionS *);
162 static const reg_entry *parse_register (char *, char **);
163 static char *parse_insn (char *, char *);
164 static char *parse_operands (char *, const char *);
165 static void swap_operands (void);
166 static void swap_2_operands (int, int);
167 static void optimize_imm (void);
168 static void optimize_disp (void);
169 static const insn_template *match_template (void);
170 static int check_string (void);
171 static int process_suffix (void);
172 static int check_byte_reg (void);
173 static int check_long_reg (void);
174 static int check_qword_reg (void);
175 static int check_word_reg (void);
176 static int finalize_imm (void);
177 static int process_operands (void);
178 static const seg_entry *build_modrm_byte (void);
179 static void output_insn (void);
180 static void output_imm (fragS *, offsetT);
181 static void output_disp (fragS *, offsetT);
183 static void s_bss (int);
185 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
186 static void handle_large_common (int small ATTRIBUTE_UNUSED);
/* Target architecture name; defaults to DEFAULT_ARCH ("i386").  */
189 static const char *default_arch = DEFAULT_ARCH;
194 /* VEX prefix is either 2 byte or 3 byte. */
195 unsigned char bytes[3];
197 /* Destination or source register specifier. */
198 const reg_entry *register_specifier;
201 /* 'md_assemble ()' gathers together information and puts it into a
208 const reg_entry *regs;
213 operand_size_mismatch,
214 operand_type_mismatch,
215 register_type_mismatch,
216 number_of_operands_mismatch,
217 invalid_instruction_suffix,
220 unsupported_with_intel_mnemonic,
223 invalid_vsib_address,
224 invalid_vector_register_set,
225 unsupported_vector_index_register
230 /* TM holds the template for the insn were currently assembling. */
233 /* SUFFIX holds the instruction size suffix for byte, word, dword
234 or qword, if given. */
237 /* OPERANDS gives the number of given operands. */
238 unsigned int operands;
240 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
241 of given register, displacement, memory operands and immediate
243 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
245 /* TYPES [i] is the type (see above #defines) which tells us how to
246 use OP[i] for the corresponding operand. */
247 i386_operand_type types[MAX_OPERANDS];
249 /* Displacement expression, immediate expression, or register for each
251 union i386_op op[MAX_OPERANDS];
253 /* Flags for operands. */
254 unsigned int flags[MAX_OPERANDS];
255 #define Operand_PCrel 1
257 /* Relocation type for operand */
258 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
260 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
261 the base index byte below. */
262 const reg_entry *base_reg;
263 const reg_entry *index_reg;
264 unsigned int log2_scale_factor;
266 /* SEG gives the seg_entries of this insn. They are zero unless
267 explicit segment overrides are given. */
268 const seg_entry *seg[2];
270 /* PREFIX holds all the given prefix opcodes (usually null).
271 PREFIXES is the number of prefix opcodes. */
272 unsigned int prefixes;
273 unsigned char prefix[MAX_PREFIXES];
275 /* RM and SIB are the modrm byte and the sib byte where the
276 addressing modes of this insn are encoded. */
282 /* Swap operand in encoding. */
283 unsigned int swap_operand;
285 /* Prefer 8bit or 32bit displacement in encoding. */
288 disp_encoding_default = 0,
294 const char *rep_prefix;
297 const char *hle_prefix;
300 enum i386_error error;
303 typedef struct _i386_insn i386_insn;
305 /* List of chars besides those in app.c:symbol_chars that can start an
306 operand. Used to prevent the scrubber eating vital white-space. */
307 const char extra_symbol_chars[] = "*%-(["
316 #if (defined (TE_I386AIX) \
317 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
318 && !defined (TE_GNU) \
319 && !defined (TE_LINUX) \
320 && !defined (TE_NACL) \
321 && !defined (TE_NETWARE) \
322 && !defined (TE_FreeBSD) \
323 && !defined (TE_DragonFly) \
324 && !defined (TE_NetBSD)))
325 /* This array holds the chars that always start a comment. If the
326 pre-processor is disabled, these aren't very useful. The option
327 --divide will remove '/' from this list. */
328 const char *i386_comment_chars = "#/";
329 #define SVR4_COMMENT_CHARS 1
330 #define PREFIX_SEPARATOR '\\'
333 const char *i386_comment_chars = "#";
334 #define PREFIX_SEPARATOR '/'
337 /* This array holds the chars that only start a comment at the beginning of
338 a line. If the line seems to have the form '# 123 filename'
339 .line and .file directives will appear in the pre-processed output.
340 Note that input_file.c hand checks for '#' at the beginning of the
341 first line of the input file. This is because the compiler outputs
342 #NO_APP at the beginning of its output.
343 Also note that comments started like this one will always work if
344 '/' isn't otherwise defined. */
345 const char line_comment_chars[] = "#/";
347 const char line_separator_chars[] = ";";
349 /* Chars that can be used to separate mant from exp in floating point
   numbers.  */
351 const char EXP_CHARS[] = "eE";
353 /* Chars that mean this number is a floating point constant.  */
356 const char FLT_CHARS[] = "fFdDxX";
358 /* Tables for lexical analysis. */
/* 256-entry classification tables indexed by unsigned char; a nonzero
   entry means the character belongs to the class.  They are filled in
   at initialization (not visible in this extract).  */
359 static char mnemonic_chars[256];
360 static char register_chars[256];
361 static char operand_chars[256];
362 static char identifier_chars[256];
363 static char digit_chars[256];
365 /* Lexical macros. */
366 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
367 #define is_operand_char(x) (operand_chars[(unsigned char) x])
368 #define is_register_char(x) (register_chars[(unsigned char) x])
369 #define is_space_char(x) ((x) == ' ')
370 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
371 #define is_digit_char(x) (digit_chars[(unsigned char) x])
373 /* All non-digit non-letter characters that may occur in an operand. */
374 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
376 /* md_assemble() always leaves the strings it's passed unaltered. To
377 effect this we maintain a stack of saved characters that we've smashed
378 with '\0's (indicating end of strings for various sub-fields of the
379 assembler instruction). */
380 static char save_stack[32];
/* Top-of-stack pointer into save_stack; presumably initialized to
   save_stack before each insn is assembled -- initialization is not
   visible in this extract.  */
381 static char *save_stack_p;
/* Temporarily NUL-terminate the string at S, remembering the smashed
   character so RESTORE_END_STRING can undo it (LIFO order).  */
382 #define END_STRING_AND_SAVE(s) \
383 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
384 #define RESTORE_END_STRING(s) \
385 do { *(s) = *--save_stack_p; } while (0)
387 /* The instruction we're assembling. */
390 /* Possible templates for current insn. */
391 static const templates *current_templates;
393 /* Per instruction expressionS buffers: max displacements & immediates. */
394 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
395 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
397 /* Current operand we are working on. */
398 static int this_operand = -1;
400 /* We support four different modes. FLAG_CODE variable is used to distinguish
408 static enum flag_code flag_code;
409 static unsigned int object_64bit;
410 static unsigned int disallow_64bit_reloc;
411 static int use_rela_relocations = 0;
413 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
414 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
415 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
417 /* The ELF ABI to use. */
425 static enum x86_elf_abi x86_elf_abi = I386_ABI;
428 /* 1 for intel syntax,
430 static int intel_syntax = 0;
432 /* 1 for intel mnemonic,
433 0 if att mnemonic. */
434 static int intel_mnemonic = !SYSV386_COMPAT;
436 /* 1 if support old (<= 2.8.1) versions of gcc. */
437 static int old_gcc = OLDGCC_COMPAT;
439 /* 1 if pseudo registers are permitted. */
440 static int allow_pseudo_reg = 0;
442 /* 1 if register prefix % not required. */
443 static int allow_naked_reg = 0;
445 /* 1 if pseudo index register, eiz/riz, is allowed . */
446 static int allow_index_reg = 0;
448 static enum check_kind
454 sse_check, operand_check = check_warning;
456 /* Register prefix used for error message. */
457 static const char *register_prefix = "%";
459 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
460 leave, push, and pop instructions so that gcc has the same stack
461 frame as in 32 bit mode. */
462 static char stackop_size = '\0';
464 /* Non-zero to optimize code alignment. */
465 int optimize_align_code = 1;
467 /* Non-zero to quieten some warnings. */
468 static int quiet_warnings = 0;
471 static const char *cpu_arch_name = NULL;
472 static char *cpu_sub_arch_name = NULL;
474 /* CPU feature flags. */
475 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
477 /* If we have selected a cpu we are generating instructions for. */
478 static int cpu_arch_tune_set = 0;
480 /* Cpu we are generating instructions for. */
481 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
483 /* CPU feature flags of cpu we are generating instructions for. */
484 static i386_cpu_flags cpu_arch_tune_flags;
486 /* CPU instruction set architecture used. */
487 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
489 /* CPU feature flags of instruction set architecture used. */
490 i386_cpu_flags cpu_arch_isa_flags;
492 /* If set, conditional jumps are not automatically promoted to handle
493 larger than a byte offset. */
494 static unsigned int no_cond_jump_promotion = 0;
496 /* Encode SSE instructions with VEX prefix. */
497 static unsigned int sse2avx;
499 /* Encode scalar AVX instructions with specific vector length. */
506 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
507 static symbolS *GOT_symbol;
509 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
510 unsigned int x86_dwarf2_return_column;
512 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
513 int x86_cie_data_alignment;
515 /* Interface to relax_segment.
516 There are 3 major relax states for 386 jump insns because the
517 different types of jumps add different sizes to frags when we're
518 figuring out what sort of jump to choose to reach a given label. */
521 #define UNCOND_JUMP 0
523 #define COND_JUMP86 2
528 #define SMALL16 (SMALL | CODE16)
530 #define BIG16 (BIG | CODE16)
534 #define INLINE __inline__
/* A relax substate packs the jump TYPE (UNCOND_JUMP / COND_JUMP /
   COND_JUMP86) into the upper bits and the displacement SIZE code
   (SMALL / BIG / SMALL16 / BIG16) into the low two bits.  */
#define ENCODE_RELAX_STATE(type, size) \
  ((relax_substateT) (((type) << 2) | (size)))
/* Recover the jump type from a relax substate.  (The replacement text
   was missing in this extract; restored as the inverse of the << 2 in
   ENCODE_RELAX_STATE.)  */
#define TYPE_FROM_RELAX_STATE(s) \
  ((s) >> 2)
/* Number of displacement bytes implied by a relax substate:
   4 for BIG (32-bit disp), 2 for BIG16, otherwise 1 (byte disp).  */
#define DISP_SIZE_FROM_RELAX_STATE(s) \
    ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
547 /* This table is used by relax_frag to promote short jumps to long
548 ones where necessary. SMALL (short) jumps may be promoted to BIG
549 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
550 don't allow a short jump in a 32 bit code segment to be promoted to
551 a 16 bit offset jump because it's slower (requires data size
552 prefix), and doesn't work, unless the destination is in the bottom
553 64k of the code segment (The top 16 bits of eip are zeroed). */
555 const relax_typeS md_relax_table[] =
558 1) most positive reach of this state,
559 2) most negative reach of this state,
560 3) how many bytes this mode will have in the variable part of the frag
561 4) which index into the table to try if we can't fit into this one. */
563 /* UNCOND_JUMP states. */
564 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
565 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
566 /* dword jmp adds 4 bytes to frag:
567 0 extra opcode bytes, 4 displacement bytes. */
569 /* word jmp adds 2 byte2 to frag:
570 0 extra opcode bytes, 2 displacement bytes. */
573 /* COND_JUMP states. */
574 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
575 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
576 /* dword conditionals adds 5 bytes to frag:
577 1 extra opcode byte, 4 displacement bytes. */
579 /* word conditionals add 3 bytes to frag:
580 1 extra opcode byte, 2 displacement bytes. */
583 /* COND_JUMP86 states. */
584 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
585 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
586 /* dword conditionals adds 5 bytes to frag:
587 1 extra opcode byte, 4 displacement bytes. */
589 /* word conditionals add 4 bytes to frag:
590 1 displacement byte and a 3 byte long branch insn. */
594 static const arch_entry cpu_arch[] =
596 /* Do not replace the first two entries - i386_target_format()
597 relies on them being there in this order. */
598 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
599 CPU_GENERIC32_FLAGS, 0, 0 },
600 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
601 CPU_GENERIC64_FLAGS, 0, 0 },
602 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
603 CPU_NONE_FLAGS, 0, 0 },
604 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
605 CPU_I186_FLAGS, 0, 0 },
606 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
607 CPU_I286_FLAGS, 0, 0 },
608 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
609 CPU_I386_FLAGS, 0, 0 },
610 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
611 CPU_I486_FLAGS, 0, 0 },
612 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
613 CPU_I586_FLAGS, 0, 0 },
614 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
615 CPU_I686_FLAGS, 0, 0 },
616 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
617 CPU_I586_FLAGS, 0, 0 },
618 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
619 CPU_PENTIUMPRO_FLAGS, 0, 0 },
620 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
621 CPU_P2_FLAGS, 0, 0 },
622 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
623 CPU_P3_FLAGS, 0, 0 },
624 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
625 CPU_P4_FLAGS, 0, 0 },
626 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
627 CPU_CORE_FLAGS, 0, 0 },
628 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
629 CPU_NOCONA_FLAGS, 0, 0 },
630 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
631 CPU_CORE_FLAGS, 1, 0 },
632 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
633 CPU_CORE_FLAGS, 0, 0 },
634 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
635 CPU_CORE2_FLAGS, 1, 0 },
636 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
637 CPU_CORE2_FLAGS, 0, 0 },
638 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
639 CPU_COREI7_FLAGS, 0, 0 },
640 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
641 CPU_L1OM_FLAGS, 0, 0 },
642 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
643 CPU_K1OM_FLAGS, 0, 0 },
644 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
645 CPU_K6_FLAGS, 0, 0 },
646 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
647 CPU_K6_2_FLAGS, 0, 0 },
648 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
649 CPU_ATHLON_FLAGS, 0, 0 },
650 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
651 CPU_K8_FLAGS, 1, 0 },
652 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
653 CPU_K8_FLAGS, 0, 0 },
654 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
655 CPU_K8_FLAGS, 0, 0 },
656 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
657 CPU_AMDFAM10_FLAGS, 0, 0 },
658 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
659 CPU_BDVER1_FLAGS, 0, 0 },
660 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
661 CPU_BDVER2_FLAGS, 0, 0 },
662 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
663 CPU_BDVER3_FLAGS, 0, 0 },
664 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
665 CPU_BTVER1_FLAGS, 0, 0 },
666 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
667 CPU_BTVER2_FLAGS, 0, 0 },
668 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
669 CPU_8087_FLAGS, 0, 0 },
670 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
671 CPU_287_FLAGS, 0, 0 },
672 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
673 CPU_387_FLAGS, 0, 0 },
674 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
675 CPU_ANY87_FLAGS, 0, 1 },
676 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
677 CPU_MMX_FLAGS, 0, 0 },
678 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
679 CPU_3DNOWA_FLAGS, 0, 1 },
680 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
681 CPU_SSE_FLAGS, 0, 0 },
682 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
683 CPU_SSE2_FLAGS, 0, 0 },
684 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
685 CPU_SSE3_FLAGS, 0, 0 },
686 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
687 CPU_SSSE3_FLAGS, 0, 0 },
688 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
689 CPU_SSE4_1_FLAGS, 0, 0 },
690 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
691 CPU_SSE4_2_FLAGS, 0, 0 },
692 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
693 CPU_SSE4_2_FLAGS, 0, 0 },
694 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
695 CPU_ANY_SSE_FLAGS, 0, 1 },
696 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
697 CPU_AVX_FLAGS, 0, 0 },
698 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
699 CPU_AVX2_FLAGS, 0, 0 },
700 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
701 CPU_ANY_AVX_FLAGS, 0, 1 },
702 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
703 CPU_VMX_FLAGS, 0, 0 },
704 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
705 CPU_VMFUNC_FLAGS, 0, 0 },
706 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
707 CPU_SMX_FLAGS, 0, 0 },
708 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
709 CPU_XSAVE_FLAGS, 0, 0 },
710 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
711 CPU_XSAVEOPT_FLAGS, 0, 0 },
712 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
713 CPU_AES_FLAGS, 0, 0 },
714 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
715 CPU_PCLMUL_FLAGS, 0, 0 },
716 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
717 CPU_PCLMUL_FLAGS, 1, 0 },
718 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
719 CPU_FSGSBASE_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
721 CPU_RDRND_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
723 CPU_F16C_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
725 CPU_BMI2_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
727 CPU_FMA_FLAGS, 0, 0 },
728 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
729 CPU_FMA4_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
731 CPU_XOP_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
733 CPU_LWP_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
735 CPU_MOVBE_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
737 CPU_CX16_FLAGS, 0, 0 },
738 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
739 CPU_EPT_FLAGS, 0, 0 },
740 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
741 CPU_LZCNT_FLAGS, 0, 0 },
742 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
743 CPU_HLE_FLAGS, 0, 0 },
744 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
745 CPU_RTM_FLAGS, 0, 0 },
746 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
747 CPU_INVPCID_FLAGS, 0, 0 },
748 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
749 CPU_CLFLUSH_FLAGS, 0, 0 },
750 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
751 CPU_NOP_FLAGS, 0, 0 },
752 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
753 CPU_SYSCALL_FLAGS, 0, 0 },
754 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
755 CPU_RDTSCP_FLAGS, 0, 0 },
756 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
757 CPU_3DNOW_FLAGS, 0, 0 },
758 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
759 CPU_3DNOWA_FLAGS, 0, 0 },
760 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
761 CPU_PADLOCK_FLAGS, 0, 0 },
762 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
763 CPU_SVME_FLAGS, 1, 0 },
764 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
765 CPU_SVME_FLAGS, 0, 0 },
766 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
767 CPU_SSE4A_FLAGS, 0, 0 },
768 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
769 CPU_ABM_FLAGS, 0, 0 },
770 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
771 CPU_BMI_FLAGS, 0, 0 },
772 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
773 CPU_TBM_FLAGS, 0, 0 },
774 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
775 CPU_ADX_FLAGS, 0, 0 },
776 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
777 CPU_RDSEED_FLAGS, 0, 0 },
778 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
779 CPU_PRFCHW_FLAGS, 0, 0 },
780 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
781 CPU_SMAP_FLAGS, 0, 0 },
785 /* Like s_lcomm_internal in gas/read.c but the alignment string
786 is allowed to be optional. */
789 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
796 && *input_line_pointer == ',')
798 align = parse_align (needs_align - 1);
800 if (align == (addressT) -1)
815 bss_alloc (symbolP, size, align);
820 pe_lcomm (int needs_align)
822 s_comm_internal (needs_align * 2, pe_lcomm_internal);
/* Target-specific pseudo-op table consumed by gas's read.c: each entry
   is { directive name, handler, integer argument passed to handler }.
   NOTE(review): this extract has dropped the table's opening brace,
   several #else/#endif lines around the conditional entries, and the
   terminating {0, 0, 0} sentinel -- entries below are as found.  */
826 const pseudo_typeS md_pseudo_table[] =
828 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
829 {"align", s_align_bytes, 0},
831 {"align", s_align_ptwo, 0},
833 {"arch", set_cpu_arch, 0},
837 {"lcomm", pe_lcomm, 1},
839 {"ffloat", float_cons, 'f'},
840 {"dfloat", float_cons, 'd'},
841 {"tfloat", float_cons, 'x'},
843 {"slong", signed_cons, 4},
/* Accepted for compatibility and ignored.  */
844 {"noopt", s_ignore, 0},
845 {"optim", s_ignore, 0},
/* Code-size and syntax mode switches.  */
846 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
847 {"code16", set_code_flag, CODE_16BIT},
848 {"code32", set_code_flag, CODE_32BIT},
849 {"code64", set_code_flag, CODE_64BIT},
850 {"intel_syntax", set_intel_syntax, 1},
851 {"att_syntax", set_intel_syntax, 0},
852 {"intel_mnemonic", set_intel_mnemonic, 1},
853 {"att_mnemonic", set_intel_mnemonic, 0},
854 {"allow_index_reg", set_allow_index_reg, 1},
855 {"disallow_index_reg", set_allow_index_reg, 0},
856 {"sse_check", set_check, 0},
857 {"operand_check", set_check, 1},
858 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
859 {"largecomm", handle_large_common, 0},
861 {"file", (void (*) (int)) dwarf2_directive_file, 0},
862 {"loc", dwarf2_directive_loc, 0},
863 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
866 {"secrel32", pe_directive_secrel, 0},
871 /* For interface with expression (). */
872 extern char *input_line_pointer;
874 /* Hash table for instruction mnemonic lookup. */
/* presumably maps mnemonic string -> templates entry; population is
   not visible in this extract -- TODO confirm.  */
875 static struct hash_control *op_hash;
877 /* Hash table for register lookup. */
878 static struct hash_control *reg_hash;
881 i386_align_code (fragS *fragP, int count)
883 /* Various efficient no-op patterns for aligning code labels.
884 Note: Don't try to assemble the instructions in the comments.
885 0L and 0w are not legal. */
886 static const char f32_1[] =
888 static const char f32_2[] =
889 {0x66,0x90}; /* xchg %ax,%ax */
890 static const char f32_3[] =
891 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
892 static const char f32_4[] =
893 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
894 static const char f32_5[] =
896 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
897 static const char f32_6[] =
898 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
899 static const char f32_7[] =
900 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
901 static const char f32_8[] =
903 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
904 static const char f32_9[] =
905 {0x89,0xf6, /* movl %esi,%esi */
906 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
907 static const char f32_10[] =
908 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
909 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
910 static const char f32_11[] =
911 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
912 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
913 static const char f32_12[] =
914 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
915 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
916 static const char f32_13[] =
917 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
918 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
919 static const char f32_14[] =
920 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
921 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
922 static const char f16_3[] =
923 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
924 static const char f16_4[] =
925 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
926 static const char f16_5[] =
928 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
929 static const char f16_6[] =
930 {0x89,0xf6, /* mov %si,%si */
931 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
932 static const char f16_7[] =
933 {0x8d,0x74,0x00, /* lea 0(%si),%si */
934 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
935 static const char f16_8[] =
936 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
937 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
938 static const char jump_31[] =
939 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
940 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
941 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
942 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
943 static const char *const f32_patt[] = {
944 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
945 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
947 static const char *const f16_patt[] = {
948 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
951 static const char alt_3[] =
953 /* nopl 0(%[re]ax) */
954 static const char alt_4[] =
955 {0x0f,0x1f,0x40,0x00};
956 /* nopl 0(%[re]ax,%[re]ax,1) */
957 static const char alt_5[] =
958 {0x0f,0x1f,0x44,0x00,0x00};
959 /* nopw 0(%[re]ax,%[re]ax,1) */
960 static const char alt_6[] =
961 {0x66,0x0f,0x1f,0x44,0x00,0x00};
962 /* nopl 0L(%[re]ax) */
963 static const char alt_7[] =
964 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
965 /* nopl 0L(%[re]ax,%[re]ax,1) */
966 static const char alt_8[] =
967 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
968 /* nopw 0L(%[re]ax,%[re]ax,1) */
969 static const char alt_9[] =
970 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
971 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
972 static const char alt_10[] =
973 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
975 nopw %cs:0L(%[re]ax,%[re]ax,1) */
976 static const char alt_long_11[] =
978 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
981 nopw %cs:0L(%[re]ax,%[re]ax,1) */
982 static const char alt_long_12[] =
985 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
989 nopw %cs:0L(%[re]ax,%[re]ax,1) */
990 static const char alt_long_13[] =
994 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
999 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1000 static const char alt_long_14[] =
1005 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1011 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1012 static const char alt_long_15[] =
1018 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1019 /* nopl 0(%[re]ax,%[re]ax,1)
1020 nopw 0(%[re]ax,%[re]ax,1) */
1021 static const char alt_short_11[] =
1022 {0x0f,0x1f,0x44,0x00,0x00,
1023 0x66,0x0f,0x1f,0x44,0x00,0x00};
1024 /* nopw 0(%[re]ax,%[re]ax,1)
1025 nopw 0(%[re]ax,%[re]ax,1) */
1026 static const char alt_short_12[] =
1027 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1028 0x66,0x0f,0x1f,0x44,0x00,0x00};
1029 /* nopw 0(%[re]ax,%[re]ax,1)
1031 static const char alt_short_13[] =
1032 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1033 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1036 static const char alt_short_14[] =
1037 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1038 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1040 nopl 0L(%[re]ax,%[re]ax,1) */
1041 static const char alt_short_15[] =
1042 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1043 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1044 static const char *const alt_short_patt[] = {
1045 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1046 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1047 alt_short_14, alt_short_15
1049 static const char *const alt_long_patt[] = {
1050 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1051 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1052 alt_long_14, alt_long_15
1055 /* Only align for at least a positive non-zero boundary. */
1056 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1059 /* We need to decide which NOP sequence to use for 32bit and
1060 64bit. When -mtune= is used:
1062 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1063 PROCESSOR_GENERIC32, f32_patt will be used.
1064 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1065 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1066 PROCESSOR_GENERIC64, alt_long_patt will be used.
1067 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1068 PROCESSOR_AMDFAM10, PROCESSOR_BD and PROCESSOR_BT, alt_short_patt
1071 When -mtune= isn't used, alt_long_patt will be used if
1072 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1075 When -march= or .arch is used, we can't use anything beyond
1076 cpu_arch_isa_flags. */
1078 if (flag_code == CODE_16BIT)
1082 memcpy (fragP->fr_literal + fragP->fr_fix,
1084 /* Adjust jump offset. */
1085 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1088 memcpy (fragP->fr_literal + fragP->fr_fix,
1089 f16_patt[count - 1], count);
1093 const char *const *patt = NULL;
1095 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1097 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1098 switch (cpu_arch_tune)
1100 case PROCESSOR_UNKNOWN:
1101 /* We use cpu_arch_isa_flags to check if we SHOULD
1102 optimize with nops. */
1103 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1104 patt = alt_long_patt;
1108 case PROCESSOR_PENTIUM4:
1109 case PROCESSOR_NOCONA:
1110 case PROCESSOR_CORE:
1111 case PROCESSOR_CORE2:
1112 case PROCESSOR_COREI7:
1113 case PROCESSOR_L1OM:
1114 case PROCESSOR_K1OM:
1115 case PROCESSOR_GENERIC64:
1116 patt = alt_long_patt;
1119 case PROCESSOR_ATHLON:
1121 case PROCESSOR_AMDFAM10:
1124 patt = alt_short_patt;
1126 case PROCESSOR_I386:
1127 case PROCESSOR_I486:
1128 case PROCESSOR_PENTIUM:
1129 case PROCESSOR_PENTIUMPRO:
1130 case PROCESSOR_GENERIC32:
1137 switch (fragP->tc_frag_data.tune)
1139 case PROCESSOR_UNKNOWN:
1140 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1141 PROCESSOR_UNKNOWN. */
1145 case PROCESSOR_I386:
1146 case PROCESSOR_I486:
1147 case PROCESSOR_PENTIUM:
1149 case PROCESSOR_ATHLON:
1151 case PROCESSOR_AMDFAM10:
1154 case PROCESSOR_GENERIC32:
1155 /* We use cpu_arch_isa_flags to check if we CAN optimize
1157 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1158 patt = alt_short_patt;
1162 case PROCESSOR_PENTIUMPRO:
1163 case PROCESSOR_PENTIUM4:
1164 case PROCESSOR_NOCONA:
1165 case PROCESSOR_CORE:
1166 case PROCESSOR_CORE2:
1167 case PROCESSOR_COREI7:
1168 case PROCESSOR_L1OM:
1169 case PROCESSOR_K1OM:
1170 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1171 patt = alt_long_patt;
1175 case PROCESSOR_GENERIC64:
1176 patt = alt_long_patt;
1181 if (patt == f32_patt)
1183 /* If the padding is less than 15 bytes, we use the normal
1184 ones. Otherwise, we use a jump instruction and adjust
1188 /* For 64bit, the limit is 3 bytes. */
1189 if (flag_code == CODE_64BIT
1190 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1195 memcpy (fragP->fr_literal + fragP->fr_fix,
1196 patt[count - 1], count);
1199 memcpy (fragP->fr_literal + fragP->fr_fix,
1201 /* Adjust jump offset. */
1202 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1207 /* Maximum length of an instruction is 15 byte. If the
1208 padding is greater than 15 bytes and we don't use jump,
1209 we have to break it into smaller pieces. */
1210 int padding = count;
1211 while (padding > 15)
1214 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1219 memcpy (fragP->fr_literal + fragP->fr_fix,
1220 patt [padding - 1], padding);
1223 fragP->fr_var = count;
/* NOTE(review): this chunk is an elided, line-numbered listing; braces,
   `case'/`default' labels and some statements of these helpers are
   missing.  Comments describe only what the visible fragments show.  */
/* Return non-zero when every element of X->array is zero; the elided
   switch presumably dispatches on the operand-type array size --
   TODO confirm against upstream tc-i386.c.  */
1227 operand_type_all_zero (const union i386_operand_type *x)
1229 switch (ARRAY_SIZE(x->array))
1238 return !x->array[0];
/* Set the operand-type union X from the scalar V (elided cases appear
   to assign the remaining array slots).  */
1245 operand_type_set (union i386_operand_type *x, unsigned int v)
1247 switch (ARRAY_SIZE(x->array))
/* Element-wise equality of two operand-type unions; compares from the
   highest visible array slot down.  */
1262 operand_type_equal (const union i386_operand_type *x,
1263 const union i386_operand_type *y)
1265 switch (ARRAY_SIZE(x->array))
1268 if (x->array[2] != y->array[2])
1271 if (x->array[1] != y->array[1])
1274 return x->array[0] == y->array[0];
/* The same three shapes repeated for i386_cpu_flags: all-zero test...  */
1282 cpu_flags_all_zero (const union i386_cpu_flags *x)
1284 switch (ARRAY_SIZE(x->array))
1293 return !x->array[0];
/* ...bulk set from scalar V...  */
1300 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1302 switch (ARRAY_SIZE(x->array))
/* ...and element-wise equality.  */
1317 cpu_flags_equal (const union i386_cpu_flags *x,
1318 const union i386_cpu_flags *y)
1320 switch (ARRAY_SIZE(x->array))
1323 if (x->array[2] != y->array[2])
1326 if (x->array[1] != y->array[1])
1329 return x->array[0] == y->array[0];
/* Return 1 when flags F are compatible with the current code size:
   reject CpuNo64 templates in 64-bit mode and Cpu64 templates when not
   in 64-bit mode.  */
1337 cpu_flags_check_cpu64 (i386_cpu_flags f)
1339 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1340 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
/* Element-wise X &= Y over the cpu-flags array, returned by value.
   The elided `case' labels evidently fall through so all populated
   array slots are combined.  */
1343 static INLINE i386_cpu_flags
1344 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1346 switch (ARRAY_SIZE (x.array))
1349 x.array [2] &= y.array [2];
1351 x.array [1] &= y.array [1];
1353 x.array [0] &= y.array [0];
/* Element-wise X |= Y, same structure as cpu_flags_and.  */
1361 static INLINE i386_cpu_flags
1362 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1364 switch (ARRAY_SIZE (x.array))
1367 x.array [2] |= y.array [2];
1369 x.array [1] |= y.array [1];
1371 x.array [0] |= y.array [0];
/* Element-wise X &= ~Y: clear in X every flag that is set in Y.  */
1379 static INLINE i386_cpu_flags
1380 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1382 switch (ARRAY_SIZE (x.array))
1385 x.array [2] &= ~y.array [2];
1387 x.array [1] &= ~y.array [1];
1389 x.array [0] &= ~y.array [0];
/* Bit values returned by cpu_flags_match below; PERFECT_MATCH is every
   bit at once.  (Elided listing -- some context lines are missing.)  */
1397 #define CPU_FLAGS_ARCH_MATCH 0x1
1398 #define CPU_FLAGS_64BIT_MATCH 0x2
1399 #define CPU_FLAGS_AES_MATCH 0x4
1400 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1401 #define CPU_FLAGS_AVX_MATCH 0x10
1403 #define CPU_FLAGS_32BIT_MATCH \
1404 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1405 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1406 #define CPU_FLAGS_PERFECT_MATCH \
1407 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1409 /* Return CPU flags match bits. */
/* Compare template T's required CPU flags against the currently
   enabled cpu_arch_flags and return the CPU_FLAGS_* bits that match.
   64-bit compatibility is folded in first, then the cpu64/cpuno64
   bits are masked out so the remaining comparison is size-neutral.  */
1412 cpu_flags_match (const insn_template *t)
1414 i386_cpu_flags x = t->cpu_flags;
1415 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1417 x.bitfield.cpu64 = 0;
1418 x.bitfield.cpuno64 = 0;
1420 if (cpu_flags_all_zero (&x))
1422 /* This instruction is available on all archs. */
1423 match |= CPU_FLAGS_32BIT_MATCH;
1427 /* This instruction is available only on some archs. */
1428 i386_cpu_flags cpu = cpu_arch_flags;
1430 cpu.bitfield.cpu64 = 0;
1431 cpu.bitfield.cpuno64 = 0;
/* CPU holds the intersection of the template's requirements and the
   enabled architecture flags.  */
1432 cpu = cpu_flags_and (x, cpu);
1433 if (!cpu_flags_all_zero (&cpu))
1435 if (x.bitfield.cpuavx)
1437 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1438 if (cpu.bitfield.cpuavx)
1440 /* Check SSE2AVX. */
1441 if (!t->opcode_modifier.sse2avx|| sse2avx)
1443 match |= (CPU_FLAGS_ARCH_MATCH
1444 | CPU_FLAGS_AVX_MATCH);
/* AES and PCLMUL requirements only count against the match when the
   template actually asks for them.  */
1446 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1447 match |= CPU_FLAGS_AES_MATCH;
1449 if (!x.bitfield.cpupclmul
1450 || cpu.bitfield.cpupclmul)
1451 match |= CPU_FLAGS_PCLMUL_MATCH;
1455 match |= CPU_FLAGS_ARCH_MATCH;
1458 match |= CPU_FLAGS_32BIT_MATCH;
/* Element-wise AND of two operand-type masks, returned by value.
   (Elided listing: `case' labels/braces are missing; the visible
   statements evidently fall through across array slots.)  */
1464 static INLINE i386_operand_type
1465 operand_type_and (i386_operand_type x, i386_operand_type y)
1467 switch (ARRAY_SIZE (x.array))
1470 x.array [2] &= y.array [2];
1472 x.array [1] &= y.array [1];
1474 x.array [0] &= y.array [0];
/* Element-wise OR of two operand-type masks.  */
1482 static INLINE i386_operand_type
1483 operand_type_or (i386_operand_type x, i386_operand_type y)
1485 switch (ARRAY_SIZE (x.array))
1488 x.array [2] |= y.array [2];
1490 x.array [1] |= y.array [1];
1492 x.array [0] |= y.array [0];
/* Element-wise XOR of two operand-type masks.  */
1500 static INLINE i386_operand_type
1501 operand_type_xor (i386_operand_type x, i386_operand_type y)
1503 switch (ARRAY_SIZE (x.array))
1506 x.array [2] ^= y.array [2];
1508 x.array [1] ^= y.array [1];
1510 x.array [0] ^= y.array [0];
/* Pre-built operand-type masks used throughout operand matching; the
   OPERAND_TYPE_* initializers come from the generated i386-init.h.  */
1518 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1519 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1520 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1521 static const i386_operand_type inoutportreg
1522 = OPERAND_TYPE_INOUTPORTREG;
1523 static const i386_operand_type reg16_inoutportreg
1524 = OPERAND_TYPE_REG16_INOUTPORTREG;
1525 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1526 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1527 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1528 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1529 static const i386_operand_type anydisp
1530 = OPERAND_TYPE_ANYDISP;
1531 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1532 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1533 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1534 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1535 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1536 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1537 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1538 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1539 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1540 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1541 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1542 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
/* Classify operand type T against category C (reg/imm/disp/anymem);
   the dispatching switch and `case' labels are elided from this
   listing -- each visible return belongs to one category.  */
1553 operand_type_check (i386_operand_type t, enum operand_type c)
/* reg: any general register size bit set.  */
1558 return (t.bitfield.reg8
1561 || t.bitfield.reg64);
/* imm: any immediate width bit set.  */
1564 return (t.bitfield.imm8
1568 || t.bitfield.imm32s
1569 || t.bitfield.imm64);
/* disp: any displacement width bit set.  */
1572 return (t.bitfield.disp8
1573 || t.bitfield.disp16
1574 || t.bitfield.disp32
1575 || t.bitfield.disp32s
1576 || t.bitfield.disp64);
/* anymem: a displacement or a base/index addressing form.  */
1579 return (t.bitfield.disp8
1580 || t.bitfield.disp16
1581 || t.bitfield.disp32
1582 || t.bitfield.disp32s
1583 || t.bitfield.disp64
1584 || t.bitfield.baseindex);
1593 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1594 operand J for instruction template T. */
/* i.types[j] holds the parsed operand's attributes; reject only when
   the operand asserts a size the template does not accept.  */
1597 match_reg_size (const insn_template *t, unsigned int j)
1599 return !((i.types[j].bitfield.byte
1600 && !t->operand_types[j].bitfield.byte)
1601 || (i.types[j].bitfield.word
1602 && !t->operand_types[j].bitfield.word)
1603 || (i.types[j].bitfield.dword
1604 && !t->operand_types[j].bitfield.dword)
1605 || (i.types[j].bitfield.qword
1606 && !t->operand_types[j].bitfield.qword));
1609 /* Return 1 if there is no conflict in any size on operand J for
1610 instruction template T. */
/* Memory operands additionally check the wider/unspecified sizes on
   top of the general-register widths.  */
1613 match_mem_size (const insn_template *t, unsigned int j)
1615 return (match_reg_size (t, j)
1616 && !((i.types[j].bitfield.unspecified
1617 && !t->operand_types[j].bitfield.unspecified)
1618 || (i.types[j].bitfield.fword
1619 && !t->operand_types[j].bitfield.fword)
1620 || (i.types[j].bitfield.tbyte
1621 && !t->operand_types[j].bitfield.tbyte)
1622 || (i.types[j].bitfield.xmmword
1623 && !t->operand_types[j].bitfield.xmmword)
1624 || (i.types[j].bitfield.ymmword
1625 && !t->operand_types[j].bitfield.ymmword)));
1628 /* Return 1 if there is no size conflict on any operands for
1629 instruction template T. */
/* Checks every operand; on mismatch it may retry with the operands
   reversed for templates marked D/FloatD (elided control flow --
   TODO confirm against upstream).  */
1632 operand_size_match (const insn_template *t)
1637 /* Don't check jump instructions. */
1638 if (t->opcode_modifier.jump
1639 || t->opcode_modifier.jumpbyte
1640 || t->opcode_modifier.jumpdword
1641 || t->opcode_modifier.jumpintersegment)
1644 /* Check memory and accumulator operand size. */
1645 for (j = 0; j < i.operands; j++)
/* AnySize templates accept any operand size outright.  */
1647 if (t->operand_types[j].bitfield.anysize)
1650 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1656 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
/* Non-reversible template: a size mismatch is final.  */
1665 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1668 i.error = operand_size_mismatch;
1672 /* Check reverse. */
1673 gas_assert (i.operands == 2);
1676 for (j = 0; j < 2; j++)
/* j ? 0 : 1 matches operand J against the template slot for the
   OTHER operand, i.e. the reversed form.  */
1678 if (t->operand_types[j].bitfield.acc
1679 && !match_reg_size (t, j ? 0 : 1))
1682 if (i.types[j].bitfield.mem
1683 && !match_mem_size (t, j ? 0 : 1))
/* Decide whether the overlap of a parsed operand type and a template
   operand type is an acceptable match.  Size bits are cleared first
   because sizes were already validated by operand_size_match.
   (Elided listing: braces and some returns are missing.)  */
1691 operand_type_match (i386_operand_type overlap,
1692 i386_operand_type given)
1694 i386_operand_type temp = overlap;
1696 temp.bitfield.jumpabsolute = 0;
1697 temp.bitfield.unspecified = 0;
1698 temp.bitfield.byte = 0;
1699 temp.bitfield.word = 0;
1700 temp.bitfield.dword = 0;
1701 temp.bitfield.fword = 0;
1702 temp.bitfield.qword = 0;
1703 temp.bitfield.tbyte = 0;
1704 temp.bitfield.xmmword = 0;
1705 temp.bitfield.ymmword = 0;
/* Nothing left after masking sizes -> no meaningful overlap.  */
1706 if (operand_type_all_zero (&temp))
/* BaseIndex and JumpAbsolute must agree exactly between the given
   operand and the overlap.  */
1709 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1710 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1714 i.error = operand_type_mismatch;
1718 /* If given types g0 and g1 are registers they must be of the same type
1719 unless the expected operand type register overlap is null.
1720 Note that Acc in a template matches every size of reg. */
1723 operand_type_register_match (i386_operand_type m0,
1724 i386_operand_type g0,
1725 i386_operand_type t0,
1726 i386_operand_type m1,
1727 i386_operand_type g1,
1728 i386_operand_type t1)
/* Non-register operands are outside this check's scope.  */
1730 if (!operand_type_check (g0, reg))
1733 if (!operand_type_check (g1, reg))
/* Same register class on both sides -> trivially consistent.  */
1736 if (g0.bitfield.reg8 == g1.bitfield.reg8
1737 && g0.bitfield.reg16 == g1.bitfield.reg16
1738 && g0.bitfield.reg32 == g1.bitfield.reg32
1739 && g0.bitfield.reg64 == g1.bitfield.reg64)
/* Acc in the template widens the expected type to all sizes.  */
1742 if (m0.bitfield.acc)
1744 t0.bitfield.reg8 = 1;
1745 t0.bitfield.reg16 = 1;
1746 t0.bitfield.reg32 = 1;
1747 t0.bitfield.reg64 = 1;
1750 if (m1.bitfield.acc)
1752 t1.bitfield.reg8 = 1;
1753 t1.bitfield.reg16 = 1;
1754 t1.bitfield.reg32 = 1;
1755 t1.bitfield.reg64 = 1;
/* No common expected register size -> mismatch.  */
1758 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1759 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1760 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1761 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1764 i.error = register_type_mismatch;
/* Encoded register number of R, with the REX extension bit folded in
   (elided line presumably adds 8 when RegRex is set -- TODO confirm).  */
1769 static INLINE unsigned int
1770 register_number (const reg_entry *r)
1772 unsigned int nr = r->reg_num;
1774 if (r->reg_flags & RegRex)
/* ModRM mode field implied by displacement size: disp8 and the wider
   disp16/32/32s forms select different modes (returns elided).  */
1780 static INLINE unsigned int
1781 mode_from_disp_size (i386_operand_type t)
1783 if (t.bitfield.disp8)
1785 else if (t.bitfield.disp16
1786 || t.bitfield.disp32
1787 || t.bitfield.disp32s)
1794 fits_in_signed_byte (offsetT num)
1796 return (num >= -128) && (num <= 127);
1800 fits_in_unsigned_byte (offsetT num)
1802 return (num & 0xff) == num;
1806 fits_in_unsigned_word (offsetT num)
1808 return (num & 0xffff) == num;
1812 fits_in_signed_word (offsetT num)
1814 return (-32768 <= num) && (num <= 32767);
/* Return non-zero if NUM fits in a signed 32 bit value.  NUM is
   ATTRIBUTE_UNUSED because the elided non-BFD64 branch presumably
   returns 1 unconditionally -- TODO confirm against upstream.
   NOTE(review): `(offsetT) -1 << 31' left-shifts a negative value,
   which is formally undefined in C; upstream relies on the usual
   two's-complement behavior.  */
1818 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1823 return (!(((offsetT) -1 << 31) & num)
1824 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1826 } /* fits_in_signed_long() */
/* Return non-zero if NUM fits in an unsigned 32 bit value; the mask
   ((offsetT) 2 << 31) - 1 is 0xffffffff without overflowing a 64-bit
   offsetT.  */
1829 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1834 return (num & (((offsetT) 2 << 31) - 1)) == num;
1836 } /* fits_in_unsigned_long() */
1839 fits_in_imm4 (offsetT num)
1841 return (num & 0xf) == num;
/* Build the set of immediate operand types that can represent NUM,
   starting from Imm64 (always valid) and widening the set as NUM
   proves to fit in smaller encodings.  (Elided listing: braces and
   the final return are missing.)  */
1844 static i386_operand_type
1845 smallest_imm_type (offsetT num)
1847 i386_operand_type t;
1849 operand_type_set (&t, 0);
1850 t.bitfield.imm64 = 1;
/* Imm1 is deliberately suppressed when tuning for i486.  */
1852 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1854 /* This code is disabled on the 486 because all the Imm1 forms
1855 in the opcode table are slower on the i486. They're the
1856 versions with the implicitly specified single-position
1857 displacement, which has another syntax if you really want to
1859 t.bitfield.imm1 = 1;
1860 t.bitfield.imm8 = 1;
1861 t.bitfield.imm8s = 1;
1862 t.bitfield.imm16 = 1;
1863 t.bitfield.imm32 = 1;
1864 t.bitfield.imm32s = 1;
/* -128..127: everything from signed byte upward.  */
1866 else if (fits_in_signed_byte (num))
1868 t.bitfield.imm8 = 1;
1869 t.bitfield.imm8s = 1;
1870 t.bitfield.imm16 = 1;
1871 t.bitfield.imm32 = 1;
1872 t.bitfield.imm32s = 1;
/* 128..255: unsigned byte, but not Imm8S.  */
1874 else if (fits_in_unsigned_byte (num))
1876 t.bitfield.imm8 = 1;
1877 t.bitfield.imm16 = 1;
1878 t.bitfield.imm32 = 1;
1879 t.bitfield.imm32s = 1;
/* Any 16-bit representation, signed or unsigned.  */
1881 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1883 t.bitfield.imm16 = 1;
1884 t.bitfield.imm32 = 1;
1885 t.bitfield.imm32s = 1;
1887 else if (fits_in_signed_long (num))
1889 t.bitfield.imm32 = 1;
1890 t.bitfield.imm32s = 1;
/* Fits unsigned 32-bit only: Imm32 but not Imm32S.  */
1892 else if (fits_in_unsigned_long (num))
1893 t.bitfield.imm32 = 1;
/* Clamp VAL to SIZE bytes, warning when significant bits are lost.
   (Elided listing: the switch scaffolding, the BFD64 conditionals and
   the final return of the masked value are missing.)  */
1899 offset_in_range (offsetT val, int size)
/* Mask of the low SIZE*8 bits; `2 << 31' / `2 << 63' avoid shifting
   1 by the full type width.  */
1905 case 1: mask = ((addressT) 1 << 8) - 1; break;
1906 case 2: mask = ((addressT) 1 << 16) - 1; break;
1907 case 4: mask = ((addressT) 2 << 31) - 1; break;
1909 case 8: mask = ((addressT) 2 << 63) - 1; break;
1915 /* If BFD64, sign extend val for 32bit address mode. */
1916 if (flag_code != CODE_64BIT
1917 || i.prefix[ADDR_PREFIX])
1918 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
/* xor/subtract idiom: sign-extend bit 31 into the upper half.  */
1919 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
/* Warn when VAL is neither zero- nor sign-extendable from SIZE.  */
1922 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1924 char buf1[40], buf2[40];
1926 sprint_value (buf1, val);
1927 sprint_value (buf2, val & mask);
1928 as_warn (_("%s shortened to %s"), buf1, buf2);
/* (Continuation of the add_prefix contract comment; earlier lines of
   it are elided from this listing.)  */
1942 a. PREFIX_EXIST if attempting to add a prefix where one from the
1943 same class already exists.
1944 b. PREFIX_LOCK if lock prefix is added.
1945 c. PREFIX_REP if rep/repne prefix is added.
1946 d. PREFIX_OTHER if other prefix is added.
/* Record PREFIX in the per-insn prefix slots (i.prefix[]), classifying
   it into its slot q via the elided switch; returns the PREFIX_GROUP
   described above or an error on duplicates.  */
1949 static enum PREFIX_GROUP
1950 add_prefix (unsigned int prefix)
1952 enum PREFIX_GROUP ret = PREFIX_OTHER;
/* REX prefixes (0x40..0x4f) are special: they may be merged unless
   the same W bit or an R/X/B bit is being set twice.  */
1955 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1956 && flag_code == CODE_64BIT)
1958 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1959 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1960 && (prefix & (REX_R | REX_X | REX_B))))
/* Segment overrides share one slot; rep/repne, lock, addr and data
   prefixes each have their own (slot assignments elided).  */
1971 case CS_PREFIX_OPCODE:
1972 case DS_PREFIX_OPCODE:
1973 case ES_PREFIX_OPCODE:
1974 case FS_PREFIX_OPCODE:
1975 case GS_PREFIX_OPCODE:
1976 case SS_PREFIX_OPCODE:
1980 case REPNE_PREFIX_OPCODE:
1981 case REPE_PREFIX_OPCODE:
1986 case LOCK_PREFIX_OPCODE:
1995 case ADDR_PREFIX_OPCODE:
1999 case DATA_PREFIX_OPCODE:
/* A non-REX slot that is already occupied is a duplicate prefix.  */
2003 if (i.prefix[q] != 0)
2011 i.prefix[q] |= prefix;
2014 as_bad (_("same type of prefix used twice"));
/* Switch the assembler between 16/32/64-bit code generation.  CHECK
   selects whether an unsupported mode is a warning or fatal error
   (the branch choosing as_error is partially elided).  */
2020 update_code_flag (int value, int check)
2022 PRINTF_LIKE ((*as_error));
2024 flag_code = (enum flag_code) value;
2025 if (flag_code == CODE_64BIT)
2027 cpu_arch_flags.bitfield.cpu64 = 1;
2028 cpu_arch_flags.bitfield.cpuno64 = 0;
2032 cpu_arch_flags.bitfield.cpu64 = 0;
2033 cpu_arch_flags.bitfield.cpuno64 = 1;
/* Reject 64-bit mode on CPUs without long mode support.  */
2035 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
2038 as_error = as_fatal;
2041 (*as_error) (_("64bit mode not supported on `%s'."),
2042 cpu_arch_name ? cpu_arch_name : default_arch);
/* 32-bit mode needs at least an i386.  */
2044 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2047 as_error = as_fatal;
2050 (*as_error) (_("32bit mode not supported on `%s'."),
2051 cpu_arch_name ? cpu_arch_name : default_arch);
2053 stackop_size = '\0';
/* Non-checking wrapper used by the .code16/.code32/.code64 handlers.  */
2057 set_code_flag (int value)
2059 update_code_flag (value, 0);
/* .code16gcc: 16-bit code but 32-bit stack operations, for gcc's
   -m16-ish output; hence stackop_size gets the 'l' suffix.  */
2063 set_16bit_gcc_code_flag (int new_code_flag)
2065 flag_code = (enum flag_code) new_code_flag;
2066 if (flag_code != CODE_16BIT)
2068 cpu_arch_flags.bitfield.cpu64 = 0;
2069 cpu_arch_flags.bitfield.cpuno64 = 1;
2070 stackop_size = LONG_MNEM_SUFFIX;
/* Handler for .intel_syntax / .att_syntax; parses an optional
   "prefix"/"noprefix" argument and reconfigures the lexer tables.
   (Elided listing: some assignments are missing.)  */
2074 set_intel_syntax (int syntax_flag)
2076 /* Find out if register prefixing is specified. */
2077 int ask_naked_reg = 0;
2080 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2082 char *string = input_line_pointer;
/* get_symbol_end NUL-terminates the word and returns the old char,
   restored below.  */
2083 int e = get_symbol_end ();
2085 if (strcmp (string, "prefix") == 0)
2087 else if (strcmp (string, "noprefix") == 0)
2090 as_bad (_("bad argument to syntax directive."));
2091 *input_line_pointer = e;
2093 demand_empty_rest_of_line ();
2095 intel_syntax = syntax_flag;
/* Default: naked registers allowed in Intel syntax when symbols have
   no leading char to disambiguate them.  */
2097 if (ask_naked_reg == 0)
2098 allow_naked_reg = (intel_syntax
2099 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2101 allow_naked_reg = (ask_naked_reg < 0);
2103 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2105 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2106 identifier_chars['$'] = intel_syntax ? '$' : 0;
2107 register_prefix = allow_naked_reg ? "" : "%";
/* .intel_mnemonic / .att_mnemonic handler.  */
2111 set_intel_mnemonic (int mnemonic_flag)
2113 intel_mnemonic = mnemonic_flag;
/* .allow_index_reg / .disallow_index_reg handler.  */
2117 set_allow_index_reg (int flag)
2119 allow_index_reg = flag;
/* Shared handler for the .operand_check and .sse_check directives;
   WHAT selects which setting to modify.  Accepts none/warning/error.
   (Elided listing: the switch scaffolding and `str' setup are
   missing.)  */
2123 set_check (int what)
2125 enum check_kind *kind;
2130 kind = &operand_check;
2141 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2143 char *string = input_line_pointer;
2144 int e = get_symbol_end ();
2146 if (strcmp (string, "none") == 0)
2148 else if (strcmp (string, "warning") == 0)
2149 *kind = check_warning;
2150 else if (strcmp (string, "error") == 0)
2151 *kind = check_error;
2153 as_bad (_("bad argument to %s_check directive."), str);
2154 *input_line_pointer = e;
2157 as_bad (_("missing argument for %s_check directive"), str);
2159 demand_empty_rest_of_line ();
/* Reject .arch/-march combinations that are invalid for the selected
   object format: the L1OM/K1OM targets exist only on ELF and must be
   explicitly enabled when the output machine demands them.  */
2163 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2164 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2166 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2167 static const char *arch;
2169 /* Intel LIOM is only supported on ELF. */
2175 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2176 use default_arch. */
2177 arch = cpu_arch_name;
2179 arch = default_arch;
2182 /* If we are targeting Intel L1OM, we must enable it. */
2183 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2184 || new_flag.bitfield.cpul1om)
2187 /* If we are targeting Intel K1OM, we must enable it. */
2188 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2189 || new_flag.bitfield.cpuk1om)
2192 as_bad (_("`%s' is not supported on `%s'"), name, arch);
/* Handler for the .arch directive: look the argument up in the
   cpu_arch[] table, switch the active architecture or toggle a
   sub-architecture extension, then parse an optional ",jumps" /
   ",nojumps" modifier.  (Elided listing: braces and a few statements
   are missing.)  */
2197 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2201 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2203 char *string = input_line_pointer;
2204 int e = get_symbol_end ();
2206 i386_cpu_flags flags;
2208 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2210 if (strcmp (string, cpu_arch[j].name) == 0)
2212 check_cpu_arch_compatible (string, cpu_arch[j].flags);
/* Full architecture entry: replace the base arch wholesale,
   preserving the current 64-bit/no-64-bit setting.  */
2216 cpu_arch_name = cpu_arch[j].name;
2217 cpu_sub_arch_name = NULL;
2218 cpu_arch_flags = cpu_arch[j].flags;
2219 if (flag_code == CODE_64BIT)
2221 cpu_arch_flags.bitfield.cpu64 = 1;
2222 cpu_arch_flags.bitfield.cpuno64 = 0;
2226 cpu_arch_flags.bitfield.cpu64 = 0;
2227 cpu_arch_flags.bitfield.cpuno64 = 1;
2229 cpu_arch_isa = cpu_arch[j].type;
2230 cpu_arch_isa_flags = cpu_arch[j].flags;
/* -mtune= overrides this; otherwise tuning follows the arch.  */
2231 if (!cpu_arch_tune_set)
2233 cpu_arch_tune = cpu_arch_isa;
2234 cpu_arch_tune_flags = cpu_arch_isa_flags;
/* Sub-architecture entry (e.g. .sse4, .nosse): OR in or mask out
   just that extension's flags.  */
2239 if (!cpu_arch[j].negated)
2240 flags = cpu_flags_or (cpu_arch_flags,
2243 flags = cpu_flags_and_not (cpu_arch_flags,
/* Only record a sub-arch name when the flags actually changed.  */
2245 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2247 if (cpu_sub_arch_name)
2249 char *name = cpu_sub_arch_name;
2250 cpu_sub_arch_name = concat (name,
2252 (const char *) NULL);
2256 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2257 cpu_arch_flags = flags;
2258 cpu_arch_isa_flags = flags;
2260 *input_line_pointer = e;
2261 demand_empty_rest_of_line ();
2265 if (j >= ARRAY_SIZE (cpu_arch))
2266 as_bad (_("no such architecture: `%s'"), string);
2268 *input_line_pointer = e;
2271 as_bad (_("missing cpu architecture"));
2273 no_cond_jump_promotion = 0;
2274 if (*input_line_pointer == ','
2275 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2277 char *string = ++input_line_pointer;
2278 int e = get_symbol_end ();
2280 if (strcmp (string, "nojumps") == 0)
2281 no_cond_jump_promotion = 1;
2282 else if (strcmp (string, "jumps") == 0)
2285 as_bad (_("no such architecture modifier: `%s'"), string);
2287 *input_line_pointer = e;
2290 demand_empty_rest_of_line ();
/* Return the BFD architecture for the output file; L1OM/K1OM are only
   valid as 64-bit ELF, everything else maps to bfd_arch_i386.
   (Elided listing: the function name line is missing here.)  */
2293 enum bfd_architecture
2296 if (cpu_arch_isa == PROCESSOR_L1OM)
2298 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2299 || flag_code != CODE_64BIT)
2300 as_fatal (_("Intel L1OM is 64bit ELF only"))
2301 return bfd_arch_l1om;
2303 else if (cpu_arch_isa == PROCESSOR_K1OM)
2305 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2306 || flag_code != CODE_64BIT)
2307 as_fatal (_("Intel K1OM is 64bit ELF only"));
2308 return bfd_arch_k1om;
2311 return bfd_arch_i386;
/* Map default_arch ("x86_64", "x86_64<suffix>" for x32, or "i386") to
   the corresponding BFD machine number, with the same L1OM/K1OM ELF
   restrictions.  */
2317 if (!strncmp (default_arch, "x86_64", 6))
2319 if (cpu_arch_isa == PROCESSOR_L1OM)
2321 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2322 || default_arch[6] != '\0')
2323 as_fatal (_("Intel L1OM is 64bit ELF only"));
2324 return bfd_mach_l1om;
2326 else if (cpu_arch_isa == PROCESSOR_K1OM)
2328 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2329 || default_arch[6] != '\0')
2330 as_fatal (_("Intel K1OM is 64bit ELF only"));
2331 return bfd_mach_k1om;
/* Plain "x86_64" vs. "x86_64:32" style x32 suffix.  */
2333 else if (default_arch[6] == '\0')
2334 return bfd_mach_x86_64;
2336 return bfd_mach_x64_32;
2338 else if (!strcmp (default_arch, "i386"))
2339 return bfd_mach_i386_i386;
2341 as_fatal (_("unknown architecture"));
/* One-time target initialization (gas calls md_begin before assembling):
   build the opcode and register hash tables, fill the lexer character
   tables, set section alignments and DWARF/CFI parameters.  (Elided
   listing: the md_begin signature line itself and various braces are
   missing.)  */
2347 const char *hash_err;
2349 /* Initialize op_hash hash table. */
2350 op_hash = hash_new ();
2353 const insn_template *optab;
2354 templates *core_optab;
2356 /* Setup for loop. */
2358 core_optab = (templates *) xmalloc (sizeof (templates));
2359 core_optab->start = optab;
/* Templates with the same mnemonic are contiguous in i386_optab; each
   run becomes one hash entry.  */
2364 if (optab->name == NULL
2365 || strcmp (optab->name, (optab - 1)->name) != 0)
2367 /* different name --> ship out current template list;
2368 add to hash table; & begin anew. */
2369 core_optab->end = optab;
2370 hash_err = hash_insert (op_hash,
2372 (void *) core_optab);
2375 as_fatal (_("can't hash %s: %s"),
2379 if (optab->name == NULL)
2381 core_optab = (templates *) xmalloc (sizeof (templates));
2382 core_optab->start = optab;
2387 /* Initialize reg_hash hash table. */
2388 reg_hash = hash_new ();
2390 const reg_entry *regtab;
2391 unsigned int regtab_size = i386_regtab_size;
2393 for (regtab = i386_regtab; regtab_size--; regtab++)
2395 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2397 as_fatal (_("can't hash %s: %s"),
2403 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2408 for (c = 0; c < 256; c++)
/* The elided condition above this branch presumably tests ISDIGIT --
   TODO confirm; digits are legal in mnemonics, registers, operands.  */
2413 mnemonic_chars[c] = c;
2414 register_chars[c] = c;
2415 operand_chars[c] = c;
2417 else if (ISLOWER (c))
2419 mnemonic_chars[c] = c;
2420 register_chars[c] = c;
2421 operand_chars[c] = c;
/* Upper-case letters are folded to lower case for mnemonic and
   register lookup but kept as-is in operands.  */
2423 else if (ISUPPER (c))
2425 mnemonic_chars[c] = TOLOWER (c);
2426 register_chars[c] = mnemonic_chars[c];
2427 operand_chars[c] = c;
2430 if (ISALPHA (c) || ISDIGIT (c))
2431 identifier_chars[c] = c;
2434 identifier_chars[c] = c;
2435 operand_chars[c] = c;
2440 identifier_chars['@'] = '@';
2443 identifier_chars['?'] = '?';
2444 operand_chars['?'] = '?';
2446 digit_chars['-'] = '-';
2447 mnemonic_chars['_'] = '_';
2448 mnemonic_chars['-'] = '-';
2449 mnemonic_chars['.'] = '.';
2450 identifier_chars['_'] = '_';
2451 identifier_chars['.'] = '.';
2453 for (p = operand_special_chars; *p != '\0'; p++)
2454 operand_chars[(unsigned char) *p] = *p;
2457 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* 4-byte minimum alignment for the standard ELF sections.  */
2460 record_alignment (text_section, 2);
2461 record_alignment (data_section, 2);
2462 record_alignment (bss_section, 2);
2466 if (flag_code == CODE_64BIT)
2468 #if defined (OBJ_COFF) && defined (TE_PE)
2469 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2472 x86_dwarf2_return_column = 16;
2474 x86_cie_data_alignment = -8;
2478 x86_dwarf2_return_column = 8;
2479 x86_cie_data_alignment = -4;
/* Dump hash-table statistics (gas --statistics support).  */
2484 i386_print_statistics (FILE *file)
2486 hash_print_statistics (file, "i386 opcode", op_hash);
2487 hash_print_statistics (file, "i386 register", reg_hash);
2492 /* Debugging routines for md_assemble. */
/* Forward declarations for the DEBUG386-only dump helpers below.  */
2493 static void pte (insn_template *);
2494 static void pt (i386_operand_type);
2495 static void pe (expressionS *);
2496 static void ps (symbolS *);
/* Dump the whole parsed instruction X to stdout: addressing info,
   ModRM, SIB, REX bits and every operand.  Debug-build only.  */
2499 pi (char *line, i386_insn *x)
2503 fprintf (stdout, "%s: template ", line);
2505 fprintf (stdout, " address: base %s index %s scale %x\n",
2506 x->base_reg ? x->base_reg->reg_name : "none",
2507 x->index_reg ? x->index_reg->reg_name : "none",
2508 x->log2_scale_factor);
2509 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2510 x->rm.mode, x->rm.reg, x->rm.regmem);
2511 fprintf (stdout, " sib: base %x index %x scale %x\n",
2512 x->sib.base, x->sib.index, x->sib.scale);
2513 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2514 (x->rex & REX_W) != 0,
2515 (x->rex & REX_R) != 0,
2516 (x->rex & REX_X) != 0,
2517 (x->rex & REX_B) != 0);
2518 for (j = 0; j < x->operands; j++)
2520 fprintf (stdout, " #%d: ", j + 1);
2522 fprintf (stdout, "\n");
/* Register operands print their name; immediates and displacements
   print their expressions.  */
2523 if (x->types[j].bitfield.reg8
2524 || x->types[j].bitfield.reg16
2525 || x->types[j].bitfield.reg32
2526 || x->types[j].bitfield.reg64
2527 || x->types[j].bitfield.regmmx
2528 || x->types[j].bitfield.regxmm
2529 || x->types[j].bitfield.regymm
2530 || x->types[j].bitfield.sreg2
2531 || x->types[j].bitfield.sreg3
2532 || x->types[j].bitfield.control
2533 || x->types[j].bitfield.debug
2534 || x->types[j].bitfield.test)
2535 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2536 if (operand_type_check (x->types[j], imm))
2538 if (operand_type_check (x->types[j], disp))
2539 pe (x->op[j].disps);
/* Dump one insn template T: opcode, modifiers, operand types.  */
2544 pte (insn_template *t)
2547 fprintf (stdout, " %d operands ", t->operands);
2548 fprintf (stdout, "opcode %x ", t->base_opcode);
2549 if (t->extension_opcode != None)
2550 fprintf (stdout, "ext %x ", t->extension_opcode);
2551 if (t->opcode_modifier.d)
2552 fprintf (stdout, "D");
2553 if (t->opcode_modifier.w)
2554 fprintf (stdout, "W");
2555 fprintf (stdout, "\n");
2556 for (j = 0; j < t->operands; j++)
2558 fprintf (stdout, " #%d type ", j + 1);
2559 pt (t->operand_types[j]);
2560 fprintf (stdout, "\n");
/* Dump expression E (the pe signature line is elided).  */
2567 fprintf (stdout, " operation %d\n", e->X_op);
2568 fprintf (stdout, " add_number %ld (%lx)\n",
2569 (long) e->X_add_number, (long) e->X_add_number);
2570 if (e->X_add_symbol)
2572 fprintf (stdout, " add_symbol ");
2573 ps (e->X_add_symbol);
2574 fprintf (stdout, "\n");
2578 fprintf (stdout, " op_symbol ");
2579 ps (e->X_op_symbol);
2580 fprintf (stdout, "\n");
/* Dump symbol S (the ps signature line is elided).  */
2587 fprintf (stdout, "%s type %s%s",
2589 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2590 segment_name (S_GET_SEGMENT (s)));
/* Table mapping operand-type masks to human-readable names, used only
   by pt below.  */
2593 static struct type_name
2595 i386_operand_type mask;
2598 const type_names[] =
2600 { OPERAND_TYPE_REG8, "r8" },
2601 { OPERAND_TYPE_REG16, "r16" },
2602 { OPERAND_TYPE_REG32, "r32" },
2603 { OPERAND_TYPE_REG64, "r64" },
2604 { OPERAND_TYPE_IMM8, "i8" },
2605 { OPERAND_TYPE_IMM8, "i8s" },
2606 { OPERAND_TYPE_IMM16, "i16" },
2607 { OPERAND_TYPE_IMM32, "i32" },
2608 { OPERAND_TYPE_IMM32S, "i32s" },
2609 { OPERAND_TYPE_IMM64, "i64" },
2610 { OPERAND_TYPE_IMM1, "i1" },
2611 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2612 { OPERAND_TYPE_DISP8, "d8" },
2613 { OPERAND_TYPE_DISP16, "d16" },
2614 { OPERAND_TYPE_DISP32, "d32" },
2615 { OPERAND_TYPE_DISP32S, "d32s" },
2616 { OPERAND_TYPE_DISP64, "d64" },
2617 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2618 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2619 { OPERAND_TYPE_CONTROL, "control reg" },
2620 { OPERAND_TYPE_TEST, "test reg" },
2621 { OPERAND_TYPE_DEBUG, "debug reg" },
2622 { OPERAND_TYPE_FLOATREG, "FReg" },
2623 { OPERAND_TYPE_FLOATACC, "FAcc" },
2624 { OPERAND_TYPE_SREG2, "SReg2" },
2625 { OPERAND_TYPE_SREG3, "SReg3" },
2626 { OPERAND_TYPE_ACC, "Acc" },
2627 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2628 { OPERAND_TYPE_REGMMX, "rMMX" },
2629 { OPERAND_TYPE_REGXMM, "rXMM" },
2630 { OPERAND_TYPE_REGYMM, "rYMM" },
2631 { OPERAND_TYPE_ESSEG, "es" },
/* Print every type_names entry whose mask intersects T.  */
2635 pt (i386_operand_type t)
2638 i386_operand_type a;
2640 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2642 a = operand_type_and (t, type_names[j].mask);
2643 if (!operand_type_all_zero (&a))
2644 fprintf (stdout, "%s, ", type_names[j].name);
2649 #endif /* DEBUG386 */
/* Select the BFD relocation for a fixup of SIZE bytes: honor an
   explicitly requested relocation OTHER (possibly widening it to its
   64-bit counterpart), validate it against SIZE and pc-relativity,
   else fall back to the generic 8/16/32/64 (pcrel or absolute)
   relocations.  (Elided listing: the pcrel/sign parameter lines and
   several returns are missing.)  */
2651 static bfd_reloc_code_real_type
2652 reloc (unsigned int size,
2655 bfd_reloc_code_real_type other)
2657 if (other != NO_RELOC)
2659 reloc_howto_type *rel;
/* 8-byte fields upgrade the 32-bit x86-64 relocations to their
   64-bit forms where one exists.  */
2664 case BFD_RELOC_X86_64_GOT32:
2665 return BFD_RELOC_X86_64_GOT64;
2667 case BFD_RELOC_X86_64_PLTOFF64:
2668 return BFD_RELOC_X86_64_PLTOFF64;
2670 case BFD_RELOC_X86_64_GOTPC32:
2671 other = BFD_RELOC_X86_64_GOTPC64;
2673 case BFD_RELOC_X86_64_GOTPCREL:
2674 other = BFD_RELOC_X86_64_GOTPCREL64;
2676 case BFD_RELOC_X86_64_TPOFF32:
2677 other = BFD_RELOC_X86_64_TPOFF64;
2679 case BFD_RELOC_X86_64_DTPOFF32:
2680 other = BFD_RELOC_X86_64_DTPOFF64;
2686 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2687 if (other == BFD_RELOC_SIZE32)
2690 return BFD_RELOC_SIZE64;
2692 as_bad (_("there are no pc-relative size relocations"));
2696 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2697 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
/* Validate the requested relocation against the fixup's size,
   pc-relativity and signedness before accepting it.  */
2700 rel = bfd_reloc_type_lookup (stdoutput, other);
2702 as_bad (_("unknown relocation (%u)"), other);
2703 else if (size != bfd_get_reloc_size (rel))
2704 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2705 bfd_get_reloc_size (rel),
2707 else if (pcrel && !rel->pc_relative)
2708 as_bad (_("non-pc-relative relocation for pc-relative field"));
2709 else if ((rel->complain_on_overflow == complain_overflow_signed
2711 || (rel->complain_on_overflow == complain_overflow_unsigned
2713 as_bad (_("relocated field and relocation type differ in signedness"));
/* Generic fallbacks: pcrel relocations by size...  */
2722 as_bad (_("there are no unsigned pc-relative relocations"));
2725 case 1: return BFD_RELOC_8_PCREL;
2726 case 2: return BFD_RELOC_16_PCREL;
2727 case 4: return BFD_RELOC_32_PCREL;
2728 case 8: return BFD_RELOC_64_PCREL;
2730 as_bad (_("cannot do %u byte pc-relative relocation"), size);
/* ...signed 32-bit in 64-bit code...  */
2737 case 4: return BFD_RELOC_X86_64_32S;
/* ...and plain absolute relocations by size.  */
2742 case 1: return BFD_RELOC_8;
2743 case 2: return BFD_RELOC_16;
2744 case 4: return BFD_RELOC_32;
2745 case 8: return BFD_RELOC_64;
2747 as_bad (_("cannot do %s %u byte relocation"),
2748 sign > 0 ? "signed" : "unsigned", size);
2754 /* Here we decide which fixups can be adjusted to make them relative to
2755 the beginning of the section instead of the symbol. Basically we need
2756 to make sure that the dynamic relocations are done correctly, so in
2757 some cases we force the original symbol to be used. */
/* Returns nonzero when the fixup MAY be redirected to a section
   symbol; zero when the original symbol must be preserved.
   NOTE(review): elided listing — the return statements themselves are
   on missing lines, so the exact return values are inferred from the
   surrounding conditions.  */
2760 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2762 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2766 /* Don't adjust pc-relative references to merge sections in 64-bit
2768 if (use_rela_relocations
2769 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2773 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2774 and changed later by validate_fix. */
2775 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2776 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2779 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
2780 for size relocations. */
/* Exhaustive list of GOT-, PLT-, TLS- and vtable-related relocation
   types for which the original symbol is kept: these need the real
   symbol for correct dynamic relocation processing.  */
2781 if (fixP->fx_r_type == BFD_RELOC_SIZE32
2782 || fixP->fx_r_type == BFD_RELOC_SIZE64
2783 || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2784 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2785 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2786 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2787 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2788 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2789 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2790 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2791 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2792 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2793 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2794 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2795 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2796 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2797 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2798 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2799 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2800 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2801 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2802 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2803 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2804 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2805 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2806 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2807 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2808 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2809 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2810 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
/* Classify an x87 mnemonic for Intel-syntax operand handling.
   Visible return values: 0 for non-math / non-x87 ops (including
   fxsave/fxrstor), 2 for integer ops, 3 for control ops
   (fldcw/fldenv, fstcw/fstenv, fsave, frstor, non-waiting control
   ops).  NOTE(review): elided listing — the default/fp return path
   (presumably 1 for plain floating-point ops, given the "== 1" test
   in parse_insn below) is on missing lines; confirm against the full
   source.  */
2817 intel_float_operand (const char *mnemonic)
2819 /* Note that the value returned is meaningful only for opcodes with (memory)
2820 operands, hence the code here is free to improperly handle opcodes that
2821 have no operands (for better performance and smaller code). */
2823 if (mnemonic[0] != 'f')
2824 return 0; /* non-math */
2826 switch (mnemonic[1])
2828 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2829 the fs segment override prefix not currently handled because no
2830 call path can make opcodes without operands get here */
2832 return 2 /* integer op */;
2834 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2835 return 3; /* fldcw/fldenv */
2838 if (mnemonic[2] != 'o' /* fnop */)
2839 return 3; /* non-waiting control op */
2842 if (mnemonic[2] == 's')
2843 return 3; /* frstor/frstpm */
2846 if (mnemonic[2] == 'a')
2847 return 3; /* fsave */
2848 if (mnemonic[2] == 't')
2850 switch (mnemonic[3])
2852 case 'c': /* fstcw */
2853 case 'd': /* fstdw */
2854 case 'e': /* fstenv */
2855 case 's': /* fsts[gw] */
2861 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2862 return 0; /* fxsave/fxrstor are not really math ops */
2869 /* Build the VEX prefix. */
/* Fill in i.vex.bytes[] for template T: choose the 2-byte (0xC5) form
   when the opcode map is VEX0F, VEX.W is not forced to 1 and no
   REX.{W,X,B} bits are needed; otherwise emit the 3-byte (0xC4/0x8F)
   form.  Also derives the implied SIMD prefix from the opcode's
   embedded prefix byte and may swap destination/source to enable the
   shorter encoding.  NOTE(review): elided listing — several
   assignments (implied_prefix cases, vexopcode map numbers `m') are
   on missing lines.  */
2872 build_vex_prefix (const insn_template *t)
2874 unsigned int register_specifier;
2875 unsigned int implied_prefix;
2876 unsigned int vector_length;
2878 /* Check register specifier. */
2879 if (i.vex.register_specifier)
2880 register_specifier = ~register_number (i.vex.register_specifier) & 0xf;
2882 register_specifier = 0xf;
2884 /* Use 2-byte VEX prefix by swappping destination and source
2887 && i.operands == i.reg_operands
2888 && i.tm.opcode_modifier.vexopcode == VEX0F
2889 && i.tm.opcode_modifier.s
/* Swap first and last operand (classic 3-operand xchg via temps).  */
2892 unsigned int xchg = i.operands - 1;
2893 union i386_op temp_op;
2894 i386_operand_type temp_type;
2896 temp_type = i.types[xchg];
2897 i.types[xchg] = i.types[0];
2898 i.types[0] = temp_type;
2899 temp_op = i.op[xchg];
2900 i.op[xchg] = i.op[0];
2903 gas_assert (i.rm.mode == 3);
2907 i.rm.regmem = i.rm.reg;
2910 /* Use the next insn. */
/* VEX.L: scalar insns honour the -mavxscalar setting; otherwise L=1
   exactly for 256-bit (VEX256) templates.  */
2914 if (i.tm.opcode_modifier.vex == VEXScalar)
2915 vector_length = avxscalar;
2917 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
/* Derive the implied SIMD prefix (pp bits) from the prefix byte
   embedded in the opcode.  */
2919 switch ((i.tm.base_opcode >> 8) & 0xff)
2924 case DATA_PREFIX_OPCODE:
2927 case REPE_PREFIX_OPCODE:
2930 case REPNE_PREFIX_OPCODE:
2937 /* Use 2-byte VEX prefix if possible. */
2938 if (i.tm.opcode_modifier.vexopcode == VEX0F
2939 && i.tm.opcode_modifier.vexw != VEXW1
2940 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2942 /* 2-byte VEX prefix. */
2946 i.vex.bytes[0] = 0xc5;
2948 /* Check the REX.R bit. */
2949 r = (i.rex & REX_R) ? 0 : 1;
2950 i.vex.bytes[1] = (r << 7
2951 | register_specifier << 3
2952 | vector_length << 2
2957 /* 3-byte VEX prefix. */
/* Pick the escape byte: 0xC4 for VEX maps, 0x8F for XOP maps.  */
2962 switch (i.tm.opcode_modifier.vexopcode)
2966 i.vex.bytes[0] = 0xc4;
2970 i.vex.bytes[0] = 0xc4;
2974 i.vex.bytes[0] = 0xc4;
2978 i.vex.bytes[0] = 0x8f;
2982 i.vex.bytes[0] = 0x8f;
2986 i.vex.bytes[0] = 0x8f;
2992 /* The high 3 bits of the second VEX byte are 1's compliment
2993 of RXB bits from REX. */
2994 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2996 /* Check the REX.W bit. */
2997 w = (i.rex & REX_W) ? 1 : 0;
2998 if (i.tm.opcode_modifier.vexw)
3003 if (i.tm.opcode_modifier.vexw == VEXW1)
3007 i.vex.bytes[2] = (w << 7
3008 | register_specifier << 3
3009 | vector_length << 2
/* Turn an opcode-extension byte (tm.extension_opcode) into a fake
   8-bit immediate operand, as used by AMD 3DNow!/SSE*/SVME/AVX insns
   that encode a suffix byte where an imm8 would sit.  For
   MONITOR/MWAIT-style insns with fixed register operands, first
   validate and drop those operands.  */
3015 process_immext (void)
3019 if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
3022 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3023 with an opcode suffix which is coded in the same place as an
3024 8-bit immediate field would be.
3025 Here we check those operands and remove them afterwards. */
/* Operand X of these insns must be register number X (e.g. %eax,
   %ecx, %edx in order).  */
3028 for (x = 0; x < i.operands; x++)
3029 if (register_number (i.op[x].regs) != x)
3030 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3031 register_prefix, i.op[x].regs->reg_name, x + 1,
3037 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3038 which is coded in the same place as an 8-bit immediate field
3039 would be. Here we fake an 8-bit immediate operand from the
3040 opcode suffix stored in tm.extension_opcode.
3042 AVX instructions also use this encoding, for some of
3043 3 argument instructions. */
3045 gas_assert (i.imm_operands == 0
3047 || (i.tm.opcode_modifier.vex
3048 && i.operands <= 4)));
/* Append the synthesized imm8 operand and clear the extension so it
   is not applied twice.  */
3050 exp = &im_expressions[i.imm_operands++];
3051 i.op[i.operands].imms = exp;
3052 i.types[i.operands] = imm8;
3054 exp->X_op = O_constant;
3055 exp->X_add_number = i.tm.extension_opcode;
3056 i.tm.extension_opcode = None;
/* NOTE(review): function head elided by the listing — presumably the
   body of check_hle().  Validates the pending xacquire/xrelease (HLE)
   prefix against the matched template: the insn must allow HLE, must
   carry a lock prefix where required, xacquire is rejected on
   release-only insns, and xrelease needs a memory destination.  */
3063 switch (i.tm.opcode_modifier.hleprefixok)
3068 as_bad (_("invalid instruction `%s' after `%s'"),
3069 i.tm.name, i.hle_prefix);
3072 if (i.prefix[LOCK_PREFIX])
3074 as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
3078 case HLEPrefixRelease:
3079 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3081 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3085 if (i.mem_operands == 0
3086 || !operand_type_check (i.types[i.operands - 1], anymem))
3088 as_bad (_("memory destination needed for instruction `%s'"
3089 " after `xrelease'"), i.tm.name);
3096 /* This is the guts of the machine-dependent assembler. LINE points to a
3097 machine dependent instruction. This function is supposed to emit
3098 the frags/bytes it assembles to. */
/* Pipeline (as visible in this elided listing): reset per-insn state
   `i' -> parse_insn -> parse_operands -> Intel-syntax operand
   reversal -> optimize_imm/optimize_disp -> match_template -> suffix
   and prefix checks (REP/LOCK/HLE, string segs) -> process_suffix /
   finalize_imm / process_operands -> VEX prefix, int3 fold-up, jump
   reloc setup, REX handling -> output.  */
3101 md_assemble (char *line)
3104 char mnemonic[MAX_MNEM_SIZE];
3105 const insn_template *t;
3107 /* Initialize globals. */
3108 memset (&i, '\0', sizeof (i));
3109 for (j = 0; j < MAX_OPERANDS; j++)
3110 i.reloc[j] = NO_RELOC;
3111 memset (disp_expressions, '\0', sizeof (disp_expressions));
3112 memset (im_expressions, '\0', sizeof (im_expressions));
3113 save_stack_p = save_stack;
3115 /* First parse an instruction mnemonic & call i386_operand for the operands.
3116 We assume that the scrubber has arranged it so that line[0] is the valid
3117 start of a (possibly prefixed) mnemonic. */
3119 line = parse_insn (line, mnemonic);
3123 line = parse_operands (line, mnemonic);
3128 /* Now we've parsed the mnemonic into a set of templates, and have the
3129 operands at hand. */
3131 /* All intel opcodes have reversed operands except for "bound" and
3132 "enter". We also don't reverse intersegment "jmp" and "call"
3133 instructions with 2 immediate operands so that the immediate segment
3134 precedes the offset, as it does when in AT&T mode. */
3137 && (strcmp (mnemonic, "bound") != 0)
3138 && (strcmp (mnemonic, "invlpga") != 0)
3139 && !(operand_type_check (i.types[0], imm)
3140 && operand_type_check (i.types[1], imm)))
3143 /* The order of the immediates should be reversed
3144 for 2 immediates extrq and insertq instructions */
3145 if (i.imm_operands == 2
3146 && (strcmp (mnemonic, "extrq") == 0
3147 || strcmp (mnemonic, "insertq") == 0))
3148 swap_2_operands (0, 1);
3153 /* Don't optimize displacement for movabs since it only takes 64bit
3156 && i.disp_encoding != disp_encoding_32bit
3157 && (flag_code != CODE_64BIT
3158 || strcmp (mnemonic, "movabs") != 0))
3161 /* Next, we find a template that matches the given insn,
3162 making sure the overlap of the given operands types is consistent
3163 with the template operand types. */
3165 if (!(t = match_template ()))
/* Optional diagnostic (-msse-check=) when a legacy SSE insn is used
   and the template is not marked NoAVX.  */
3168 if (sse_check != check_none
3169 && !i.tm.opcode_modifier.noavx
3170 && (i.tm.cpu_flags.bitfield.cpusse
3171 || i.tm.cpu_flags.bitfield.cpusse2
3172 || i.tm.cpu_flags.bitfield.cpusse3
3173 || i.tm.cpu_flags.bitfield.cpussse3
3174 || i.tm.cpu_flags.bitfield.cpusse4_1
3175 || i.tm.cpu_flags.bitfield.cpusse4_2))
3177 (sse_check == check_warning
3179 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3182 /* Zap movzx and movsx suffix. The suffix has been set from
3183 "word ptr" or "byte ptr" on the source operand in Intel syntax
3184 or extracted from mnemonic in AT&T syntax. But we'll use
3185 the destination register to choose the suffix for encoding. */
/* 0x0fb6/b7/be/bf are movzx/movsx; (base_opcode & ~9) folds all four
   to 0x0fb6.  */
3186 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3188 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3189 there is no suffix, the default will be byte extension. */
3190 if (i.reg_operands != 2
3193 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3198 if (i.tm.opcode_modifier.fwait)
3199 if (!add_prefix (FWAIT_OPCODE))
3202 /* Check if REP prefix is OK. */
3203 if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
3205 as_bad (_("invalid instruction `%s' after `%s'"),
3206 i.tm.name, i.rep_prefix);
3210 /* Check for lock without a lockable instruction. Destination operand
3211 must be memory unless it is xchg (0x86). */
3212 if (i.prefix[LOCK_PREFIX]
3213 && (!i.tm.opcode_modifier.islockable
3214 || i.mem_operands == 0
3215 || (i.tm.base_opcode != 0x86
3216 && !operand_type_check (i.types[i.operands - 1], anymem))))
3218 as_bad (_("expecting lockable instruction after `lock'"));
3222 /* Check if HLE prefix is OK. */
3223 if (i.hle_prefix && !check_hle ())
3226 /* Check string instruction segment overrides. */
3227 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3229 if (!check_string ())
3231 i.disp_operands = 0;
3234 if (!process_suffix ())
3237 /* Update operand types. */
3238 for (j = 0; j < i.operands; j++)
3239 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3241 /* Make still unresolved immediate matches conform to size of immediate
3242 given in i.suffix. */
3243 if (!finalize_imm ())
3246 if (i.types[0].bitfield.imm1)
3247 i.imm_operands = 0; /* kludge for shift insns. */
3249 /* We only need to check those implicit registers for instructions
3250 with 3 operands or less. */
3251 if (i.operands <= 3)
3252 for (j = 0; j < i.operands; j++)
3253 if (i.types[j].bitfield.inoutportreg
3254 || i.types[j].bitfield.shiftcount
3255 || i.types[j].bitfield.acc
3256 || i.types[j].bitfield.floatacc)
3259 /* ImmExt should be processed after SSE2AVX. */
3260 if (!i.tm.opcode_modifier.sse2avx
3261 && i.tm.opcode_modifier.immext)
3264 /* For insns with operands there are more diddles to do to the opcode. */
3267 if (!process_operands ())
3270 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3272 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3273 as_warn (_("translating to `%sp'"), i.tm.name);
3276 if (i.tm.opcode_modifier.vex)
3277 build_vex_prefix (t);
3279 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3280 instructions may define INT_OPCODE as well, so avoid this corner
3281 case for those instructions that use MODRM. */
3282 if (i.tm.base_opcode == INT_OPCODE
3283 && !i.tm.opcode_modifier.modrm
3284 && i.op[0].imms->X_add_number == 3)
3286 i.tm.base_opcode = INT3_OPCODE;
3290 if ((i.tm.opcode_modifier.jump
3291 || i.tm.opcode_modifier.jumpbyte
3292 || i.tm.opcode_modifier.jumpdword)
3293 && i.op[0].disps->X_op == O_constant)
3295 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3296 the absolute address given by the constant. Since ix86 jumps and
3297 calls are pc relative, we need to generate a reloc. */
3298 i.op[0].disps->X_add_symbol = &abs_symbol;
3299 i.op[0].disps->X_op = O_symbol;
3302 if (i.tm.opcode_modifier.rex64)
3305 /* For 8 bit registers we need an empty rex prefix. Also if the
3306 instruction already has a prefix, we need to convert old
3307 registers to new ones. */
3309 if ((i.types[0].bitfield.reg8
3310 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3311 || (i.types[1].bitfield.reg8
3312 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3313 || ((i.types[0].bitfield.reg8
3314 || i.types[1].bitfield.reg8)
3319 i.rex |= REX_OPCODE;
3320 for (x = 0; x < 2; x++)
3322 /* Look for 8 bit operand that uses old registers. */
3323 if (i.types[x].bitfield.reg8
3324 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3326 /* In case it is "hi" register, give up. */
3327 if (i.op[x].regs->reg_num > 3)
3328 as_bad (_("can't encode register '%s%s' in an "
3329 "instruction requiring REX prefix."),
3330 register_prefix, i.op[x].regs->reg_name)
3332 /* Otherwise it is equivalent to the extended register.
3333 Since the encoding doesn't change this is merely
3334 cosmetic cleanup for debug output. */
3336 i.op[x].regs = i.op[x].regs + 8;
3342 add_prefix (REX_OPCODE | i.rex);
3344 /* We are ready to output the insn. */
/* Scan the mnemonic (and any leading prefixes) at LINE into MNEMONIC,
   set current_templates to the matching template group, handle branch
   hints (",pt"/",pn"), pseudo-suffix trimming and architecture checks.
   Returns the updated scan pointer (NULL on error — the explicit
   return statements are on lines elided from this listing).  */
3349 parse_insn (char *line, char *mnemonic)
3352 char *token_start = l;
3355 const insn_template *t;
/* Copy mnemonic characters, translating through mnemonic_chars[]
   (which canonicalizes case and rejects invalid characters).  */
3361 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3366 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3368 as_bad (_("no such instruction: `%s'"), token_start);
3373 if (!is_space_char (*l)
3374 && *l != END_OF_INSN
3376 || (*l != PREFIX_SEPARATOR
3379 as_bad (_("invalid character %s in mnemonic"),
3380 output_invalid (*l));
3383 if (token_start == l)
3385 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3386 as_bad (_("expecting prefix; got nothing"));
3388 as_bad (_("expecting mnemonic; got nothing"));
3392 /* Look up instruction (or prefix) via hash table. */
3393 current_templates = (const templates *) hash_find (op_hash, mnemonic);
/* Token is a prefix (and more input follows): validate and record
   it, then loop back for the real mnemonic.  */
3395 if (*l != END_OF_INSN
3396 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3397 && current_templates
3398 && current_templates->start->opcode_modifier.isprefix)
3400 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3402 as_bad ((flag_code != CODE_64BIT
3403 ? _("`%s' is only supported in 64-bit mode")
3404 : _("`%s' is not supported in 64-bit mode")),
3405 current_templates->start->name);
3408 /* If we are in 16-bit mode, do not allow addr16 or data16.
3409 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3410 if ((current_templates->start->opcode_modifier.size16
3411 || current_templates->start->opcode_modifier.size32)
3412 && flag_code != CODE_64BIT
3413 && (current_templates->start->opcode_modifier.size32
3414 ^ (flag_code == CODE_16BIT)))
3416 as_bad (_("redundant %s prefix"),
3417 current_templates->start->name);
3420 /* Add prefix, checking for repeated prefixes. */
3421 switch (add_prefix (current_templates->start->base_opcode))
3426 if (current_templates->start->cpu_flags.bitfield.cpuhle)
3427 i.hle_prefix = current_templates->start->name;
3429 i.rep_prefix = current_templates->start->name;
3434 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3441 if (!current_templates)
3443 /* Check if we should swap operand or force 32bit displacement in
3445 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3447 else if (mnem_p - 3 == dot_p
3450 i.disp_encoding = disp_encoding_8bit;
3451 else if (mnem_p - 4 == dot_p
3455 i.disp_encoding = disp_encoding_32bit;
3460 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3463 if (!current_templates)
3466 /* See if we can get a match by trimming off a suffix. */
3469 case WORD_MNEM_SUFFIX:
3470 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3471 i.suffix = SHORT_MNEM_SUFFIX;
3473 case BYTE_MNEM_SUFFIX:
3474 case QWORD_MNEM_SUFFIX:
3475 i.suffix = mnem_p[-1];
3477 current_templates = (const templates *) hash_find (op_hash,
3480 case SHORT_MNEM_SUFFIX:
3481 case LONG_MNEM_SUFFIX:
3484 i.suffix = mnem_p[-1];
3486 current_templates = (const templates *) hash_find (op_hash,
3495 if (intel_float_operand (mnemonic) == 1)
3496 i.suffix = SHORT_MNEM_SUFFIX;
3498 i.suffix = LONG_MNEM_SUFFIX;
3500 current_templates = (const templates *) hash_find (op_hash,
3505 if (!current_templates)
3507 as_bad (_("no such instruction: `%s'"), token_start);
3512 if (current_templates->start->opcode_modifier.jump
3513 || current_templates->start->opcode_modifier.jumpbyte)
3515 /* Check for a branch hint. We allow ",pt" and ",pn" for
3516 predict taken and predict not taken respectively.
3517 I'm not sure that branch hints actually do anything on loop
3518 and jcxz insns (JumpByte) for current Pentium4 chips. They
3519 may work in the future and it doesn't hurt to accept them
3521 if (l[0] == ',' && l[1] == 'p')
3525 if (!add_prefix (DS_PREFIX_OPCODE))
3529 else if (l[2] == 'n')
3531 if (!add_prefix (CS_PREFIX_OPCODE))
3537 /* Any other comma loses. */
3540 as_bad (_("invalid character %s in mnemonic"),
3541 output_invalid (*l));
3545 /* Check if instruction is supported on specified architecture. */
3547 for (t = current_templates->start; t < current_templates->end; ++t)
3549 supported |= cpu_flags_match (t);
3550 if (supported == CPU_FLAGS_PERFECT_MATCH)
3554 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3556 as_bad (flag_code == CODE_64BIT
3557 ? _("`%s' is not supported in 64-bit mode")
3558 : _("`%s' is only supported in 64-bit mode"),
3559 current_templates->start->name);
3562 if (supported != CPU_FLAGS_PERFECT_MATCH)
3564 as_bad (_("`%s' is not supported on `%s%s'"),
3565 current_templates->start->name,
3566 cpu_arch_name ? cpu_arch_name : default_arch,
3567 cpu_sub_arch_name ? cpu_sub_arch_name : "")
3572 if (!cpu_arch_flags.bitfield.cpui386
3573 && (flag_code != CODE_16BIT))
3575 as_warn (_("use .code16 to ensure correct addressing mode"));
/* Split the operand list at L on commas (respecting nested
   parentheses/brackets) and hand each operand string to
   i386_intel_operand or i386_att_operand, filling in `i'.  Returns
   the updated scan pointer (NULL on error — the return statements are
   on lines elided from this listing).  */
3582 parse_operands (char *l, const char *mnemonic)
3586 /* 1 if operand is pending after ','. */
3587 unsigned int expecting_operand = 0;
3589 /* Non-zero if operand parens not balanced. */
3590 unsigned int paren_not_balanced;
3592 while (*l != END_OF_INSN)
3594 /* Skip optional white space before operand. */
3595 if (is_space_char (*l))
3597 if (!is_operand_char (*l) && *l != END_OF_INSN)
3599 as_bad (_("invalid character %s before operand %d"),
3600 output_invalid (*l),
3604 token_start = l; /* after white space */
3605 paren_not_balanced = 0;
/* Advance to the comma terminating this operand, tracking paren /
   bracket nesting so commas inside memory operands don't split.  */
3606 while (paren_not_balanced || *l != ',')
3608 if (*l == END_OF_INSN)
3610 if (paren_not_balanced)
3613 as_bad (_("unbalanced parenthesis in operand %d."),
3616 as_bad (_("unbalanced brackets in operand %d."),
3621 break; /* we are done */
3623 else if (!is_operand_char (*l) && !is_space_char (*l))
3625 as_bad (_("invalid character %s in operand %d"),
3626 output_invalid (*l),
3633 ++paren_not_balanced;
3635 --paren_not_balanced;
3640 ++paren_not_balanced;
3642 --paren_not_balanced;
3646 if (l != token_start)
3647 { /* Yes, we've read in another operand. */
3648 unsigned int operand_ok;
3649 this_operand = i.operands++;
3650 i.types[this_operand].bitfield.unspecified = 1;
3651 if (i.operands > MAX_OPERANDS)
3653 as_bad (_("spurious operands; (%d operands/instruction max)"),
3657 /* Now parse operand adding info to 'i' as we go along. */
3658 END_STRING_AND_SAVE (l);
3662 i386_intel_operand (token_start,
3663 intel_float_operand (mnemonic));
3665 operand_ok = i386_att_operand (token_start);
3667 RESTORE_END_STRING (l);
3673 if (expecting_operand)
3675 expecting_operand_after_comma:
3676 as_bad (_("expecting operand after ','; got nothing"));
3681 as_bad (_("expecting operand before ','; got nothing"));
3686 /* Now *l must be either ',' or END_OF_INSN. */
3689 if (*++l == END_OF_INSN)
3691 /* Just skip it, if it's \n complain. */
3692 goto expecting_operand_after_comma;
3694 expecting_operand = 1;
/* Exchange operands XCHG1 and XCHG2 of the current insn: swaps the
   parallel entries in i.types[], i.op[] and i.reloc[].  (Segment
   swapping for two-memory-operand insns is done separately in
   swap_operands below.)  */
3701 swap_2_operands (int xchg1, int xchg2)
3703 union i386_op temp_op;
3704 i386_operand_type temp_type;
3705 enum bfd_reloc_code_real temp_reloc;
3707 temp_type = i.types[xchg2];
3708 i.types[xchg2] = i.types[xchg1];
3709 i.types[xchg1] = temp_type;
3710 temp_op = i.op[xchg2];
3711 i.op[xchg2] = i.op[xchg1];
3712 i.op[xchg1] = temp_op;
3713 temp_reloc = i.reloc[xchg2];
3714 i.reloc[xchg2] = i.reloc[xchg1];
3715 i.reloc[xchg1] = temp_reloc;
/* Reverse the whole operand list of the current insn (outermost pairs
   swapped via swap_2_operands; the dispatch on i.operands is on lines
   elided from this listing).  When both operands are memory, also
   swap the recorded segment overrides.  */
3719 swap_operands (void)
3725 swap_2_operands (1, i.operands - 2);
3728 swap_2_operands (0, i.operands - 1);
3734 if (i.mem_operands == 2)
3736 const seg_entry *temp_seg;
3737 temp_seg = i.seg[0];
3738 i.seg[0] = i.seg[1];
3739 i.seg[1] = temp_seg;
3743 /* Try to ensure constant immediates are represented in the smallest
/* NOTE(review): function head elided by the listing — presumably
   optimize_imm().  Guesses an operand-size suffix from the register
   operands (or data-size prefix), then widens/narrows the allowed
   immediate-type bits of each immediate operand so template matching
   picks the smallest encoding; symbolic immediates are instead
   constrained by the union of all candidate templates' types.  */
3748 char guess_suffix = 0;
3752 guess_suffix = i.suffix;
3753 else if (i.reg_operands)
3755 /* Figure out a suffix from the last register operand specified.
3756 We can't do this properly yet, ie. excluding InOutPortReg,
3757 but the following works for instructions with immediates.
3758 In any case, we can't set i.suffix yet. */
3759 for (op = i.operands; --op >= 0;)
3760 if (i.types[op].bitfield.reg8)
3762 guess_suffix = BYTE_MNEM_SUFFIX;
3765 else if (i.types[op].bitfield.reg16)
3767 guess_suffix = WORD_MNEM_SUFFIX;
3770 else if (i.types[op].bitfield.reg32)
3772 guess_suffix = LONG_MNEM_SUFFIX;
3775 else if (i.types[op].bitfield.reg64)
3777 guess_suffix = QWORD_MNEM_SUFFIX;
3781 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3782 guess_suffix = WORD_MNEM_SUFFIX;
3784 for (op = i.operands; --op >= 0;)
3785 if (operand_type_check (i.types[op], imm))
3787 switch (i.op[op].imms->X_op)
3790 /* If a suffix is given, this operand may be shortened. */
/* Note the fallthrough structure: each wider suffix also enables
   the sizes of the narrower ones below it.  */
3791 switch (guess_suffix)
3793 case LONG_MNEM_SUFFIX:
3794 i.types[op].bitfield.imm32 = 1;
3795 i.types[op].bitfield.imm64 = 1;
3797 case WORD_MNEM_SUFFIX:
3798 i.types[op].bitfield.imm16 = 1;
3799 i.types[op].bitfield.imm32 = 1;
3800 i.types[op].bitfield.imm32s = 1;
3801 i.types[op].bitfield.imm64 = 1;
3803 case BYTE_MNEM_SUFFIX:
3804 i.types[op].bitfield.imm8 = 1;
3805 i.types[op].bitfield.imm8s = 1;
3806 i.types[op].bitfield.imm16 = 1;
3807 i.types[op].bitfield.imm32 = 1;
3808 i.types[op].bitfield.imm32s = 1;
3809 i.types[op].bitfield.imm64 = 1;
3813 /* If this operand is at most 16 bits, convert it
3814 to a signed 16 bit number before trying to see
3815 whether it will fit in an even smaller size.
3816 This allows a 16-bit operand such as $0xffe0 to
3817 be recognised as within Imm8S range. */
3818 if ((i.types[op].bitfield.imm16)
3819 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3821 i.op[op].imms->X_add_number =
3822 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3824 if ((i.types[op].bitfield.imm32)
3825 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3828 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3829 ^ ((offsetT) 1 << 31))
3830 - ((offsetT) 1 << 31));
3833 = operand_type_or (i.types[op],
3834 smallest_imm_type (i.op[op].imms->X_add_number));
3836 /* We must avoid matching of Imm32 templates when 64bit
3837 only immediate is available. */
3838 if (guess_suffix == QWORD_MNEM_SUFFIX)
3839 i.types[op].bitfield.imm32 = 0;
3846 /* Symbols and expressions. */
3848 /* Convert symbolic operand to proper sizes for matching, but don't
3849 prevent matching a set of insns that only supports sizes other
3850 than those matching the insn suffix. */
3852 i386_operand_type mask, allowed;
3853 const insn_template *t;
3855 operand_type_set (&mask, 0);
3856 operand_type_set (&allowed, 0);
3858 for (t = current_templates->start;
3859 t < current_templates->end;
3861 allowed = operand_type_or (allowed,
3862 t->operand_types[op]);
3863 switch (guess_suffix)
3865 case QWORD_MNEM_SUFFIX:
3866 mask.bitfield.imm64 = 1;
3867 mask.bitfield.imm32s = 1;
3869 case LONG_MNEM_SUFFIX:
3870 mask.bitfield.imm32 = 1;
3872 case WORD_MNEM_SUFFIX:
3873 mask.bitfield.imm16 = 1;
3875 case BYTE_MNEM_SUFFIX:
3876 mask.bitfield.imm8 = 1;
3881 allowed = operand_type_and (mask, allowed);
3882 if (!operand_type_all_zero (&allowed))
3883 i.types[op] = operand_type_and (i.types[op], mask);
3890 /* Try to use the smallest displacement type too. */
/* For each displacement operand: if it is a constant, sign-extend it
   from 16/32 bits where the templates permit, drop it entirely when
   zero with a base/index, and in 64-bit mode enable disp32s/disp8
   when the value fits.  TLS descriptor call relocs are emitted here
   as zero-size fixups and the displacement is suppressed.  */
3892 optimize_disp (void)
3896 for (op = i.operands; --op >= 0;)
3897 if (operand_type_check (i.types[op], disp))
3899 if (i.op[op].disps->X_op == O_constant)
3901 offsetT op_disp = i.op[op].disps->X_add_number;
3903 if (i.types[op].bitfield.disp16
3904 && (op_disp & ~(offsetT) 0xffff) == 0)
3906 /* If this operand is at most 16 bits, convert
3907 to a signed 16 bit number and don't use 64bit
3909 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3910 i.types[op].bitfield.disp64 = 0;
3912 if (i.types[op].bitfield.disp32
3913 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3915 /* If this operand is at most 32 bits, convert
3916 to a signed 32 bit number and don't use 64bit
3918 op_disp &= (((offsetT) 2 << 31) - 1);
3919 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3920 i.types[op].bitfield.disp64 = 0;
3922 if (!op_disp && i.types[op].bitfield.baseindex)
/* Zero displacement with a base/index register: drop the
   displacement altogether.  */
3924 i.types[op].bitfield.disp8 = 0;
3925 i.types[op].bitfield.disp16 = 0;
3926 i.types[op].bitfield.disp32 = 0;
3927 i.types[op].bitfield.disp32s = 0;
3928 i.types[op].bitfield.disp64 = 0;
3932 else if (flag_code == CODE_64BIT)
3934 if (fits_in_signed_long (op_disp))
3936 i.types[op].bitfield.disp64 = 0;
3937 i.types[op].bitfield.disp32s = 1;
3939 if (i.prefix[ADDR_PREFIX]
3940 && fits_in_unsigned_long (op_disp))
3941 i.types[op].bitfield.disp32 = 1;
3943 if ((i.types[op].bitfield.disp32
3944 || i.types[op].bitfield.disp32s
3945 || i.types[op].bitfield.disp16)
3946 && fits_in_signed_byte (op_disp))
3947 i.types[op].bitfield.disp8 = 1;
3949 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3950 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3952 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3953 i.op[op].disps, 0, i.reloc[op]);
3954 i.types[op].bitfield.disp8 = 0;
3955 i.types[op].bitfield.disp16 = 0;
3956 i.types[op].bitfield.disp32 = 0;
3957 i.types[op].bitfield.disp32s = 0;
3958 i.types[op].bitfield.disp64 = 0;
3961 /* We only support 64bit displacement on constants. */
3962 i.types[op].bitfield.disp64 = 0;
3966 /* Check if operands are valid for the instruction. */
/* Vector-operand constraints for template T: without VSIB a vector
   register must not be used as index; with VSIB the index must be an
   XMM (VecSIB128) or YMM (VecSIB256) register, and — subject to
   -moperand-check — the mask, index and destination registers must
   all be distinct.  Sets i.error and returns failure on violation
   (the return statements are on elided lines).  */
3969 check_VecOperands (const insn_template *t)
3971 /* Without VSIB byte, we can't have a vector register for index. */
3972 if (!t->opcode_modifier.vecsib
3974 && (i.index_reg->reg_type.bitfield.regxmm
3975 || i.index_reg->reg_type.bitfield.regymm))
3977 i.error = unsupported_vector_index_register;
3981 /* For VSIB byte, we need a vector register for index, and all vector
3982 registers must be distinct. */
3983 if (t->opcode_modifier.vecsib)
3986 || !((t->opcode_modifier.vecsib == VecSIB128
3987 && i.index_reg->reg_type.bitfield.regxmm)
3988 || (t->opcode_modifier.vecsib == VecSIB256
3989 && i.index_reg->reg_type.bitfield.regymm)))
3991 i.error = invalid_vsib_address;
3995 gas_assert (i.reg_operands == 2);
3996 gas_assert (i.types[0].bitfield.regxmm
3997 || i.types[0].bitfield.regymm);
3998 gas_assert (i.types[2].bitfield.regxmm
3999 || i.types[2].bitfield.regymm);
4001 if (operand_check == check_none)
4003 if (register_number (i.op[0].regs) != register_number (i.index_reg)
4004 && register_number (i.op[2].regs) != register_number (i.index_reg)
4005 && register_number (i.op[0].regs) != register_number (i.op[2].regs))
4007 if (operand_check == check_error)
4009 i.error = invalid_vector_register_set;
4012 as_warn (_("mask, index, and destination registers should be distinct"));
4018 /* Check if operands are valid for the instruction. Update VEX
/* For VEX templates whose first operand is Vec_Imm4 (XOP/FMA4-style
   register-select immediate), require a constant that fits in 4 bits
   and retype the operand so update_imm accepts it.  */
4022 VEX_check_operands (const insn_template *t)
4024 if (!t->opcode_modifier.vex)
4027 /* Only check VEX_Imm4, which must be the first operand. */
4028 if (t->operand_types[0].bitfield.vec_imm4)
4030 if (i.op[0].imms->X_op != O_constant
4031 || !fits_in_imm4 (i.op[0].imms->X_add_number))
4037 /* Turn off Imm8 so that update_imm won't complain. */
4038 i.types[0] = vec_imm4;
4044 static const insn_template *
4045 match_template (void)
/* Scan current_templates for the insn template that matches the
   instruction parsed into `i' (operand count, CPU flags, mnemonic
   suffix, operand types, optional operand reversal).  On failure the
   most specific error collected in i.error / specific_error is
   reported via as_bad and NULL is returned; on success the template
   is copied into i.tm and returned.  */
4047 /* Points to template once we've found it. */
4048 const insn_template *t;
4049 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4050 i386_operand_type overlap4;
4051 unsigned int found_reverse_match;
4052 i386_opcode_modifier suffix_check;
4053 i386_operand_type operand_types [MAX_OPERANDS];
4054 int addr_prefix_disp;
4056 unsigned int found_cpu_match;
4057 unsigned int check_register;
4058 enum i386_error specific_error = 0;
4060 #if MAX_OPERANDS != 5
4061 # error "MAX_OPERANDS must be 5."
4064 found_reverse_match = 0;
4065 addr_prefix_disp = -1;
/* suffix_check records which single no_?suf modifier bit would reject
   the user-supplied suffix; each template's modifier is tested against
   it below.  */
4067 memset (&suffix_check, 0, sizeof (suffix_check));
4068 if (i.suffix == BYTE_MNEM_SUFFIX)
4069 suffix_check.no_bsuf = 1;
4070 else if (i.suffix == WORD_MNEM_SUFFIX)
4071 suffix_check.no_wsuf = 1;
4072 else if (i.suffix == SHORT_MNEM_SUFFIX)
4073 suffix_check.no_ssuf = 1;
4074 else if (i.suffix == LONG_MNEM_SUFFIX)
4075 suffix_check.no_lsuf = 1;
4076 else if (i.suffix == QWORD_MNEM_SUFFIX)
4077 suffix_check.no_qsuf = 1;
4078 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4079 suffix_check.no_ldsuf = 1;
4081 /* Must have right number of operands. */
4082 i.error = number_of_operands_mismatch;
4084 for (t = current_templates->start; t < current_templates->end; t++)
4086 addr_prefix_disp = -1;
4088 if (i.operands != t->operands)
4091 /* Check processor support. */
4092 i.error = unsupported;
4093 found_cpu_match = (cpu_flags_match (t)
4094 == CPU_FLAGS_PERFECT_MATCH);
4095 if (!found_cpu_match)
4098 /* Check old gcc support. */
4099 i.error = old_gcc_only;
4100 if (!old_gcc && t->opcode_modifier.oldgcc)
4103 /* Check AT&T mnemonic. */
4104 i.error = unsupported_with_intel_mnemonic;
4105 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4108 /* Check AT&T/Intel syntax. */
4109 i.error = unsupported_syntax;
4110 if ((intel_syntax && t->opcode_modifier.attsyntax)
4111 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4114 /* Check the suffix, except for some instructions in intel mode. */
4115 i.error = invalid_instruction_suffix;
4116 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4117 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4118 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4119 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4120 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4121 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4122 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4125 if (!operand_size_match (t))
/* Work on a local copy of the template's operand types; the address
   size prefix handling below may rewrite Disp bits in it.  */
4128 for (j = 0; j < MAX_OPERANDS; j++)
4129 operand_types[j] = t->operand_types[j];
4131 /* In general, don't allow 64-bit operands in 32-bit mode. */
4132 if (i.suffix == QWORD_MNEM_SUFFIX
4133 && flag_code != CODE_64BIT
4135 ? (!t->opcode_modifier.ignoresize
4136 && !intel_float_operand (t->name))
4137 : intel_float_operand (t->name) != 2)
4138 && ((!operand_types[0].bitfield.regmmx
4139 && !operand_types[0].bitfield.regxmm
4140 && !operand_types[0].bitfield.regymm)
/* NOTE(review): the `!!' double negation on the next two lines (and on
   the regxmm line of the pre-386 check below) inverts the sense of the
   test relative to the single-`!' pattern used for operand 0 just
   above; it looks like a typo for a single `!' -- confirm against
   upstream binutils before relying on this clause.  */
4141 || (!operand_types[t->operands > 1].bitfield.regmmx
4142 && !!operand_types[t->operands > 1].bitfield.regxmm
4143 && !!operand_types[t->operands > 1].bitfield.regymm))
4144 && (t->base_opcode != 0x0fc7
4145 || t->extension_opcode != 1 /* cmpxchg8b */))
4148 /* In general, don't allow 32-bit operands on pre-386. */
4149 else if (i.suffix == LONG_MNEM_SUFFIX
4150 && !cpu_arch_flags.bitfield.cpui386
4152 ? (!t->opcode_modifier.ignoresize
4153 && !intel_float_operand (t->name))
4154 : intel_float_operand (t->name) != 2)
4155 && ((!operand_types[0].bitfield.regmmx
4156 && !operand_types[0].bitfield.regxmm)
4157 || (!operand_types[t->operands > 1].bitfield.regmmx
4158 && !!operand_types[t->operands > 1].bitfield.regxmm)))
4161 /* Do not verify operands when there are none. */
4165 /* We've found a match; break out of loop. */
4169 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4170 into Disp32/Disp16/Disp32 operand. */
4171 if (i.prefix[ADDR_PREFIX] != 0)
4173 /* There should be only one Disp operand. */
4177 for (j = 0; j < MAX_OPERANDS; j++)
4179 if (operand_types[j].bitfield.disp16)
4181 addr_prefix_disp = j;
4182 operand_types[j].bitfield.disp32 = 1;
4183 operand_types[j].bitfield.disp16 = 0;
4189 for (j = 0; j < MAX_OPERANDS; j++)
4191 if (operand_types[j].bitfield.disp32)
4193 addr_prefix_disp = j;
4194 operand_types[j].bitfield.disp32 = 0;
4195 operand_types[j].bitfield.disp16 = 1;
4201 for (j = 0; j < MAX_OPERANDS; j++)
4203 if (operand_types[j].bitfield.disp64)
4205 addr_prefix_disp = j;
4206 operand_types[j].bitfield.disp64 = 0;
4207 operand_types[j].bitfield.disp32 = 1;
4215 /* We check register size if needed. */
4216 check_register = t->opcode_modifier.checkregsize;
4217 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4218 switch (t->operands)
4221 if (!operand_type_match (overlap0, i.types[0]))
4225 /* xchg %eax, %eax is a special case. It is an alias for nop
4226 only in 32bit mode and we can use opcode 0x90. In 64bit
4227 mode, we can't use 0x90 for xchg %eax, %eax since it should
4228 zero-extend %eax to %rax. */
4229 if (flag_code == CODE_64BIT
4230 && t->base_opcode == 0x90
4231 && operand_type_equal (&i.types [0], &acc32)
4232 && operand_type_equal (&i.types [1], &acc32))
4236 /* If we swap operand in encoding, we either match
4237 the next one or reverse direction of operands. */
4238 if (t->opcode_modifier.s)
4240 else if (t->opcode_modifier.d)
4245 /* If we swap operand in encoding, we match the next one. */
4246 if (i.swap_operand && t->opcode_modifier.s)
4250 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4251 if (!operand_type_match (overlap0, i.types[0])
4252 || !operand_type_match (overlap1, i.types[1])
4254 && !operand_type_register_match (overlap0, i.types[0],
4256 overlap1, i.types[1],
4259 /* Check if other direction is valid ... */
4260 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4264 /* Try reversing direction of operands. */
4265 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4266 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4267 if (!operand_type_match (overlap0, i.types[0])
4268 || !operand_type_match (overlap1, i.types[1])
4270 && !operand_type_register_match (overlap0,
4277 /* Does not match either direction. */
4280 /* found_reverse_match holds which of D or FloatDR
4282 if (t->opcode_modifier.d)
4283 found_reverse_match = Opcode_D;
4284 else if (t->opcode_modifier.floatd)
4285 found_reverse_match = Opcode_FloatD;
4287 found_reverse_match = 0;
4288 if (t->opcode_modifier.floatr)
4289 found_reverse_match |= Opcode_FloatR;
4293 /* Found a forward 2 operand match here. */
/* For 3/4/5-operand templates the extra operand overlaps are computed
   top-down (fall-through switch), then checked bottom-up below.  */
4294 switch (t->operands)
4297 overlap4 = operand_type_and (i.types[4],
4300 overlap3 = operand_type_and (i.types[3],
4303 overlap2 = operand_type_and (i.types[2],
4308 switch (t->operands)
4311 if (!operand_type_match (overlap4, i.types[4])
4312 || !operand_type_register_match (overlap3,
4320 if (!operand_type_match (overlap3, i.types[3])
4322 && !operand_type_register_match (overlap2,
4330 /* Here we make use of the fact that there are no
4331 reverse match 3 operand instructions, and all 3
4332 operand instructions only need to be checked for
4333 register consistency between operands 2 and 3. */
4334 if (!operand_type_match (overlap2, i.types[2])
4336 && !operand_type_register_match (overlap1,
4346 /* Found either forward/reverse 2, 3 or 4 operand match here:
4347 slip through to break. */
4349 if (!found_cpu_match)
4351 found_reverse_match = 0;
4355 /* Check if vector and VEX operands are valid. */
4356 if (check_VecOperands (t) || VEX_check_operands (t))
4358 specific_error = i.error;
4362 /* We've found a match; break out of loop. */
4366 if (t == current_templates->end)
4368 /* We found no match. */
4369 const char *err_msg;
/* Prefer the error recorded by the vector/VEX checks (more specific)
   over the generic one left in i.error by the template loop.  */
4370 switch (specific_error ? specific_error : i.error)
4374 case operand_size_mismatch:
4375 err_msg = _("operand size mismatch");
4377 case operand_type_mismatch:
4378 err_msg = _("operand type mismatch");
4380 case register_type_mismatch:
4381 err_msg = _("register type mismatch");
4383 case number_of_operands_mismatch:
4384 err_msg = _("number of operands mismatch");
4386 case invalid_instruction_suffix:
4387 err_msg = _("invalid instruction suffix");
4390 err_msg = _("constant doesn't fit in 4 bits");
4393 err_msg = _("only supported with old gcc");
4395 case unsupported_with_intel_mnemonic:
4396 err_msg = _("unsupported with Intel mnemonic");
4398 case unsupported_syntax:
4399 err_msg = _("unsupported syntax");
4402 as_bad (_("unsupported instruction `%s'"),
4403 current_templates->start->name);
4405 case invalid_vsib_address:
4406 err_msg = _("invalid VSIB address");
4408 case invalid_vector_register_set:
4409 err_msg = _("mask, index, and destination registers must be distinct");
4411 case unsupported_vector_index_register:
4412 err_msg = _("unsupported vector index register");
4415 as_bad (_("%s for `%s'"), err_msg,
4416 current_templates->start->name);
4420 if (!quiet_warnings)
4423 && (i.types[0].bitfield.jumpabsolute
4424 != operand_types[0].bitfield.jumpabsolute))
4426 as_warn (_("indirect %s without `*'"), t->name);
4429 if (t->opcode_modifier.isprefix
4430 && t->opcode_modifier.ignoresize)
4432 /* Warn them that a data or address size prefix doesn't
4433 affect assembly of the next line of code. */
4434 as_warn (_("stand-alone `%s' prefix"), t->name);
4438 /* Copy the template we found. */
4441 if (addr_prefix_disp != -1)
4442 i.tm.operand_types[addr_prefix_disp]
4443 = operand_types[addr_prefix_disp];
4445 if (found_reverse_match)
4447 /* If we found a reverse match we must alter the opcode
4448 direction bit. found_reverse_match holds bits to change
4449 (different for int & float insns). */
4451 i.tm.base_opcode ^= found_reverse_match;
4453 i.tm.operand_types[0] = operand_types[1];
4454 i.tm.operand_types[1] = operand_types[0];
/* String-instruction tail: verify a mandatory %es segment on the
   relevant memory operand and normalize where the override is kept.  */
4463 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4464 if (i.tm.operand_types[mem_op].bitfield.esseg)
4466 if (i.seg[0] != NULL && i.seg[0] != &es)
4468 as_bad (_("`%s' operand %d must use `%ses' segment"),
4474 /* There's only ever one segment override allowed per instruction.
4475 This instruction possibly has a legal segment override on the
4476 second operand, so copy the segment to where non-string
4477 instructions store it, allowing common code. */
4478 i.seg[0] = i.seg[1];
4480 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4482 if (i.seg[1] != NULL && i.seg[1] != &es)
4484 as_bad (_("`%s' operand %d must use `%ses' segment"),
4495 process_suffix (void)
/* Determine the operand-size suffix for the matched instruction
   (explicit from the template, inferred from register operands, or
   defaulted), validate register sizes against it, and adjust the
   opcode / emit operand- or address-size prefixes and REX.W
   accordingly.  Returns 0 on error (after as_bad), non-zero on
   success.  */
4497 /* If matched instruction specifies an explicit instruction mnemonic
4499 if (i.tm.opcode_modifier.size16)
4500 i.suffix = WORD_MNEM_SUFFIX;
4501 else if (i.tm.opcode_modifier.size32)
4502 i.suffix = LONG_MNEM_SUFFIX;
4503 else if (i.tm.opcode_modifier.size64)
4504 i.suffix = QWORD_MNEM_SUFFIX;
4505 else if (i.reg_operands)
4507 /* If there's no instruction mnemonic suffix we try to invent one
4508 based on register operands. */
4511 /* We take i.suffix from the last register operand specified,
4512 Destination register type is more significant than source
4513 register type. crc32 in SSE4.2 prefers source register
4515 if (i.tm.base_opcode == 0xf20f38f1)
4517 if (i.types[0].bitfield.reg16)
4518 i.suffix = WORD_MNEM_SUFFIX;
4519 else if (i.types[0].bitfield.reg32)
4520 i.suffix = LONG_MNEM_SUFFIX;
4521 else if (i.types[0].bitfield.reg64)
4522 i.suffix = QWORD_MNEM_SUFFIX;
4524 else if (i.tm.base_opcode == 0xf20f38f0)
4526 if (i.types[0].bitfield.reg8)
4527 i.suffix = BYTE_MNEM_SUFFIX;
/* 0xf20f38f0/f1 are the two crc32 encodings; with a memory source the
   operand size cannot be inferred, so require an explicit suffix.  */
4534 if (i.tm.base_opcode == 0xf20f38f1
4535 || i.tm.base_opcode == 0xf20f38f0)
4537 /* We have to know the operand size for crc32. */
4538 as_bad (_("ambiguous memory operand size for `%s`"),
/* General case: scan operands from last to first, skipping I/O port
   registers, and take the suffix from the first register found.  */
4543 for (op = i.operands; --op >= 0;)
4544 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4546 if (i.types[op].bitfield.reg8)
4548 i.suffix = BYTE_MNEM_SUFFIX;
4551 else if (i.types[op].bitfield.reg16)
4553 i.suffix = WORD_MNEM_SUFFIX;
4556 else if (i.types[op].bitfield.reg32)
4558 i.suffix = LONG_MNEM_SUFFIX;
4561 else if (i.types[op].bitfield.reg64)
4563 i.suffix = QWORD_MNEM_SUFFIX;
/* An explicit suffix was given: cross-check the register operands
   against it via the check_*_reg helpers below.  */
4569 else if (i.suffix == BYTE_MNEM_SUFFIX)
4572 && i.tm.opcode_modifier.ignoresize
4573 && i.tm.opcode_modifier.no_bsuf)
4575 else if (!check_byte_reg ())
4578 else if (i.suffix == LONG_MNEM_SUFFIX)
4581 && i.tm.opcode_modifier.ignoresize
4582 && i.tm.opcode_modifier.no_lsuf)
4584 else if (!check_long_reg ())
4587 else if (i.suffix == QWORD_MNEM_SUFFIX)
4590 && i.tm.opcode_modifier.ignoresize
4591 && i.tm.opcode_modifier.no_qsuf)
4593 else if (!check_qword_reg ())
4596 else if (i.suffix == WORD_MNEM_SUFFIX)
4599 && i.tm.opcode_modifier.ignoresize
4600 && i.tm.opcode_modifier.no_wsuf)
4602 else if (!check_word_reg ())
4605 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4606 || i.suffix == YMMWORD_MNEM_SUFFIX)
4608 /* Skip if the instruction has x/y suffix. match_template
4609 should check if it is a valid suffix. */
4611 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4612 /* Do nothing if the instruction is going to ignore the prefix. */
4617 else if (i.tm.opcode_modifier.defaultsize
4619 /* exclude fldenv/frstor/fsave/fstenv */
4620 && i.tm.opcode_modifier.no_ssuf)
4622 i.suffix = stackop_size;
4624 else if (intel_syntax
4626 && (i.tm.operand_types[0].bitfield.jumpabsolute
4627 || i.tm.opcode_modifier.jumpbyte
4628 || i.tm.opcode_modifier.jumpintersegment
4629 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4630 && i.tm.extension_opcode <= 3)))
/* Intel-syntax branches/descriptor-table ops: pick the widest suffix
   the template permits for the current code size.  */
4635 if (!i.tm.opcode_modifier.no_qsuf)
4637 i.suffix = QWORD_MNEM_SUFFIX;
4641 if (!i.tm.opcode_modifier.no_lsuf)
4642 i.suffix = LONG_MNEM_SUFFIX;
4645 if (!i.tm.opcode_modifier.no_wsuf)
4646 i.suffix = WORD_MNEM_SUFFIX;
4655 if (i.tm.opcode_modifier.w)
4657 as_bad (_("no instruction mnemonic suffix given and "
4658 "no register operands; can't size instruction"));
/* Count how many suffixes the template would accept; more than one
   (without defaultsize/ignoresize) means the size is ambiguous.  */
4664 unsigned int suffixes;
4666 suffixes = !i.tm.opcode_modifier.no_bsuf;
4667 if (!i.tm.opcode_modifier.no_wsuf)
4669 if (!i.tm.opcode_modifier.no_lsuf)
4671 if (!i.tm.opcode_modifier.no_ldsuf)
4673 if (!i.tm.opcode_modifier.no_ssuf)
4675 if (!i.tm.opcode_modifier.no_qsuf)
4678 /* There are more than suffix matches. */
4679 if (i.tm.opcode_modifier.w
/* (suffixes & (suffixes - 1)) != 0 <=> more than one bit set.  */
4680 || ((suffixes & (suffixes - 1))
4681 && !i.tm.opcode_modifier.defaultsize
4682 && !i.tm.opcode_modifier.ignoresize))
4684 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4690 /* Change the opcode based on the operand size given by i.suffix;
4691 We don't need to change things for byte insns. */
4694 && i.suffix != BYTE_MNEM_SUFFIX
4695 && i.suffix != XMMWORD_MNEM_SUFFIX
4696 && i.suffix != YMMWORD_MNEM_SUFFIX)
4698 /* It's not a byte, select word/dword operation. */
4699 if (i.tm.opcode_modifier.w)
4701 if (i.tm.opcode_modifier.shortform)
4702 i.tm.base_opcode |= 8;
4704 i.tm.base_opcode |= 1;
4707 /* Now select between word & dword operations via the operand
4708 size prefix, except for instructions that will ignore this
4710 if (i.tm.opcode_modifier.addrprefixop0)
4712 /* The address size override prefix changes the size of the
4714 if ((flag_code == CODE_32BIT
4715 && i.op->regs[0].reg_type.bitfield.reg16)
4716 || (flag_code != CODE_32BIT
4717 && i.op->regs[0].reg_type.bitfield.reg32))
4718 if (!add_prefix (ADDR_PREFIX_OPCODE))
4721 else if (i.suffix != QWORD_MNEM_SUFFIX
4722 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4723 && !i.tm.opcode_modifier.ignoresize
4724 && !i.tm.opcode_modifier.floatmf
4725 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4726 || (flag_code == CODE_64BIT
4727 && i.tm.opcode_modifier.jumpbyte)))
4729 unsigned int prefix = DATA_PREFIX_OPCODE;
4731 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4732 prefix = ADDR_PREFIX_OPCODE;
4734 if (!add_prefix (prefix))
4738 /* Set mode64 for an operand. */
4739 if (i.suffix == QWORD_MNEM_SUFFIX
4740 && flag_code == CODE_64BIT
4741 && !i.tm.opcode_modifier.norex64)
4743 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4744 need rex64. cmpxchg8b is also a special case. */
4745 if (! (i.operands == 2
4746 && i.tm.base_opcode == 0x90
4747 && i.tm.extension_opcode == None
4748 && operand_type_equal (&i.types [0], &acc64)
4749 && operand_type_equal (&i.types [1], &acc64))
4750 && ! (i.operands == 1
4751 && i.tm.base_opcode == 0xfc7
4752 && i.tm.extension_opcode == 1
4753 && !operand_type_check (i.types [0], reg)
4754 && operand_type_check (i.types [0], anymem)))
4758 /* Size floating point instruction. */
4759 if (i.suffix == LONG_MNEM_SUFFIX)
4760 if (i.tm.opcode_modifier.floatmf)
4761 i.tm.base_opcode ^= 4;
4768 check_byte_reg (void)
/* Validate register operands against a `b' (byte) suffix: 8-bit
   registers pass; the low byte of a wider general register is
   accepted outside 64-bit mode (with a warning); anything else is
   rejected via as_bad.  Returns 0 on error, non-zero otherwise.  */
4772 for (op = i.operands; --op >= 0;)
4774 /* If this is an eight bit register, it's OK. If it's the 16 or
4775 32 bit version of an eight bit register, we will just use the
4776 low portion, and that's OK too. */
4777 if (i.types[op].bitfield.reg8)
4780 /* I/O port address operands are OK too. */
4781 if (i.tm.operand_types[op].bitfield.inoutportreg)
4784 /* crc32 doesn't generate this warning. */
4785 if (i.tm.base_opcode == 0xf20f38f0)
/* ax/eax..dx/edx (reg_num < 4) have byte sub-registers; silently
   lower to them outside 64-bit mode, warning unless quiet.  */
4788 if ((i.types[op].bitfield.reg16
4789 || i.types[op].bitfield.reg32
4790 || i.types[op].bitfield.reg64)
4791 && i.op[op].regs->reg_num < 4
4792 /* Prohibit these changes in 64bit mode, since the lowering
4793 would be more complicated. */
4794 && flag_code != CODE_64BIT)
4796 #if REGISTER_WARNINGS
4797 if (!quiet_warnings)
4798 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4800 (i.op[op].regs + (i.types[op].bitfield.reg16
4801 ? REGNAM_AL - REGNAM_AX
4802 : REGNAM_AL - REGNAM_EAX))->reg_name,
4804 i.op[op].regs->reg_name,
4809 /* Any other register is bad. */
4810 if (i.types[op].bitfield.reg16
4811 || i.types[op].bitfield.reg32
4812 || i.types[op].bitfield.reg64
4813 || i.types[op].bitfield.regmmx
4814 || i.types[op].bitfield.regxmm
4815 || i.types[op].bitfield.regymm
4816 || i.types[op].bitfield.sreg2
4817 || i.types[op].bitfield.sreg3
4818 || i.types[op].bitfield.control
4819 || i.types[op].bitfield.debug
4820 || i.types[op].bitfield.test
4821 || i.types[op].bitfield.floatreg
4822 || i.types[op].bitfield.floatacc)
4824 as_bad (_("`%s%s' not allowed with `%s%c'"),
4826 i.op[op].regs->reg_name,
4836 check_long_reg (void)
/* Validate register operands against an `l' (32-bit) suffix.
   Rejects 8-bit registers where the template wants wider ones,
   lowers/warns on 16-bit registers outside 64-bit mode, and handles
   64-bit registers via the toqword conversion or an error.
   Returns 0 on error, non-zero otherwise.  */
4840 for (op = i.operands; --op >= 0;)
4841 /* Reject eight bit registers, except where the template requires
4842 them. (eg. movzb) */
4843 if (i.types[op].bitfield.reg8
4844 && (i.tm.operand_types[op].bitfield.reg16
4845 || i.tm.operand_types[op].bitfield.reg32
4846 || i.tm.operand_types[op].bitfield.acc))
4848 as_bad (_("`%s%s' not allowed with `%s%c'"),
4850 i.op[op].regs->reg_name,
4855 /* Warn if the e prefix on a general reg is missing. */
4856 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4857 && i.types[op].bitfield.reg16
4858 && (i.tm.operand_types[op].bitfield.reg32
4859 || i.tm.operand_types[op].bitfield.acc))
4861 /* Prohibit these changes in the 64bit mode, since the
4862 lowering is more complicated. */
4863 if (flag_code == CODE_64BIT)
4865 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4866 register_prefix, i.op[op].regs->reg_name,
4870 #if REGISTER_WARNINGS
4872 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4874 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4876 i.op[op].regs->reg_name,
4880 /* Warn if the r prefix on a general reg is missing. */
4881 else if (i.types[op].bitfield.reg64
4882 && (i.tm.operand_types[op].bitfield.reg32
4883 || i.tm.operand_types[op].bitfield.acc))
/* Templates marked ToQword (e.g. some SSE conversions) silently
   promote the insn to a qword form instead of erroring.  */
4886 && i.tm.opcode_modifier.toqword
4887 && !i.types[0].bitfield.regxmm)
4889 /* Convert to QWORD. We want REX byte. */
4890 i.suffix = QWORD_MNEM_SUFFIX;
4894 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4895 register_prefix, i.op[op].regs->reg_name,
4904 check_qword_reg (void)
/* Validate register operands against a `q' (64-bit) suffix.
   Rejects 8-bit registers where the template wants wider ones and
   handles 16/32-bit registers via the todword conversion or an
   error.  Returns 0 on error, non-zero otherwise.  */
4908 for (op = i.operands; --op >= 0; )
4909 /* Reject eight bit registers, except where the template requires
4910 them. (eg. movzb) */
4911 if (i.types[op].bitfield.reg8
4912 && (i.tm.operand_types[op].bitfield.reg16
4913 || i.tm.operand_types[op].bitfield.reg32
4914 || i.tm.operand_types[op].bitfield.acc))
4916 as_bad (_("`%s%s' not allowed with `%s%c'"),
4918 i.op[op].regs->reg_name,
/* NOTE(review): comment below mirrors check_long_reg's wording; here
   the mismatch being handled is a 16/32-bit register used with the
   `q' suffix -- kept verbatim, confirm intent against upstream.  */
4923 /* Warn if the e prefix on a general reg is missing. */
4924 else if ((i.types[op].bitfield.reg16
4925 || i.types[op].bitfield.reg32)
4926 && (i.tm.operand_types[op].bitfield.reg32
4927 || i.tm.operand_types[op].bitfield.acc))
4929 /* Prohibit these changes in the 64bit mode, since the
4930 lowering is more complicated. */
/* Templates marked ToDword silently demote the insn to a dword
   form (no REX.W) instead of erroring.  */
4932 && i.tm.opcode_modifier.todword
4933 && !i.types[0].bitfield.regxmm)
4935 /* Convert to DWORD. We don't want REX byte. */
4936 i.suffix = LONG_MNEM_SUFFIX;
4940 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4941 register_prefix, i.op[op].regs->reg_name,
4950 check_word_reg (void)
/* Validate register operands against a `w' (16-bit) suffix.
   Rejects 8-bit registers where the template wants wider ones and
   lowers/warns on 32-bit registers outside 64-bit mode.
   Returns 0 on error, non-zero otherwise.  */
4953 for (op = i.operands; --op >= 0;)
4954 /* Reject eight bit registers, except where the template requires
4955 them. (eg. movzb) */
4956 if (i.types[op].bitfield.reg8
4957 && (i.tm.operand_types[op].bitfield.reg16
4958 || i.tm.operand_types[op].bitfield.reg32
4959 || i.tm.operand_types[op].bitfield.acc))
4961 as_bad (_("`%s%s' not allowed with `%s%c'"),
4963 i.op[op].regs->reg_name,
4968 /* Warn if the e prefix on a general reg is present. */
4969 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4970 && i.types[op].bitfield.reg32
4971 && (i.tm.operand_types[op].bitfield.reg16
4972 || i.tm.operand_types[op].bitfield.acc))
4974 /* Prohibit these changes in the 64bit mode, since the
4975 lowering is more complicated. */
4976 if (flag_code == CODE_64BIT)
4978 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4979 register_prefix, i.op[op].regs->reg_name,
4984 #if REGISTER_WARNINGS
4985 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4987 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4989 i.op[op].regs->reg_name,
4997 update_imm (unsigned int j)
/* Narrow the immediate-type bits of operand J to a single concrete
   immediate size, using the mnemonic suffix when one is present.
   Fails (returns 0 after as_bad) if more than one size remains and
   no suffix disambiguates it; stores the narrowed type back into
   i.types[j] on success.  */
4999 i386_operand_type overlap = i.types[j];
/* Only act when several immediate-size bits are set at once, i.e.
   the overlap is not already exactly one of the canonical imm types.  */
5000 if ((overlap.bitfield.imm8
5001 || overlap.bitfield.imm8s
5002 || overlap.bitfield.imm16
5003 || overlap.bitfield.imm32
5004 || overlap.bitfield.imm32s
5005 || overlap.bitfield.imm64)
5006 && !operand_type_equal (&overlap, &imm8)
5007 && !operand_type_equal (&overlap, &imm8s)
5008 && !operand_type_equal (&overlap, &imm16)
5009 && !operand_type_equal (&overlap, &imm32)
5010 && !operand_type_equal (&overlap, &imm32s)
5011 && !operand_type_equal (&overlap, &imm64))
5015 i386_operand_type temp;
5017 operand_type_set (&temp, 0);
5018 if (i.suffix == BYTE_MNEM_SUFFIX)
5020 temp.bitfield.imm8 = overlap.bitfield.imm8;
5021 temp.bitfield.imm8s = overlap.bitfield.imm8s;
5023 else if (i.suffix == WORD_MNEM_SUFFIX)
5024 temp.bitfield.imm16 = overlap.bitfield.imm16;
5025 else if (i.suffix == QWORD_MNEM_SUFFIX)
5027 temp.bitfield.imm64 = overlap.bitfield.imm64;
5028 temp.bitfield.imm32s = overlap.bitfield.imm32s;
5031 temp.bitfield.imm32 = overlap.bitfield.imm32;
/* No usable suffix: a 16/32-bit ambiguity can still be resolved from
   the effective operand size (code size XOR data-size prefix).  */
5034 else if (operand_type_equal (&overlap, &imm16_32_32s)
5035 || operand_type_equal (&overlap, &imm16_32)
5036 || operand_type_equal (&overlap, &imm16_32s))
5038 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
5043 if (!operand_type_equal (&overlap, &imm8)
5044 && !operand_type_equal (&overlap, &imm8s)
5045 && !operand_type_equal (&overlap, &imm16)
5046 && !operand_type_equal (&overlap, &imm32)
5047 && !operand_type_equal (&overlap, &imm32s)
5048 && !operand_type_equal (&overlap, &imm64))
5050 as_bad (_("no instruction mnemonic suffix given; "
5051 "can't determine immediate size"));
5055 i.types[j] = overlap;
/* Body of the immediate-finalization pass: run update_imm over the
   (at most two) leading immediate operands and assert that operand 2
   onward never carries an immediate.  */
5065 /* Update the first 2 immediate operands. */
5066 n = i.operands > 2 ? 2 : i.operands;
5069 for (j = 0; j < n; j++)
5070 if (update_imm (j) == 0)
5073 /* The 3rd operand can't be immediate operand. */
5074 gas_assert (operand_type_check (i.types[2], imm) == 0);
5081 bad_implicit_operand (int xmm)
/* Report a wrong implicit register operand for the current insn.
   XMM non-zero selects "xmm0" in the message, zero selects "ymm0";
   which operand position is blamed depends on the syntax mode
   (Intel lists the implicit operand last, AT&T first).  */
5083 const char *ireg = xmm ? "xmm0" : "ymm0";
5086 as_bad (_("the last operand of `%s' must be `%s%s'"),
5087 i.tm.name, register_prefix, ireg);
5089 as_bad (_("the first operand of `%s' must be `%s%s'"),
5090 i.tm.name, register_prefix, ireg);
5095 process_operands (void)
/* Massage the matched instruction's operands into their final encoded
   form: materialize implicit xmm0/ymm0 operands, duplicate operands
   for SSE2AVX, apply the RegKludge and ShortForm encodings, build the
   ModRM byte, pick a default segment, and emit any needed segment
   override prefix.  Returns 0 on error, non-zero on success.  */
5097 /* Default segment register this instruction will use for memory
5098 accesses. 0 means unknown. This is only for optimizing out
5099 unnecessary segment overrides. */
5100 const seg_entry *default_seg = 0;
5102 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
5104 unsigned int dupl = i.operands;
5105 unsigned int dest = dupl - 1;
5108 /* The destination must be an xmm register. */
/* NOTE(review): `®xmm' on the next line (and similar below) is
   mojibake -- `&reg' was eaten by HTML-entity decoding; the token
   should read `&regxmm'.  Fix the encoding in source control.  */
5109 gas_assert (i.reg_operands
5110 && MAX_OPERANDS > dupl
5111 && operand_type_equal (&i.types[dest], ®xmm));
5113 if (i.tm.opcode_modifier.firstxmm0)
5115 /* The first operand is implicit and must be xmm0. */
/* NOTE(review): `®xmm' is mojibake for `&regxmm' here too.  */
5116 gas_assert (operand_type_equal (&i.types[0], ®xmm));
5117 if (register_number (i.op[0].regs) != 0)
5118 return bad_implicit_operand (1);
5120 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5122 /* Keep xmm0 for instructions with VEX prefix and 3
5128 /* We remove the first xmm0 and keep the number of
5129 operands unchanged, which in fact duplicates the
5131 for (j = 1; j < i.operands; j++)
5133 i.op[j - 1] = i.op[j];
5134 i.types[j - 1] = i.types[j];
5135 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5139 else if (i.tm.opcode_modifier.implicit1stxmm0)
5141 gas_assert ((MAX_OPERANDS - 1) > dupl
5142 && (i.tm.opcode_modifier.vexsources
5145 /* Add the implicit xmm0 for instructions with VEX prefix
/* Shift every operand up one slot to open position 0 for xmm0.  */
5147 for (j = i.operands; j > 0; j--)
5149 i.op[j] = i.op[j - 1];
5150 i.types[j] = i.types[j - 1];
5151 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5154 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5155 i.types[0] = regxmm;
5156 i.tm.operand_types[0] = regxmm;
5159 i.reg_operands += 2;
5164 i.op[dupl] = i.op[dest];
5165 i.types[dupl] = i.types[dest];
5166 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
/* Plain SSE2AVX duplication: repeat the destination operand.  */
5175 i.op[dupl] = i.op[dest];
5176 i.types[dupl] = i.types[dest];
5177 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5180 if (i.tm.opcode_modifier.immext)
5183 else if (i.tm.opcode_modifier.firstxmm0)
5187 /* The first operand is implicit and must be xmm0/ymm0. */
/* NOTE(review): `®xmm' / `®ymm' below are mojibake for
   `&regxmm' / `&regymm' (HTML-entity corruption).  */
5188 gas_assert (i.reg_operands
5189 && (operand_type_equal (&i.types[0], ®xmm)
5190 || operand_type_equal (&i.types[0], ®ymm)));
5191 if (register_number (i.op[0].regs) != 0)
5192 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5194 for (j = 1; j < i.operands; j++)
5196 i.op[j - 1] = i.op[j];
5197 i.types[j - 1] = i.types[j];
5199 /* We need to adjust fields in i.tm since they are used by
5200 build_modrm_byte. */
5201 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5208 else if (i.tm.opcode_modifier.regkludge)
5210 /* The imul $imm, %reg instruction is converted into
5211 imul $imm, %reg, %reg, and the clr %reg instruction
5212 is converted into xor %reg, %reg. */
5214 unsigned int first_reg_op;
5216 if (operand_type_check (i.types[0], reg))
5220 /* Pretend we saw the extra register operand. */
5221 gas_assert (i.reg_operands == 1
5222 && i.op[first_reg_op + 1].regs == 0);
5223 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5224 i.types[first_reg_op + 1] = i.types[first_reg_op];
5229 if (i.tm.opcode_modifier.shortform)
5231 if (i.types[0].bitfield.sreg2
5232 || i.types[0].bitfield.sreg3)
5234 if (i.tm.base_opcode == POP_SEG_SHORT
5235 && i.op[0].regs->reg_num == 1)
5237 as_bad (_("you can't `pop %scs'"), register_prefix);
/* Segment register number is encoded in bits 3-5 of the opcode.  */
5240 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5241 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5246 /* The register or float register operand is in operand
5250 if (i.types[0].bitfield.floatreg
5251 || operand_type_check (i.types[0], reg))
5255 /* Register goes in low 3 bits of opcode. */
5256 i.tm.base_opcode |= i.op[op].regs->reg_num;
5257 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5259 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5261 /* Warn about some common errors, but press on regardless.
5262 The first case can be generated by gcc (<= 2.8.1). */
5263 if (i.operands == 2)
5265 /* Reversed arguments on faddp, fsubp, etc. */
5266 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5267 register_prefix, i.op[!intel_syntax].regs->reg_name,
5268 register_prefix, i.op[intel_syntax].regs->reg_name);
5272 /* Extraneous `l' suffix on fp insn. */
5273 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5274 register_prefix, i.op[0].regs->reg_name);
5279 else if (i.tm.opcode_modifier.modrm)
5281 /* The opcode is completed (modulo i.tm.extension_opcode which
5282 must be put into the modrm byte). Now, we make the modrm and
5283 index base bytes based on all the info we've collected. */
5285 default_seg = build_modrm_byte ();
5287 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5291 else if (i.tm.opcode_modifier.isstring)
5293 /* For the string instructions that allow a segment override
5294 on one of their operands, the default segment is ds. */
5298 if (i.tm.base_opcode == 0x8d /* lea */
5301 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5303 /* If a segment was explicitly specified, and the specified segment
5304 is not the default, use an opcode prefix to select it. If we
5305 never figured out what the default segment is, then default_seg
5306 will be zero at this point, and the specified segment prefix will
5308 if ((i.seg[0]) && (i.seg[0] != default_seg))
5310 if (!add_prefix (i.seg[0]->seg_prefix))
5316 static const seg_entry *
5317 build_modrm_byte (void)
5319 const seg_entry *default_seg = 0;
5320 unsigned int source, dest;
5323 /* The first operand of instructions with VEX prefix and 3 sources
5324 must be VEX_Imm4. */
5325 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5328 unsigned int nds, reg_slot;
5331 if (i.tm.opcode_modifier.veximmext
5332 && i.tm.opcode_modifier.immext)
5334 dest = i.operands - 2;
5335 gas_assert (dest == 3);
5338 dest = i.operands - 1;
5341 /* There are 2 kinds of instructions:
5342 1. 5 operands: 4 register operands or 3 register operands
5343 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5344 VexW0 or VexW1. The destination must be either XMM or YMM
5346 2. 4 operands: 4 register operands or 3 register operands
5347 plus 1 memory operand, VexXDS, and VexImmExt */
5348 gas_assert ((i.reg_operands == 4
5349 || (i.reg_operands == 3 && i.mem_operands == 1))
5350 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5351 && (i.tm.opcode_modifier.veximmext
5352 || (i.imm_operands == 1
5353 && i.types[0].bitfield.vec_imm4
5354 && (i.tm.opcode_modifier.vexw == VEXW0
5355 || i.tm.opcode_modifier.vexw == VEXW1)
5356 && (operand_type_equal (&i.tm.operand_types[dest], ®xmm)
5357 || operand_type_equal (&i.tm.operand_types[dest], ®ymm)))));
5359 if (i.imm_operands == 0)
5361 /* When there is no immediate operand, generate an 8bit
5362 immediate operand to encode the first operand. */
5363 exp = &im_expressions[i.imm_operands++];
5364 i.op[i.operands].imms = exp;
5365 i.types[i.operands] = imm8;
5367 /* If VexW1 is set, the first operand is the source and
5368 the second operand is encoded in the immediate operand. */
5369 if (i.tm.opcode_modifier.vexw == VEXW1)
5380 /* FMA swaps REG and NDS. */
5381 if (i.tm.cpu_flags.bitfield.cpufma)
5389 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5391 || operand_type_equal (&i.tm.operand_types[reg_slot],
5393 exp->X_op = O_constant;
5394 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
5398 unsigned int imm_slot;
5400 if (i.tm.opcode_modifier.vexw == VEXW0)
5402 /* If VexW0 is set, the third operand is the source and
5403 the second operand is encoded in the immediate
5410 /* VexW1 is set, the second operand is the source and
5411 the third operand is encoded in the immediate
5417 if (i.tm.opcode_modifier.immext)
5419 /* When ImmExt is set, the immdiate byte is the last
5421 imm_slot = i.operands - 1;
5429 /* Turn on Imm8 so that output_imm will generate it. */
5430 i.types[imm_slot].bitfield.imm8 = 1;
5433 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5435 || operand_type_equal (&i.tm.operand_types[reg_slot],
5437 i.op[imm_slot].imms->X_add_number
5438 |= register_number (i.op[reg_slot].regs) << 4;
5441 gas_assert (operand_type_equal (&i.tm.operand_types[nds], ®xmm)
5442 || operand_type_equal (&i.tm.operand_types[nds],
5444 i.vex.register_specifier = i.op[nds].regs;
5449 /* i.reg_operands MUST be the number of real register operands;
5450 implicit registers do not count. If there are 3 register
5451 operands, it must be a instruction with VexNDS. For a
5452 instruction with VexNDD, the destination register is encoded
5453 in VEX prefix. If there are 4 register operands, it must be
5454 a instruction with VEX prefix and 3 sources. */
5455 if (i.mem_operands == 0
5456 && ((i.reg_operands == 2
5457 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5458 || (i.reg_operands == 3
5459 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5460 || (i.reg_operands == 4 && vex_3_sources)))
5468 /* When there are 3 operands, one of them may be immediate,
5469 which may be the first or the last operand. Otherwise,
5470 the first operand must be shift count register (cl) or it
5471 is an instruction with VexNDS. */
5472 gas_assert (i.imm_operands == 1
5473 || (i.imm_operands == 0
5474 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5475 || i.types[0].bitfield.shiftcount)));
5476 if (operand_type_check (i.types[0], imm)
5477 || i.types[0].bitfield.shiftcount)
5483 /* When there are 4 operands, the first two must be 8bit
5484 immediate operands. The source operand will be the 3rd
5487 For instructions with VexNDS, if the first operand
5488 an imm8, the source operand is the 2nd one. If the last
5489 operand is imm8, the source operand is the first one. */
5490 gas_assert ((i.imm_operands == 2
5491 && i.types[0].bitfield.imm8
5492 && i.types[1].bitfield.imm8)
5493 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5494 && i.imm_operands == 1
5495 && (i.types[0].bitfield.imm8
5496 || i.types[i.operands - 1].bitfield.imm8)));
5497 if (i.imm_operands == 2)
5501 if (i.types[0].bitfield.imm8)
5517 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5519 /* For instructions with VexNDS, the register-only
5520 source operand must be 32/64bit integer, XMM or
5521 YMM register. It is encoded in VEX prefix. We
5522 need to clear RegMem bit before calling
5523 operand_type_equal. */
5525 i386_operand_type op;
5528 /* Check register-only source operand when two source
5529 operands are swapped. */
5530 if (!i.tm.operand_types[source].bitfield.baseindex
5531 && i.tm.operand_types[dest].bitfield.baseindex)
5539 op = i.tm.operand_types[vvvv];
5540 op.bitfield.regmem = 0;
5541 if ((dest + 1) >= i.operands
5542 || (op.bitfield.reg32 != 1
5543 && !op.bitfield.reg64 != 1
5544 && !operand_type_equal (&op, ®xmm)
5545 && !operand_type_equal (&op, ®ymm)))
5547 i.vex.register_specifier = i.op[vvvv].regs;
5553 /* One of the register operands will be encoded in the i.tm.reg
5554 field, the other in the combined i.tm.mode and i.tm.regmem
5555 fields. If no form of this instruction supports a memory
5556 destination operand, then we assume the source operand may
5557 sometimes be a memory operand and so we need to store the
5558 destination in the i.rm.reg field. */
5559 if (!i.tm.operand_types[dest].bitfield.regmem
5560 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5562 i.rm.reg = i.op[dest].regs->reg_num;
5563 i.rm.regmem = i.op[source].regs->reg_num;
5564 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5566 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5571 i.rm.reg = i.op[source].regs->reg_num;
5572 i.rm.regmem = i.op[dest].regs->reg_num;
5573 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5575 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5578 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5580 if (!i.types[0].bitfield.control
5581 && !i.types[1].bitfield.control)
5583 i.rex &= ~(REX_R | REX_B);
5584 add_prefix (LOCK_PREFIX_OPCODE);
5588 { /* If it's not 2 reg operands... */
5593 unsigned int fake_zero_displacement = 0;
5596 for (op = 0; op < i.operands; op++)
5597 if (operand_type_check (i.types[op], anymem))
5599 gas_assert (op < i.operands);
5601 if (i.tm.opcode_modifier.vecsib)
5603 if (i.index_reg->reg_num == RegEiz
5604 || i.index_reg->reg_num == RegRiz)
5607 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5610 i.sib.base = NO_BASE_REGISTER;
5611 i.sib.scale = i.log2_scale_factor;
5612 i.types[op].bitfield.disp8 = 0;
5613 i.types[op].bitfield.disp16 = 0;
5614 i.types[op].bitfield.disp64 = 0;
5615 if (flag_code != CODE_64BIT)
5617 /* Must be 32 bit */
5618 i.types[op].bitfield.disp32 = 1;
5619 i.types[op].bitfield.disp32s = 0;
5623 i.types[op].bitfield.disp32 = 0;
5624 i.types[op].bitfield.disp32s = 1;
5627 i.sib.index = i.index_reg->reg_num;
5628 if ((i.index_reg->reg_flags & RegRex) != 0)
5634 if (i.base_reg == 0)
5637 if (!i.disp_operands)
5639 fake_zero_displacement = 1;
5640 /* Instructions with VSIB byte need 32bit displacement
5641 if there is no base register. */
5642 if (i.tm.opcode_modifier.vecsib)
5643 i.types[op].bitfield.disp32 = 1;
5645 if (i.index_reg == 0)
5647 gas_assert (!i.tm.opcode_modifier.vecsib);
5648 /* Operand is just <disp> */
5649 if (flag_code == CODE_64BIT)
5651 /* 64bit mode overwrites the 32bit absolute
5652 addressing by RIP relative addressing and
5653 absolute addressing is encoded by one of the
5654 redundant SIB forms. */
5655 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5656 i.sib.base = NO_BASE_REGISTER;
5657 i.sib.index = NO_INDEX_REGISTER;
5658 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5659 ? disp32s : disp32);
5661 else if ((flag_code == CODE_16BIT)
5662 ^ (i.prefix[ADDR_PREFIX] != 0))
5664 i.rm.regmem = NO_BASE_REGISTER_16;
5665 i.types[op] = disp16;
5669 i.rm.regmem = NO_BASE_REGISTER;
5670 i.types[op] = disp32;
5673 else if (!i.tm.opcode_modifier.vecsib)
5675 /* !i.base_reg && i.index_reg */
5676 if (i.index_reg->reg_num == RegEiz
5677 || i.index_reg->reg_num == RegRiz)
5678 i.sib.index = NO_INDEX_REGISTER;
5680 i.sib.index = i.index_reg->reg_num;
5681 i.sib.base = NO_BASE_REGISTER;
5682 i.sib.scale = i.log2_scale_factor;
5683 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5684 i.types[op].bitfield.disp8 = 0;
5685 i.types[op].bitfield.disp16 = 0;
5686 i.types[op].bitfield.disp64 = 0;
5687 if (flag_code != CODE_64BIT)
5689 /* Must be 32 bit */
5690 i.types[op].bitfield.disp32 = 1;
5691 i.types[op].bitfield.disp32s = 0;
5695 i.types[op].bitfield.disp32 = 0;
5696 i.types[op].bitfield.disp32s = 1;
5698 if ((i.index_reg->reg_flags & RegRex) != 0)
5702 /* RIP addressing for 64bit mode. */
5703 else if (i.base_reg->reg_num == RegRip ||
5704 i.base_reg->reg_num == RegEip)
5706 gas_assert (!i.tm.opcode_modifier.vecsib);
5707 i.rm.regmem = NO_BASE_REGISTER;
5708 i.types[op].bitfield.disp8 = 0;
5709 i.types[op].bitfield.disp16 = 0;
5710 i.types[op].bitfield.disp32 = 0;
5711 i.types[op].bitfield.disp32s = 1;
5712 i.types[op].bitfield.disp64 = 0;
5713 i.flags[op] |= Operand_PCrel;
5714 if (! i.disp_operands)
5715 fake_zero_displacement = 1;
5717 else if (i.base_reg->reg_type.bitfield.reg16)
5719 gas_assert (!i.tm.opcode_modifier.vecsib);
5720 switch (i.base_reg->reg_num)
5723 if (i.index_reg == 0)
5725 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5726 i.rm.regmem = i.index_reg->reg_num - 6;
5730 if (i.index_reg == 0)
5733 if (operand_type_check (i.types[op], disp) == 0)
5735 /* fake (%bp) into 0(%bp) */
5736 i.types[op].bitfield.disp8 = 1;
5737 fake_zero_displacement = 1;
5740 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5741 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5743 default: /* (%si) -> 4 or (%di) -> 5 */
5744 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5746 i.rm.mode = mode_from_disp_size (i.types[op]);
5748 else /* i.base_reg and 32/64 bit mode */
5750 if (flag_code == CODE_64BIT
5751 && operand_type_check (i.types[op], disp))
5753 i386_operand_type temp;
5754 operand_type_set (&temp, 0);
5755 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5757 if (i.prefix[ADDR_PREFIX] == 0)
5758 i.types[op].bitfield.disp32s = 1;
5760 i.types[op].bitfield.disp32 = 1;
5763 if (!i.tm.opcode_modifier.vecsib)
5764 i.rm.regmem = i.base_reg->reg_num;
5765 if ((i.base_reg->reg_flags & RegRex) != 0)
5767 i.sib.base = i.base_reg->reg_num;
5768 /* x86-64 ignores REX prefix bit here to avoid decoder
5770 if (!(i.base_reg->reg_flags & RegRex)
5771 && (i.base_reg->reg_num == EBP_REG_NUM
5772 || i.base_reg->reg_num == ESP_REG_NUM))
5774 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
5776 fake_zero_displacement = 1;
5777 i.types[op].bitfield.disp8 = 1;
5779 i.sib.scale = i.log2_scale_factor;
5780 if (i.index_reg == 0)
5782 gas_assert (!i.tm.opcode_modifier.vecsib);
5783 /* <disp>(%esp) becomes two byte modrm with no index
5784 register. We've already stored the code for esp
5785 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5786 Any base register besides %esp will not use the
5787 extra modrm byte. */
5788 i.sib.index = NO_INDEX_REGISTER;
5790 else if (!i.tm.opcode_modifier.vecsib)
5792 if (i.index_reg->reg_num == RegEiz
5793 || i.index_reg->reg_num == RegRiz)
5794 i.sib.index = NO_INDEX_REGISTER;
5796 i.sib.index = i.index_reg->reg_num;
5797 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5798 if ((i.index_reg->reg_flags & RegRex) != 0)
5803 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5804 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5808 if (!fake_zero_displacement
5812 fake_zero_displacement = 1;
5813 if (i.disp_encoding == disp_encoding_8bit)
5814 i.types[op].bitfield.disp8 = 1;
5816 i.types[op].bitfield.disp32 = 1;
5818 i.rm.mode = mode_from_disp_size (i.types[op]);
5822 if (fake_zero_displacement)
5824 /* Fakes a zero displacement assuming that i.types[op]
5825 holds the correct displacement size. */
5828 gas_assert (i.op[op].disps == 0);
5829 exp = &disp_expressions[i.disp_operands++];
5830 i.op[op].disps = exp;
5831 exp->X_op = O_constant;
5832 exp->X_add_number = 0;
5833 exp->X_add_symbol = (symbolS *) 0;
5834 exp->X_op_symbol = (symbolS *) 0;
5842 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5844 if (operand_type_check (i.types[0], imm))
5845 i.vex.register_specifier = NULL;
5848 /* VEX.vvvv encodes one of the sources when the first
5849 operand is not an immediate. */
5850 if (i.tm.opcode_modifier.vexw == VEXW0)
5851 i.vex.register_specifier = i.op[0].regs;
5853 i.vex.register_specifier = i.op[1].regs;
5856 /* Destination is a XMM register encoded in the ModRM.reg
5858 i.rm.reg = i.op[2].regs->reg_num;
5859 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5862 /* ModRM.rm and VEX.B encodes the other source. */
5863 if (!i.mem_operands)
5867 if (i.tm.opcode_modifier.vexw == VEXW0)
5868 i.rm.regmem = i.op[1].regs->reg_num;
5870 i.rm.regmem = i.op[0].regs->reg_num;
5872 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5876 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5878 i.vex.register_specifier = i.op[2].regs;
5879 if (!i.mem_operands)
5882 i.rm.regmem = i.op[1].regs->reg_num;
5883 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5887 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5888 (if any) based on i.tm.extension_opcode. Again, we must be
5889 careful to make sure that segment/control/debug/test/MMX
5890 registers are coded into the i.rm.reg field. */
5891 else if (i.reg_operands)
5894 unsigned int vex_reg = ~0;
5896 for (op = 0; op < i.operands; op++)
5897 if (i.types[op].bitfield.reg8
5898 || i.types[op].bitfield.reg16
5899 || i.types[op].bitfield.reg32
5900 || i.types[op].bitfield.reg64
5901 || i.types[op].bitfield.regmmx
5902 || i.types[op].bitfield.regxmm
5903 || i.types[op].bitfield.regymm
5904 || i.types[op].bitfield.sreg2
5905 || i.types[op].bitfield.sreg3
5906 || i.types[op].bitfield.control
5907 || i.types[op].bitfield.debug
5908 || i.types[op].bitfield.test)
5913 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5915 /* For instructions with VexNDS, the register-only
5916 source operand is encoded in VEX prefix. */
5917 gas_assert (mem != (unsigned int) ~0);
5922 gas_assert (op < i.operands);
5926 /* Check register-only source operand when two source
5927 operands are swapped. */
5928 if (!i.tm.operand_types[op].bitfield.baseindex
5929 && i.tm.operand_types[op + 1].bitfield.baseindex)
5933 gas_assert (mem == (vex_reg + 1)
5934 && op < i.operands);
5939 gas_assert (vex_reg < i.operands);
5943 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5945 /* For instructions with VexNDD, the register destination
5946 is encoded in VEX prefix. */
5947 if (i.mem_operands == 0)
5949 /* There is no memory operand. */
5950 gas_assert ((op + 2) == i.operands);
5955 /* There are only 2 operands. */
5956 gas_assert (op < 2 && i.operands == 2);
5961 gas_assert (op < i.operands);
5963 if (vex_reg != (unsigned int) ~0)
5965 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5967 if (type->bitfield.reg32 != 1
5968 && type->bitfield.reg64 != 1
5969 && !operand_type_equal (type, ®xmm)
5970 && !operand_type_equal (type, ®ymm))
5973 i.vex.register_specifier = i.op[vex_reg].regs;
5976 /* Don't set OP operand twice. */
5979 /* If there is an extension opcode to put here, the
5980 register number must be put into the regmem field. */
5981 if (i.tm.extension_opcode != None)
5983 i.rm.regmem = i.op[op].regs->reg_num;
5984 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5989 i.rm.reg = i.op[op].regs->reg_num;
5990 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5995 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5996 must set it to 3 to indicate this is a register operand
5997 in the regmem field. */
5998 if (!i.mem_operands)
6002 /* Fill in i.rm.reg field with extension opcode (if any). */
6003 if (i.tm.extension_opcode != None)
6004 i.rm.reg = i.tm.extension_opcode;
/* output_branch: emit a relaxable (conditional or unconditional) branch.
   It writes any data/segment-hint/REX prefixes plus one opcode byte into
   the fixed part of the frag, then opens a machine-dependent variable
   frag (frag_var) so md_convert_frag can later pick the short or long
   encoding.
   NOTE(review): this chunk is an elided listing -- the embedded numbers
   are original line numbers and show that many lines (locals, braces,
   some statements) are missing between those shown.  Comments below
   describe only what the visible lines establish.  */
6010 output_branch (void)
6016 relax_substateT subtype;
6020 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
/* Force the 32-bit (BIG) form when the user asked for it via
   disp_encoding; otherwise start SMALL and let relaxation grow it.  */
6021 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
6024 if (i.prefix[DATA_PREFIX] != 0)
6030 /* Pentium4 branch hints. */
6031 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6032 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6037 if (i.prefix[REX_PREFIX] != 0)
6043 if (i.prefixes != 0 && !intel_syntax)
6044 as_warn (_("skipping prefixes on this instruction"));
6046 /* It's always a symbol; End frag & setup for relax.
6047 Make sure there is enough room in this frag for the largest
6048 instruction we may generate in md_convert_frag. This is 2
6049 bytes for the opcode and room for the prefix and largest
6051 frag_grow (prefix + 2 + 4);
6052 /* Prefix and 1 opcode byte go in fr_fix. */
6053 p = frag_more (prefix + 1);
6054 if (i.prefix[DATA_PREFIX] != 0)
6055 *p++ = DATA_PREFIX_OPCODE;
6056 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
6057 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
6058 *p++ = i.prefix[SEG_PREFIX];
6059 if (i.prefix[REX_PREFIX] != 0)
6060 *p++ = i.prefix[REX_PREFIX];
6061 *p = i.tm.base_opcode;
/* Pick the relax subtype: unconditional jmp vs. conditional jump,
   with a separate state (COND_JUMP86) when i386+ relaxation forms
   are not available.  */
6063 if ((unsigned char) *p == JUMP_PC_RELATIVE)
6064 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
6065 else if (cpu_arch_flags.bitfield.cpui386)
6066 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
6068 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
6071 sym = i.op[0].disps->X_add_symbol;
6072 off = i.op[0].disps->X_add_number;
6074 if (i.op[0].disps->X_op != O_constant
6075 && i.op[0].disps->X_op != O_symbol)
6077 /* Handle complex expressions. */
6078 sym = make_expr_symbol (i.op[0].disps);
6082 /* 1 possible extra opcode + 4 byte displacement go in var part.
6083 Pass reloc in fr_var. */
6084 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
/* Interior of output_jump (the function header and some statements are
   elided from this listing -- the embedded original line numbers jump).
   Emits a non-relaxable jump (jumpbyte like loop/jecxz, or a fixed-size
   jump), writing prefixes, the opcode bytes, and a pcrel fixup for the
   displacement.  */
6094 if (i.tm.opcode_modifier.jumpbyte)
6096 /* This is a loop or jecxz type instruction. */
6098 if (i.prefix[ADDR_PREFIX] != 0)
6100 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
6103 /* Pentium4 branch hints. */
6104 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6105 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6107 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
6116 if (flag_code == CODE_16BIT)
6119 if (i.prefix[DATA_PREFIX] != 0)
6121 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6131 if (i.prefix[REX_PREFIX] != 0)
6133 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6137 if (i.prefixes != 0 && !intel_syntax)
6138 as_warn (_("skipping prefixes on this instruction"));
/* Reserve room for the opcode (1 or 2 bytes) plus the displacement
   of SIZE bytes, then write the opcode high byte first for 2-byte
   opcodes.  */
6140 p = frag_more (i.tm.opcode_length + size);
6141 switch (i.tm.opcode_length)
6144 *p++ = i.tm.base_opcode >> 8;
6146 *p++ = i.tm.base_opcode;
/* The displacement gets a pcrel fixup; reloc() maps SIZE to the
   proper BFD relocation code.  */
6152 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6153 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6155 /* All jumps handled here are signed, but don't use a signed limit
6156 check for 32 and 16 bit jumps as we want to allow wrap around at
6157 4G and 64k respectively. */
6159 fixP->fx_signed = 1;
/* output_interseg_jump: emit a far (ljmp/lcall style) transfer with an
   explicit segment:offset operand pair -- opcode, then SIZE bytes of
   offset (operand 1), then 2 bytes of segment selector (operand 0).
   NOTE(review): lines are elided in this listing (see embedded original
   line numbers); locals and some branches are not visible here.  */
6163 output_interseg_jump (void)
6171 if (flag_code == CODE_16BIT)
6175 if (i.prefix[DATA_PREFIX] != 0)
6181 if (i.prefix[REX_PREFIX] != 0)
6191 if (i.prefixes != 0 && !intel_syntax)
6192 as_warn (_("skipping prefixes on this instruction"))
/* Interior of output_insn (the signature line is elided from this
   listing).  Dispatches jumps to the output_* helpers above, otherwise
   emits a normal instruction: implicit opcode-prefix extraction, the
   explicit prefix bytes (or VEX), the opcode, ModRM/SIB, and finally
   displacement and immediate via output_disp/output_imm.  */
6229 fragS *insn_start_frag;
6230 offsetT insn_start_off;
6232 /* Tie dwarf2 debug info to the address at the start of the insn.
6233 We can't do this after the insn has been output as the current
6234 frag may have been closed off. eg. by frag_var. */
6235 dwarf2_emit_insn (0);
/* Remember where the instruction starts; output_disp/output_imm use
   this to compute GOTPC addend adjustments.  */
6237 insn_start_frag = frag_now;
6238 insn_start_off = frag_now_fix ();
6241 if (i.tm.opcode_modifier.jump)
6243 else if (i.tm.opcode_modifier.jumpbyte
6244 || i.tm.opcode_modifier.jumpdword)
6246 else if (i.tm.opcode_modifier.jumpintersegment)
6247 output_interseg_jump ();
6250 /* Output normal instructions here. */
6254 unsigned int prefix;
6256 /* Since the VEX prefix contains the implicit prefix, we don't
6257 need the explicit prefix. */
6258 if (!i.tm.opcode_modifier.vex)
6260 switch (i.tm.opcode_length)
/* High bytes of base_opcode encode a mandatory prefix (e.g. 66/F2/F3)
   that must be emitted as a real prefix byte.  */
6263 if (i.tm.base_opcode & 0xff000000)
6265 prefix = (i.tm.base_opcode >> 24) & 0xff;
6270 if ((i.tm.base_opcode & 0xff0000) != 0)
6272 prefix = (i.tm.base_opcode >> 16) & 0xff;
6273 if (i.tm.cpu_flags.bitfield.cpupadlock)
/* PadLock: avoid doubling an F3 prefix the user already
   supplied -- presumably why REPE is special-cased here;
   elided lines hide the full context (TODO confirm).  */
6276 if (prefix != REPE_PREFIX_OPCODE
6277 || (i.prefix[REP_PREFIX]
6278 != REPE_PREFIX_OPCODE))
6279 add_prefix (prefix);
6282 add_prefix (prefix);
6291 /* The prefix bytes. */
6292 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6294 FRAG_APPEND_1_CHAR (*q);
6298 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6303 /* REX byte is encoded in VEX prefix. */
6307 FRAG_APPEND_1_CHAR (*q);
6310 /* There should be no other prefixes for instructions
6315 /* Now the VEX prefix. */
6316 p = frag_more (i.vex.length);
6317 for (j = 0; j < i.vex.length; j++)
6318 p[j] = i.vex.bytes[j];
6321 /* Now the opcode; be careful about word order here! */
6322 if (i.tm.opcode_length == 1)
6324 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6328 switch (i.tm.opcode_length)
6332 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6342 /* Put out high byte first: can't use md_number_to_chars! */
6343 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6344 *p = i.tm.base_opcode & 0xff;
6347 /* Now the modrm byte and sib byte (if present). */
6348 if (i.tm.opcode_modifier.modrm)
6350 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6353 /* If i.rm.regmem == ESP (4)
6354 && i.rm.mode != (Register mode)
6356 ==> need second modrm byte. */
6357 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6359 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6360 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6362 | i.sib.scale << 6));
6365 if (i.disp_operands)
6366 output_disp (insn_start_frag, insn_start_off);
6369 output_imm (insn_start_frag, insn_start_off);
6375 pi ("" /*line*/, &i);
6377 #endif /* DEBUG386 */
6380 /* Return the size of the displacement operand N. */
/* Tests disp64/disp8/disp16 in turn; the return statements for each
   branch (and the default, presumably 4 bytes) are elided from this
   listing -- see the gap in the embedded line numbers.  */
6383 disp_size (unsigned int n)
6386 if (i.types[n].bitfield.disp64)
6388 else if (i.types[n].bitfield.disp8)
6390 else if (i.types[n].bitfield.disp16)
6395 /* Return the size of the immediate operand N. */
/* Tests imm64/imm8(+imm8s)/imm16 in turn; the return statements for
   each branch (and the default, presumably 4 bytes) are elided from
   this listing -- see the gap in the embedded line numbers.  */
6398 imm_size (unsigned int n)
6401 if (i.types[n].bitfield.imm64)
6403 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6405 else if (i.types[n].bitfield.imm16)
/* output_disp: emit the displacement bytes of the current instruction.
   Constant displacements are written directly; symbolic ones get a
   fixup, with special handling to turn _GLOBAL_OFFSET_TABLE_ references
   into GOTPC relocations and adjust their addend by the distance from
   the instruction start (INSN_START_FRAG/INSN_START_OFF).
   NOTE(review): many lines are elided in this listing (embedded line
   numbers jump); comments only cover what is visible.  */
6411 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6416 for (n = 0; n < i.operands; n++)
6418 if (operand_type_check (i.types[n], disp))
6420 if (i.op[n].disps->X_op == O_constant)
6422 int size = disp_size (n);
6425 val = offset_in_range (i.op[n].disps->X_add_number,
6427 p = frag_more (size);
6428 md_number_to_chars (p, val, size);
6432 enum bfd_reloc_code_real reloc_type;
6433 int size = disp_size (n);
6434 int sign = i.types[n].bitfield.disp32s;
6435 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6437 /* We can't have 8 bit displacement here. */
6438 gas_assert (!i.types[n].bitfield.disp8);
6440 /* The PC relative address is computed relative
6441 to the instruction boundary, so in case immediate
6442 fields follows, we need to adjust the value. */
6443 if (pcrel && i.imm_operands)
6448 for (n1 = 0; n1 < i.operands; n1++)
6449 if (operand_type_check (i.types[n1], imm))
6451 /* Only one immediate is allowed for PC
6452 relative address. */
6453 gas_assert (sz == 0);
6455 i.op[n].disps->X_add_number -= sz;
6457 /* We should find the immediate. */
6458 gas_assert (sz != 0);
6461 p = frag_more (size);
6462 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6464 && GOT_symbol == i.op[n].disps->X_add_symbol
6465 && (((reloc_type == BFD_RELOC_32
6466 || reloc_type == BFD_RELOC_X86_64_32S
6467 || (reloc_type == BFD_RELOC_64
6469 && (i.op[n].disps->X_op == O_symbol
6470 || (i.op[n].disps->X_op == O_add
6471 && ((symbol_get_value_expression
6472 (i.op[n].disps->X_op_symbol)->X_op)
6474 || reloc_type == BFD_RELOC_32_PCREL))
/* Compute how far P is past the instruction start, walking frags
   if the insn spilled into a new frag.  */
6478 if (insn_start_frag == frag_now)
6479 add = (p - frag_now->fr_literal) - insn_start_off;
6484 add = insn_start_frag->fr_fix - insn_start_off;
6485 for (fr = insn_start_frag->fr_next;
6486 fr && fr != frag_now; fr = fr->fr_next)
6488 add += p - frag_now->fr_literal;
6493 reloc_type = BFD_RELOC_386_GOTPC;
/* NOTE(review): `imms' is used here although this is a displacement;
   i.op[n] appears to be a union so imms/disps presumably alias --
   confirm against the union declaration earlier in the file.  */
6494 i.op[n].imms->X_add_number += add;
6496 else if (reloc_type == BFD_RELOC_64)
6497 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6499 /* Don't do the adjustment for x86-64, as there
6500 the pcrel addressing is relative to the _next_
6501 insn, and that is taken care of in other code. */
6502 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6504 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6505 i.op[n].disps, pcrel, reloc_type);
/* output_imm: emit the immediate bytes of the current instruction.
   Constant immediates are written directly; symbolic ones get a fixup,
   and `$_GLOBAL_OFFSET_TABLE_+[.-.Lnn]' expressions are converted to
   GOTPC relocations with the addend adjusted by the offset of the
   immediate field from the instruction start (see the long original
   comment below).
   NOTE(review): lines are elided in this listing (embedded numbers
   jump); locals and some branches are not visible.  */
6512 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6517 for (n = 0; n < i.operands; n++)
6519 if (operand_type_check (i.types[n], imm))
6521 if (i.op[n].imms->X_op == O_constant)
6523 int size = imm_size (n);
6526 val = offset_in_range (i.op[n].imms->X_add_number,
6528 p = frag_more (size);
6529 md_number_to_chars (p, val, size);
6533 /* Not absolute_section.
6534 Need a 32-bit fixup (don't support 8bit
6535 non-absolute imms). Try to support other
6537 enum bfd_reloc_code_real reloc_type;
6538 int size = imm_size (n);
/* A sign-extended 32-bit immediate in a 64-bit context must use a
   signed relocation.  */
6541 if (i.types[n].bitfield.imm32s
6542 && (i.suffix == QWORD_MNEM_SUFFIX
6543 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6548 p = frag_more (size);
6549 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6551 /* This is tough to explain. We end up with this one if we
6552 * have operands that look like
6553 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6554 * obtain the absolute address of the GOT, and it is strongly
6555 * preferable from a performance point of view to avoid using
6556 * a runtime relocation for this. The actual sequence of
6557 * instructions often look something like:
6562 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6564 * The call and pop essentially return the absolute address
6565 * of the label .L66 and store it in %ebx. The linker itself
6566 * will ultimately change the first operand of the addl so
6567 * that %ebx points to the GOT, but to keep things simple, the
6568 * .o file must have this operand set so that it generates not
6569 * the absolute address of .L66, but the absolute address of
6570 * itself. This allows the linker itself simply treat a GOTPC
6571 * relocation as asking for a pcrel offset to the GOT to be
6572 * added in, and the addend of the relocation is stored in the
6573 * operand field for the instruction itself.
6575 * Our job here is to fix the operand so that it would add
6576 * the correct offset so that %ebx would point to itself. The
6577 * thing that is tricky is that .-.L66 will point to the
6578 * beginning of the instruction, so we need to further modify
6579 * the operand so that it will point to itself. There are
6580 * other cases where you have something like:
6582 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6584 * and here no correction would be required. Internally in
6585 * the assembler we treat operands of this form as not being
6586 * pcrel since the '.' is explicitly mentioned, and I wonder
6587 * whether it would simplify matters to do it this way. Who
6588 * knows. In earlier versions of the PIC patches, the
6589 * pcrel_adjust field was used to store the correction, but
6590 * since the expression is not pcrel, I felt it would be
6591 * confusing to do it this way. */
6593 if ((reloc_type == BFD_RELOC_32
6594 || reloc_type == BFD_RELOC_X86_64_32S
6595 || reloc_type == BFD_RELOC_64)
6597 && GOT_symbol == i.op[n].imms->X_add_symbol
6598 && (i.op[n].imms->X_op == O_symbol
6599 || (i.op[n].imms->X_op == O_add
6600 && ((symbol_get_value_expression
6601 (i.op[n].imms->X_op_symbol)->X_op)
/* Distance of the immediate field from the instruction start,
   walking frags if the insn spilled into a new frag.  */
6606 if (insn_start_frag == frag_now)
6607 add = (p - frag_now->fr_literal) - insn_start_off;
6612 add = insn_start_frag->fr_fix - insn_start_off;
6613 for (fr = insn_start_frag->fr_next;
6614 fr && fr != frag_now; fr = fr->fr_next)
6616 add += p - frag_now->fr_literal;
6620 reloc_type = BFD_RELOC_386_GOTPC;
6622 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6624 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6625 i.op[n].imms->X_add_number += add;
6627 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6628 i.op[n].imms, 0, reloc_type);
6634 /* x86_cons_fix_new is called via the expression parsing code when a
6635 reloc is needed. We use this hook to get the correct .got reloc. */
/* File-static hand-off state: x86_cons stores the reloc found by
   lex_got into got_reloc; this hook consumes and resets it.  */
6636 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6637 static int cons_sign = -1;
/* Create a fixup of LEN bytes at FRAG+OFF for expression EXP, using
   (and clearing) the pending got_reloc.  O_secrel expressions are
   rewritten to a plain symbol with a BFD_RELOC_32_SECREL reloc
   (TE_PE path, per the #if context elided from this listing).  */
6640 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6643 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6645 got_reloc = NO_RELOC;
6648 if (exp->X_op == O_secrel)
6650 exp->X_op = O_symbol;
6651 r = BFD_RELOC_32_SECREL;
6655 fix_new_exp (frag, off, len, exp, 0, r);
6658 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
6659 purpose of the `.dc.a' internal pseudo-op. */
/* Returns bits_per_address/8 for the output arch; the x32 (x64_32)
   special case's return value is on a line elided from this listing --
   presumably 4 -- TODO confirm against the full source.  */
6662 x86_address_bytes (void)
6664 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
6666 return stdoutput->arch_info->bits_per_address / 8;
/* ELF variant of lex_got: recognise `@GOTOFF'-style relocation suffixes
   in an operand, select the 32- or 64-bit BFD reloc from the gotrel
   table, and return a malloc'd copy of the operand with the suffix
   replaced by a space so the caller can re-parse it.
   NOTE(review): this listing elides many lines (see gaps in the
   embedded line numbers) -- locals, the table terminator, and parts of
   the control flow are missing.  */
6669 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6671 # define lex_got(reloc, adjust, types) NULL
6673 /* Parse operands of the form
6674 <symbol>@GOTOFF+<nnn>
6675 and similar .plt or .got references.
6677 If we find one, set up the correct relocation in RELOC and copy the
6678 input string, minus the `@GOTOFF' into a malloc'd buffer for
6679 parsing by the calling routine. Return this buffer, and if ADJUST
6680 is non-null set it to the length of the string we removed from the
6681 input line. Otherwise return NULL. */
6683 lex_got (enum bfd_reloc_code_real *rel,
6685 i386_operand_type *types)
6687 /* Some of the relocations depend on the size of what field is to
6688 be relocated. But in our callers i386_immediate and i386_displacement
6689 we don't yet know the operand size (this will be set by insn
6690 matching). Hence we record the word32 relocation here,
6691 and adjust the reloc according to the real size in reloc(). */
6692 static const struct {
6695 const enum bfd_reloc_code_real rel[2];
6696 const i386_operand_type types64;
6698 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
6699 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32,
6701 OPERAND_TYPE_IMM32_64 },
6703 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6704 BFD_RELOC_X86_64_PLTOFF64 },
6705 OPERAND_TYPE_IMM64 },
6706 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6707 BFD_RELOC_X86_64_PLT32 },
6708 OPERAND_TYPE_IMM32_32S_DISP32 },
6709 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6710 BFD_RELOC_X86_64_GOTPLT64 },
6711 OPERAND_TYPE_IMM64_DISP64 },
6712 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6713 BFD_RELOC_X86_64_GOTOFF64 },
6714 OPERAND_TYPE_IMM64_DISP64 },
6715 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6716 BFD_RELOC_X86_64_GOTPCREL },
6717 OPERAND_TYPE_IMM32_32S_DISP32 },
6718 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6719 BFD_RELOC_X86_64_TLSGD },
6720 OPERAND_TYPE_IMM32_32S_DISP32 },
6721 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6722 _dummy_first_bfd_reloc_code_real },
6723 OPERAND_TYPE_NONE },
6724 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6725 BFD_RELOC_X86_64_TLSLD },
6726 OPERAND_TYPE_IMM32_32S_DISP32 },
6727 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6728 BFD_RELOC_X86_64_GOTTPOFF },
6729 OPERAND_TYPE_IMM32_32S_DISP32 },
6730 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6731 BFD_RELOC_X86_64_TPOFF32 },
6732 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6733 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6734 _dummy_first_bfd_reloc_code_real },
6735 OPERAND_TYPE_NONE },
6736 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6737 BFD_RELOC_X86_64_DTPOFF32 },
6738 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6739 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6740 _dummy_first_bfd_reloc_code_real },
6741 OPERAND_TYPE_NONE },
6742 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6743 _dummy_first_bfd_reloc_code_real },
6744 OPERAND_TYPE_NONE },
6745 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6746 BFD_RELOC_X86_64_GOT32 },
6747 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6748 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6749 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6750 OPERAND_TYPE_IMM32_32S_DISP32 },
6751 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6752 BFD_RELOC_X86_64_TLSDESC_CALL },
6753 OPERAND_TYPE_IMM32_32S_DISP32 },
6758 #if defined (OBJ_MAYBE_ELF)
/* Scan forward to the '@' that introduces a reloc suffix; stop (and
   return, per the elided line) at end-of-line or ','.  */
6763 for (cp = input_line_pointer; *cp != '@'; cp++)
6764 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6767 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6769 int len = gotrel[j].len;
6770 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6772 if (gotrel[j].rel[object_64bit] != 0)
6775 char *tmpbuf, *past_reloc;
6777 *rel = gotrel[j].rel[object_64bit];
6781 if (flag_code != CODE_64BIT)
6783 types->bitfield.imm32 = 1;
6784 types->bitfield.disp32 = 1;
6787 *types = gotrel[j].types64;
/* Entry 0 is @SIZE, which does not reference the GOT; every other
   suffix forces creation of the GOT symbol.  */
6790 if (j != 0 && GOT_symbol == NULL)
6791 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6793 /* The length of the first part of our input line. */
6794 first = cp - input_line_pointer;
6796 /* The second part goes from after the reloc token until
6797 (and including) an end_of_line char or comma. */
6798 past_reloc = cp + 1 + len;
6800 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6802 second = cp + 1 - past_reloc;
6804 /* Allocate and copy string. The trailing NUL shouldn't
6805 be necessary, but be safe. */
6806 tmpbuf = (char *) xmalloc (first + second + 2);
6807 memcpy (tmpbuf, input_line_pointer, first);
6808 if (second != 0 && *past_reloc != ' ')
6809 /* Replace the relocation token with ' ', so that
6810 errors like foo@GOTOFF1 will be detected. */
6811 tmpbuf[first++] = ' ';
6813 /* Increment length by 1 if the relocation token is
6818 memcpy (tmpbuf + first, past_reloc, second);
6819 tmpbuf[first + second] = '\0';
6823 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6824 gotrel[j].str, 1 << (5 + object_64bit));
6829 /* Might be a symbol version string. Don't as_bad here. */
/* PE/COFF variant of lex_got: same operand-rewriting scheme as the ELF
   version above, but the table recognises only `@SECREL32'.
   NOTE(review): this listing elides many lines (see gaps in the
   embedded line numbers).  */
6838 /* Parse operands of the form
6839 <symbol>@SECREL32+<nnn>
6841 If we find one, set up the correct relocation in RELOC and copy the
6842 input string, minus the `@SECREL32' into a malloc'd buffer for
6843 parsing by the calling routine. Return this buffer, and if ADJUST
6844 is non-null set it to the length of the string we removed from the
6845 input line. Otherwise return NULL.
6847 This function is copied from the ELF version above adjusted for PE targets. */
6850 lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
6851 int *adjust ATTRIBUTE_UNUSED,
6852 i386_operand_type *types ATTRIBUTE_UNUSED)
6858 const enum bfd_reloc_code_real rel[2];
6859 const i386_operand_type types64;
6863 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
6864 BFD_RELOC_32_SECREL },
6865 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
/* Scan to the '@' introducing the suffix, bailing at EOL or ','.  */
6871 for (cp = input_line_pointer; *cp != '@'; cp++)
6872 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6875 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6877 int len = gotrel[j].len;
6879 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6881 if (gotrel[j].rel[object_64bit] != 0)
6884 char *tmpbuf, *past_reloc;
6886 *rel = gotrel[j].rel[object_64bit];
6892 if (flag_code != CODE_64BIT)
6894 types->bitfield.imm32 = 1;
6895 types->bitfield.disp32 = 1;
6898 *types = gotrel[j].types64;
6901 /* The length of the first part of our input line. */
6902 first = cp - input_line_pointer;
6904 /* The second part goes from after the reloc token until
6905 (and including) an end_of_line char or comma. */
6906 past_reloc = cp + 1 + len;
6908 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6910 second = cp + 1 - past_reloc;
6912 /* Allocate and copy string. The trailing NUL shouldn't
6913 be necessary, but be safe. */
6914 tmpbuf = (char *) xmalloc (first + second + 2);
6915 memcpy (tmpbuf, input_line_pointer, first);
6916 if (second != 0 && *past_reloc != ' ')
6917 /* Replace the relocation token with ' ', so that
6918 errors like foo@SECLREL321 will be detected. */
6919 tmpbuf[first++] = ' ';
6920 memcpy (tmpbuf + first, past_reloc, second);
6921 tmpbuf[first + second] = '\0';
6925 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6926 gotrel[j].str, 1 << (5 + object_64bit));
6931 /* Might be a symbol version string. Don't as_bad here. */
/* Target hook for cons-style directives (.long etc.): parse EXP while
   handling @GOTOFF-style relocation suffixes via lex_got.  Excerpted
   listing — some original lines are elided.  */
6938 x86_cons (expressionS *exp, int size)
/* Temporarily flip intel_syntax so expression () parses AT&T-style;
   restored (flipped back) below.  */
6940 intel_syntax = -intel_syntax;
6943 if (size == 4 || (object_64bit && size == 8))
6945 /* Handle @GOTOFF and the like in an expression. */
6947 char *gotfree_input_line;
6950 save = input_line_pointer;
6951 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6952 if (gotfree_input_line)
6953 input_line_pointer = gotfree_input_line;
6957 if (gotfree_input_line)
6959 /* expression () has merrily parsed up to the end of line,
6960 or a comma - in the wrong buffer. Transfer how far
6961 input_line_pointer has moved to the right buffer. */
6962 input_line_pointer = (save
6963 + (input_line_pointer - gotfree_input_line)
6965 free (gotfree_input_line);
/* These X_op values mean the reloc-stripped text was not a usable
   symbolic expression — report it against the saved input.  */
6966 if (exp->X_op == O_constant
6967 || exp->X_op == O_absent
6968 || exp->X_op == O_illegal
6969 || exp->X_op == O_register
6970 || exp->X_op == O_big)
6972 char c = *input_line_pointer;
6973 *input_line_pointer = 0;
6974 as_bad (_("missing or invalid expression `%s'"), save);
6975 *input_line_pointer = c;
6982 intel_syntax = -intel_syntax;
6985 i386_intel_simplify (exp);
/* Directive handler for signed constants; body largely elided in this
   excerpt — only the 64-bit-mode check is visible.  */
6989 signed_cons (int size)
6991 if (flag_code == CODE_64BIT)
/* PE `.secrel32' directive: emit one or more comma-separated 4-byte
   section-relative expressions.  */
6999 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
/* Turn a plain symbol reference into a section-relative one.  */
7006 if (exp.X_op == O_symbol)
7007 exp.X_op = O_secrel;
7009 emit_expr (&exp, 4);
7011 while (*input_line_pointer++ == ',');
/* Loop overshot by one char; back up before checking end of line.  */
7013 input_line_pointer--;
7014 demand_empty_rest_of_line ();
/* Parse the immediate operand text at IMM_START into the current insn
   (i.op[this_operand]); relocation suffixes are stripped via lex_got.
   Delegates final classification to i386_finalize_immediate.  Excerpted
   listing — some original lines are elided.  */
7019 i386_immediate (char *imm_start)
7021 char *save_input_line_pointer;
7022 char *gotfree_input_line;
7025 i386_operand_type types;
7027 operand_type_set (&types, ~0);
7029 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
7031 as_bad (_("at most %d immediate operands are allowed"),
7032 MAX_IMMEDIATE_OPERANDS);
7036 exp = &im_expressions[i.imm_operands++];
7037 i.op[this_operand].imms = exp;
7039 if (is_space_char (*imm_start))
/* Redirect the scrubber's input pointer at the operand text.  */
7042 save_input_line_pointer = input_line_pointer;
7043 input_line_pointer = imm_start;
7045 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7046 if (gotfree_input_line)
7047 input_line_pointer = gotfree_input_line;
7049 exp_seg = expression (exp);
7052 if (*input_line_pointer)
7053 as_bad (_("junk `%s' after expression"), input_line_pointer);
7055 input_line_pointer = save_input_line_pointer;
7056 if (gotfree_input_line)
7058 free (gotfree_input_line);
/* With a reloc suffix present, a bare constant/register is invalid.  */
7060 if (exp->X_op == O_constant || exp->X_op == O_register)
7061 exp->X_op = O_illegal;
7064 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
/* Classify a parsed immediate expression EXP and set the matching
   immNN bits in i.types[this_operand].  Excerpted listing — some
   original lines are elided.  */
7068 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7069 i386_operand_type types, const char *imm_start)
7071 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
7074 as_bad (_("missing or invalid immediate expression `%s'"),
7078 else if (exp->X_op == O_constant)
7080 /* Size it properly later. */
7081 i.types[this_operand].bitfield.imm64 = 1;
7082 /* If not 64bit, sign extend val. */
7083 if (flag_code != CODE_64BIT
7084 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
7086 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
7088 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
/* a.out can only reference the standard sections.  */
7089 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
7090 && exp_seg != absolute_section
7091 && exp_seg != text_section
7092 && exp_seg != data_section
7093 && exp_seg != bss_section
7094 && exp_seg != undefined_section
7095 && !bfd_is_com_section (exp_seg))
7097 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7101 else if (!intel_syntax && exp->X_op == O_register)
7104 as_bad (_("illegal immediate register operand %s"), imm_start);
7109 /* This is an address. The size of the address will be
7110 determined later, depending on destination register,
7111 suffix, or the default for the section. */
7112 i.types[this_operand].bitfield.imm8 = 1;
7113 i.types[this_operand].bitfield.imm16 = 1;
7114 i.types[this_operand].bitfield.imm32 = 1;
7115 i.types[this_operand].bitfield.imm32s = 1;
7116 i.types[this_operand].bitfield.imm64 = 1;
7117 i.types[this_operand] = operand_type_and (i.types[this_operand],
/* Parse the scale factor of a base/index memory operand.  Accepts
   1, 2, 4 or 8 and stores log2 of it in i.log2_scale_factor.
   Excerpted listing — some original lines are elided.  */
7125 i386_scale (char *scale)
7128 char *save = input_line_pointer;
7130 input_line_pointer = scale;
7131 val = get_absolute_expression ();
7136 i.log2_scale_factor = 0;
7139 i.log2_scale_factor = 1;
7142 i.log2_scale_factor = 2;
7145 i.log2_scale_factor = 3;
7149 char sep = *input_line_pointer;
/* Temporarily NUL-terminate so the diagnostic shows only the bad
   scale text; restored right after.  */
7151 *input_line_pointer = '\0';
7152 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
7154 *input_line_pointer = sep;
7155 input_line_pointer = save;
/* A scale without an index register is meaningless; warn, drop it.  */
7159 if (i.log2_scale_factor != 0 && i.index_reg == 0)
7161 as_warn (_("scale factor of %d without an index register"),
7162 1 << i.log2_scale_factor);
7163 i.log2_scale_factor = 0;
7165 scale = input_line_pointer;
7166 input_line_pointer = save;
/* Parse the displacement text [DISP_START, DISP_END) of the current
   operand, choosing candidate Disp8/16/32/32s/64 widths from the code
   mode, prefixes and jump kind, then hand off to
   i386_finalize_displacement.  Excerpted listing — some original lines
   are elided.  */
7171 i386_displacement (char *disp_start, char *disp_end)
7175 char *save_input_line_pointer;
7176 char *gotfree_input_line;
7178 i386_operand_type bigdisp, types = anydisp;
7181 if (i.disp_operands == MAX_MEMORY_OPERANDS)
7183 as_bad (_("at most %d displacement operands are allowed"),
7184 MAX_MEMORY_OPERANDS);
7188 operand_type_set (&bigdisp, 0);
/* Non-branch (or absolute-jump) operands: width follows address size.  */
7189 if ((i.types[this_operand].bitfield.jumpabsolute)
7190 || (!current_templates->start->opcode_modifier.jump
7191 && !current_templates->start->opcode_modifier.jumpdword))
7193 bigdisp.bitfield.disp32 = 1;
7194 override = (i.prefix[ADDR_PREFIX] != 0);
7195 if (flag_code == CODE_64BIT)
7199 bigdisp.bitfield.disp32s = 1;
7200 bigdisp.bitfield.disp64 = 1;
7203 else if ((flag_code == CODE_16BIT) ^ override)
7205 bigdisp.bitfield.disp32 = 0;
7206 bigdisp.bitfield.disp16 = 1;
7211 /* For PC-relative branches, the width of the displacement
7212 is dependent upon data size, not address size. */
7213 override = (i.prefix[DATA_PREFIX] != 0);
7214 if (flag_code == CODE_64BIT)
7216 if (override || i.suffix == WORD_MNEM_SUFFIX)
7217 bigdisp.bitfield.disp16 = 1;
7220 bigdisp.bitfield.disp32 = 1;
7221 bigdisp.bitfield.disp32s = 1;
7227 override = (i.suffix == (flag_code != CODE_16BIT
7229 : LONG_MNEM_SUFFIX));
7230 bigdisp.bitfield.disp32 = 1;
7231 if ((flag_code == CODE_16BIT) ^ override)
7233 bigdisp.bitfield.disp32 = 0;
7234 bigdisp.bitfield.disp16 = 1;
7238 i.types[this_operand] = operand_type_or (i.types[this_operand],
7241 exp = &disp_expressions[i.disp_operands];
7242 i.op[this_operand].disps = exp;
7244 save_input_line_pointer = input_line_pointer;
7245 input_line_pointer = disp_start;
7246 END_STRING_AND_SAVE (disp_end);
7248 #ifndef GCC_ASM_O_HACK
7249 #define GCC_ASM_O_HACK 0
7252 END_STRING_AND_SAVE (disp_end + 1);
7253 if (i.types[this_operand].bitfield.baseIndex
7254 && displacement_string_end[-1] == '+')
7256 /* This hack is to avoid a warning when using the "o"
7257 constraint within gcc asm statements.
7260 #define _set_tssldt_desc(n,addr,limit,type) \
7261 __asm__ __volatile__ ( \
7263 "movw %w1,2+%0\n\t" \
7265 "movb %b1,4+%0\n\t" \
7266 "movb %4,5+%0\n\t" \
7267 "movb $0,6+%0\n\t" \
7268 "movb %h1,7+%0\n\t" \
7270 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
7272 This works great except that the output assembler ends
7273 up looking a bit weird if it turns out that there is
7274 no offset. You end up producing code that looks like:
7287 So here we provide the missing zero. */
7289 *displacement_string_end = '0';
7292 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7293 if (gotfree_input_line)
7294 input_line_pointer = gotfree_input_line;
7296 exp_seg = expression (exp);
7299 if (*input_line_pointer)
7300 as_bad (_("junk `%s' after expression"), input_line_pointer);
7302 RESTORE_END_STRING (disp_end + 1);
7304 input_line_pointer = save_input_line_pointer;
7305 if (gotfree_input_line)
7307 free (gotfree_input_line);
/* With a reloc suffix present, a bare constant/register is invalid.  */
7309 if (exp->X_op == O_constant || exp->X_op == O_register)
7310 exp->X_op = O_illegal;
7313 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
7315 RESTORE_END_STRING (disp_end);
/* Validate a parsed displacement EXP, rewrite GOT-relative relocs as
   subtractions against GOT_symbol, range-check 64-bit constants, and
   narrow i.types[this_operand] to the admissible dispNN bits.
   Excerpted listing — some original lines are elided.  */
7321 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7322 i386_operand_type types, const char *disp_start)
7324 i386_operand_type bigdisp;
7327 /* We do this to make sure that the section symbol is in
7328 the symbol table. We will ultimately change the relocation
7329 to be relative to the beginning of the section. */
7330 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7331 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7332 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64
7334 if (exp->X_op != O_symbol)
7337 if (S_IS_LOCAL (exp->X_add_symbol)
7338 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7339 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7340 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
/* Rewrite as <sym> - GOT_symbol with a plain pc-rel/abs reloc.  */
7341 exp->X_op = O_subtract;
7342 exp->X_op_symbol = GOT_symbol;
7343 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7344 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7345 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7346 i.reloc[this_operand] = BFD_RELOC_64;
7348 i.reloc[this_operand] = BFD_RELOC_32;
7351 else if (exp->X_op == O_absent
7352 || exp->X_op == O_illegal
7353 || exp->X_op == O_big)
7356 as_bad (_("missing or invalid displacement expression `%s'"),
7361 else if (flag_code == CODE_64BIT
7362 && !i.prefix[ADDR_PREFIX]
7363 && exp->X_op == O_constant)
7365 /* Since displacement is signed extended to 64bit, don't allow
7366 disp32 and turn off disp32s if they are out of range. */
7367 i.types[this_operand].bitfield.disp32 = 0;
7368 if (!fits_in_signed_long (exp->X_add_number))
7370 i.types[this_operand].bitfield.disp32s = 0;
7371 if (i.types[this_operand].bitfield.baseindex)
7373 as_bad (_("0x%lx out range of signed 32bit displacement"),
7374 (long) exp->X_add_number);
7380 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
/* a.out can only reference the standard sections.  */
7381 else if (exp->X_op != O_constant
7382 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7383 && exp_seg != absolute_section
7384 && exp_seg != text_section
7385 && exp_seg != data_section
7386 && exp_seg != bss_section
7387 && exp_seg != undefined_section
7388 && !bfd_is_com_section (exp_seg))
7390 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7395 /* Check if this is a displacement only operand. */
7396 bigdisp = i.types[this_operand];
7397 bigdisp.bitfield.disp8 = 0;
7398 bigdisp.bitfield.disp16 = 0;
7399 bigdisp.bitfield.disp32 = 0;
7400 bigdisp.bitfield.disp32s = 0;
7401 bigdisp.bitfield.disp64 = 0;
7402 if (operand_type_all_zero (&bigdisp))
7403 i.types[this_operand] = operand_type_and (i.types[this_operand],
7409 /* Make sure the memory operand we've been dealt is valid.
7410 Return 1 on success, 0 on a failure. */
/* Excerpted listing — some original lines are elided between the
   numbered lines below.  */
7413 i386_index_check (const char *operand_string)
7415 const char *kind = "base/index";
7416 enum flag_code addr_mode;
/* An explicit address-size prefix toggles between 16- and 32-bit
   addressing (relative to the current code mode).  */
7418 if (i.prefix[ADDR_PREFIX])
7419 addr_mode = flag_code == CODE_32BIT ? CODE_16BIT : CODE_32BIT;
7422 addr_mode = flag_code;
7424 #if INFER_ADDR_PREFIX
7425 if (i.mem_operands == 0)
7427 /* Infer address prefix from the first memory operand. */
7428 const reg_entry *addr_reg = i.base_reg;
7430 if (addr_reg == NULL)
7431 addr_reg = i.index_reg;
7435 if (addr_reg->reg_num == RegEip
7436 || addr_reg->reg_num == RegEiz
7437 || addr_reg->reg_type.bitfield.reg32)
7438 addr_mode = CODE_32BIT;
7439 else if (flag_code != CODE_64BIT
7440 && addr_reg->reg_type.bitfield.reg16)
7441 addr_mode = CODE_16BIT;
7443 if (addr_mode != flag_code)
7445 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7447 /* Change the size of any displacement too. At most one
7448 of Disp16 or Disp32 is set.
7449 FIXME. There doesn't seem to be any real need for
7450 separate Disp16 and Disp32 flags. The same goes for
7451 Imm16 and Imm32. Removing them would probably clean
7452 up the code quite a lot. */
7453 if (flag_code != CODE_64BIT
7454 && (i.types[this_operand].bitfield.disp16
7455 || i.types[this_operand].bitfield.disp32))
7456 i.types[this_operand]
7457 = operand_type_xor (i.types[this_operand], disp16_32);
7464 if (current_templates->start->opcode_modifier.isstring
7465 && !current_templates->start->opcode_modifier.immext
7466 && (current_templates->end[-1].opcode_modifier.isstring
7469 /* Memory operands of string insns are special in that they only allow
7470 a single register (rDI, rSI, or rBX) as their memory address. */
7471 const reg_entry *expected_reg;
7472 static const char *di_si[][2] =
7478 static const char *bx[] = { "ebx", "bx", "rbx" };
7480 kind = "string address";
7482 if (current_templates->start->opcode_modifier.w)
7484 i386_operand_type type = current_templates->end[-1].operand_types[0];
7486 if (!type.bitfield.baseindex
7487 || ((!i.mem_operands != !intel_syntax)
7488 && current_templates->end[-1].operand_types[1]
7489 .bitfield.baseindex))
7490 type = current_templates->end[-1].operand_types[1];
/* Pick rSI/rDI (es-segment variant selects the column).  */
7491 expected_reg = hash_find (reg_hash,
7492 di_si[addr_mode][type.bitfield.esseg]);
7496 expected_reg = hash_find (reg_hash, bx[addr_mode]);
7498 if (i.base_reg != expected_reg
7500 || operand_type_check (i.types[this_operand], disp))
7502 /* The second memory operand must have the same size as
7506 && !((addr_mode == CODE_64BIT
7507 && i.base_reg->reg_type.bitfield.reg64)
7508 || (addr_mode == CODE_32BIT
7509 ? i.base_reg->reg_type.bitfield.reg32
7510 : i.base_reg->reg_type.bitfield.reg16)))
7513 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7515 intel_syntax ? '[' : '(',
7517 expected_reg->reg_name,
7518 intel_syntax ? ']' : ')');
7525 as_bad (_("`%s' is not a valid %s expression"),
7526 operand_string, kind);
7531 if (addr_mode != CODE_16BIT)
7533 /* 32-bit/64-bit checks. */
7535 && (addr_mode == CODE_64BIT
7536 ? !i.base_reg->reg_type.bitfield.reg64
7537 : !i.base_reg->reg_type.bitfield.reg32)
7539 || (i.base_reg->reg_num
7540 != (addr_mode == CODE_64BIT ? RegRip : RegEip))))
7542 && !i.index_reg->reg_type.bitfield.regxmm
7543 && !i.index_reg->reg_type.bitfield.regymm
7544 && ((addr_mode == CODE_64BIT
7545 ? !(i.index_reg->reg_type.bitfield.reg64
7546 || i.index_reg->reg_num == RegRiz)
7547 : !(i.index_reg->reg_type.bitfield.reg32
7548 || i.index_reg->reg_num == RegEiz))
7549 || !i.index_reg->reg_type.bitfield.baseindex)))
7554 /* 16-bit checks. */
7556 && (!i.base_reg->reg_type.bitfield.reg16
7557 || !i.base_reg->reg_type.bitfield.baseindex)
7559 && (!i.index_reg->reg_type.bitfield.reg16
7560 || !i.index_reg->reg_type.bitfield.baseindex
7562 && i.base_reg->reg_num < 6
7563 && i.index_reg->reg_num >= 6
7564 && i.log2_scale_factor == 0))))
7571 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
/* Excerpted listing — some original lines are elided between the
   numbered lines below.  */
7575 i386_att_operand (char *operand_string)
7579 char *op_string = operand_string;
7581 if (is_space_char (*op_string))
7584 /* We check for an absolute prefix (differentiating,
7585 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
7586 if (*op_string == ABSOLUTE_PREFIX)
7589 if (is_space_char (*op_string))
7591 i.types[this_operand].bitfield.jumpabsolute = 1;
7594 /* Check if operand is a register. */
7595 if ((r = parse_register (op_string, &end_op)) != NULL)
7597 i386_operand_type temp;
7599 /* Check for a segment override by searching for ':' after a
7600 segment register. */
7602 if (is_space_char (*op_string))
7604 if (*op_string == ':'
7605 && (r->reg_type.bitfield.sreg2
7606 || r->reg_type.bitfield.sreg3))
7611 i.seg[i.mem_operands] = &es;
7614 i.seg[i.mem_operands] = &cs;
7617 i.seg[i.mem_operands] = &ss;
7620 i.seg[i.mem_operands] = &ds;
7623 i.seg[i.mem_operands] = &fs;
7626 i.seg[i.mem_operands] = &gs;
7630 /* Skip the ':' and whitespace. */
7632 if (is_space_char (*op_string))
7635 if (!is_digit_char (*op_string)
7636 && !is_identifier_char (*op_string)
7637 && *op_string != '('
7638 && *op_string != ABSOLUTE_PREFIX)
7640 as_bad (_("bad memory operand `%s'"), op_string);
7643 /* Handle case of %es:*foo. */
7644 if (*op_string == ABSOLUTE_PREFIX)
7647 if (is_space_char (*op_string))
7649 i.types[this_operand].bitfield.jumpabsolute = 1;
7651 goto do_memory_reference;
7655 as_bad (_("junk `%s' after register"), op_string);
/* Plain register operand: record it, clearing BaseIndex.  */
7659 temp.bitfield.baseindex = 0;
7660 i.types[this_operand] = operand_type_or (i.types[this_operand],
7662 i.types[this_operand].bitfield.unspecified = 0;
7663 i.op[this_operand].regs = r;
7666 else if (*op_string == REGISTER_PREFIX)
7668 as_bad (_("bad register name `%s'"), op_string);
7671 else if (*op_string == IMMEDIATE_PREFIX)
7674 if (i.types[this_operand].bitfield.jumpabsolute)
7676 as_bad (_("immediate operand illegal with absolute jump"));
7679 if (!i386_immediate (op_string))
7682 else if (is_digit_char (*op_string)
7683 || is_identifier_char (*op_string)
7684 || *op_string == '(')
7686 /* This is a memory reference of some sort. */
7689 /* Start and end of displacement string expression (if found). */
7690 char *displacement_string_start;
7691 char *displacement_string_end;
7693 do_memory_reference:
7694 if ((i.mem_operands == 1
7695 && !current_templates->start->opcode_modifier.isstring)
7696 || i.mem_operands == 2)
7698 as_bad (_("too many memory references for `%s'"),
7699 current_templates->start->name);
7703 /* Check for base index form. We detect the base index form by
7704 looking for an ')' at the end of the operand, searching
7705 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7707 base_string = op_string + strlen (op_string);
7710 if (is_space_char (*base_string))
7713 /* If we only have a displacement, set-up for it to be parsed later. */
7714 displacement_string_start = op_string;
7715 displacement_string_end = base_string + 1;
7717 if (*base_string == ')')
7720 unsigned int parens_balanced = 1;
7721 /* We've already checked that the number of left & right ()'s are
7722 equal, so this loop will not be infinite. */
7726 if (*base_string == ')')
7728 if (*base_string == '(')
7731 while (parens_balanced);
7733 temp_string = base_string;
7735 /* Skip past '(' and whitespace. */
7737 if (is_space_char (*base_string))
7740 if (*base_string == ','
7741 || ((i.base_reg = parse_register (base_string, &end_op))
7744 displacement_string_end = temp_string;
7746 i.types[this_operand].bitfield.baseindex = 1;
7750 base_string = end_op;
7751 if (is_space_char (*base_string))
7755 /* There may be an index reg or scale factor here. */
7756 if (*base_string == ',')
7759 if (is_space_char (*base_string))
7762 if ((i.index_reg = parse_register (base_string, &end_op))
7765 base_string = end_op;
7766 if (is_space_char (*base_string))
7768 if (*base_string == ',')
7771 if (is_space_char (*base_string))
7774 else if (*base_string != ')')
7776 as_bad (_("expecting `,' or `)' "
7777 "after index register in `%s'"),
7782 else if (*base_string == REGISTER_PREFIX)
7784 end_op = strchr (base_string, ',');
7787 as_bad (_("bad register name `%s'"), base_string);
7791 /* Check for scale factor. */
7792 if (*base_string != ')')
7794 char *end_scale = i386_scale (base_string);
7799 base_string = end_scale;
7800 if (is_space_char (*base_string))
7802 if (*base_string != ')')
7804 as_bad (_("expecting `)' "
7805 "after scale factor in `%s'"),
7810 else if (!i.index_reg)
7812 as_bad (_("expecting index register or scale factor "
7813 "after `,'; got '%c'"),
7818 else if (*base_string != ')')
7820 as_bad (_("expecting `,' or `)' "
7821 "after base register in `%s'"),
7826 else if (*base_string == REGISTER_PREFIX)
7828 end_op = strchr (base_string, ',');
7831 as_bad (_("bad register name `%s'"), base_string);
7836 /* If there's an expression beginning the operand, parse it,
7837 assuming displacement_string_start and
7838 displacement_string_end are meaningful. */
7839 if (displacement_string_start != displacement_string_end)
7841 if (!i386_displacement (displacement_string_start,
7842 displacement_string_end))
7846 /* Special case for (%dx) while doing input/output op. */
/* NOTE(review): "®16_inoutportreg" below looks like a mis-encoded
   "&reg16_inoutportreg" (extraction garbling) — verify against the
   upstream tc-i386.c before trusting this line.  */
7848 && operand_type_equal (&i.base_reg->reg_type,
7849 ®16_inoutportreg)
7851 && i.log2_scale_factor == 0
7852 && i.seg[i.mem_operands] == 0
7853 && !operand_type_check (i.types[this_operand], disp)
7855 i.types[this_operand] = inoutportreg;
7859 if (i386_index_check (operand_string) == 0)
7861 i.types[this_operand].bitfield.mem = 1;
7866 /* It's not a memory operand; argh! */
7867 as_bad (_("invalid char %s beginning operand %d `%s'"),
7868 output_invalid (*op_string),
7873 return 1; /* Normal return. */
7876 /* Calculate the maximum variable size (i.e., excluding fr_fix)
7877 that an rs_machine_dependent frag may reach. */
7880 i386_frag_max_var (fragS *frag)
7882 /* The only relaxable frags are for jumps.
7883 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
7884 gas_assert (frag->fr_type == rs_machine_dependent);
7885 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
7888 /* md_estimate_size_before_relax()
7890 Called just before relax() for rs_machine_dependent frags. The x86
7891 assembler uses these frags to handle variable size jump
7894 Any symbol that is now undefined will not become defined.
7895 Return the correct fr_subtype in the frag.
7896 Return the initial "guess for variable size of frag" to caller.
7897 The guess is actually the growth beyond the fixed part. Whatever
7898 we do to grow the fixed or variable part contributes to our
/* Excerpted listing — some original lines are elided between the
   numbered lines below.  */
7902 md_estimate_size_before_relax (fragS *fragP, segT segment)
7904 /* We've already got fragP->fr_subtype right; all we have to do is
7905 check for un-relaxable symbols. On an ELF system, we can't relax
7906 an externally visible symbol, because it may be overridden by a
7908 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7909 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7911 && (S_IS_EXTERNAL (fragP->fr_symbol)
7912 || S_IS_WEAK (fragP->fr_symbol)
7913 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7914 & BSF_GNU_INDIRECT_FUNCTION)))
7916 #if defined (OBJ_COFF) && defined (TE_PE)
7917 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7918 && S_IS_WEAK (fragP->fr_symbol))
7922 /* Symbol is undefined in this segment, or we need to keep a
7923 reloc so that weak symbols can be overridden. */
7924 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7925 enum bfd_reloc_code_real reloc_type;
7926 unsigned char *opcode;
/* fr_var may carry an explicit reloc chosen at assembly time.  */
7929 if (fragP->fr_var != NO_RELOC)
7930 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7932 reloc_type = BFD_RELOC_16_PCREL;
7934 reloc_type = BFD_RELOC_32_PCREL;
7936 old_fr_fix = fragP->fr_fix;
7937 opcode = (unsigned char *) fragP->fr_opcode;
7939 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7942 /* Make jmp (0xeb) a (d)word displacement jump. */
7944 fragP->fr_fix += size;
7945 fix_new (fragP, old_fr_fix, size,
7947 fragP->fr_offset, 1,
7953 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7955 /* Negate the condition, and branch past an
7956 unconditional jump. */
7959 /* Insert an unconditional jump. */
7961 /* We added two extra opcode bytes, and have a two byte
7963 fragP->fr_fix += 2 + 2;
7964 fix_new (fragP, old_fr_fix + 2, 2,
7966 fragP->fr_offset, 1,
7973 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7978 fixP = fix_new (fragP, old_fr_fix, 1,
7980 fragP->fr_offset, 1,
7982 fixP->fx_signed = 1;
7986 /* This changes the byte-displacement jump 0x7N
7987 to the (d)word-displacement jump 0x0f,0x8N. */
7988 opcode[1] = opcode[0] + 0x10;
7989 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7990 /* We've added an opcode byte. */
7991 fragP->fr_fix += 1 + size;
7992 fix_new (fragP, old_fr_fix + 1, size,
7994 fragP->fr_offset, 1,
7999 BAD_CASE (fragP->fr_subtype);
/* Growth is whatever we added to the fixed part.  */
8003 return fragP->fr_fix - old_fr_fix;
8006 /* Guess size depending on current relax state. Initially the relax
8007 state will correspond to a short jump and we return 1, because
8008 the variable part of the frag (the branch offset) is one byte
8009 long. However, we can relax a section more than once and in that
8010 case we must either set fr_subtype back to the unrelaxed state,
8011 or return the value for the appropriate branch. */
8012 return md_relax_table[fragP->fr_subtype].rlx_length;
8015 /* Called after relax() is finished.
8017 In: Address of frag.
8018 fr_type == rs_machine_dependent.
8019 fr_subtype is what the address relaxed to.
8021 Out: Any fixSs and constants are set up.
8022 Caller will turn frag into a ".space 0". */
/* Excerpted listing — some original lines are elided between the
   numbered lines below.  */
8025 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
8028 unsigned char *opcode;
8029 unsigned char *where_to_put_displacement = NULL;
8030 offsetT target_address;
8031 offsetT opcode_address;
8032 unsigned int extension = 0;
8033 offsetT displacement_from_opcode_start;
8035 opcode = (unsigned char *) fragP->fr_opcode;
8037 /* Address we want to reach in file space. */
8038 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
8040 /* Address opcode resides at in file space. */
8041 opcode_address = fragP->fr_address + fragP->fr_fix;
8043 /* Displacement from opcode start to fill into instruction. */
8044 displacement_from_opcode_start = target_address - opcode_address;
8046 if ((fragP->fr_subtype & BIG) == 0)
8048 /* Don't have to change opcode. */
8049 extension = 1; /* 1 opcode + 1 displacement */
8050 where_to_put_displacement = &opcode[1];
8054 if (no_cond_jump_promotion
8055 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
8056 as_warn_where (fragP->fr_file, fragP->fr_line,
8057 _("long jump required"));
8059 switch (fragP->fr_subtype)
8061 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
8062 extension = 4; /* 1 opcode + 4 displacement */
8064 where_to_put_displacement = &opcode[1];
8067 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
8068 extension = 2; /* 1 opcode + 2 displacement */
8070 where_to_put_displacement = &opcode[1];
8073 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
8074 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
8075 extension = 5; /* 2 opcode + 4 displacement */
/* Promote Jcc 0x7N to the two-byte form 0x0f,0x8N.  */
8076 opcode[1] = opcode[0] + 0x10;
8077 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8078 where_to_put_displacement = &opcode[2];
8081 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
8082 extension = 3; /* 2 opcode + 2 displacement */
8083 opcode[1] = opcode[0] + 0x10;
8084 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8085 where_to_put_displacement = &opcode[2];
8088 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
8093 where_to_put_displacement = &opcode[3];
8097 BAD_CASE (fragP->fr_subtype);
8102 /* If size if less then four we are sure that the operand fits,
8103 but if it's 4, then it could be that the displacement is larger
8105 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
8107 && ((addressT) (displacement_from_opcode_start - extension
8108 + ((addressT) 1 << 31))
8109 > (((addressT) 2 << 31) - 1)))
8111 as_bad_where (fragP->fr_file, fragP->fr_line,
8112 _("jump target out of range"));
8113 /* Make us emit 0. */
8114 displacement_from_opcode_start = extension;
8116 /* Now put displacement after opcode. */
8117 md_number_to_chars ((char *) where_to_put_displacement,
8118 (valueT) (displacement_from_opcode_start - extension),
8119 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
8120 fragP->fr_fix += extension;
8123 /* Apply a fixup (fixP) to segment data, once it has been determined
8124 by our caller that we have all the info we need to fix it up.
8126 Parameter valP is the pointer to the value of the bits.
8128 On the 386, immediates, displacements, and data pointers are all in
8129 the same (little-endian) format, so we don't need to care about which
/* Excerpted listing — some original lines are elided between the
   numbered lines below.  */
8133 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
8135 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
8136 valueT value = *valP;
8138 #if !defined (TE_Mach)
/* Convert plain absolute reloc types to their PC-relative forms when
   the fixup is PC-relative.  */
8141 switch (fixP->fx_r_type)
8147 fixP->fx_r_type = BFD_RELOC_64_PCREL;
8150 case BFD_RELOC_X86_64_32S:
8151 fixP->fx_r_type = BFD_RELOC_32_PCREL;
8154 fixP->fx_r_type = BFD_RELOC_16_PCREL;
8157 fixP->fx_r_type = BFD_RELOC_8_PCREL;
8162 if (fixP->fx_addsy != NULL
8163 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
8164 || fixP->fx_r_type == BFD_RELOC_64_PCREL
8165 || fixP->fx_r_type == BFD_RELOC_16_PCREL
8166 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
8167 && !use_rela_relocations)
8169 /* This is a hack. There should be a better way to handle this.
8170 This covers for the fact that bfd_install_relocation will
8171 subtract the current location (for partial_inplace, PC relative
8172 relocations); see more below. */
8176 || OUTPUT_FLAVOR == bfd_target_coff_flavour
8179 value += fixP->fx_where + fixP->fx_frag->fr_address;
8181 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8184 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
8187 || (symbol_section_p (fixP->fx_addsy)
8188 && sym_seg != absolute_section)
8189 && !generic_force_reloc (fixP))
8191 /* Yes, we add the values in twice. This is because
8192 bfd_install_relocation subtracts them out again. I think
8193 bfd_install_relocation is broken, but I don't dare change
8195 value += fixP->fx_where + fixP->fx_frag->fr_address;
8199 #if defined (OBJ_COFF) && defined (TE_PE)
8200 /* For some reason, the PE format does not store a
8201 section address offset for a PC relative symbol. */
8202 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
8203 || S_IS_WEAK (fixP->fx_addsy))
8204 value += md_pcrel_from (fixP);
8207 #if defined (OBJ_COFF) && defined (TE_PE)
8208 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8210 value -= S_GET_VALUE (fixP->fx_addsy);
8214 /* Fix a few things - the dynamic linker expects certain values here,
8215 and we must not disappoint it. */
8216 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8217 if (IS_ELF && fixP->fx_addsy)
8218 switch (fixP->fx_r_type)
8220 case BFD_RELOC_386_PLT32:
8221 case BFD_RELOC_X86_64_PLT32:
8222 /* Make the jump instruction point to the address of the operand. At
8223 runtime we merely add the offset to the actual PLT entry. */
8227 case BFD_RELOC_386_TLS_GD:
8228 case BFD_RELOC_386_TLS_LDM:
8229 case BFD_RELOC_386_TLS_IE_32:
8230 case BFD_RELOC_386_TLS_IE:
8231 case BFD_RELOC_386_TLS_GOTIE:
8232 case BFD_RELOC_386_TLS_GOTDESC:
8233 case BFD_RELOC_X86_64_TLSGD:
8234 case BFD_RELOC_X86_64_TLSLD:
8235 case BFD_RELOC_X86_64_GOTTPOFF:
8236 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8237 value = 0; /* Fully resolved at runtime. No addend. */
8239 case BFD_RELOC_386_TLS_LE:
8240 case BFD_RELOC_386_TLS_LDO_32:
8241 case BFD_RELOC_386_TLS_LE_32:
8242 case BFD_RELOC_X86_64_DTPOFF32:
8243 case BFD_RELOC_X86_64_DTPOFF64:
8244 case BFD_RELOC_X86_64_TPOFF32:
8245 case BFD_RELOC_X86_64_TPOFF64:
8246 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8249 case BFD_RELOC_386_TLS_DESC_CALL:
8250 case BFD_RELOC_X86_64_TLSDESC_CALL:
8251 value = 0; /* Fully resolved at runtime. No addend. */
8252 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8256 case BFD_RELOC_386_GOT32:
8257 case BFD_RELOC_X86_64_GOT32:
8258 value = 0; /* Fully resolved at runtime. No addend. */
8261 case BFD_RELOC_VTABLE_INHERIT:
8262 case BFD_RELOC_VTABLE_ENTRY:
8269 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
8271 #endif /* !defined (TE_Mach) */
8273 /* Are we finished with this relocation now? */
8274 if (fixP->fx_addsy == NULL)
8276 #if defined (OBJ_COFF) && defined (TE_PE)
8277 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8280 /* Remember value for tc_gen_reloc. */
8281 fixP->fx_addnumber = value;
8282 /* Clear out the frag for now. */
8286 else if (use_rela_relocations)
8288 fixP->fx_no_overflow = 1;
8289 /* Remember value for tc_gen_reloc. */
8290 fixP->fx_addnumber = value;
/* Finally patch the bytes in the frag, little-endian.  */
8294 md_number_to_chars (p, value, fixP->fx_size);
8298 md_atof (int type, char *litP, int *sizeP)
8300 /* This outputs the LITTLENUMs in REVERSE order;
8301 in accord with the bigendian 386. */
8302 return ieee_md_atof (type, litP, sizeP, FALSE);
/* Scratch buffer for output_invalid: large enough for "(0x" + two hex
   digits of an unsigned char + ")" + NUL (plus slack for the quoted
   form).  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Render character C for use in a diagnostic: printable characters
   are quoted, everything else is shown as a hex escape.  Returns a
   pointer to a static buffer, so the result is only valid until the
   next call (not reentrant).  */

static char *
output_invalid (int c)
{
  if (ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "'%c'", c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "(0x%x)", (unsigned char) c);
  return output_invalid_buf;
}
8319 /* REG_STRING starts *before* REGISTER_PREFIX. */
/* Parse the register name starting at REG_STRING (which points
   *before* any REGISTER_PREFIX) and return its reg_entry, or NULL if
   it is not a register valid under the current CPU-arch/flag_code
   settings.  On success *END_OP is set just past the consumed text.
   NOTE(review): several brace/assignment lines are elided in this
   excerpt; read against the full file before relying on exact
   control flow.  */
8321 static const reg_entry *
8322 parse_real_register (char *reg_string, char **end_op)
8324 char *s = reg_string;
8326 char reg_name_given[MAX_REG_NAME_SIZE + 1];
8329 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8330 if (*s == REGISTER_PREFIX)
8333 if (is_space_char (*s))
/* Copy the name through the register_chars[] translation table,
   bailing out if it exceeds the fixed-size buffer.  */
8337 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
8339 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
8340 return (const reg_entry *) NULL;
8344 /* For naked regs, make sure that we are not dealing with an identifier.
8345 This prevents confusing an identifier like `eax_var' with register
8347 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
8348 return (const reg_entry *) NULL;
/* Look the canonicalized name up in the register hash table.  */
8352 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
8354 /* Handle floating point regs, allowing spaces in the (i) part. */
8355 if (r == i386_regtab /* %st is first entry of table */)
8357 if (is_space_char (*s))
8362 if (is_space_char (*s))
8364 if (*s >= '0' && *s <= '7')
8368 if (is_space_char (*s))
/* "%st(0)" is canonicalized to the st(0) table entry.  */
8373 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
8378 /* We have "%st(" then garbage. */
8379 return (const reg_entry *) NULL;
8383 if (r == NULL || allow_pseudo_reg)
/* Reject registers that the selected CPU cannot have: an all-zero
   operand type marks a placeholder entry.  */
8386 if (operand_type_all_zero (&r->reg_type))
8387 return (const reg_entry *) NULL;
8389 if ((r->reg_type.bitfield.reg32
8390 || r->reg_type.bitfield.sreg3
8391 || r->reg_type.bitfield.control
8392 || r->reg_type.bitfield.debug
8393 || r->reg_type.bitfield.test)
8394 && !cpu_arch_flags.bitfield.cpui386)
8395 return (const reg_entry *) NULL;
8397 if (r->reg_type.bitfield.floatreg
8398 && !cpu_arch_flags.bitfield.cpu8087
8399 && !cpu_arch_flags.bitfield.cpu287
8400 && !cpu_arch_flags.bitfield.cpu387)
8401 return (const reg_entry *) NULL;
8403 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8404 return (const reg_entry *) NULL;
8406 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8407 return (const reg_entry *) NULL;
8409 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8410 return (const reg_entry *) NULL;
8412 /* Don't allow fake index register unless allow_index_reg isn't 0. */
8413 if (!allow_index_reg
8414 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8415 return (const reg_entry *) NULL;
/* 64-bit-only registers (REX-extended or 64-bit GPRs) are rejected
   outside 64-bit code, except control regs on CPUs with long mode.  */
8417 if (((r->reg_flags & (RegRex64 | RegRex))
8418 || r->reg_type.bitfield.reg64)
8419 && (!cpu_arch_flags.bitfield.cpulm
8420 || !operand_type_equal (&r->reg_type, &control))
8421 && flag_code != CODE_64BIT)
8422 return (const reg_entry *) NULL;
/* The pseudo segment register "flat" is Intel-syntax only.  */
8424 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8425 return (const reg_entry *) NULL;
8430 /* REG_STRING starts *before* REGISTER_PREFIX. */
/* Like parse_real_register, but additionally accepts a symbol that
   was .equ'd to a register (its value expression lives in
   reg_section).  REG_STRING starts *before* REGISTER_PREFIX; *END_OP
   is set past the consumed text on success.  NOTE(review): some
   brace/else lines are elided in this excerpt.  */
8432 static const reg_entry *
8433 parse_register (char *reg_string, char **end_op)
8437 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8438 r = parse_real_register (reg_string, end_op)
8443 char *save = input_line_pointer;
/* Temporarily point the scrubber at REG_STRING so get_symbol_end can
   delimit the candidate symbol name.  */
8447 input_line_pointer = reg_string;
8448 c = get_symbol_end ();
8449 symbolP = symbol_find (reg_string);
8450 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8452 const expressionS *e = symbol_get_value_expression (symbolP);
8454 know (e->X_op == O_register);
8455 know (e->X_add_number >= 0
8456 && (valueT) e->X_add_number < i386_regtab_size);
/* The symbol's value is an index into i386_regtab.  */
8457 r = i386_regtab + e->X_add_number;
8458 *end_op = input_line_pointer;
/* Restore the character clobbered by get_symbol_end and the saved
   input pointer.  */
8460 *input_line_pointer = c;
8461 input_line_pointer = save;
/* md hook: try to parse NAME as a register (or, under Intel syntax,
   an Intel-specific name).  On success fills in E as an O_register
   expression and stores the terminating character in *NEXTCHARP.
   Returns nonzero if NAME was handled.  NOTE(review): return type
   and brace lines are elided in this excerpt.  */
8467 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8470 char *end = input_line_pointer;
8473 r = parse_register (name, &input_line_pointer);
/* Only accept the register if parsing consumed text at or past END.  */
8474 if (r && end <= input_line_pointer)
8476 *nextcharP = *input_line_pointer;
8477 *input_line_pointer = 0;
8478 e->X_op = O_register;
/* Encode the register as its index into i386_regtab.  */
8479 e->X_add_number = r - i386_regtab;
8482 input_line_pointer = end;
/* Fall back to the Intel-syntax name parser when applicable.  */
8484 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
/* md hook called for operands the generic expression parser cannot
   handle: a REGISTER_PREFIX'd register, or (Intel syntax) a
   bracketed sub-expression.  Fills in E accordingly.  NOTE(review):
   case labels/braces for the '[' branch are partly elided here.  */
8488 md_operand (expressionS *e)
8493 switch (*input_line_pointer)
8495 case REGISTER_PREFIX:
8496 r = parse_real_register (input_line_pointer, &end);
8499 e->X_op = O_register;
8500 e->X_add_number = r - i386_regtab;
8501 input_line_pointer = end;
/* '[' handling: only reachable under Intel syntax.  */
8506 gas_assert (intel_syntax);
8507 end = input_line_pointer++;
8509 if (*input_line_pointer == ']')
8511 ++input_line_pointer;
/* Wrap the parsed bracketed expression in an expression symbol.  */
8512 e->X_op_symbol = make_expr_symbol (e);
8513 e->X_add_symbol = NULL;
8514 e->X_add_number = 0;
/* Unbalanced bracket: rewind to the '['.  */
8520 input_line_pointer = end;
/* Command-line option tables.  Short options differ for ELF targets,
   which accept the SVR4-compatibility letters k, V, Q:, s.  */
8527 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8528 const char *md_shortopts = "kVQ:sqn";
8530 const char *md_shortopts = "qn";
/* Numeric codes for the long options below, offset from the
   target-independent OPTION_MD_BASE.  */
8533 #define OPTION_32 (OPTION_MD_BASE + 0)
8534 #define OPTION_64 (OPTION_MD_BASE + 1)
8535 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8536 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8537 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8538 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8539 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8540 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8541 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8542 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8543 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8544 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8545 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
8546 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
8547 #define OPTION_X32 (OPTION_MD_BASE + 14)
/* Long-option table consumed by getopt_long in the driver; --64 and
   --x32 are only registered for targets that can emit them.  */
8549 struct option md_longopts[] =
8551 {"32", no_argument, NULL, OPTION_32},
8552 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8553 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8554 {"64", no_argument, NULL, OPTION_64},
8556 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8557 {"x32", no_argument, NULL, OPTION_X32},
8559 {"divide", no_argument, NULL, OPTION_DIVIDE},
8560 {"march", required_argument, NULL, OPTION_MARCH},
8561 {"mtune", required_argument, NULL, OPTION_MTUNE},
8562 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8563 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8564 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8565 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8566 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8567 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8568 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8569 {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
8570 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8571 {NULL, no_argument, NULL, 0}
8573 size_t md_longopts_size = sizeof (md_longopts);
/* Handle one command-line option C with argument ARG.  Returns
   nonzero if the option was recognized.  NOTE(review): most `case'
   labels, `break's and braces are elided in this excerpt; the
   comments below mark which option each fragment belongs to.  */
8576 md_parse_option (int c, char *arg)
/* -n: disable code-alignment optimization (case label elided).  */
8584 optimize_align_code = 0;
8591 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8592 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8593 should be emitted or not. FIXME: Not implemented. */
8597 /* -V: SVR4 argument to print version ID. */
8599 print_version_id ();
8602 /* -k: Ignore for FreeBSD compatibility. */
8607 /* -s: On i386 Solaris, this tells the native assembler to use
8608 .stab instead of .stab.excl. We always use .stab anyhow. */
/* --64: only honoured when a 64-bit-capable BFD target was built in;
   scan the target list for one before committing.  */
8611 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8612 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8615 const char **list, **l;
8617 list = bfd_target_list ();
8618 for (l = list; *l != NULL; l++)
8619 if (CONST_STRNEQ (*l, "elf64-x86-64")
8620 || strcmp (*l, "coff-x86-64") == 0
8621 || strcmp (*l, "pe-x86-64") == 0
8622 || strcmp (*l, "pei-x86-64") == 0
8623 || strcmp (*l, "mach-o-x86-64") == 0)
8625 default_arch = "x86_64";
8629 as_fatal (_("no compiled in support for x86_64"));
/* --x32: ELF-only ILP32 x86-64 ABI.  */
8635 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8639 const char **list, **l;
8641 list = bfd_target_list ();
8642 for (l = list; *l != NULL; l++)
8643 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8645 default_arch = "x86_64:32";
8649 as_fatal (_("no compiled in support for 32bit x86_64"));
8653 as_fatal (_("32bit x86_64 is only supported for ELF"));
/* --32: plain ia32 code.  */
8658 default_arch = "i386";
/* --divide: on SVR4-comment targets, rebuild the comment-character
   set without '/' so it can be used as the division operator.  */
8662 #ifdef SVR4_COMMENT_CHARS
8667 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8669 for (s = i386_comment_chars; *s != '\0'; s++)
8673 i386_comment_chars = n;
/* -march=CPU[,+EXT...]: walk the '+'-separated list, matching each
   token against cpu_arch[] as either a processor or an extension.  */
8679 arch = xstrdup (arg);
8683 as_fatal (_("invalid -march= option: `%s'"), arg);
8684 next = strchr (arch, '+');
8687 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8689 if (strcmp (arch, cpu_arch [j].name) == 0)
8692 if (! cpu_arch[j].flags.bitfield.cpui386)
8695 cpu_arch_name = cpu_arch[j].name;
8696 cpu_sub_arch_name = NULL;
8697 cpu_arch_flags = cpu_arch[j].flags;
8698 cpu_arch_isa = cpu_arch[j].type;
8699 cpu_arch_isa_flags = cpu_arch[j].flags;
8700 if (!cpu_arch_tune_set)
8702 cpu_arch_tune = cpu_arch_isa;
8703 cpu_arch_tune_flags = cpu_arch_isa_flags;
/* Extension entries are stored with a leading '.' in cpu_arch[].  */
8707 else if (*cpu_arch [j].name == '.'
8708 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8710 /* ISA extension. */
8711 i386_cpu_flags flags;
8713 if (!cpu_arch[j].negated)
8714 flags = cpu_flags_or (cpu_arch_flags,
8717 flags = cpu_flags_and_not (cpu_arch_flags,
8719 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8721 if (cpu_sub_arch_name)
8723 char *name = cpu_sub_arch_name;
8724 cpu_sub_arch_name = concat (name,
8726 (const char *) NULL);
8730 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8731 cpu_arch_flags = flags;
8732 cpu_arch_isa_flags = flags;
8738 if (j >= ARRAY_SIZE (cpu_arch))
8739 as_fatal (_("invalid -march= option: `%s'"), arg);
8743 while (next != NULL );
/* -mtune=CPU: select scheduling target without changing the ISA.  */
8748 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8749 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8751 if (strcmp (arg, cpu_arch [j].name) == 0)
8753 cpu_arch_tune_set = 1;
8754 cpu_arch_tune = cpu_arch [j].type;
8755 cpu_arch_tune_flags = cpu_arch[j].flags;
8759 if (j >= ARRAY_SIZE (cpu_arch))
8760 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8763 case OPTION_MMNEMONIC:
8764 if (strcasecmp (arg, "att") == 0)
8766 else if (strcasecmp (arg, "intel") == 0)
8769 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
8772 case OPTION_MSYNTAX:
8773 if (strcasecmp (arg, "att") == 0)
8775 else if (strcasecmp (arg, "intel") == 0)
8778 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
8781 case OPTION_MINDEX_REG:
8782 allow_index_reg = 1;
8785 case OPTION_MNAKED_REG:
8786 allow_naked_reg = 1;
8789 case OPTION_MOLD_GCC:
8793 case OPTION_MSSE2AVX:
8797 case OPTION_MSSE_CHECK:
8798 if (strcasecmp (arg, "error") == 0)
8799 sse_check = check_error;
8800 else if (strcasecmp (arg, "warning") == 0)
8801 sse_check = check_warning;
8802 else if (strcasecmp (arg, "none") == 0)
8803 sse_check = check_none;
8805 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
8808 case OPTION_MOPERAND_CHECK:
8809 if (strcasecmp (arg, "error") == 0)
8810 operand_check = check_error;
8811 else if (strcasecmp (arg, "warning") == 0)
8812 operand_check = check_warning;
8813 else if (strcasecmp (arg, "none") == 0)
8814 operand_check = check_none;
8816 as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
8819 case OPTION_MAVXSCALAR:
8820 if (strcasecmp (arg, "128") == 0)
8822 else if (strcasecmp (arg, "256") == 0)
8825 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
/* Print the names from cpu_arch[] to STREAM, word-wrapped into lines
   shaped by MESSAGE_TEMPLATE.  EXT nonzero selects extension entries,
   zero selects processors; CHECK nonzero suppresses pre-i386
   processors.  NOTE(review): the template text and several loop/brace
   lines are elided in this excerpt.  */
8834 #define MESSAGE_TEMPLATE \
8838 show_arch (FILE *stream, int ext, int check)
8840 static char message[] = MESSAGE_TEMPLATE;
/* Names start at column 27 of the template line.  */
8841 char *start = message + 27;
8843 int size = sizeof (MESSAGE_TEMPLATE);
8850 left = size - (start - message);
8851 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8853 /* Should it be skipped? */
8854 if (cpu_arch [j].skip)
8857 name = cpu_arch [j].name;
8858 len = cpu_arch [j].len;
8861 /* It is an extension. Skip if we aren't asked to show it. */
8872 /* It is a processor. Skip if we show only extension. */
8875 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8877 /* It is an impossible processor - skip. */
8881 /* Reserve 2 spaces for ", " or ",\0" */
8884 /* Check if there is any room. */
8892 p = mempcpy (p, name, len);
8896 /* Output the current message now and start a new one. */
8899 fprintf (stream, "%s\n", message);
8901 left = size - (start - message) - len - 2;
8903 gas_assert (left >= 0);
8905 p = mempcpy (p, name, len);
/* Flush the final, partially-filled line.  */
8910 fprintf (stream, "%s\n", message);
/* Print the x86-specific --help text to STREAM.  Option lines are
   grouped by the target configurations (#if blocks) that accept
   them; show_arch fills in the CPU/extension name lists.  */
8914 md_show_usage (FILE *stream)
8916 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8917 fprintf (stream, _("\
8919 -V print assembler version number\n\
8922 fprintf (stream, _("\
8923 -n Do not optimize code alignment\n\
8924 -q quieten some warnings\n"));
8925 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8926 fprintf (stream, _("\
8929 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8930 || defined (TE_PE) || defined (TE_PEP))
8931 fprintf (stream, _("\
8932 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8934 #ifdef SVR4_COMMENT_CHARS
8935 fprintf (stream, _("\
8936 --divide do not treat `/' as a comment character\n"));
8938 fprintf (stream, _("\
8939 --divide ignored\n"));
8941 fprintf (stream, _("\
8942 -march=CPU[,+EXTENSION...]\n\
8943 generate code for CPU and EXTENSION, CPU is one of:\n"));
8944 show_arch (stream, 0, 1);
8945 fprintf (stream, _("\
8946 EXTENSION is combination of:\n"));
8947 show_arch (stream, 1, 0);
8948 fprintf (stream, _("\
8949 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8950 show_arch (stream, 0, 0);
8951 fprintf (stream, _("\
8952 -msse2avx encode SSE instructions with VEX prefix\n"));
8953 fprintf (stream, _("\
8954 -msse-check=[none|error|warning]\n\
8955 check SSE instructions\n"));
8956 fprintf (stream, _("\
8957 -moperand-check=[none|error|warning]\n\
8958 check operand combinations for validity\n"));
8959 fprintf (stream, _("\
8960 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8962 fprintf (stream, _("\
8963 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8964 fprintf (stream, _("\
8965 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8966 fprintf (stream, _("\
8967 -mindex-reg support pseudo index registers\n"));
8968 fprintf (stream, _("\
8969 -mnaked-reg don't require `%%' prefix for registers\n"));
8970 fprintf (stream, _("\
8971 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8974 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8975 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8976 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8978 /* Pick the target format to use. */
/* Returns the BFD target-format name string for the configured
   architecture/ABI, and as a side effect sets flag_code, x86_elf_abi,
   use_rela_relocations and disallow_64bit_reloc.  NOTE(review): the
   return type line, several case labels/returns and braces are
   elided in this excerpt.  */
8981 i386_target_format (void)
8983 if (!strncmp (default_arch, "x86_64", 6))
8985 update_code_flag (CODE_64BIT, 1);
/* "x86_64" selects the LP64 ABI, "x86_64:32" (anything after the
   first six chars) selects x32.  */
8986 if (default_arch[6] == '\0')
8987 x86_elf_abi = X86_64_ABI;
8989 x86_elf_abi = X86_64_X32_ABI;
8991 else if (!strcmp (default_arch, "i386"))
8992 update_code_flag (CODE_32BIT, 1);
8994 as_fatal (_("unknown architecture"));
/* Default the ISA/tune flag sets from the generic 32- or 64-bit
   entry of cpu_arch[] when nothing was selected explicitly.  */
8996 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8997 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8998 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8999 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
9001 switch (OUTPUT_FLAVOR)
9003 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
9004 case bfd_target_aout_flavour:
9005 return AOUT_TARGET_FORMAT;
9007 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
9008 # if defined (TE_PE) || defined (TE_PEP)
9009 case bfd_target_coff_flavour:
9010 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
9011 # elif defined (TE_GO32)
9012 case bfd_target_coff_flavour:
9015 case bfd_target_coff_flavour:
9019 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9020 case bfd_target_elf_flavour:
9024 switch (x86_elf_abi)
9027 format = ELF_TARGET_FORMAT;
/* 64-bit ELF (and x32) use RELA relocations.  */
9030 use_rela_relocations = 1;
9032 format = ELF_TARGET_FORMAT64;
9034 case X86_64_X32_ABI:
9035 use_rela_relocations = 1;
9037 disallow_64bit_reloc = 1;
9038 format = ELF_TARGET_FORMAT32;
/* Intel L1OM/K1OM are distinct 64-bit-only ELF machines.  */
9041 if (cpu_arch_isa == PROCESSOR_L1OM)
9043 if (x86_elf_abi != X86_64_ABI)
9044 as_fatal (_("Intel L1OM is 64bit only"));
9045 return ELF_TARGET_L1OM_FORMAT;
9047 if (cpu_arch_isa == PROCESSOR_K1OM)
9049 if (x86_elf_abi != X86_64_ABI)
9050 as_fatal (_("Intel K1OM is 64bit only"));
9051 return ELF_TARGET_K1OM_FORMAT;
9057 #if defined (OBJ_MACH_O)
9058 case bfd_target_mach_o_flavour:
9059 if (flag_code == CODE_64BIT)
9061 use_rela_relocations = 1;
9063 return "mach-o-x86-64";
9066 return "mach-o-i386";
9074 #endif /* OBJ_MAYBE_ more than one */
9076 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
/* Emit a .note section recording the selected cpu_arch_name for ELF
   output: note header (namesz/descsz/type = NT_ARCH) followed by the
   NUL-terminated name, padded to 4-byte alignment.  Saves and
   restores the current (sub)segment around the emission.
   NOTE(review): the descsz assignment and some braces are elided in
   this excerpt.  */
9078 i386_elf_emit_arch_note (void)
9080 if (IS_ELF && cpu_arch_name != NULL)
9083 asection *seg = now_seg;
9084 subsegT subseg = now_subseg;
9085 Elf_Internal_Note i_note;
9086 Elf_External_Note e_note;
9087 asection *note_secp;
9090 /* Create the .note section. */
9091 note_secp = subseg_new (".note", 0);
9092 bfd_set_section_flags (stdoutput,
9094 SEC_HAS_CONTENTS | SEC_READONLY);
9096 /* Process the arch string. */
9097 len = strlen (cpu_arch_name);
9099 i_note.namesz = len + 1;
9101 i_note.type = NT_ARCH;
/* Write the three note-header words in target byte order.  */
9102 p = frag_more (sizeof (e_note.namesz));
9103 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
9104 p = frag_more (sizeof (e_note.descsz));
9105 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
9106 p = frag_more (sizeof (e_note.type));
9107 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
9108 p = frag_more (len + 1);
9109 strcpy (p, cpu_arch_name);
9111 frag_align (2, 0, 0);
9113 subseg_set (seg, subseg);
9119 md_undefined_symbol (char *name)
9121 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
9122 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
9123 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
9124 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
9128 if (symbol_find (name))
9129 as_bad (_("GOT already in symbol table"));
9130 GOT_symbol = symbol_new (name, undefined_section,
9131 (valueT) 0, &zero_address_frag);
9138 /* Round up a section size to the appropriate boundary. */
9141 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
9143 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
9144 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
9146 /* For a.out, force the section size to be aligned. If we don't do
9147 this, BFD will align it for us, but it will not write out the
9148 final bytes of the section. This may be a bug in BFD, but it is
9149 easier to fix it here since that is how the other a.out targets
9153 align = bfd_get_section_alignment (stdoutput, segment);
9154 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
9161 /* On the i386, PC-relative offsets are relative to the start of the
9162 next instruction. That is, the address of the offset, plus its
9163 size, since the offset is always the last part of the insn. */
9166 md_pcrel_from (fixS *fixP)
9168 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
9174 s_bss (int ignore ATTRIBUTE_UNUSED)
9178 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9180 obj_elf_section_change_hook ();
9182 temp = get_absolute_expression ();
9183 subseg_set (bss_section, (subsegT) temp);
9184 demand_empty_rest_of_line ();
/* Validate/adjust a fixup before relocation generation: an
   expression of the form `sym - _GLOBAL_OFFSET_TABLE_' (fx_subsy ==
   GOT_symbol) is rewritten into the appropriate GOT-relative
   relocation type.  NOTE(review): the flag_code/object-format
   conditionals selecting between these assignments are elided in
   this excerpt.  */
9190 i386_validate_fix (fixS *fixp)
9192 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
9194 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
9198 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
9203 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
9205 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
/* Translate the internal fixup FIXP into a BFD arelent for the
   output.  Maps fx_r_type (or fx_size/fx_pcrel for generic fixups)
   to a bfd_reloc_code_real_type, allocates the arelent, and computes
   its addend per Rel vs. RELA convention.  NOTE(review): switch
   braces, `break's, several case labels and the trailing `return
   rel;' are elided in this excerpt.  */
9212 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
9215 bfd_reloc_code_real_type code;
9217 switch (fixp->fx_r_type)
9219 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9220 case BFD_RELOC_SIZE32:
9221 case BFD_RELOC_SIZE64:
9222 if (S_IS_DEFINED (fixp->fx_addsy)
9223 && !S_IS_EXTERNAL (fixp->fx_addsy))
9225 /* Resolve size relocation against local symbol to size of
9226 the symbol plus addend. */
9227 valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
9228 if (fixp->fx_r_type == BFD_RELOC_SIZE32
9229 && !fits_in_unsigned_long (value))
9230 as_bad_where (fixp->fx_file, fixp->fx_line,
9231 _("symbol size computation overflow"));
9232 fixp->fx_addsy = NULL;
9233 fixp->fx_subsy = NULL;
9234 md_apply_fix (fixp, (valueT *) &value, NULL);
/* These relocation types pass straight through unchanged.  */
9239 case BFD_RELOC_X86_64_PLT32:
9240 case BFD_RELOC_X86_64_GOT32:
9241 case BFD_RELOC_X86_64_GOTPCREL:
9242 case BFD_RELOC_386_PLT32:
9243 case BFD_RELOC_386_GOT32:
9244 case BFD_RELOC_386_GOTOFF:
9245 case BFD_RELOC_386_GOTPC:
9246 case BFD_RELOC_386_TLS_GD:
9247 case BFD_RELOC_386_TLS_LDM:
9248 case BFD_RELOC_386_TLS_LDO_32:
9249 case BFD_RELOC_386_TLS_IE_32:
9250 case BFD_RELOC_386_TLS_IE:
9251 case BFD_RELOC_386_TLS_GOTIE:
9252 case BFD_RELOC_386_TLS_LE_32:
9253 case BFD_RELOC_386_TLS_LE:
9254 case BFD_RELOC_386_TLS_GOTDESC:
9255 case BFD_RELOC_386_TLS_DESC_CALL:
9256 case BFD_RELOC_X86_64_TLSGD:
9257 case BFD_RELOC_X86_64_TLSLD:
9258 case BFD_RELOC_X86_64_DTPOFF32:
9259 case BFD_RELOC_X86_64_DTPOFF64:
9260 case BFD_RELOC_X86_64_GOTTPOFF:
9261 case BFD_RELOC_X86_64_TPOFF32:
9262 case BFD_RELOC_X86_64_TPOFF64:
9263 case BFD_RELOC_X86_64_GOTOFF64:
9264 case BFD_RELOC_X86_64_GOTPC32:
9265 case BFD_RELOC_X86_64_GOT64:
9266 case BFD_RELOC_X86_64_GOTPCREL64:
9267 case BFD_RELOC_X86_64_GOTPC64:
9268 case BFD_RELOC_X86_64_GOTPLT64:
9269 case BFD_RELOC_X86_64_PLTOFF64:
9270 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9271 case BFD_RELOC_X86_64_TLSDESC_CALL:
9273 case BFD_RELOC_VTABLE_ENTRY:
9274 case BFD_RELOC_VTABLE_INHERIT:
9276 case BFD_RELOC_32_SECREL:
9278 code = fixp->fx_r_type;
9280 case BFD_RELOC_X86_64_32S:
9281 if (!fixp->fx_pcrel)
9283 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
9284 code = fixp->fx_r_type;
/* Generic fixups: pick a PC-relative or absolute reloc by size.  */
9290 switch (fixp->fx_size)
9293 as_bad_where (fixp->fx_file, fixp->fx_line,
9294 _("can not do %d byte pc-relative relocation"),
9296 code = BFD_RELOC_32_PCREL;
9298 case 1: code = BFD_RELOC_8_PCREL; break;
9299 case 2: code = BFD_RELOC_16_PCREL; break;
9300 case 4: code = BFD_RELOC_32_PCREL; break;
9302 case 8: code = BFD_RELOC_64_PCREL; break;
9308 switch (fixp->fx_size)
9311 as_bad_where (fixp->fx_file, fixp->fx_line,
9312 _("can not do %d byte relocation"),
9314 code = BFD_RELOC_32;
9316 case 1: code = BFD_RELOC_8; break;
9317 case 2: code = BFD_RELOC_16; break;
9318 case 4: code = BFD_RELOC_32; break;
9320 case 8: code = BFD_RELOC_64; break;
/* References to the GOT symbol itself become GOTPC relocations.  */
9327 if ((code == BFD_RELOC_32
9328 || code == BFD_RELOC_32_PCREL
9329 || code == BFD_RELOC_X86_64_32S)
9331 && fixp->fx_addsy == GOT_symbol)
9334 code = BFD_RELOC_386_GOTPC;
9336 code = BFD_RELOC_X86_64_GOTPC32;
9338 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
9340 && fixp->fx_addsy == GOT_symbol)
9342 code = BFD_RELOC_X86_64_GOTPC64;
/* Build the arelent; xmalloc aborts on failure, so no NULL checks.  */
9345 rel = (arelent *) xmalloc (sizeof (arelent));
9346 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9347 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9349 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
9351 if (!use_rela_relocations)
9353 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9354 vtable entry to be used in the relocation's section offset. */
9355 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
9356 rel->address = fixp->fx_offset;
9357 #if defined (OBJ_COFF) && defined (TE_PE)
9358 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
9359 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
9364 /* Use the rela in 64bit mode. */
/* x32: reject relocation types that need 64-bit fields.  */
9367 if (disallow_64bit_reloc)
9370 case BFD_RELOC_X86_64_DTPOFF64:
9371 case BFD_RELOC_X86_64_TPOFF64:
9372 case BFD_RELOC_64_PCREL:
9373 case BFD_RELOC_X86_64_GOTOFF64:
9374 case BFD_RELOC_X86_64_GOT64:
9375 case BFD_RELOC_X86_64_GOTPCREL64:
9376 case BFD_RELOC_X86_64_GOTPC64:
9377 case BFD_RELOC_X86_64_GOTPLT64:
9378 case BFD_RELOC_X86_64_PLTOFF64:
9379 as_bad_where (fixp->fx_file, fixp->fx_line,
9380 _("cannot represent relocation type %s in x32 mode"),
9381 bfd_get_reloc_code_name (code));
9387 if (!fixp->fx_pcrel)
9388 rel->addend = fixp->fx_offset;
/* PC-relative GOT/PLT/TLS relocs: bias the addend by the field size.  */
9392 case BFD_RELOC_X86_64_PLT32:
9393 case BFD_RELOC_X86_64_GOT32:
9394 case BFD_RELOC_X86_64_GOTPCREL:
9395 case BFD_RELOC_X86_64_TLSGD:
9396 case BFD_RELOC_X86_64_TLSLD:
9397 case BFD_RELOC_X86_64_GOTTPOFF:
9398 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9399 case BFD_RELOC_X86_64_TLSDESC_CALL:
9400 rel->addend = fixp->fx_offset - fixp->fx_size;
9403 rel->addend = (section->vma
9405 + fixp->fx_addnumber
9406 + md_pcrel_from (fixp));
9411 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
9412 if (rel->howto == NULL)
9414 as_bad_where (fixp->fx_file, fixp->fx_line,
9415 _("cannot represent relocation type %s"),
9416 bfd_get_reloc_code_name (code));
9417 /* Set howto to a garbage value so that we can keep going. */
9418 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
9419 gas_assert (rel->howto != NULL);
9425 #include "tc-i386-intel.c"
9428 tc_x86_parse_to_dw2regnum (expressionS *exp)
9430 int saved_naked_reg;
9431 char saved_register_dot;
9433 saved_naked_reg = allow_naked_reg;
9434 allow_naked_reg = 1;
9435 saved_register_dot = register_chars['.'];
9436 register_chars['.'] = '.';
9437 allow_pseudo_reg = 1;
9438 expression_and_evaluate (exp);
9439 allow_pseudo_reg = 0;
9440 register_chars['.'] = saved_register_dot;
9441 allow_naked_reg = saved_naked_reg;
9443 if (exp->X_op == O_register && exp->X_add_number >= 0)
9445 if ((addressT) exp->X_add_number < i386_regtab_size)
9447 exp->X_op = O_constant;
9448 exp->X_add_number = i386_regtab[exp->X_add_number]
9449 .dw2_regnum[flag_code >> 1];
9452 exp->X_op = O_illegal;
9457 tc_x86_frame_initial_instructions (void)
9459 static unsigned int sp_regno[2];
9461 if (!sp_regno[flag_code >> 1])
9463 char *saved_input = input_line_pointer;
9464 char sp[][4] = {"esp", "rsp"};
9467 input_line_pointer = sp[flag_code >> 1];
9468 tc_x86_parse_to_dw2regnum (&exp);
9469 gas_assert (exp.X_op == O_constant);
9470 sp_regno[flag_code >> 1] = exp.X_add_number;
9471 input_line_pointer = saved_input;
9474 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9475 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9479 x86_dwarf2_addr_size (void)
9481 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9482 if (x86_elf_abi == X86_64_X32_ABI)
9485 return bfd_arch_bits_per_address (stdoutput) / 8;
9489 i386_elf_section_type (const char *str, size_t len)
9491 if (flag_code == CODE_64BIT
9492 && len == sizeof ("unwind") - 1
9493 && strncmp (str, "unwind", 6) == 0)
9494 return SHT_X86_64_UNWIND;
9501 i386_solaris_fix_up_eh_frame (segT sec)
9503 if (flag_code == CODE_64BIT)
9504 elf_section_type (sec) = SHT_X86_64_UNWIND;
9510 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9514 exp.X_op = O_secrel;
9515 exp.X_add_symbol = symbol;
9516 exp.X_add_number = 0;
9517 emit_expr (&exp, size);
9521 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9522 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9525 x86_64_section_letter (int letter, char **ptr_msg)
9527 if (flag_code == CODE_64BIT)
9530 return SHF_X86_64_LARGE;
9532 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9535 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9540 x86_64_section_word (char *str, size_t len)
9542 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9543 return SHF_X86_64_LARGE;
/* Handler for the .largecomm pseudo-op.  Outside 64-bit mode it
   degrades to an ordinary .comm with a warning.  In 64-bit mode it
   lazily creates the .lbss section, then temporarily swaps the
   common section pointer and bss_section so s_comm_internal places
   the symbol in the large-data area, restoring both afterwards.
   NOTE(review): some brace/return lines are elided in this
   excerpt.  */
9549 handle_large_common (int small ATTRIBUTE_UNUSED)
9551 if (flag_code != CODE_64BIT)
9553 s_comm_internal (0, elf_common_parse);
9554 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9558 static segT lbss_section;
/* Save the globals we are about to redirect.  */
9559 asection *saved_com_section_ptr = elf_com_section_ptr;
9560 asection *saved_bss_section = bss_section;
9562 if (lbss_section == NULL)
9564 flagword applicable;
9566 subsegT subseg = now_subseg;
9568 /* The .lbss section is for local .largecomm symbols. */
9569 lbss_section = subseg_new (".lbss", 0);
9570 applicable = bfd_applicable_section_flags (stdoutput);
9571 bfd_set_section_flags (stdoutput, lbss_section,
9572 applicable & SEC_ALLOC);
9573 seg_info (lbss_section)->bss = 1;
/* subseg_new switched segments; restore the caller's.  */
9575 subseg_set (seg, subseg);
/* Redirect common/bss handling at the large-data sections.  */
9578 elf_com_section_ptr = &_bfd_elf_large_com_section;
9579 bss_section = lbss_section;
9581 s_comm_internal (0, elf_common_parse);
9583 elf_com_section_ptr = saved_com_section_ptr;
9584 bss_section = saved_bss_section;
9587 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */