1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright (C) 1998-2014 Free Software Foundation, Inc.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
5 This file is part of GAS, the GNU Assembler.
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street - Fifth Floor,
20 Boston, MA 02110-1301, USA. */
34 - labels are wrong if automatic alignment is introduced
35 (e.g., check out the second real10 definition in test-data.s)
37 <reg>.safe_across_calls and any other DV-related directives I don't
38 have documentation for.
39 verify mod-sched-brs reads/writes are checked/marked (and other
45 #include "safe-ctype.h"
46 #include "dwarf2dbg.h"
49 #include "opcode/ia64.h"
59 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
61 /* Some systems define MIN in, e.g., param.h. */
63 #define MIN(a,b) ((a) < (b) ? (a) : (b))
66 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
67 #define CURR_SLOT md.slot[md.curr_slot]
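/* A note on the ring buffer above (a sketch, assuming NUM_SLOTS is 4 as the
   md.slot comment below implies): with md.curr_slot == 0, PREV_SLOT expands
   to md.slot[(0 + 4 - 1) % 4] == md.slot[3], so wrap-around at the start of
   the array needs no special-casing.  */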
69 #define O_pseudo_fixup (O_max + 1)
73 /* IA-64 ABI section pseudo-ops. */
74 SPECIAL_SECTION_BSS = 0,
76 SPECIAL_SECTION_SDATA,
77 SPECIAL_SECTION_RODATA,
78 SPECIAL_SECTION_COMMENT,
79 SPECIAL_SECTION_UNWIND,
80 SPECIAL_SECTION_UNWIND_INFO,
81 /* HPUX specific section pseudo-ops. */
82 SPECIAL_SECTION_INIT_ARRAY,
83 SPECIAL_SECTION_FINI_ARRAY,
100 FUNC_LT_FPTR_RELATIVE,
102 FUNC_LT_DTP_RELATIVE,
106 FUNC_SLOTCOUNT_RELOC,
113 REG_FR = (REG_GR + 128),
114 REG_AR = (REG_FR + 128),
115 REG_CR = (REG_AR + 128),
116 REG_DAHR = (REG_CR + 128),
117 REG_P = (REG_DAHR + 8),
118 REG_BR = (REG_P + 64),
119 REG_IP = (REG_BR + 8),
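/* All register files live in one flat number space, so a parsed register can
   be classified by simple range checks; for illustration, f5 is REG_FR + 5,
   ar.pfs is REG_AR + 64, p63 is REG_P + 63 and b0 is simply REG_BR.  */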
126 /* The following are pseudo-registers for use by gas only. */
138 /* The following pseudo-registers are used for unwind directives only: */
146 DYNREG_GR = 0, /* dynamic general purpose register */
147 DYNREG_FR, /* dynamic floating point register */
148 DYNREG_PR, /* dynamic predicate register */
152 enum operand_match_result
155 OPERAND_OUT_OF_RANGE,
159 /* On the ia64, we can't know the address of a text label until the
160 instructions are packed into a bundle. To handle this, we keep
161 track of the list of labels that appear in front of each instruction. */
165 struct label_fix *next;
167 bfd_boolean dw2_mark_labels;
171 /* An internally used relocation. */
172 #define DUMMY_RELOC_IA64_SLOTCOUNT (BFD_RELOC_UNUSED + 1)
175 /* This is the endianness of the current section. */
176 extern int target_big_endian;
178 /* This is the default endianness. */
179 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN;
181 void (*ia64_number_to_chars) (char *, valueT, int);
183 static void ia64_float_to_chars_bigendian (char *, LITTLENUM_TYPE *, int);
184 static void ia64_float_to_chars_littleendian (char *, LITTLENUM_TYPE *, int);
186 static void (*ia64_float_to_chars) (char *, LITTLENUM_TYPE *, int);
188 static struct hash_control *alias_hash;
189 static struct hash_control *alias_name_hash;
190 static struct hash_control *secalias_hash;
191 static struct hash_control *secalias_name_hash;
193 /* List of chars besides those in app.c:symbol_chars that can start an
194 operand. Used to prevent the scrubber eating vital white-space. */
195 const char ia64_symbol_chars[] = "@?";
197 /* Characters which always start a comment. */
198 const char comment_chars[] = "";
200 /* Characters which start a comment at the beginning of a line. */
201 const char line_comment_chars[] = "#";
203 /* Characters which may be used to separate multiple commands on a single line. */
205 const char line_separator_chars[] = ";{}";
207 /* Characters which are used to indicate an exponent in a floating point number. */
209 const char EXP_CHARS[] = "eE";
211 /* Characters which mean that a number is a floating point constant, as in 0f12.456 or 0d1.2345e12. */
213 const char FLT_CHARS[] = "rRsSfFdDxXpP";
215 /* ia64-specific option processing: */
217 const char *md_shortopts = "m:N:x::";
219 struct option md_longopts[] =
221 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
222 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
223 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
224 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
227 size_t md_longopts_size = sizeof (md_longopts);
231 struct hash_control *pseudo_hash; /* pseudo opcode hash table */
232 struct hash_control *reg_hash; /* register name hash table */
233 struct hash_control *dynreg_hash; /* dynamic register hash table */
234 struct hash_control *const_hash; /* constant hash table */
235 struct hash_control *entry_hash; /* code entry hint hash table */
237 /* If X_op is != O_absent, the register name for the instruction's
238 qualifying predicate. If NULL, p0 is assumed for instructions
239 that can be predicated. */
242 /* Optimize for which CPU. */
249 /* What to do when hint.b is used. */
261 explicit_mode : 1, /* which mode we're in */
262 default_explicit_mode : 1, /* which mode is the default */
263 mode_explicitly_set : 1, /* was the current mode explicitly set? */
265 keep_pending_output : 1;
267 /* What to do when something is wrong with unwind directives. */
270 unwind_check_warning,
274 /* Each bundle consists of up to three instructions. We keep
275 track of the four most recent instructions so we can correctly set
276 the end_of_insn_group for the last instruction in a bundle. */
278 int num_slots_in_use;
282 end_of_insn_group : 1,
283 manual_bundling_on : 1,
284 manual_bundling_off : 1,
285 loc_directive_seen : 1;
286 signed char user_template; /* user-selected template, if any */
287 unsigned char qp_regno; /* qualifying predicate */
288 /* This duplicates a good fraction of "struct fix" but we
289 can't use a "struct fix" instead since we can't call
290 fix_new_exp() until we know the address of the instruction. */
294 bfd_reloc_code_real_type code;
295 enum ia64_opnd opnd; /* type of operand in need of fix */
296 unsigned int is_pcrel : 1; /* is operand pc-relative? */
297 expressionS expr; /* the value to be inserted */
299 fixup[2]; /* at most two fixups per insn */
300 struct ia64_opcode *idesc;
301 struct label_fix *label_fixups;
302 struct label_fix *tag_fixups;
303 struct unw_rec_list *unwind_record; /* Unwind directive. */
306 unsigned int src_line;
307 struct dwarf2_line_info debug_line;
315 struct dynreg *next; /* next dynamic register */
317 unsigned short base; /* the base register number */
318 unsigned short num_regs; /* # of registers in this set */
320 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
322 flagword flags; /* ELF-header flags */
325 unsigned hint:1; /* is this hint currently valid? */
326 bfd_vma offset; /* mem.offset offset */
327 bfd_vma base; /* mem.offset base */
330 int path; /* number of alt. entry points seen */
331 const char **entry_labels; /* labels of all alternate paths in
332 the current DV-checking block. */
333 int maxpaths; /* size currently allocated for
336 int pointer_size; /* size in bytes of a pointer */
337 int pointer_size_shift; /* shift size of a pointer for alignment */
339 symbolS *indregsym[IND_RR - IND_CPUID + 1];
343 /* These are not const, because they are modified to MMI for non-itanium1 CPUs. */
345 /* MFI bundle of nops. */
346 static unsigned char le_nop[16] =
348 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
349 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
351 /* MFI bundle of nops with stop-bit. */
352 static unsigned char le_nop_stop[16] =
354 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
355 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
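/* The two byte arrays above differ only in byte 0: the low five bits of the
   first byte of a little-endian bundle hold the template field, and 0x0c is
   MFI while 0x0d is MFI with the stop bit set after the last slot.  */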
358 /* application registers: */
364 #define AR_BSPSTORE 18
390 {"ar.k0", AR_K0}, {"ar.k1", AR_K0 + 1},
391 {"ar.k2", AR_K0 + 2}, {"ar.k3", AR_K0 + 3},
392 {"ar.k4", AR_K0 + 4}, {"ar.k5", AR_K0 + 5},
393 {"ar.k6", AR_K0 + 6}, {"ar.k7", AR_K7},
394 {"ar.rsc", AR_RSC}, {"ar.bsp", AR_BSP},
395 {"ar.bspstore", AR_BSPSTORE}, {"ar.rnat", AR_RNAT},
396 {"ar.fcr", AR_FCR}, {"ar.eflag", AR_EFLAG},
397 {"ar.csd", AR_CSD}, {"ar.ssd", AR_SSD},
398 {"ar.cflg", AR_CFLG}, {"ar.fsr", AR_FSR},
399 {"ar.fir", AR_FIR}, {"ar.fdr", AR_FDR},
400 {"ar.ccv", AR_CCV}, {"ar.unat", AR_UNAT},
401 {"ar.fpsr", AR_FPSR}, {"ar.itc", AR_ITC},
402 {"ar.ruc", AR_RUC}, {"ar.pfs", AR_PFS},
403 {"ar.lc", AR_LC}, {"ar.ec", AR_EC},
406 /* control registers: */
447 {"cr.gpta", CR_GPTA},
448 {"cr.ipsr", CR_IPSR},
452 {"cr.itir", CR_ITIR},
453 {"cr.iipa", CR_IIPA},
457 {"cr.iib0", CR_IIB0},
458 {"cr.iib1", CR_IIB1},
463 {"cr.irr0", CR_IRR0},
464 {"cr.irr1", CR_IRR0 + 1},
465 {"cr.irr2", CR_IRR0 + 2},
466 {"cr.irr3", CR_IRR3},
469 {"cr.cmcv", CR_CMCV},
470 {"cr.lrr0", CR_LRR0},
479 static const struct const_desc
486 /* PSR constant masks: */
489 {"psr.be", ((valueT) 1) << 1},
490 {"psr.up", ((valueT) 1) << 2},
491 {"psr.ac", ((valueT) 1) << 3},
492 {"psr.mfl", ((valueT) 1) << 4},
493 {"psr.mfh", ((valueT) 1) << 5},
495 {"psr.ic", ((valueT) 1) << 13},
496 {"psr.i", ((valueT) 1) << 14},
497 {"psr.pk", ((valueT) 1) << 15},
499 {"psr.dt", ((valueT) 1) << 17},
500 {"psr.dfl", ((valueT) 1) << 18},
501 {"psr.dfh", ((valueT) 1) << 19},
502 {"psr.sp", ((valueT) 1) << 20},
503 {"psr.pp", ((valueT) 1) << 21},
504 {"psr.di", ((valueT) 1) << 22},
505 {"psr.si", ((valueT) 1) << 23},
506 {"psr.db", ((valueT) 1) << 24},
507 {"psr.lp", ((valueT) 1) << 25},
508 {"psr.tb", ((valueT) 1) << 26},
509 {"psr.rt", ((valueT) 1) << 27},
510 /* 28-31: reserved */
511 /* 32-33: cpl (current privilege level) */
512 {"psr.is", ((valueT) 1) << 34},
513 {"psr.mc", ((valueT) 1) << 35},
514 {"psr.it", ((valueT) 1) << 36},
515 {"psr.id", ((valueT) 1) << 37},
516 {"psr.da", ((valueT) 1) << 38},
517 {"psr.dd", ((valueT) 1) << 39},
518 {"psr.ss", ((valueT) 1) << 40},
519 /* 41-42: ri (restart instruction) */
520 {"psr.ed", ((valueT) 1) << 43},
521 {"psr.bn", ((valueT) 1) << 44},
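/* A worked example of the masks above: an operand written as
   "psr.ic | psr.i" evaluates to (1 << 13) | (1 << 14) == 0x6000, the kind of
   immediate mask such an expression is meant to produce (an illustration of
   the table only; the consumers of these constants live elsewhere).  */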
524 /* indirect register-sets/memory: */
533 { "CPUID", IND_CPUID },
534 { "cpuid", IND_CPUID },
543 { "dahr", IND_DAHR },
547 /* Pseudo functions used to indicate relocation types (these functions
548 start with an at sign (@)). */
570 /* reloc pseudo functions (these must come first!): */
571 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
572 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
573 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
574 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
575 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
576 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
577 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
578 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
579 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
580 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
581 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
582 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
583 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
584 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
585 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
586 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
587 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
589 { "slotcount", PSEUDO_FUNC_RELOC, { 0 } },
592 /* mbtype4 constants: */
593 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
594 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
595 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
596 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
597 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
599 /* fclass constants: */
600 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
601 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
602 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
603 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
604 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
605 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
606 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
607 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
608 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
610 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
612 /* hint constants: */
613 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
614 { "priority", PSEUDO_FUNC_CONST, { 0x1 } },
617 { "clz", PSEUDO_FUNC_CONST, { 32 } },
618 { "mpy", PSEUDO_FUNC_CONST, { 33 } },
619 { "datahints", PSEUDO_FUNC_CONST, { 34 } },
621 /* unwind-related constants: */
622 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } },
623 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } },
624 { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */
625 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_GNU } },
626 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } },
627 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } },
628 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } },
630 /* unwind-related registers: */
631 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
634 /* 41-bit nop opcodes (one per unit): */
635 static const bfd_vma nop[IA64_NUM_UNITS] =
637 0x0000000000LL, /* NIL => break 0 */
638 0x0008000000LL, /* I-unit nop */
639 0x0008000000LL, /* M-unit nop */
640 0x4000000000LL, /* B-unit nop */
641 0x0008000000LL, /* F-unit nop */
642 0x0000000000LL, /* L-"unit" nop immediate */
643 0x0008000000LL, /* X-unit nop */
646 /* Can't be `const' as it's passed to input routines (which have the
647 habit of setting temporary sentinels). */
648 static char special_section_name[][20] =
650 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
651 {".IA_64.unwind"}, {".IA_64.unwind_info"},
652 {".init_array"}, {".fini_array"}
655 /* The best template for a particular sequence of up to three instructions: */
657 #define N IA64_NUM_TYPES
658 static unsigned char best_template[N][N][N];
661 /* Resource dependencies currently in effect */
663 int depind; /* dependency index */
664 const struct ia64_dependency *dependency; /* actual dependency */
665 unsigned specific:1, /* is this a specific bit/regno? */
666 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
667 int index; /* specific regno/bit within dependency */
668 int note; /* optional qualifying note (0 if none) */
672 int insn_srlz; /* current insn serialization state */
673 int data_srlz; /* current data serialization state */
674 int qp_regno; /* qualifying predicate for this usage */
675 char *file; /* what file marked this dependency */
676 unsigned int line; /* what line marked this dependency */
677 struct mem_offset mem_offset; /* optional memory offset hint */
678 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
679 int path; /* corresponding code entry index */
681 static int regdepslen = 0;
682 static int regdepstotlen = 0;
683 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
684 static const char *dv_sem[] = { "none", "implied", "impliedf",
685 "data", "instr", "specific", "stop", "other" };
686 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
688 /* Current state of PR mutexation */
689 static struct qpmutex {
692 } *qp_mutexes = NULL; /* QP mutex bitmasks */
693 static int qp_mutexeslen = 0;
694 static int qp_mutexestotlen = 0;
695 static valueT qp_safe_across_calls = 0;
697 /* Current state of PR implications */
698 static struct qp_imply {
701 unsigned p2_branched:1;
703 } *qp_implies = NULL;
704 static int qp_implieslen = 0;
705 static int qp_impliestotlen = 0;
707 /* Keep track of static GR values so that indirect register usage can
708 sometimes be tracked. */
719 (((1 << (8 * sizeof(gr_values->path) - 2)) - 1) << 1) + 1,
725 /* Remember the alignment frag. */
726 static fragS *align_frag;
728 /* These are the routines required to output the various types of unwind records. */
731 /* A slot_number is a frag address plus the slot index (0-2). We use the
732 frag address here so that if there is a section switch in the middle of
733 a function, then instructions emitted to a different section are not
734 counted. Since there may be more than one frag for a function, this
735 means we also need to keep track of which frag this address belongs to
736 so we can compute inter-frag distances. This also nicely solves the
737 problem with nops emitted for align directives, which can't easily be
738 counted, but can easily be derived from frag sizes. */
740 typedef struct unw_rec_list {
742 unsigned long slot_number;
744 struct unw_rec_list *next;
747 #define SLOT_NUM_NOT_SET (unsigned)-1
749 /* Linked list of saved prologue counts. A very poor
750 implementation of a map from label numbers to prologue counts. */
751 typedef struct label_prologue_count
753 struct label_prologue_count *next;
754 unsigned long label_number;
755 unsigned int prologue_count;
756 } label_prologue_count;
758 typedef struct proc_pending
761 struct proc_pending *next;
766 /* Maintain a list of unwind entries for the current function. */
770 /* Any unwind entries that should be attached to the current slot
771 that an insn is being constructed for. */
772 unw_rec_list *current_entry;
774 /* These are used to create the unwind table entry for this function. */
775 proc_pending proc_pending;
776 symbolS *info; /* pointer to unwind info */
777 symbolS *personality_routine;
779 subsegT saved_text_subseg;
780 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
782 /* TRUE if processing unwind directives in a prologue region. */
783 unsigned int prologue : 1;
784 unsigned int prologue_mask : 4;
785 unsigned int prologue_gr : 7;
786 unsigned int body : 1;
787 unsigned int insn : 1;
788 unsigned int prologue_count; /* number of .prologues seen so far */
789 /* Prologue counts at previous .label_state directives. */
790 struct label_prologue_count * saved_prologue_counts;
792 /* List of split up .save-s. */
793 unw_p_record *pending_saves;
796 /* The input value is a negated offset from psp, and specifies an address
797 psp - offset. The encoded value denotes the address psp + 16 - (4 * encoded). Thus we
798 must add 16 and divide by 4 to get the encoded value. */
800 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4)
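/* Worked example: an input offset of 32 (a spill at psp - 32) encodes as
   (32 + 16) / 4 == 12, and decoding per the rule above gives
   psp + 16 - 4 * 12 == psp - 32, recovering the original address.  */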
802 typedef void (*vbyte_func) (int, char *, char *);
804 /* Forward declarations: */
805 static void dot_alias (int);
806 static int parse_operand_and_eval (expressionS *, int);
807 static void emit_one_bundle (void);
808 static bfd_reloc_code_real_type ia64_gen_real_reloc_type (struct symbol *,
809 bfd_reloc_code_real_type);
810 static void insn_group_break (int, int, int);
811 static void add_qp_mutex (valueT);
812 static void add_qp_imply (int, int);
813 static void clear_qp_mutex (valueT);
814 static void clear_qp_implies (valueT, valueT);
815 static void print_dependency (const char *, int);
816 static void instruction_serialization (void);
817 static void data_serialization (void);
818 static void output_R3_format (vbyte_func, unw_record_type, unsigned long);
819 static void output_B3_format (vbyte_func, unsigned long, unsigned long);
820 static void output_B4_format (vbyte_func, unw_record_type, unsigned long);
821 static void free_saved_prologue_counts (void);
823 /* Determine if application register REGNUM resides only in the integer
824 unit (as opposed to the memory unit). */
826 ar_is_only_in_integer_unit (int reg)
829 return reg >= 64 && reg <= 111;
832 /* Determine if application register REGNUM resides only in the memory
833 unit (as opposed to the integer unit). */
835 ar_is_only_in_memory_unit (int reg)
838 return reg >= 0 && reg <= 47;
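/* Examples, using the AR numbers from the table above: ar.pfs, ar.lc and
   ar.ec (ARs 64-66) satisfy only ar_is_only_in_integer_unit, while ar.k0-k7
   and ar.bspstore (ARs 0-7 and 18) satisfy only ar_is_only_in_memory_unit;
   any other AR number fails both predicates.  */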
841 /* Switch to section NAME and create section if necessary. It's
842 rather ugly that we have to manipulate input_line_pointer but I
843 don't see any other way to accomplish the same thing without
844 changing obj-elf.c (which may be the Right Thing, in the end). */
846 set_section (char *name)
848 char *saved_input_line_pointer;
850 saved_input_line_pointer = input_line_pointer;
851 input_line_pointer = name;
853 input_line_pointer = saved_input_line_pointer;
856 /* Map 's' to SHF_IA_64_SHORT. */
859 ia64_elf_section_letter (int letter, char **ptr_msg)
862 return SHF_IA_64_SHORT;
863 else if (letter == 'o')
864 return SHF_LINK_ORDER;
866 else if (letter == 'O')
867 return SHF_IA_64_VMS_OVERLAID;
868 else if (letter == 'g')
869 return SHF_IA_64_VMS_GLOBAL;
872 *ptr_msg = _("bad .section directive: want a,o,s,w,x,M,S,G,T in string");
876 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
879 ia64_elf_section_flags (flagword flags,
881 int type ATTRIBUTE_UNUSED)
883 if (attr & SHF_IA_64_SHORT)
884 flags |= SEC_SMALL_DATA;
889 ia64_elf_section_type (const char *str, size_t len)
891 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
893 if (STREQ (ELF_STRING_ia64_unwind_info))
896 if (STREQ (ELF_STRING_ia64_unwind_info_once))
899 if (STREQ (ELF_STRING_ia64_unwind))
900 return SHT_IA_64_UNWIND;
902 if (STREQ (ELF_STRING_ia64_unwind_once))
903 return SHT_IA_64_UNWIND;
905 if (STREQ ("unwind"))
906 return SHT_IA_64_UNWIND;
913 set_regstack (unsigned int ins,
921 sof = ins + locs + outs;
924 as_bad (_("Size of frame exceeds maximum of 96 registers"));
929 as_warn (_("Size of rotating registers exceeds frame size"));
932 md.in.base = REG_GR + 32;
933 md.loc.base = md.in.base + ins;
934 md.out.base = md.loc.base + locs;
936 md.in.num_regs = ins;
937 md.loc.num_regs = locs;
938 md.out.num_regs = outs;
939 md.rot.num_regs = rots;
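/* Worked example (assuming the .regstk directive feeds its four arguments to
   this routine in order): ".regstk 2, 3, 4, 8" gives sof == 9 with
   in == r32-r33, loc == r34-r36 and out == r37-r40, and the 8 rotating
   registers fit within the frame, so no warning is issued.  */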
944 ia64_flush_insns (void)
946 struct label_fix *lfix;
948 subsegT saved_subseg;
952 if (!md.last_text_seg)
956 saved_subseg = now_subseg;
958 subseg_set (md.last_text_seg, 0);
960 while (md.num_slots_in_use > 0)
961 emit_one_bundle (); /* force out queued instructions */
963 /* In case there are labels following the last instruction, resolve those now. */
966 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
968 symbol_set_value_now (lfix->sym);
969 mark |= lfix->dw2_mark_labels;
973 dwarf2_where (&CURR_SLOT.debug_line);
974 CURR_SLOT.debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
975 dwarf2_gen_line_info (frag_now_fix (), &CURR_SLOT.debug_line);
976 dwarf2_consume_line_info ();
978 CURR_SLOT.label_fixups = 0;
980 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
981 symbol_set_value_now (lfix->sym);
982 CURR_SLOT.tag_fixups = 0;
984 /* In case there are unwind directives following the last instruction,
985 resolve those now. We only handle prologue, body, and endp directives
986 here. Give an error for others. */
987 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
995 ptr->slot_number = (unsigned long) frag_more (0);
996 ptr->slot_frag = frag_now;
999 /* Allow any record which doesn't have a "t" field (i.e.,
1000 doesn't relate to a particular instruction). */
1016 as_bad (_("Unwind directive not followed by an instruction."));
1020 unwind.current_entry = NULL;
1022 subseg_set (saved_seg, saved_subseg);
1024 if (md.qp.X_op == O_register)
1025 as_bad (_("qualifying predicate not followed by instruction"));
1029 ia64_do_align (int nbytes)
1031 char *saved_input_line_pointer = input_line_pointer;
1033 input_line_pointer = "";
1034 s_align_bytes (nbytes);
1035 input_line_pointer = saved_input_line_pointer;
1039 ia64_cons_align (int nbytes)
1043 char *saved_input_line_pointer = input_line_pointer;
1044 input_line_pointer = "";
1045 s_align_bytes (nbytes);
1046 input_line_pointer = saved_input_line_pointer;
1052 /* .vms_common section, symbol, size, alignment */
1055 obj_elf_vms_common (int ignore ATTRIBUTE_UNUSED)
1064 segT current_seg = now_seg;
1065 subsegT current_subseg = now_subseg;
1069 sec_name = obj_elf_section_name ();
1070 if (sec_name == NULL)
1075 if (*input_line_pointer == ',')
1077 input_line_pointer++;
1082 as_bad (_("expected ',' after section name"));
1083 ignore_rest_of_line ();
1087 sym_name = input_line_pointer;
1088 c = get_symbol_end ();
1090 if (input_line_pointer == sym_name)
1092 *input_line_pointer = c;
1093 as_bad (_("expected symbol name"));
1094 ignore_rest_of_line ();
1098 symbolP = symbol_find_or_make (sym_name);
1099 *input_line_pointer = c;
1101 if ((S_IS_DEFINED (symbolP) || symbol_equated_p (symbolP))
1102 && !S_IS_COMMON (symbolP))
1104 as_bad (_("Ignoring attempt to re-define symbol"));
1105 ignore_rest_of_line ();
1111 if (*input_line_pointer == ',')
1113 input_line_pointer++;
1118 as_bad (_("expected ',' after symbol name"));
1119 ignore_rest_of_line ();
1123 temp = get_absolute_expression ();
1125 size &= ((offsetT) 2 << (stdoutput->arch_info->bits_per_address - 1)) - 1;
1128 as_warn (_("size (%ld) out of range, ignored"), (long) temp);
1129 ignore_rest_of_line ();
1135 if (*input_line_pointer == ',')
1137 input_line_pointer++;
1142 as_bad (_("expected ',' after symbol size"));
1143 ignore_rest_of_line ();
1147 log_align = get_absolute_expression ();
1149 demand_empty_rest_of_line ();
1151 obj_elf_change_section
1152 (sec_name, SHT_NOBITS,
1153 SHF_ALLOC | SHF_WRITE | SHF_IA_64_VMS_OVERLAID | SHF_IA_64_VMS_GLOBAL,
1156 S_SET_VALUE (symbolP, 0);
1157 S_SET_SIZE (symbolP, size);
1158 S_SET_EXTERNAL (symbolP);
1159 S_SET_SEGMENT (symbolP, now_seg);
1161 symbol_get_bfdsym (symbolP)->flags |= BSF_OBJECT;
1163 record_alignment (now_seg, log_align);
1165 cur_size = bfd_section_size (stdoutput, now_seg);
1166 if ((int) size > cur_size)
1169 = frag_var (rs_fill, 1, 1, (relax_substateT)0, NULL,
1170 (valueT)size - (valueT)cur_size, NULL);
1172 bfd_section_size (stdoutput, now_seg) = size;
1175 /* Switch back to current segment. */
1176 subseg_set (current_seg, current_subseg);
1178 #ifdef md_elf_section_change_hook
1179 md_elf_section_change_hook ();
1185 /* Output COUNT bytes to a memory location. */
1186 static char *vbyte_mem_ptr = NULL;
1189 output_vbyte_mem (int count, char *ptr, char *comment ATTRIBUTE_UNUSED)
1192 if (vbyte_mem_ptr == NULL)
1197 for (x = 0; x < count; x++)
1198 *(vbyte_mem_ptr++) = ptr[x];
1201 /* Count the number of bytes required for records. */
1202 static int vbyte_count = 0;
1204 count_output (int count,
1205 char *ptr ATTRIBUTE_UNUSED,
1206 char *comment ATTRIBUTE_UNUSED)
1208 vbyte_count += count;
1212 output_R1_format (vbyte_func f, unw_record_type rtype, int rlen)
1218 output_R3_format (f, rtype, rlen);
1224 else if (rtype != prologue)
1225 as_bad (_("record type is not valid"));
1227 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1228 (*f) (1, &byte, NULL);
1232 output_R2_format (vbyte_func f, int mask, int grsave, unsigned long rlen)
1236 mask = (mask & 0x0f);
1237 grsave = (grsave & 0x7f);
1239 bytes[0] = (UNW_R2 | (mask >> 1));
1240 bytes[1] = (((mask & 0x01) << 7) | grsave);
1241 count += output_leb128 (bytes + 2, rlen, 0);
1242 (*f) (count, bytes, NULL);
1246 output_R3_format (vbyte_func f, unw_record_type rtype, unsigned long rlen)
1252 output_R1_format (f, rtype, rlen);
1258 else if (rtype != prologue)
1259 as_bad (_("record type is not valid"));
1260 bytes[0] = (UNW_R3 | r);
1261 count = output_leb128 (bytes + 1, rlen, 0);
1262 (*f) (count + 1, bytes, NULL);
1266 output_P1_format (vbyte_func f, int brmask)
1269 byte = UNW_P1 | (brmask & 0x1f);
1270 (*f) (1, &byte, NULL);
1274 output_P2_format (vbyte_func f, int brmask, int gr)
1277 brmask = (brmask & 0x1f);
1278 bytes[0] = UNW_P2 | (brmask >> 1);
1279 bytes[1] = (((brmask & 1) << 7) | gr);
1280 (*f) (2, bytes, NULL);
1284 output_P3_format (vbyte_func f, unw_record_type rtype, int reg)
1328 as_bad (_("Invalid record type for P3 format."));
1330 bytes[0] = (UNW_P3 | (r >> 1));
1331 bytes[1] = (((r & 1) << 7) | reg);
1332 (*f) (2, bytes, NULL);
1336 output_P4_format (vbyte_func f, unsigned char *imask, unsigned long imask_size)
1339 (*f) (imask_size, (char *) imask, NULL);
1343 output_P5_format (vbyte_func f, int grmask, unsigned long frmask)
1346 grmask = (grmask & 0x0f);
1349 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1350 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1351 bytes[3] = (frmask & 0x000000ff);
1352 (*f) (4, bytes, NULL);
1356 output_P6_format (vbyte_func f, unw_record_type rtype, int rmask)
1361 if (rtype == gr_mem)
1363 else if (rtype != fr_mem)
1364 as_bad (_("Invalid record type for format P6"));
1365 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1366 (*f) (1, &byte, NULL);
1370 output_P7_format (vbyte_func f,
1371 unw_record_type rtype,
1378 count += output_leb128 (bytes + 1, w1, 0);
1383 count += output_leb128 (bytes + count, w2 >> 4, 0);
1433 bytes[0] = (UNW_P7 | r);
1434 (*f) (count, bytes, NULL);
1438 output_P8_format (vbyte_func f, unw_record_type rtype, unsigned long t)
1476 case bspstore_psprel:
1479 case bspstore_sprel:
1491 case priunat_when_gr:
1494 case priunat_psprel:
1500 case priunat_when_mem:
1507 count += output_leb128 (bytes + 2, t, 0);
1508 (*f) (count, bytes, NULL);
1512 output_P9_format (vbyte_func f, int grmask, int gr)
1516 bytes[1] = (grmask & 0x0f);
1517 bytes[2] = (gr & 0x7f);
1518 (*f) (3, bytes, NULL);
1522 output_P10_format (vbyte_func f, int abi, int context)
1526 bytes[1] = (abi & 0xff);
1527 bytes[2] = (context & 0xff);
1528 (*f) (3, bytes, NULL);
1532 output_B1_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1538 output_B4_format (f, rtype, label);
1541 if (rtype == copy_state)
1543 else if (rtype != label_state)
1544 as_bad (_("Invalid record type for format B1"));
1546 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1547 (*f) (1, &byte, NULL);
1551 output_B2_format (vbyte_func f, unsigned long ecount, unsigned long t)
1557 output_B3_format (f, ecount, t);
1560 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1561 count += output_leb128 (bytes + 1, t, 0);
1562 (*f) (count, bytes, NULL);
1566 output_B3_format (vbyte_func f, unsigned long ecount, unsigned long t)
1572 output_B2_format (f, ecount, t);
1576 count += output_leb128 (bytes + 1, t, 0);
1577 count += output_leb128 (bytes + count, ecount, 0);
1578 (*f) (count, bytes, NULL);
1582 output_B4_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1589 output_B1_format (f, rtype, label);
1593 if (rtype == copy_state)
1595 else if (rtype != label_state)
1596 as_bad (_("Invalid record type for format B4"));
1598 bytes[0] = (UNW_B4 | (r << 3));
1599 count += output_leb128 (bytes + 1, label, 0);
1600 (*f) (count, bytes, NULL);
1604 format_ab_reg (int ab, int reg)
1609 ret = (ab << 5) | reg;
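/* "ab" selects the unwind spec's register area and "reg" the slot within it,
   packed as a 2-bit/5-bit pair; e.g. ab == 1, reg == 2 packs to 0x22.  See
   convert_expr_to_ab_reg below for how the pair is derived from a register
   operand.  */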
1614 output_X1_format (vbyte_func f,
1615 unw_record_type rtype,
1626 if (rtype == spill_sprel)
1628 else if (rtype != spill_psprel)
1629 as_bad (_("Invalid record type for format X1"));
1630 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1631 count += output_leb128 (bytes + 2, t, 0);
1632 count += output_leb128 (bytes + count, w1, 0);
1633 (*f) (count, bytes, NULL);
1637 output_X2_format (vbyte_func f,
1648 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1649 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1650 count += output_leb128 (bytes + 3, t, 0);
1651 (*f) (count, bytes, NULL);
1655 output_X3_format (vbyte_func f,
1656 unw_record_type rtype,
1668 if (rtype == spill_sprel_p)
1670 else if (rtype != spill_psprel_p)
1671 as_bad (_("Invalid record type for format X3"));
1672 bytes[1] = ((r << 7) | (qp & 0x3f));
1673 bytes[2] = format_ab_reg (ab, reg);
1674 count += output_leb128 (bytes + 3, t, 0);
1675 count += output_leb128 (bytes + count, w1, 0);
1676 (*f) (count, bytes, NULL);
1680 output_X4_format (vbyte_func f,
1692 bytes[1] = (qp & 0x3f);
1693 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1694 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1695 count += output_leb128 (bytes + 4, t, 0);
1696 (*f) (count, bytes, NULL);
1699 /* This function checks whether there are any outstanding .save-s and
1700 discards them if so. */
1703 check_pending_save (void)
1705 if (unwind.pending_saves)
1707 unw_rec_list *cur, *prev;
1709 as_warn (_("Previous .save incomplete"));
1710 for (cur = unwind.list, prev = NULL; cur; )
1711 if (&cur->r.record.p == unwind.pending_saves)
1714 prev->next = cur->next;
1716 unwind.list = cur->next;
1717 if (cur == unwind.tail)
1719 if (cur == unwind.current_entry)
1720 unwind.current_entry = cur->next;
1721 /* Don't free the first discarded record, it's being used as
1722 terminator for (currently) br_gr and gr_gr processing, and
1723 also prevents leaving a dangling pointer to it in its predecessor. */
1725 cur->r.record.p.grmask = 0;
1726 cur->r.record.p.brmask = 0;
1727 cur->r.record.p.frmask = 0;
1728 prev = cur->r.record.p.next;
1729 cur->r.record.p.next = NULL;
1741 cur = cur->r.record.p.next;
1744 unwind.pending_saves = NULL;
1748 /* This function allocates a record list structure, and initializes fields. */
1750 static unw_rec_list *
1751 alloc_record (unw_record_type t)
1754 ptr = xmalloc (sizeof (*ptr));
1755 memset (ptr, 0, sizeof (*ptr));
1756 ptr->slot_number = SLOT_NUM_NOT_SET;
1761 /* Dummy unwind record used for calculating the length of the last prologue or body region. */
1764 static unw_rec_list *
1767 unw_rec_list *ptr = alloc_record (endp);
1771 static unw_rec_list *
1772 output_prologue (void)
1774 unw_rec_list *ptr = alloc_record (prologue);
1775 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1779 static unw_rec_list *
1780 output_prologue_gr (unsigned int saved_mask, unsigned int reg)
1782 unw_rec_list *ptr = alloc_record (prologue_gr);
1783 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1784 ptr->r.record.r.grmask = saved_mask;
1785 ptr->r.record.r.grsave = reg;
1789 static unw_rec_list *
1792 unw_rec_list *ptr = alloc_record (body);
1796 static unw_rec_list *
1797 output_mem_stack_f (unsigned int size)
1799 unw_rec_list *ptr = alloc_record (mem_stack_f);
1800 ptr->r.record.p.size = size;
1804 static unw_rec_list *
1805 output_mem_stack_v (void)
1807 unw_rec_list *ptr = alloc_record (mem_stack_v);
1811 static unw_rec_list *
1812 output_psp_gr (unsigned int gr)
1814 unw_rec_list *ptr = alloc_record (psp_gr);
1815 ptr->r.record.p.r.gr = gr;
1819 static unw_rec_list *
1820 output_psp_sprel (unsigned int offset)
1822 unw_rec_list *ptr = alloc_record (psp_sprel);
1823 ptr->r.record.p.off.sp = offset / 4;
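/* Note on the two offset flavours used by these output_* helpers:
   sp-relative ("sprel") offsets are stored in 4-byte units, hence the plain
   "offset / 4", while psp-relative ("psprel") offsets go through
   ENCODED_PSP_OFFSET defined above.  */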
1827 static unw_rec_list *
1828 output_rp_when (void)
1830 unw_rec_list *ptr = alloc_record (rp_when);
1834 static unw_rec_list *
1835 output_rp_gr (unsigned int gr)
1837 unw_rec_list *ptr = alloc_record (rp_gr);
1838 ptr->r.record.p.r.gr = gr;
1842 static unw_rec_list *
1843 output_rp_br (unsigned int br)
1845 unw_rec_list *ptr = alloc_record (rp_br);
1846 ptr->r.record.p.r.br = br;
1850 static unw_rec_list *
1851 output_rp_psprel (unsigned int offset)
1853 unw_rec_list *ptr = alloc_record (rp_psprel);
1854 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1858 static unw_rec_list *
1859 output_rp_sprel (unsigned int offset)
1861 unw_rec_list *ptr = alloc_record (rp_sprel);
1862 ptr->r.record.p.off.sp = offset / 4;
1866 static unw_rec_list *
1867 output_pfs_when (void)
1869 unw_rec_list *ptr = alloc_record (pfs_when);
1873 static unw_rec_list *
1874 output_pfs_gr (unsigned int gr)
1876 unw_rec_list *ptr = alloc_record (pfs_gr);
1877 ptr->r.record.p.r.gr = gr;
1881 static unw_rec_list *
1882 output_pfs_psprel (unsigned int offset)
1884 unw_rec_list *ptr = alloc_record (pfs_psprel);
1885 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1889 static unw_rec_list *
1890 output_pfs_sprel (unsigned int offset)
1892 unw_rec_list *ptr = alloc_record (pfs_sprel);
1893 ptr->r.record.p.off.sp = offset / 4;
1897 static unw_rec_list *
1898 output_preds_when (void)
1900 unw_rec_list *ptr = alloc_record (preds_when);
1904 static unw_rec_list *
1905 output_preds_gr (unsigned int gr)
1907 unw_rec_list *ptr = alloc_record (preds_gr);
1908 ptr->r.record.p.r.gr = gr;
1912 static unw_rec_list *
1913 output_preds_psprel (unsigned int offset)
1915 unw_rec_list *ptr = alloc_record (preds_psprel);
1916 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1920 static unw_rec_list *
1921 output_preds_sprel (unsigned int offset)
1923 unw_rec_list *ptr = alloc_record (preds_sprel);
1924 ptr->r.record.p.off.sp = offset / 4;
1928 static unw_rec_list *
1929 output_fr_mem (unsigned int mask)
1931 unw_rec_list *ptr = alloc_record (fr_mem);
1932 unw_rec_list *cur = ptr;
1934 ptr->r.record.p.frmask = mask;
1935 unwind.pending_saves = &ptr->r.record.p;
1938 unw_rec_list *prev = cur;
1940 /* Clear least significant set bit. */
1941 mask &= ~(mask & (~mask + 1));
1944 cur = alloc_record (fr_mem);
1945 cur->r.record.p.frmask = mask;
1946 /* Retain only least significant bit. */
1947 prev->r.record.p.frmask ^= mask;
1948 prev->r.record.p.next = cur;
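/* The loop above splits a multi-bit mask into a chain of single-bit records,
   least significant bit first: "mask & (~mask + 1)" isolates the lowest set
   bit, so an frmask of 0xd, for example, becomes the chain 0x1 -> 0x4 -> 0x8.
   The frgr_mem, gr_gr, gr_mem, br_mem and br_gr helpers below repeat the
   same idiom.  */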
1952 static unw_rec_list *
1953 output_frgr_mem (unsigned int gr_mask, unsigned int fr_mask)
1955 unw_rec_list *ptr = alloc_record (frgr_mem);
1956 unw_rec_list *cur = ptr;
1958 unwind.pending_saves = &cur->r.record.p;
1959 cur->r.record.p.frmask = fr_mask;
1962 unw_rec_list *prev = cur;
1964 /* Clear least significant set bit. */
1965 fr_mask &= ~(fr_mask & (~fr_mask + 1));
1966 if (!gr_mask && !fr_mask)
1968 cur = alloc_record (frgr_mem);
1969 cur->r.record.p.frmask = fr_mask;
1970 /* Retain only least significant bit. */
1971 prev->r.record.p.frmask ^= fr_mask;
1972 prev->r.record.p.next = cur;
1974 cur->r.record.p.grmask = gr_mask;
1977 unw_rec_list *prev = cur;
1979 /* Clear least significant set bit. */
1980 gr_mask &= ~(gr_mask & (~gr_mask + 1));
1983 cur = alloc_record (frgr_mem);
1984 cur->r.record.p.grmask = gr_mask;
1985 /* Retain only least significant bit. */
1986 prev->r.record.p.grmask ^= gr_mask;
1987 prev->r.record.p.next = cur;
1991 static unw_rec_list *
1992 output_gr_gr (unsigned int mask, unsigned int reg)
1994 unw_rec_list *ptr = alloc_record (gr_gr);
1995 unw_rec_list *cur = ptr;
1997 ptr->r.record.p.grmask = mask;
1998 ptr->r.record.p.r.gr = reg;
1999 unwind.pending_saves = &ptr->r.record.p;
2002 unw_rec_list *prev = cur;
2004 /* Clear least significant set bit. */
2005 mask &= ~(mask & (~mask + 1));
2008 cur = alloc_record (gr_gr);
2009 cur->r.record.p.grmask = mask;
2010 /* Indicate this record shouldn't be output. */
2011 cur->r.record.p.r.gr = REG_NUM;
2012 /* Retain only least significant bit. */
2013 prev->r.record.p.grmask ^= mask;
2014 prev->r.record.p.next = cur;
2018 static unw_rec_list *
2019 output_gr_mem (unsigned int mask)
2021 unw_rec_list *ptr = alloc_record (gr_mem);
2022 unw_rec_list *cur = ptr;
2024 ptr->r.record.p.grmask = mask;
2025 unwind.pending_saves = &ptr->r.record.p;
2028 unw_rec_list *prev = cur;
2030 /* Clear least significant set bit. */
2031 mask &= ~(mask & (~mask + 1));
2034 cur = alloc_record (gr_mem);
2035 cur->r.record.p.grmask = mask;
2036 /* Retain only least significant bit. */
2037 prev->r.record.p.grmask ^= mask;
2038 prev->r.record.p.next = cur;
2042 static unw_rec_list *
2043 output_br_mem (unsigned int mask)
2045 unw_rec_list *ptr = alloc_record (br_mem);
2046 unw_rec_list *cur = ptr;
2048 ptr->r.record.p.brmask = mask;
2049 unwind.pending_saves = &ptr->r.record.p;
2052 unw_rec_list *prev = cur;
2054 /* Clear least significant set bit. */
2055 mask &= ~(mask & (~mask + 1));
2058 cur = alloc_record (br_mem);
2059 cur->r.record.p.brmask = mask;
2060 /* Retain only least significant bit. */
2061 prev->r.record.p.brmask ^= mask;
2062 prev->r.record.p.next = cur;
2066 static unw_rec_list *
2067 output_br_gr (unsigned int mask, unsigned int reg)
2069 unw_rec_list *ptr = alloc_record (br_gr);
2070 unw_rec_list *cur = ptr;
2072 ptr->r.record.p.brmask = mask;
2073 ptr->r.record.p.r.gr = reg;
2074 unwind.pending_saves = &ptr->r.record.p;
2077 unw_rec_list *prev = cur;
2079 /* Clear least significant set bit. */
2080 mask &= ~(mask & (~mask + 1));
2083 cur = alloc_record (br_gr);
2084 cur->r.record.p.brmask = mask;
2085 /* Indicate this record shouldn't be output. */
2086 cur->r.record.p.r.gr = REG_NUM;
2087 /* Retain only least significant bit. */
2088 prev->r.record.p.brmask ^= mask;
2089 prev->r.record.p.next = cur;
2093 static unw_rec_list *
2094 output_spill_base (unsigned int offset)
2096 unw_rec_list *ptr = alloc_record (spill_base);
2097 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2101 static unw_rec_list *
2102 output_unat_when (void)
2104 unw_rec_list *ptr = alloc_record (unat_when);
2108 static unw_rec_list *
2109 output_unat_gr (unsigned int gr)
2111 unw_rec_list *ptr = alloc_record (unat_gr);
2112 ptr->r.record.p.r.gr = gr;
2116 static unw_rec_list *
2117 output_unat_psprel (unsigned int offset)
2119 unw_rec_list *ptr = alloc_record (unat_psprel);
2120 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2124 static unw_rec_list *
2125 output_unat_sprel (unsigned int offset)
2127 unw_rec_list *ptr = alloc_record (unat_sprel);
2128 ptr->r.record.p.off.sp = offset / 4;
2132 static unw_rec_list *
2133 output_lc_when (void)
2135 unw_rec_list *ptr = alloc_record (lc_when);
2139 static unw_rec_list *
2140 output_lc_gr (unsigned int gr)
2142 unw_rec_list *ptr = alloc_record (lc_gr);
2143 ptr->r.record.p.r.gr = gr;
2147 static unw_rec_list *
2148 output_lc_psprel (unsigned int offset)
2150 unw_rec_list *ptr = alloc_record (lc_psprel);
2151 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2155 static unw_rec_list *
2156 output_lc_sprel (unsigned int offset)
2158 unw_rec_list *ptr = alloc_record (lc_sprel);
2159 ptr->r.record.p.off.sp = offset / 4;
2163 static unw_rec_list *
2164 output_fpsr_when (void)
2166 unw_rec_list *ptr = alloc_record (fpsr_when);
2170 static unw_rec_list *
2171 output_fpsr_gr (unsigned int gr)
2173 unw_rec_list *ptr = alloc_record (fpsr_gr);
2174 ptr->r.record.p.r.gr = gr;
2178 static unw_rec_list *
2179 output_fpsr_psprel (unsigned int offset)
2181 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2182 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2186 static unw_rec_list *
2187 output_fpsr_sprel (unsigned int offset)
2189 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2190 ptr->r.record.p.off.sp = offset / 4;
2194 static unw_rec_list *
2195 output_priunat_when_gr (void)
2197 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2201 static unw_rec_list *
2202 output_priunat_when_mem (void)
2204 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2208 static unw_rec_list *
2209 output_priunat_gr (unsigned int gr)
2211 unw_rec_list *ptr = alloc_record (priunat_gr);
2212 ptr->r.record.p.r.gr = gr;
2216 static unw_rec_list *
2217 output_priunat_psprel (unsigned int offset)
2219 unw_rec_list *ptr = alloc_record (priunat_psprel);
2220 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2224 static unw_rec_list *
2225 output_priunat_sprel (unsigned int offset)
2227 unw_rec_list *ptr = alloc_record (priunat_sprel);
2228 ptr->r.record.p.off.sp = offset / 4;
2232 static unw_rec_list *
2233 output_bsp_when (void)
2235 unw_rec_list *ptr = alloc_record (bsp_when);
2239 static unw_rec_list *
2240 output_bsp_gr (unsigned int gr)
2242 unw_rec_list *ptr = alloc_record (bsp_gr);
2243 ptr->r.record.p.r.gr = gr;
2247 static unw_rec_list *
2248 output_bsp_psprel (unsigned int offset)
2250 unw_rec_list *ptr = alloc_record (bsp_psprel);
2251 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2255 static unw_rec_list *
2256 output_bsp_sprel (unsigned int offset)
2258 unw_rec_list *ptr = alloc_record (bsp_sprel);
2259 ptr->r.record.p.off.sp = offset / 4;
2263 static unw_rec_list *
2264 output_bspstore_when (void)
2266 unw_rec_list *ptr = alloc_record (bspstore_when);
2270 static unw_rec_list *
2271 output_bspstore_gr (unsigned int gr)
2273 unw_rec_list *ptr = alloc_record (bspstore_gr);
2274 ptr->r.record.p.r.gr = gr;
2278 static unw_rec_list *
2279 output_bspstore_psprel (unsigned int offset)
2281 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2282 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2286 static unw_rec_list *
2287 output_bspstore_sprel (unsigned int offset)
2289 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2290 ptr->r.record.p.off.sp = offset / 4;
2294 static unw_rec_list *
2295 output_rnat_when (void)
2297 unw_rec_list *ptr = alloc_record (rnat_when);
2301 static unw_rec_list *
2302 output_rnat_gr (unsigned int gr)
2304 unw_rec_list *ptr = alloc_record (rnat_gr);
2305 ptr->r.record.p.r.gr = gr;
2309 static unw_rec_list *
2310 output_rnat_psprel (unsigned int offset)
2312 unw_rec_list *ptr = alloc_record (rnat_psprel);
2313 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2317 static unw_rec_list *
2318 output_rnat_sprel (unsigned int offset)
2320 unw_rec_list *ptr = alloc_record (rnat_sprel);
2321 ptr->r.record.p.off.sp = offset / 4;
2325 static unw_rec_list *
2326 output_unwabi (unsigned long abi, unsigned long context)
2328 unw_rec_list *ptr = alloc_record (unwabi);
2329 ptr->r.record.p.abi = abi;
2330 ptr->r.record.p.context = context;
2334 static unw_rec_list *
2335 output_epilogue (unsigned long ecount)
2337 unw_rec_list *ptr = alloc_record (epilogue);
2338 ptr->r.record.b.ecount = ecount;
2342 static unw_rec_list *
2343 output_label_state (unsigned long label)
2345 unw_rec_list *ptr = alloc_record (label_state);
2346 ptr->r.record.b.label = label;
2350 static unw_rec_list *
2351 output_copy_state (unsigned long label)
2353 unw_rec_list *ptr = alloc_record (copy_state);
2354 ptr->r.record.b.label = label;
2358 static unw_rec_list *
2359 output_spill_psprel (unsigned int ab,
2361 unsigned int offset,
2362 unsigned int predicate)
2364 unw_rec_list *ptr = alloc_record (predicate ? spill_psprel_p : spill_psprel);
2365 ptr->r.record.x.ab = ab;
2366 ptr->r.record.x.reg = reg;
2367 ptr->r.record.x.where.pspoff = ENCODED_PSP_OFFSET (offset);
2368 ptr->r.record.x.qp = predicate;
2372 static unw_rec_list *
2373 output_spill_sprel (unsigned int ab,
2375 unsigned int offset,
2376 unsigned int predicate)
2378 unw_rec_list *ptr = alloc_record (predicate ? spill_sprel_p : spill_sprel);
2379 ptr->r.record.x.ab = ab;
2380 ptr->r.record.x.reg = reg;
2381 ptr->r.record.x.where.spoff = offset / 4;
2382 ptr->r.record.x.qp = predicate;
2386 static unw_rec_list *
2387 output_spill_reg (unsigned int ab,
2389 unsigned int targ_reg,
2391 unsigned int predicate)
2393 unw_rec_list *ptr = alloc_record (predicate ? spill_reg_p : spill_reg);
2394 ptr->r.record.x.ab = ab;
2395 ptr->r.record.x.reg = reg;
2396 ptr->r.record.x.where.reg = targ_reg;
2397 ptr->r.record.x.xy = xy;
2398 ptr->r.record.x.qp = predicate;
2402 /* Given a unw_rec_list, output the record in the correct format with the
2403 specified function. */
2406 process_one_record (unw_rec_list *ptr, vbyte_func f)
2408 unsigned int fr_mask, gr_mask;
2410 switch (ptr->r.type)
2412 /* This is a dummy record that takes up no space in the output. */
2420 /* These are taken care of by prologue/prologue_gr. */
2425 if (ptr->r.type == prologue_gr)
2426 output_R2_format (f, ptr->r.record.r.grmask,
2427 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2429 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2431 /* Output descriptor(s) for union of register spills (if any). */
2432 gr_mask = ptr->r.record.r.mask.gr_mem;
2433 fr_mask = ptr->r.record.r.mask.fr_mem;
2436 if ((fr_mask & ~0xfUL) == 0)
2437 output_P6_format (f, fr_mem, fr_mask);
2440 output_P5_format (f, gr_mask, fr_mask);
2445 output_P6_format (f, gr_mem, gr_mask);
2446 if (ptr->r.record.r.mask.br_mem)
2447 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2449 /* output imask descriptor if necessary: */
2450 if (ptr->r.record.r.mask.i)
2451 output_P4_format (f, ptr->r.record.r.mask.i,
2452 ptr->r.record.r.imask_size);
2456 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2460 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2461 ptr->r.record.p.size);
2474 output_P3_format (f, ptr->r.type, ptr->r.record.p.r.gr);
2477 output_P3_format (f, rp_br, ptr->r.record.p.r.br);
2480 output_P7_format (f, psp_sprel, ptr->r.record.p.off.sp, 0);
2488 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2497 output_P7_format (f, ptr->r.type, ptr->r.record.p.off.psp, 0);
2507 case bspstore_sprel:
2509 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.sp);
2512 if (ptr->r.record.p.r.gr < REG_NUM)
2514 const unw_rec_list *cur = ptr;
2516 gr_mask = cur->r.record.p.grmask;
2517 while ((cur = cur->r.record.p.next) != NULL)
2518 gr_mask |= cur->r.record.p.grmask;
2519 output_P9_format (f, gr_mask, ptr->r.record.p.r.gr);
2523 if (ptr->r.record.p.r.gr < REG_NUM)
2525 const unw_rec_list *cur = ptr;
2527 gr_mask = cur->r.record.p.brmask;
2528 while ((cur = cur->r.record.p.next) != NULL)
2529 gr_mask |= cur->r.record.p.brmask;
2530 output_P2_format (f, gr_mask, ptr->r.record.p.r.gr);
2534 as_bad (_("spill_mask record unimplemented."));
2536 case priunat_when_gr:
2537 case priunat_when_mem:
2541 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2543 case priunat_psprel:
2545 case bspstore_psprel:
2547 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.psp);
2550 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2553 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2557 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2560 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2561 ptr->r.record.x.reg, ptr->r.record.x.t,
2562 ptr->r.record.x.where.pspoff);
2565 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2566 ptr->r.record.x.reg, ptr->r.record.x.t,
2567 ptr->r.record.x.where.spoff);
2570 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2571 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2572 ptr->r.record.x.where.reg, ptr->r.record.x.t);
2574 case spill_psprel_p:
2575 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2576 ptr->r.record.x.ab, ptr->r.record.x.reg,
2577 ptr->r.record.x.t, ptr->r.record.x.where.pspoff);
2580 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2581 ptr->r.record.x.ab, ptr->r.record.x.reg,
2582 ptr->r.record.x.t, ptr->r.record.x.where.spoff);
2585 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2586 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2587 ptr->r.record.x.xy, ptr->r.record.x.where.reg,
2591 as_bad (_("record_type_not_valid"));
2596 /* Given a unw_rec_list list, process all the records with
2597 the specified function. */
2599 process_unw_records (unw_rec_list *list, vbyte_func f)
2602 for (ptr = list; ptr; ptr = ptr->next)
2603 process_one_record (ptr, f);
2606 /* Determine the size of a record list in bytes. */
2608 calc_record_size (unw_rec_list *list)
2611 process_unw_records (list, count_output);
2615 /* Return the number of bits set in the input value.
2616 Perhaps this has a better place... */
2617 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
2618 # define popcount __builtin_popcount
2621 popcount (unsigned x)
2623 static const unsigned char popcnt[16] =
2631 if (x < NELEMS (popcnt))
2633 return popcnt[x % NELEMS (popcnt)] + popcount (x / NELEMS (popcnt));
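/* Worked example of the fallback: popcount (0x2b) == popcnt[0xb]
   + popcount (0x2) == 3 + 1 == 4, i.e. the argument is consumed one hex
   digit (one table index) at a time.  */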
2637 /* Update IMASK bitmask to reflect the fact that one or more registers
2638 of type TYPE are saved starting at instruction with index T. If N
2639 bits are set in REGMASK, it is assumed that instructions T through
2640 T+N-1 save these registers.
2644 1: instruction saves next fp reg
2645 2: instruction saves next general reg
2646 3: instruction saves next branch reg */
2648 set_imask (unw_rec_list *region,
2649 unsigned long regmask,
2653 unsigned char *imask;
2654 unsigned long imask_size;
2658 imask = region->r.record.r.mask.i;
2659 imask_size = region->r.record.r.imask_size;
2662 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2663 imask = xmalloc (imask_size);
2664 memset (imask, 0, imask_size);
2666 region->r.record.r.imask_size = imask_size;
2667 region->r.record.r.mask.i = imask;
2671 pos = 2 * (3 - t % 4);
2674 if (i >= imask_size)
2676 as_bad (_("Ignoring attempt to spill beyond end of region"));
2680 imask[i] |= (type & 0x3) << pos;
2682 regmask &= (regmask - 1);
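/* Layout sketch: each instruction slot gets a 2-bit TYPE code in imask, four
   slots per byte and packed high-to-low (pos == 2 * (3 - t % 4)); byte 0 is
   presumably left for the P4 header byte emitted via output_P4_format, hence
   the trailing "+ 1" in imask_size above.  "regmask &= regmask - 1" clears
   the lowest set bit, so each iteration accounts for exactly one saved
   register.  */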
2692 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2693 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2694 containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates for frag sizes. */
2697 static unsigned long
2698 slot_index (unsigned long slot_addr,
2700 unsigned long first_addr,
2704 unsigned long s_index = 0;
2706 /* First time we are called, the initial address and frag are invalid. */
2707 if (first_addr == 0)
2710 /* If the two addresses are in different frags, then we need to add in
2711 the remaining size of this frag, and then the entire size of intermediate frags. */
2713 while (slot_frag != first_frag)
2715 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2719 /* We can get the final addresses only during and after relaxation. */
2721 if (first_frag->fr_next && first_frag->fr_next->fr_address)
2722 s_index += 3 * ((first_frag->fr_next->fr_address
2723 - first_frag->fr_address
2724 - first_frag->fr_fix) >> 4);
2727 /* We don't know what the final addresses will be. We try our
2728 best to estimate. */
2729 switch (first_frag->fr_type)
2735 as_fatal (_("Only constant space allocation is supported"));
2741 /* Take alignment into account. Assume the worst case
2742 before relaxation. */
2743 s_index += 3 * ((1 << first_frag->fr_offset) >> 4);
2747 if (first_frag->fr_symbol)
2749 as_fatal (_("Only constant offsets are supported"));
2753 s_index += 3 * (first_frag->fr_offset >> 4);
2757 /* Add in the full size of the frag converted to instruction slots. */
2758 s_index += 3 * (first_frag->fr_fix >> 4);
2759 /* Subtract away the initial part before first_addr. */
2760 s_index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2761 + ((first_addr & 0x3) - (start_addr & 0x3)));
2763 /* Move to the beginning of the next frag. */
2764 first_frag = first_frag->fr_next;
2765 first_addr = (unsigned long) &first_frag->fr_literal;
2767 /* This can happen if there is section switching in the middle of a
2768 function, causing the frag chain for the function to be broken.
2769 It is too difficult to recover safely from this problem, so we just
2770 exit with an error. */
2771 if (first_frag == NULL)
2772 as_fatal (_("Section switching in code is not supported."));
2775 /* Add in the used part of the last frag. */
2776 s_index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2777 + ((slot_addr & 0x3) - (first_addr & 0x3)));
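/* The arithmetic above leans on the slot_number encoding described before
   the unw_rec_list typedef: bits 4 and up select a 16-byte bundle (three
   slots each, hence the "3 *"), while the slot index 0-2 that was added to
   the frag address shows up in the low two bits, so the two differences sum
   to an instruction-slot count.  */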
2781 /* Optimize unwind record directives. */
2783 static unw_rec_list *
2784 optimize_unw_records (unw_rec_list *list)
2789 /* If the only unwind record is ".prologue" or ".prologue" followed
2790 by ".body", then we can optimize the unwind directives away. */
2791 if (list->r.type == prologue
2792 && (list->next->r.type == endp
2793 || (list->next->r.type == body && list->next->next->r.type == endp)))
2799 /* Given a complete record list, process any records which have
2800 unresolved fields (i.e., length counts for a prologue). After
2801 this has been run, all necessary information should be available
2802 within each record to generate an image. */
2805 fixup_unw_records (unw_rec_list *list, int before_relax)
2807 unw_rec_list *ptr, *region = 0;
2808 unsigned long first_addr = 0, rlen = 0, t;
2809 fragS *first_frag = 0;
2811 for (ptr = list; ptr; ptr = ptr->next)
2813 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2814 as_bad (_("Insn slot not set in unwind record."));
2815 t = slot_index (ptr->slot_number, ptr->slot_frag,
2816 first_addr, first_frag, before_relax);
2817 switch (ptr->r.type)
2825 unsigned long last_addr = 0;
2826 fragS *last_frag = NULL;
2828 first_addr = ptr->slot_number;
2829 first_frag = ptr->slot_frag;
2830 /* Find either the next body/prologue start, or the end of
2831 the function, and determine the size of the region. */
2832 for (last = ptr->next; last != NULL; last = last->next)
2833 if (last->r.type == prologue || last->r.type == prologue_gr
2834 || last->r.type == body || last->r.type == endp)
2836 last_addr = last->slot_number;
2837 last_frag = last->slot_frag;
2840 size = slot_index (last_addr, last_frag, first_addr, first_frag,
2842 rlen = ptr->r.record.r.rlen = size;
2843 if (ptr->r.type == body)
2844 /* End of region. */
2852 ptr->r.record.b.t = rlen - 1 - t;
2854 /* This happens when a memory-stack-less procedure uses a
2855 ".restore sp" directive at the end of a region to pop the frame state. */
2857 ptr->r.record.b.t = 0;
2868 case priunat_when_gr:
2869 case priunat_when_mem:
2873 ptr->r.record.p.t = t;
2881 case spill_psprel_p:
2882 ptr->r.record.x.t = t;
2888 as_bad (_("frgr_mem record before region record!"));
2891 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2892 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2893 set_imask (region, ptr->r.record.p.frmask, t, 1);
2894 set_imask (region, ptr->r.record.p.grmask, t, 2);
2899 as_bad (_("fr_mem record before region record!"));
2902 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2903 set_imask (region, ptr->r.record.p.frmask, t, 1);
2908 as_bad (_("gr_mem record before region record!"));
2911 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2912 set_imask (region, ptr->r.record.p.grmask, t, 2);
2917 as_bad (_("br_mem record before region record!"));
2920 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2921 set_imask (region, ptr->r.record.p.brmask, t, 3);
2927 as_bad (_("gr_gr record before region record!"));
2930 set_imask (region, ptr->r.record.p.grmask, t, 2);
2935 as_bad (_("br_gr record before region record!"));
2938 set_imask (region, ptr->r.record.p.brmask, t, 3);
2947 /* Estimate the size of a frag before relaxing. We only have one type of frag
2948 to handle here, which is the unwind info frag. */
2951 ia64_estimate_size_before_relax (fragS *frag,
2952 asection *segtype ATTRIBUTE_UNUSED)
2957 /* ??? This code is identical to the first part of ia64_convert_frag. */
2958 list = (unw_rec_list *) frag->fr_opcode;
2959 fixup_unw_records (list, 0);
2961 len = calc_record_size (list);
2962 /* pad to pointer-size boundary. */
2963 pad = len % md.pointer_size;
2965 len += md.pointer_size - pad;
2966 /* Add 8 for the header. */
2968 /* Add a pointer for the personality offset. */
2969 if (frag->fr_offset)
2970 size += md.pointer_size;
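/* A worked example of the sizing above (a sketch, not taken from the
   sources): with 8-byte pointers, 22 bytes of descriptors pad to 24, the
   8-byte header brings the total to 32, and a personality routine, if
   present, adds one more pointer for a final size of 40.  */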
2972 /* fr_var carries the max_chars that we created the fragment with.
2973 We must, of course, have allocated enough memory earlier. */
2974 gas_assert (frag->fr_var >= size);
2976 return frag->fr_fix + size;
2979 /* This function converts a rs_machine_dependent variant frag into a
2980 normal fill frag with the unwind image from the record list. */
2982 ia64_convert_frag (fragS *frag)
2988 /* ??? This code is identical to ia64_estimate_size_before_relax. */
2989 list = (unw_rec_list *) frag->fr_opcode;
2990 fixup_unw_records (list, 0);
2992 len = calc_record_size (list);
2993 /* pad to pointer-size boundary. */
2994 pad = len % md.pointer_size;
2996 len += md.pointer_size - pad;
2997 /* Add 8 for the header. */
2999 /* Add a pointer for the personality offset. */
3000 if (frag->fr_offset)
3001 size += md.pointer_size;
3003 /* fr_var carries the max_chars that we created the fragment with.
3004 We must, of course, have allocated enough memory earlier. */
3005 gas_assert (frag->fr_var >= size);
3007 /* Initialize the header area. fr_offset is initialized with
3008 unwind.personality_routine. */
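/* As built below, the 8-byte header holds the format version in the bits
   at and above position 48, the flag word computed below (handler/ABI
   flags) starting at bit 32, and the descriptor-area length, counted in
   pointer-size units, in the low 32 bits.  */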
3009 if (frag->fr_offset)
3011 if (md.flags & EF_IA_64_ABI64)
3012 flag_value = (bfd_vma) 3 << 32;
3014 /* 32-bit unwind info block. */
3015 flag_value = (bfd_vma) 0x1003 << 32;
3020 md_number_to_chars (frag->fr_literal,
3021 (((bfd_vma) 1 << 48) /* Version. */
3022 | flag_value /* U & E handler flags. */
3023 | (len / md.pointer_size)), /* Length. */
3026 /* Skip the header. */
3027 vbyte_mem_ptr = frag->fr_literal + 8;
3028 process_unw_records (list, output_vbyte_mem);
3030 /* Fill the padding bytes with zeros. */
3032 md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0,
3033 md.pointer_size - pad);
3034 /* Fill the unwind personality with zeros. */
3035 if (frag->fr_offset)
3036 md_number_to_chars (frag->fr_literal + size - md.pointer_size, 0,
3039 frag->fr_fix += size;
3040 frag->fr_type = rs_fill;
3042 frag->fr_offset = 0;
3046 parse_predicate_and_operand (expressionS *e, unsigned *qp, const char *po)
3048 int sep = parse_operand_and_eval (e, ',');
3050 *qp = e->X_add_number - REG_P;
3051 if (e->X_op != O_register || *qp > 63)
3053 as_bad (_("First operand to .%s must be a predicate"), po);
3057 as_warn (_("Pointless use of p0 as first operand to .%s"), po);
3059 sep = parse_operand_and_eval (e, ',');
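/* A descriptive note for the two converters below: convert_expr_to_ab_reg
   maps a register expression onto the (ab, reg) pair used by spill_reg
   style unwind records -- ab selects the class (preserved GRs r4-r7,
   preserved FRs f2-f5/f16-f31, preserved BRs b1-b5, or one of the special
   registers enumerated in the switch) and reg the index within that
   class.  convert_expr_to_xy_reg does the same for spill targets, where
   x/y select a writable GR, FR or BR.  */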
3066 convert_expr_to_ab_reg (const expressionS *e,
3072 unsigned int reg = e->X_add_number;
3074 *ab = *regp = 0; /* Anything valid is good here. */
3076 if (e->X_op != O_register)
3077 reg = REG_GR; /* Anything invalid is good here. */
3079 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
3082 *regp = reg - REG_GR;
3084 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
3085 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
3088 *regp = reg - REG_FR;
3090 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
3093 *regp = reg - REG_BR;
3100 case REG_PR: *regp = 0; break;
3101 case REG_PSP: *regp = 1; break;
3102 case REG_PRIUNAT: *regp = 2; break;
3103 case REG_BR + 0: *regp = 3; break;
3104 case REG_AR + AR_BSP: *regp = 4; break;
3105 case REG_AR + AR_BSPSTORE: *regp = 5; break;
3106 case REG_AR + AR_RNAT: *regp = 6; break;
3107 case REG_AR + AR_UNAT: *regp = 7; break;
3108 case REG_AR + AR_FPSR: *regp = 8; break;
3109 case REG_AR + AR_PFS: *regp = 9; break;
3110 case REG_AR + AR_LC: *regp = 10; break;
3113 as_bad (_("Operand %d to .%s must be a preserved register"), n, po);
3120 convert_expr_to_xy_reg (const expressionS *e,
3126 unsigned int reg = e->X_add_number;
3128 *xy = *regp = 0; /* Anything valid is good here. */
3130 if (e->X_op != O_register)
3131 reg = REG_GR; /* Anything invalid is good here. */
3133 if (reg >= (REG_GR + 1) && reg <= (REG_GR + 127))
3136 *regp = reg - REG_GR;
3138 else if (reg >= (REG_FR + 2) && reg <= (REG_FR + 127))
3141 *regp = reg - REG_FR;
3143 else if (reg >= REG_BR && reg <= (REG_BR + 7))
3146 *regp = reg - REG_BR;
3149 as_bad (_("Operand %d to .%s must be a writable register"), n, po);
3155 /* The current frag is an alignment frag. */
3156 align_frag = frag_now;
3157 s_align_bytes (arg);
3161 dot_radix (int dummy ATTRIBUTE_UNUSED)
3168 if (is_it_end_of_statement ())
3170 radix = input_line_pointer;
3171 ch = get_symbol_end ();
3172 ia64_canonicalize_symbol_name (radix);
3173 if (strcasecmp (radix, "C"))
3174 as_bad (_("Radix `%s' unsupported or invalid"), radix);
3175 *input_line_pointer = ch;
3176 demand_empty_rest_of_line ();
3179 /* Helper function for .loc directives. If the assembler is not generating
3180 line number info, then we need to remember which instructions have a .loc
3181 directive, and only call dwarf2_gen_line_info for those instructions. */
3186 CURR_SLOT.loc_directive_seen = 1;
3187 dwarf2_directive_loc (x);
3190 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
3192 dot_special_section (int which)
3194 set_section ((char *) special_section_name[which]);
3197 /* Return -1 for warning and 0 for error. */
3200 unwind_diagnostic (const char * region, const char *directive)
3202 if (md.unwind_check == unwind_check_warning)
3204 as_warn (_(".%s outside of %s"), directive, region);
3209 as_bad (_(".%s outside of %s"), directive, region);
3210 ignore_rest_of_line ();
3215 /* Return 1 if a directive is in a procedure, -1 if a directive isn't in
3216 a procedure but the unwind directive check is set to warning, 0 if
3217 a directive isn't in a procedure and the unwind directive check is set
3218 to error. */
3221 in_procedure (const char *directive)
3223 if (unwind.proc_pending.sym
3224 && (!unwind.saved_text_seg || strcmp (directive, "endp") == 0))
3226 return unwind_diagnostic ("procedure", directive);
3229 /* Return 1 if a directive is in a prologue, -1 if a directive isn't in
3230 a prologue but the unwind directive check is set to warning, 0 if
3231 a directive isn't in a prologue and the unwind directive check is set
3232 to error. */
3235 in_prologue (const char *directive)
3237 int in = in_procedure (directive);
3239 if (in > 0 && !unwind.prologue)
3240 in = unwind_diagnostic ("prologue", directive);
3241 check_pending_save ();
3245 /* Return 1 if a directive is in a body, -1 if a directive isn't in
3246 a body but the unwind directive check is set to warning, 0 if
3247 a directive isn't in a body and the unwind directive check is set
3248 to error. */
3251 in_body (const char *directive)
3253 int in = in_procedure (directive);
3255 if (in > 0 && !unwind.body)
3256 in = unwind_diagnostic ("body region", directive);
3261 add_unwind_entry (unw_rec_list *ptr, int sep)
3266 unwind.tail->next = ptr;
3271 /* The current entry can in fact be a chain of unwind entries. */
3272 if (unwind.current_entry == NULL)
3273 unwind.current_entry = ptr;
3276 /* The current entry can in fact be a chain of unwind entries. */
3277 if (unwind.current_entry == NULL)
3278 unwind.current_entry = ptr;
3282 /* Parse a tag permitted for the current directive. */
3286 ch = get_symbol_end ();
3287 /* FIXME: For now, just issue a warning that this isn't implemented. */
3294 as_warn (_("Tags on unwind pseudo-ops aren't supported, yet"));
3297 *input_line_pointer = ch;
3299 if (sep != NOT_A_CHAR)
3300 demand_empty_rest_of_line ();
3304 dot_fframe (int dummy ATTRIBUTE_UNUSED)
3309 if (!in_prologue ("fframe"))
3312 sep = parse_operand_and_eval (&e, ',');
3314 if (e.X_op != O_constant)
3316 as_bad (_("First operand to .fframe must be a constant"));
3319 add_unwind_entry (output_mem_stack_f (e.X_add_number), sep);
3323 dot_vframe (int dummy ATTRIBUTE_UNUSED)
3329 if (!in_prologue ("vframe"))
3332 sep = parse_operand_and_eval (&e, ',');
3333 reg = e.X_add_number - REG_GR;
3334 if (e.X_op != O_register || reg > 127)
3336 as_bad (_("First operand to .vframe must be a general register"));
3339 add_unwind_entry (output_mem_stack_v (), sep);
3340 if (! (unwind.prologue_mask & 2))
3341 add_unwind_entry (output_psp_gr (reg), NOT_A_CHAR);
3342 else if (reg != unwind.prologue_gr
3343 + (unsigned) popcount (unwind.prologue_mask & (-2 << 1)))
3344 as_warn (_("Operand of .vframe contradicts .prologue"));
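/* A note on the check above: .prologue's mask assigns rp, ar.pfs, psp and
   preds (mask bits 8, 4, 2 and 1) to consecutive GRs starting at
   prologue_gr, so the expected GR for psp is prologue_gr plus the number
   of higher-priority mask bits that are set -- which is exactly what the
   popcount expression computes.  */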
3348 dot_vframesp (int psp)
3354 as_warn (_(".vframepsp is meaningless, assuming .vframesp was meant"));
3356 if (!in_prologue ("vframesp"))
3359 sep = parse_operand_and_eval (&e, ',');
3360 if (e.X_op != O_constant)
3362 as_bad (_("Operand to .vframesp must be a constant (sp-relative offset)"));
3365 add_unwind_entry (output_mem_stack_v (), sep);
3366 add_unwind_entry (output_psp_sprel (e.X_add_number), NOT_A_CHAR);
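/* A usage sketch for .save (assuming the usual ia64 unwind directive
   syntax):

     .save ar.pfs, r35
     .save rp, r34

   records that ar.pfs and b0 (rp) were copied into r35 and r34 at this
   point in the prologue.  */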
3370 dot_save (int dummy ATTRIBUTE_UNUSED)
3373 unsigned reg1, reg2;
3376 if (!in_prologue ("save"))
3379 sep = parse_operand_and_eval (&e1, ',');
3381 sep = parse_operand_and_eval (&e2, ',');
3385 reg1 = e1.X_add_number;
3386 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3387 if (e1.X_op != O_register)
3389 as_bad (_("First operand to .save not a register"));
3390 reg1 = REG_PR; /* Anything valid is good here. */
3392 reg2 = e2.X_add_number - REG_GR;
3393 if (e2.X_op != O_register || reg2 > 127)
3395 as_bad (_("Second operand to .save not a valid register"));
3400 case REG_AR + AR_BSP:
3401 add_unwind_entry (output_bsp_when (), sep);
3402 add_unwind_entry (output_bsp_gr (reg2), NOT_A_CHAR);
3404 case REG_AR + AR_BSPSTORE:
3405 add_unwind_entry (output_bspstore_when (), sep);
3406 add_unwind_entry (output_bspstore_gr (reg2), NOT_A_CHAR);
3408 case REG_AR + AR_RNAT:
3409 add_unwind_entry (output_rnat_when (), sep);
3410 add_unwind_entry (output_rnat_gr (reg2), NOT_A_CHAR);
3412 case REG_AR + AR_UNAT:
3413 add_unwind_entry (output_unat_when (), sep);
3414 add_unwind_entry (output_unat_gr (reg2), NOT_A_CHAR);
3416 case REG_AR + AR_FPSR:
3417 add_unwind_entry (output_fpsr_when (), sep);
3418 add_unwind_entry (output_fpsr_gr (reg2), NOT_A_CHAR);
3420 case REG_AR + AR_PFS:
3421 add_unwind_entry (output_pfs_when (), sep);
3422 if (! (unwind.prologue_mask & 4))
3423 add_unwind_entry (output_pfs_gr (reg2), NOT_A_CHAR);
3424 else if (reg2 != unwind.prologue_gr
3425 + (unsigned) popcount (unwind.prologue_mask & (-4 << 1)))
3426 as_warn (_("Second operand of .save contradicts .prologue"));
3428 case REG_AR + AR_LC:
3429 add_unwind_entry (output_lc_when (), sep);
3430 add_unwind_entry (output_lc_gr (reg2), NOT_A_CHAR);
3433 add_unwind_entry (output_rp_when (), sep);
3434 if (! (unwind.prologue_mask & 8))
3435 add_unwind_entry (output_rp_gr (reg2), NOT_A_CHAR);
3436 else if (reg2 != unwind.prologue_gr)
3437 as_warn (_("Second operand of .save contradicts .prologue"));
3440 add_unwind_entry (output_preds_when (), sep);
3441 if (! (unwind.prologue_mask & 1))
3442 add_unwind_entry (output_preds_gr (reg2), NOT_A_CHAR);
3443 else if (reg2 != unwind.prologue_gr
3444 + (unsigned) popcount (unwind.prologue_mask & (-1 << 1)))
3445 as_warn (_("Second operand of .save contradicts .prologue"));
3448 add_unwind_entry (output_priunat_when_gr (), sep);
3449 add_unwind_entry (output_priunat_gr (reg2), NOT_A_CHAR);
3452 as_bad (_("First operand to .save not a valid register"));
3453 add_unwind_entry (NULL, sep);
3459 dot_restore (int dummy ATTRIBUTE_UNUSED)
3462 unsigned long ecount; /* # of _additional_ regions to pop */
3465 if (!in_body ("restore"))
3468 sep = parse_operand_and_eval (&e1, ',');
3469 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3470 as_bad (_("First operand to .restore must be stack pointer (sp)"));
3476 sep = parse_operand_and_eval (&e2, ',');
3477 if (e2.X_op != O_constant || e2.X_add_number < 0)
3479 as_bad (_("Second operand to .restore must be a constant >= 0"));
3480 e2.X_add_number = 0;
3482 ecount = e2.X_add_number;
3485 ecount = unwind.prologue_count - 1;
3487 if (ecount >= unwind.prologue_count)
3489 as_bad (_("Epilogue count of %lu exceeds number of nested prologues (%u)"),
3490 ecount + 1, unwind.prologue_count);
3494 add_unwind_entry (output_epilogue (ecount), sep);
3496 if (ecount < unwind.prologue_count)
3497 unwind.prologue_count -= ecount + 1;
3499 unwind.prologue_count = 0;
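/* Usage sketch: ".restore sp" marks the point where the stack frame set up
   by the most recent prologue is popped; an optional second operand, e.g.
   ".restore sp, 1", gives the number of additional nested prologues to pop
   as well (see the ecount handling above).  */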
3503 dot_restorereg (int pred)
3505 unsigned int qp, ab, reg;
3508 const char * const po = pred ? "restorereg.p" : "restorereg";
3510 if (!in_procedure (po))
3514 sep = parse_predicate_and_operand (&e, &qp, po);
3517 sep = parse_operand_and_eval (&e, ',');
3520 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
3522 add_unwind_entry (output_spill_reg (ab, reg, 0, 0, qp), sep);
3525 static char *special_linkonce_name[] =
3527 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
3531 start_unwind_section (const segT text_seg, int sec_index)
3534 Use a slightly ugly scheme to derive the unwind section names from
3535 the text section name:
3537 text sect.            unwind table sect.
3538 name:                 name:                        comments:
3539 ----------            -----------------            --------------------------------
3540 .text                 .IA_64.unwind
3541 .text.foo             .IA_64.unwind.text.foo
3542 .foo                  .IA_64.unwind.foo
3543 .gnu.linkonce.t.foo
3544                       .gnu.linkonce.ia64unw.foo
3545 _info                 .IA_64.unwind_info           gas issues error message (ditto)
3546 _infoFOO              .IA_64.unwind_infoFOO        gas issues error message (ditto)
3548 This mapping is done so that:
3550 (a) An object file with unwind info only in .text will use
3551 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3552 This follows the letter of the ABI and also ensures backwards
3553 compatibility with older toolchains.
3555 (b) An object file with unwind info in multiple text sections
3556 will use separate unwind sections for each text section.
3557 This allows us to properly set the "sh_info" and "sh_link"
3558 fields in SHT_IA_64_UNWIND as required by the ABI and also
3559 lets GNU ld support programs with multiple segments
3560 containing unwind info (as might be the case for certain
3561 embedded applications).
3563 (c) An error is issued if there would be a name clash.
3566 const char *text_name, *sec_text_name;
3568 const char *prefix = special_section_name [sec_index];
3570 size_t prefix_len, suffix_len, sec_name_len;
3572 sec_text_name = segment_name (text_seg);
3573 text_name = sec_text_name;
3574 if (strncmp (text_name, "_info", 5) == 0)
3576 as_bad (_("Illegal section name `%s' (causes unwind section name clash)"),
3578 ignore_rest_of_line ();
3581 if (strcmp (text_name, ".text") == 0)
3584 /* Build the unwind section name by appending the (possibly stripped)
3585 text section name to the unwind prefix. */
3587 if (strncmp (text_name, ".gnu.linkonce.t.",
3588 sizeof (".gnu.linkonce.t.") - 1) == 0)
3590 prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND];
3591 suffix += sizeof (".gnu.linkonce.t.") - 1;
3594 prefix_len = strlen (prefix);
3595 suffix_len = strlen (suffix);
3596 sec_name_len = prefix_len + suffix_len;
3597 sec_name = alloca (sec_name_len + 1);
3598 memcpy (sec_name, prefix, prefix_len);
3599 memcpy (sec_name + prefix_len, suffix, suffix_len);
3600 sec_name [sec_name_len] = '\0';
3602 /* Handle COMDAT group. */
3603 if ((text_seg->flags & SEC_LINK_ONCE) != 0
3604 && (elf_section_flags (text_seg) & SHF_GROUP) != 0)
3607 size_t len, group_name_len;
3608 const char *group_name = elf_group_name (text_seg);
3610 if (group_name == NULL)
3612 as_bad (_("Group section `%s' has no group signature"),
3614 ignore_rest_of_line ();
3617 /* We have to construct a fake section directive. */
3618 group_name_len = strlen (group_name);
3620 + 16 /* ,"aG",@progbits, */
3621 + group_name_len /* ,group_name */
3624 section = alloca (len + 1);
3625 memcpy (section, sec_name, sec_name_len);
3626 memcpy (section + sec_name_len, ",\"aG\",@progbits,", 16);
3627 memcpy (section + sec_name_len + 16, group_name, group_name_len);
3628 memcpy (section + len - 7, ",comdat", 7);
3629 section [len] = '\0';
3630 set_section (section);
3634 set_section (sec_name);
3635 bfd_set_section_flags (stdoutput, now_seg,
3636 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3639 elf_linked_to_section (now_seg) = text_seg;
3643 generate_unwind_image (const segT text_seg)
3648 /* Mark the end of the unwind info, so that we can compute the size of the
3649 last unwind region. */
3650 add_unwind_entry (output_endp (), NOT_A_CHAR);
3652 /* Force out pending instructions, to make sure all unwind records have
3653 a valid slot_number field. */
3654 ia64_flush_insns ();
3656 /* Generate the unwind record. */
3657 list = optimize_unw_records (unwind.list);
3658 fixup_unw_records (list, 1);
3659 size = calc_record_size (list);
3661 if (size > 0 || unwind.force_unwind_entry)
3663 unwind.force_unwind_entry = 0;
3664 /* pad to pointer-size boundary. */
3665 pad = size % md.pointer_size;
3667 size += md.pointer_size - pad;
3668 /* Add 8 for the header. */
3670 /* Add a pointer for the personality offset. */
3671 if (unwind.personality_routine)
3672 size += md.pointer_size;
3675 /* If there are unwind records, switch sections, and output the info. */
3679 bfd_reloc_code_real_type reloc;
3681 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO);
3683 /* Make sure the section has 4 byte alignment for ILP32 and
3684 8 byte alignment for LP64. */
3685 frag_align (md.pointer_size_shift, 0, 0);
3686 record_alignment (now_seg, md.pointer_size_shift);
3688 /* Set expression which points to start of unwind descriptor area. */
3689 unwind.info = expr_build_dot ();
3691 frag_var (rs_machine_dependent, size, size, 0, 0,
3692 (offsetT) (long) unwind.personality_routine,
3695 /* Add the personality address to the image. */
3696 if (unwind.personality_routine != 0)
3698 exp.X_op = O_symbol;
3699 exp.X_add_symbol = unwind.personality_routine;
3700 exp.X_add_number = 0;
3702 if (md.flags & EF_IA_64_BE)
3704 if (md.flags & EF_IA_64_ABI64)
3705 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
3707 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
3711 if (md.flags & EF_IA_64_ABI64)
3712 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
3714 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
3717 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
3718 md.pointer_size, &exp, 0, reloc);
3719 unwind.personality_routine = 0;
3723 free_saved_prologue_counts ();
3724 unwind.list = unwind.tail = unwind.current_entry = NULL;
3728 dot_handlerdata (int dummy ATTRIBUTE_UNUSED)
3730 if (!in_procedure ("handlerdata"))
3732 unwind.force_unwind_entry = 1;
3734 /* Remember which segment we're in so we can switch back after .endp */
3735 unwind.saved_text_seg = now_seg;
3736 unwind.saved_text_subseg = now_subseg;
3738 /* Generate unwind info into unwind-info section and then leave that
3739 section as the currently active one so dataXX directives go into
3740 the language specific data area of the unwind info block. */
3741 generate_unwind_image (now_seg);
3742 demand_empty_rest_of_line ();
3746 dot_unwentry (int dummy ATTRIBUTE_UNUSED)
3748 if (!in_procedure ("unwentry"))
3750 unwind.force_unwind_entry = 1;
3751 demand_empty_rest_of_line ();
3755 dot_altrp (int dummy ATTRIBUTE_UNUSED)
3760 if (!in_prologue ("altrp"))
3763 parse_operand_and_eval (&e, 0);
3764 reg = e.X_add_number - REG_BR;
3765 if (e.X_op != O_register || reg > 7)
3767 as_bad (_("First operand to .altrp not a valid branch register"));
3770 add_unwind_entry (output_rp_br (reg), 0);
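/* A usage sketch for the .savesp/.savepsp handler below:

     .savesp ar.unat, 16
     .savepsp pr, 8

   records that the named register was spilled to memory at the given
   sp-relative (or, for .savepsp, psp-relative) offset.  */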
3774 dot_savemem (int psprel)
3779 const char * const po = psprel ? "savepsp" : "savesp";
3781 if (!in_prologue (po))
3784 sep = parse_operand_and_eval (&e1, ',');
3786 sep = parse_operand_and_eval (&e2, ',');
3790 reg1 = e1.X_add_number;
3791 val = e2.X_add_number;
3793 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3794 if (e1.X_op != O_register)
3796 as_bad (_("First operand to .%s not a register"), po);
3797 reg1 = REG_PR; /* Anything valid is good here. */
3799 if (e2.X_op != O_constant)
3801 as_bad (_("Second operand to .%s not a constant"), po);
3807 case REG_AR + AR_BSP:
3808 add_unwind_entry (output_bsp_when (), sep);
3809 add_unwind_entry ((psprel
3811 : output_bsp_sprel) (val), NOT_A_CHAR);
3813 case REG_AR + AR_BSPSTORE:
3814 add_unwind_entry (output_bspstore_when (), sep);
3815 add_unwind_entry ((psprel
3816 ? output_bspstore_psprel
3817 : output_bspstore_sprel) (val), NOT_A_CHAR);
3819 case REG_AR + AR_RNAT:
3820 add_unwind_entry (output_rnat_when (), sep);
3821 add_unwind_entry ((psprel
3822 ? output_rnat_psprel
3823 : output_rnat_sprel) (val), NOT_A_CHAR);
3825 case REG_AR + AR_UNAT:
3826 add_unwind_entry (output_unat_when (), sep);
3827 add_unwind_entry ((psprel
3828 ? output_unat_psprel
3829 : output_unat_sprel) (val), NOT_A_CHAR);
3831 case REG_AR + AR_FPSR:
3832 add_unwind_entry (output_fpsr_when (), sep);
3833 add_unwind_entry ((psprel
3834 ? output_fpsr_psprel
3835 : output_fpsr_sprel) (val), NOT_A_CHAR);
3837 case REG_AR + AR_PFS:
3838 add_unwind_entry (output_pfs_when (), sep);
3839 add_unwind_entry ((psprel
3841 : output_pfs_sprel) (val), NOT_A_CHAR);
3843 case REG_AR + AR_LC:
3844 add_unwind_entry (output_lc_when (), sep);
3845 add_unwind_entry ((psprel
3847 : output_lc_sprel) (val), NOT_A_CHAR);
3850 add_unwind_entry (output_rp_when (), sep);
3851 add_unwind_entry ((psprel
3853 : output_rp_sprel) (val), NOT_A_CHAR);
3856 add_unwind_entry (output_preds_when (), sep);
3857 add_unwind_entry ((psprel
3858 ? output_preds_psprel
3859 : output_preds_sprel) (val), NOT_A_CHAR);
3862 add_unwind_entry (output_priunat_when_mem (), sep);
3863 add_unwind_entry ((psprel
3864 ? output_priunat_psprel
3865 : output_priunat_sprel) (val), NOT_A_CHAR);
3868 as_bad (_("First operand to .%s not a valid register"), po);
3869 add_unwind_entry (NULL, sep);
3875 dot_saveg (int dummy ATTRIBUTE_UNUSED)
3881 if (!in_prologue ("save.g"))
3884 sep = parse_operand_and_eval (&e, ',');
3886 grmask = e.X_add_number;
3887 if (e.X_op != O_constant
3888 || e.X_add_number <= 0
3889 || e.X_add_number > 0xf)
3891 as_bad (_("First operand to .save.g must be a positive 4-bit constant"));
3898 int n = popcount (grmask);
3900 parse_operand_and_eval (&e, 0);
3901 reg = e.X_add_number - REG_GR;
3902 if (e.X_op != O_register || reg > 127)
3904 as_bad (_("Second operand to .save.g must be a general register"));
3907 else if (reg > 128U - n)
3909 as_bad (_("Second operand to .save.g must be the first of %d general registers"), n);
3912 add_unwind_entry (output_gr_gr (grmask, reg), 0);
3915 add_unwind_entry (output_gr_mem (grmask), 0);
3919 dot_savef (int dummy ATTRIBUTE_UNUSED)
3923 if (!in_prologue ("save.f"))
3926 parse_operand_and_eval (&e, 0);
3928 if (e.X_op != O_constant
3929 || e.X_add_number <= 0
3930 || e.X_add_number > 0xfffff)
3932 as_bad (_("Operand to .save.f must be a positive 20-bit constant"));
3935 add_unwind_entry (output_fr_mem (e.X_add_number), 0);
3939 dot_saveb (int dummy ATTRIBUTE_UNUSED)
3945 if (!in_prologue ("save.b"))
3948 sep = parse_operand_and_eval (&e, ',');
3950 brmask = e.X_add_number;
3951 if (e.X_op != O_constant
3952 || e.X_add_number <= 0
3953 || e.X_add_number > 0x1f)
3955 as_bad (_("First operand to .save.b must be a positive 5-bit constant"));
3962 int n = popcount (brmask);
3964 parse_operand_and_eval (&e, 0);
3965 reg = e.X_add_number - REG_GR;
3966 if (e.X_op != O_register || reg > 127)
3968 as_bad (_("Second operand to .save.b must be a general register"));
3971 else if (reg > 128U - n)
3973 as_bad (_("Second operand to .save.b must be the first of %d general registers"), n);
3976 add_unwind_entry (output_br_gr (brmask, reg), 0);
3979 add_unwind_entry (output_br_mem (brmask), 0);
3983 dot_savegf (int dummy ATTRIBUTE_UNUSED)
3987 if (!in_prologue ("save.gf"))
3990 if (parse_operand_and_eval (&e1, ',') == ',')
3991 parse_operand_and_eval (&e2, 0);
3995 if (e1.X_op != O_constant
3996 || e1.X_add_number < 0
3997 || e1.X_add_number > 0xf)
3999 as_bad (_("First operand to .save.gf must be a non-negative 4-bit constant"));
4001 e1.X_add_number = 0;
4003 if (e2.X_op != O_constant
4004 || e2.X_add_number < 0
4005 || e2.X_add_number > 0xfffff)
4007 as_bad (_("Second operand to .save.gf must be a non-negative 20-bit constant"));
4009 e2.X_add_number = 0;
4011 if (e1.X_op == O_constant
4012 && e2.X_op == O_constant
4013 && e1.X_add_number == 0
4014 && e2.X_add_number == 0)
4015 as_bad (_("Operands to .save.gf may not be both zero"));
4017 add_unwind_entry (output_frgr_mem (e1.X_add_number, e2.X_add_number), 0);
4021 dot_spill (int dummy ATTRIBUTE_UNUSED)
4025 if (!in_prologue ("spill"))
4028 parse_operand_and_eval (&e, 0);
4030 if (e.X_op != O_constant)
4032 as_bad (_("Operand to .spill must be a constant"));
4035 add_unwind_entry (output_spill_base (e.X_add_number), 0);
4039 dot_spillreg (int pred)
4042 unsigned int qp, ab, xy, reg, treg;
4044 const char * const po = pred ? "spillreg.p" : "spillreg";
4046 if (!in_procedure (po))
4050 sep = parse_predicate_and_operand (&e, &qp, po);
4053 sep = parse_operand_and_eval (&e, ',');
4056 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4059 sep = parse_operand_and_eval (&e, ',');
4062 convert_expr_to_xy_reg (&e, &xy, &treg, po, 2 + pred);
4064 add_unwind_entry (output_spill_reg (ab, reg, treg, xy, qp), sep);
4068 dot_spillmem (int psprel)
4071 int pred = (psprel < 0), sep;
4072 unsigned int qp, ab, reg;
4078 po = psprel ? "spillpsp.p" : "spillsp.p";
4081 po = psprel ? "spillpsp" : "spillsp";
4083 if (!in_procedure (po))
4087 sep = parse_predicate_and_operand (&e, &qp, po);
4090 sep = parse_operand_and_eval (&e, ',');
4093 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4096 sep = parse_operand_and_eval (&e, ',');
4099 if (e.X_op != O_constant)
4101 as_bad (_("Operand %d to .%s must be a constant"), 2 + pred, po);
4106 add_unwind_entry (output_spill_psprel (ab, reg, e.X_add_number, qp), sep);
4108 add_unwind_entry (output_spill_sprel (ab, reg, e.X_add_number, qp), sep);
4112 get_saved_prologue_count (unsigned long lbl)
4114 label_prologue_count *lpc = unwind.saved_prologue_counts;
4116 while (lpc != NULL && lpc->label_number != lbl)
4120 return lpc->prologue_count;
4122 as_bad (_("Missing .label_state %ld"), lbl);
4127 save_prologue_count (unsigned long lbl, unsigned int count)
4129 label_prologue_count *lpc = unwind.saved_prologue_counts;
4131 while (lpc != NULL && lpc->label_number != lbl)
4135 lpc->prologue_count = count;
4138 label_prologue_count *new_lpc = xmalloc (sizeof (* new_lpc));
4140 new_lpc->next = unwind.saved_prologue_counts;
4141 new_lpc->label_number = lbl;
4142 new_lpc->prologue_count = count;
4143 unwind.saved_prologue_counts = new_lpc;
4148 free_saved_prologue_counts ()
4150 label_prologue_count *lpc = unwind.saved_prologue_counts;
4151 label_prologue_count *next;
4160 unwind.saved_prologue_counts = NULL;
4164 dot_label_state (int dummy ATTRIBUTE_UNUSED)
4168 if (!in_body ("label_state"))
4171 parse_operand_and_eval (&e, 0);
4172 if (e.X_op == O_constant)
4173 save_prologue_count (e.X_add_number, unwind.prologue_count);
4176 as_bad (_("Operand to .label_state must be a constant"));
4179 add_unwind_entry (output_label_state (e.X_add_number), 0);
4183 dot_copy_state (int dummy ATTRIBUTE_UNUSED)
4187 if (!in_body ("copy_state"))
4190 parse_operand_and_eval (&e, 0);
4191 if (e.X_op == O_constant)
4192 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
4195 as_bad (_("Operand to .copy_state must be a constant"));
4198 add_unwind_entry (output_copy_state (e.X_add_number), 0);
4202 dot_unwabi (int dummy ATTRIBUTE_UNUSED)
4207 if (!in_prologue ("unwabi"))
4210 sep = parse_operand_and_eval (&e1, ',');
4212 parse_operand_and_eval (&e2, 0);
4216 if (e1.X_op != O_constant)
4218 as_bad (_("First operand to .unwabi must be a constant"));
4219 e1.X_add_number = 0;
4222 if (e2.X_op != O_constant)
4224 as_bad (_("Second operand to .unwabi must be a constant"));
4225 e2.X_add_number = 0;
4228 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number), 0);
4232 dot_personality (int dummy ATTRIBUTE_UNUSED)
4235 if (!in_procedure ("personality"))
4238 name = input_line_pointer;
4239 c = get_symbol_end ();
4240 p = input_line_pointer;
4241 unwind.personality_routine = symbol_find_or_make (name);
4242 unwind.force_unwind_entry = 1;
4245 demand_empty_rest_of_line ();
4249 dot_proc (int dummy ATTRIBUTE_UNUSED)
4253 proc_pending *pending, *last_pending;
4255 if (unwind.proc_pending.sym)
4257 (md.unwind_check == unwind_check_warning
4259 : as_bad) (_("Missing .endp after previous .proc"));
4260 while (unwind.proc_pending.next)
4262 pending = unwind.proc_pending.next;
4263 unwind.proc_pending.next = pending->next;
4267 last_pending = NULL;
4269 /* Parse names of main and alternate entry points and mark them as
4270 function symbols: */
4274 name = input_line_pointer;
4275 c = get_symbol_end ();
4276 p = input_line_pointer;
4278 as_bad (_("Empty argument of .proc"));
4281 sym = symbol_find_or_make (name);
4282 if (S_IS_DEFINED (sym))
4283 as_bad (_("`%s' was already defined"), name);
4284 else if (!last_pending)
4286 unwind.proc_pending.sym = sym;
4287 last_pending = &unwind.proc_pending;
4291 pending = xmalloc (sizeof (*pending));
4293 last_pending = last_pending->next = pending;
4295 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
4299 if (*input_line_pointer != ',')
4301 ++input_line_pointer;
4305 unwind.proc_pending.sym = expr_build_dot ();
4306 last_pending = &unwind.proc_pending;
4308 last_pending->next = NULL;
4309 demand_empty_rest_of_line ();
4312 unwind.prologue = 0;
4313 unwind.prologue_count = 0;
4316 unwind.list = unwind.tail = unwind.current_entry = NULL;
4317 unwind.personality_routine = 0;
4321 dot_body (int dummy ATTRIBUTE_UNUSED)
4323 if (!in_procedure ("body"))
4325 if (!unwind.prologue && !unwind.body && unwind.insn)
4326 as_warn (_("Initial .body should precede any instructions"));
4327 check_pending_save ();
4329 unwind.prologue = 0;
4330 unwind.prologue_mask = 0;
4333 add_unwind_entry (output_body (), 0);
4337 dot_prologue (int dummy ATTRIBUTE_UNUSED)
4339 unsigned mask = 0, grsave = 0;
4341 if (!in_procedure ("prologue"))
4343 if (unwind.prologue)
4345 as_bad (_(".prologue within prologue"));
4346 ignore_rest_of_line ();
4349 if (!unwind.body && unwind.insn)
4350 as_warn (_("Initial .prologue should precede any instructions"));
4352 if (!is_it_end_of_statement ())
4355 int n, sep = parse_operand_and_eval (&e, ',');
4357 if (e.X_op != O_constant
4358 || e.X_add_number < 0
4359 || e.X_add_number > 0xf)
4360 as_bad (_("First operand to .prologue must be a positive 4-bit constant"));
4361 else if (e.X_add_number == 0)
4362 as_warn (_("Pointless use of zero first operand to .prologue"));
4364 mask = e.X_add_number;
4365 n = popcount (mask);
4368 parse_operand_and_eval (&e, 0);
4371 if (e.X_op == O_constant
4372 && e.X_add_number >= 0
4373 && e.X_add_number < 128)
4375 if (md.unwind_check == unwind_check_error)
4376 as_warn (_("Using a constant as second operand to .prologue is deprecated"));
4377 grsave = e.X_add_number;
4379 else if (e.X_op != O_register
4380 || (grsave = e.X_add_number - REG_GR) > 127)
4382 as_bad (_("Second operand to .prologue must be a general register"));
4385 else if (grsave > 128U - n)
4387 as_bad (_("Second operand to .prologue must be the first of %d general registers"), n);
4394 add_unwind_entry (output_prologue_gr (mask, grsave), 0);
4396 add_unwind_entry (output_prologue (), 0);
4398 unwind.prologue = 1;
4399 unwind.prologue_mask = mask;
4400 unwind.prologue_gr = grsave;
4402 ++unwind.prologue_count;
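/* Usage sketch: ".prologue" alone opens an unnamed prologue region, while
   ".prologue 12, r40" additionally records that rp and ar.pfs (mask
   0x8 | 0x4) are saved in r40 and r41; a constant second operand is still
   accepted but deprecated, as the code above notes.  */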
4406 dot_endp (int dummy ATTRIBUTE_UNUSED)
4409 int bytes_per_address;
4412 subsegT saved_subseg;
4413 proc_pending *pending;
4414 int unwind_check = md.unwind_check;
4416 md.unwind_check = unwind_check_error;
4417 if (!in_procedure ("endp"))
4419 md.unwind_check = unwind_check;
4421 if (unwind.saved_text_seg)
4423 saved_seg = unwind.saved_text_seg;
4424 saved_subseg = unwind.saved_text_subseg;
4425 unwind.saved_text_seg = NULL;
4429 saved_seg = now_seg;
4430 saved_subseg = now_subseg;
4433 insn_group_break (1, 0, 0);
4435 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4437 generate_unwind_image (saved_seg);
4439 if (unwind.info || unwind.force_unwind_entry)
4443 subseg_set (md.last_text_seg, 0);
4444 proc_end = expr_build_dot ();
4446 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND);
4448 /* Make sure that section has 4 byte alignment for ILP32 and
4449 8 byte alignment for LP64. */
4450 record_alignment (now_seg, md.pointer_size_shift);
4452 /* Need space for 3 pointers for procedure start, procedure end,
4453 and unwind info. */
4454 memset (frag_more (3 * md.pointer_size), 0, 3 * md.pointer_size);
4455 where = frag_now_fix () - (3 * md.pointer_size);
4456 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
4458 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4459 e.X_op = O_pseudo_fixup;
4460 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4462 if (!S_IS_LOCAL (unwind.proc_pending.sym)
4463 && S_IS_DEFINED (unwind.proc_pending.sym))
4464 e.X_add_symbol = symbol_temp_new (S_GET_SEGMENT (unwind.proc_pending.sym),
4465 S_GET_VALUE (unwind.proc_pending.sym),
4466 symbol_get_frag (unwind.proc_pending.sym));
4468 e.X_add_symbol = unwind.proc_pending.sym;
4469 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e,
4472 e.X_op = O_pseudo_fixup;
4473 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4475 e.X_add_symbol = proc_end;
4476 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4477 bytes_per_address, &e, BFD_RELOC_NONE);
4481 e.X_op = O_pseudo_fixup;
4482 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4484 e.X_add_symbol = unwind.info;
4485 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4486 bytes_per_address, &e, BFD_RELOC_NONE);
4489 subseg_set (saved_seg, saved_subseg);
4491 /* Set symbol sizes. */
4492 pending = &unwind.proc_pending;
4493 if (S_GET_NAME (pending->sym))
4497 symbolS *sym = pending->sym;
4499 if (!S_IS_DEFINED (sym))
4500 as_bad (_("`%s' was not defined within procedure"), S_GET_NAME (sym));
4501 else if (S_GET_SIZE (sym) == 0
4502 && symbol_get_obj (sym)->size == NULL)
4504 fragS *frag = symbol_get_frag (sym);
4508 if (frag == frag_now && SEG_NORMAL (now_seg))
4509 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4512 symbol_get_obj (sym)->size =
4513 (expressionS *) xmalloc (sizeof (expressionS));
4514 symbol_get_obj (sym)->size->X_op = O_subtract;
4515 symbol_get_obj (sym)->size->X_add_symbol
4516 = symbol_new (FAKE_LABEL_NAME, now_seg,
4517 frag_now_fix (), frag_now);
4518 symbol_get_obj (sym)->size->X_op_symbol = sym;
4519 symbol_get_obj (sym)->size->X_add_number = 0;
4523 } while ((pending = pending->next) != NULL);
4526 /* Parse names of main and alternate entry points. */
4532 name = input_line_pointer;
4533 c = get_symbol_end ();
4534 p = input_line_pointer;
4536 (md.unwind_check == unwind_check_warning
4538 : as_bad) (_("Empty argument of .endp"));
4541 symbolS *sym = symbol_find (name);
4543 for (pending = &unwind.proc_pending; pending; pending = pending->next)
4545 if (sym == pending->sym)
4547 pending->sym = NULL;
4551 if (!sym || !pending)
4552 as_warn (_("`%s' was not specified with previous .proc"), name);
4556 if (*input_line_pointer != ',')
4558 ++input_line_pointer;
4560 demand_empty_rest_of_line ();
4562 /* Deliberately only checking for the main entry point here; the
4563 language spec even says all arguments to .endp are ignored. */
4564 if (unwind.proc_pending.sym
4565 && S_GET_NAME (unwind.proc_pending.sym)
4566 && strcmp (S_GET_NAME (unwind.proc_pending.sym), FAKE_LABEL_NAME))
4567 as_warn (_("`%s' should be an operand to this .endp"),
4568 S_GET_NAME (unwind.proc_pending.sym));
4569 while (unwind.proc_pending.next)
4571 pending = unwind.proc_pending.next;
4572 unwind.proc_pending.next = pending->next;
4575 unwind.proc_pending.sym = unwind.info = NULL;
4579 dot_template (int template_val)
4581 CURR_SLOT.user_template = template_val;
4585 dot_regstk (int dummy ATTRIBUTE_UNUSED)
4587 int ins, locs, outs, rots;
4589 if (is_it_end_of_statement ())
4590 ins = locs = outs = rots = 0;
4593 ins = get_absolute_expression ();
4594 if (*input_line_pointer++ != ',')
4596 locs = get_absolute_expression ();
4597 if (*input_line_pointer++ != ',')
4599 outs = get_absolute_expression ();
4600 if (*input_line_pointer++ != ',')
4602 rots = get_absolute_expression ();
4604 set_regstack (ins, locs, outs, rots);
4608 as_bad (_("Comma expected"));
4609 ignore_rest_of_line ();
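/* A usage sketch for the .rotr/.rotf/.rotp handler below (a sketch only):

     .rotr in[4], out[4]

   names two groups of rotating general registers starting at r32; .rotf
   and .rotp do the same starting at f32 and p16 respectively, per the
   base_reg selection below.  */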
4616 valueT num_alloced = 0;
4617 struct dynreg **drpp, *dr;
4618 int ch, base_reg = 0;
4624 case DYNREG_GR: base_reg = REG_GR + 32; break;
4625 case DYNREG_FR: base_reg = REG_FR + 32; break;
4626 case DYNREG_PR: base_reg = REG_P + 16; break;
4630 /* First, remove existing names from hash table. */
4631 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4633 hash_delete (md.dynreg_hash, dr->name, FALSE);
4634 /* FIXME: Free dr->name. */
4638 drpp = &md.dynreg[type];
4641 start = input_line_pointer;
4642 ch = get_symbol_end ();
4643 len = strlen (ia64_canonicalize_symbol_name (start));
4644 *input_line_pointer = ch;
4647 if (*input_line_pointer != '[')
4649 as_bad (_("Expected '['"));
4652 ++input_line_pointer; /* skip '[' */
4654 num_regs = get_absolute_expression ();
4656 if (*input_line_pointer++ != ']')
4658 as_bad (_("Expected ']'"));
4663 as_bad (_("Number of elements must be positive"));
4668 num_alloced += num_regs;
4672 if (num_alloced > md.rot.num_regs)
4674 as_bad (_("Used more than the declared %d rotating registers"),
4680 if (num_alloced > 96)
4682 as_bad (_("Used more than the available 96 rotating registers"));
4687 if (num_alloced > 48)
4689 as_bad (_("Used more than the available 48 rotating registers"));
4700 *drpp = obstack_alloc (&notes, sizeof (*dr));
4701 memset (*drpp, 0, sizeof (*dr));
4704 name = obstack_alloc (&notes, len + 1);
4705 memcpy (name, start, len);
4710 dr->num_regs = num_regs;
4711 dr->base = base_reg;
4713 base_reg += num_regs;
4715 if (hash_insert (md.dynreg_hash, name, dr))
4717 as_bad (_("Attempt to redefine register set `%s'"), name);
4718 obstack_free (&notes, name);
4722 if (*input_line_pointer != ',')
4724 ++input_line_pointer; /* skip comma */
4727 demand_empty_rest_of_line ();
4731 ignore_rest_of_line ();
4735 dot_byteorder (int byteorder)
4737 segment_info_type *seginfo = seg_info (now_seg);
4739 if (byteorder == -1)
4741 if (seginfo->tc_segment_info_data.endian == 0)
4742 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4743 byteorder = seginfo->tc_segment_info_data.endian == 1;
4746 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4748 if (target_big_endian != byteorder)
4750 target_big_endian = byteorder;
4751 if (target_big_endian)
4753 ia64_number_to_chars = number_to_chars_bigendian;
4754 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4758 ia64_number_to_chars = number_to_chars_littleendian;
4759 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4765 dot_psr (int dummy ATTRIBUTE_UNUSED)
4772 option = input_line_pointer;
4773 ch = get_symbol_end ();
4774 if (strcmp (option, "lsb") == 0)
4775 md.flags &= ~EF_IA_64_BE;
4776 else if (strcmp (option, "msb") == 0)
4777 md.flags |= EF_IA_64_BE;
4778 else if (strcmp (option, "abi32") == 0)
4779 md.flags &= ~EF_IA_64_ABI64;
4780 else if (strcmp (option, "abi64") == 0)
4781 md.flags |= EF_IA_64_ABI64;
4783 as_bad (_("Unknown psr option `%s'"), option);
4784 *input_line_pointer = ch;
4787 if (*input_line_pointer != ',')
4790 ++input_line_pointer;
4793 demand_empty_rest_of_line ();
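/* Example: ".psr abi64, msb" selects the 64-bit ABI and big-endian code;
   several options may be combined with commas, as the loop above allows.  */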
4797 dot_ln (int dummy ATTRIBUTE_UNUSED)
4799 new_logical_line (0, get_absolute_expression ());
4800 demand_empty_rest_of_line ();
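/* A descriptive note for cross_section below: it parses a section name,
   temporarily switches to that section via obj_elf_section, lets the
   builder callback emit the data, and then returns to the previous section
   with obj_elf_previous, suppressing auto-alignment for the .ua variants.  */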
4804 cross_section (int ref, void (*builder) (int), int ua)
4807 int saved_auto_align;
4808 unsigned int section_count;
4811 start = input_line_pointer;
4817 name = demand_copy_C_string (&len);
4818 obstack_free (&notes, name);
4821 ignore_rest_of_line ();
4827 char c = get_symbol_end ();
4829 if (input_line_pointer == start)
4831 as_bad (_("Missing section name"));
4832 ignore_rest_of_line ();
4835 *input_line_pointer = c;
4837 end = input_line_pointer;
4839 if (*input_line_pointer != ',')
4841 as_bad (_("Comma expected after section name"));
4842 ignore_rest_of_line ();
4846 end = input_line_pointer + 1; /* skip comma */
4847 input_line_pointer = start;
4848 md.keep_pending_output = 1;
4849 section_count = bfd_count_sections (stdoutput);
4850 obj_elf_section (0);
4851 if (section_count != bfd_count_sections (stdoutput))
4852 as_warn (_("Creating sections with .xdataN/.xrealN/.xstringZ is deprecated."));
4853 input_line_pointer = end;
4854 saved_auto_align = md.auto_align;
4859 md.auto_align = saved_auto_align;
4860 obj_elf_previous (0);
4861 md.keep_pending_output = 0;
4865 dot_xdata (int size)
4867 cross_section (size, cons, 0);
4870 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4873 stmt_float_cons (int kind)
4893 ia64_do_align (alignment);
4898 stmt_cons_ua (int size)
4900 int saved_auto_align = md.auto_align;
4904 md.auto_align = saved_auto_align;
4908 dot_xfloat_cons (int kind)
4910 cross_section (kind, stmt_float_cons, 0);
4914 dot_xstringer (int zero)
4916 cross_section (zero, stringer, 0);
4920 dot_xdata_ua (int size)
4922 cross_section (size, cons, 1);
4926 dot_xfloat_cons_ua (int kind)
4928 cross_section (kind, float_cons, 1);
4931 /* .reg.val <regname>,value */
4934 dot_reg_val (int dummy ATTRIBUTE_UNUSED)
4938 expression_and_evaluate (&reg);
4939 if (reg.X_op != O_register)
4941 as_bad (_("Register name expected"));
4942 ignore_rest_of_line ();
4944 else if (*input_line_pointer++ != ',')
4946 as_bad (_("Comma expected"));
4947 ignore_rest_of_line ();
4951 valueT value = get_absolute_expression ();
4952 int regno = reg.X_add_number;
4953 if (regno <= REG_GR || regno > REG_GR + 127)
4954 as_warn (_("Register value annotation ignored"));
4957 gr_values[regno - REG_GR].known = 1;
4958 gr_values[regno - REG_GR].value = value;
4959 gr_values[regno - REG_GR].path = md.path;
4962 demand_empty_rest_of_line ();
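/* Example: ".reg.val r14, 0x1000" tells the dependency-violation checker
   that r14 holds 0x1000 on the current path; only general registers are
   tracked, per the range check above.  */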
4967 .serialize.instruction
4970 dot_serialize (int type)
4972 insn_group_break (0, 0, 0);
4974 instruction_serialization ();
4976 data_serialization ();
4977 insn_group_break (0, 0, 0);
4978 demand_empty_rest_of_line ();
4981 /* select dv checking mode
4986 A stop is inserted when changing modes
4990 dot_dv_mode (int type)
4992 if (md.manual_bundling)
4993 as_warn (_("Directive invalid within a bundle"));
4995 if (type == 'E' || type == 'A')
4996 md.mode_explicitly_set = 0;
4998 md.mode_explicitly_set = 1;
5005 if (md.explicit_mode)
5006 insn_group_break (1, 0, 0);
5007 md.explicit_mode = 0;
5011 if (!md.explicit_mode)
5012 insn_group_break (1, 0, 0);
5013 md.explicit_mode = 1;
5017 if (md.explicit_mode != md.default_explicit_mode)
5018 insn_group_break (1, 0, 0);
5019 md.explicit_mode = md.default_explicit_mode;
5020 md.mode_explicitly_set = 0;
5026 print_prmask (valueT mask)
5030 for (regno = 0; regno < 64; regno++)
5032 if (mask & ((valueT) 1 << regno))
5034 fprintf (stderr, "%s p%d", comma, regno);
5041 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear" or @clear)
5042 .pred.rel.imply p1, p2 (also .pred.rel "imply" or @imply)
5043 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex" or @mutex)
5044 .pred.safe_across_calls p1 [, p2 [,...]]
5048 dot_pred_rel (int type)
5052 int p1 = -1, p2 = -1;
5056 if (*input_line_pointer == '"')
5059 char *form = demand_copy_C_string (&len);
5061 if (strcmp (form, "mutex") == 0)
5063 else if (strcmp (form, "clear") == 0)
5065 else if (strcmp (form, "imply") == 0)
5067 obstack_free (&notes, form);
5069 else if (*input_line_pointer == '@')
5071 char *form = ++input_line_pointer;
5072 char c = get_symbol_end();
5074 if (strcmp (form, "mutex") == 0)
5076 else if (strcmp (form, "clear") == 0)
5078 else if (strcmp (form, "imply") == 0)
5080 *input_line_pointer = c;
5084 as_bad (_("Missing predicate relation type"));
5085 ignore_rest_of_line ();
5090 as_bad (_("Unrecognized predicate relation type"));
5091 ignore_rest_of_line ();
5094 if (*input_line_pointer == ',')
5095 ++input_line_pointer;
5103 expressionS pr, *pr1, *pr2;
5105 sep = parse_operand_and_eval (&pr, ',');
5106 if (pr.X_op == O_register
5107 && pr.X_add_number >= REG_P
5108 && pr.X_add_number <= REG_P + 63)
5110 regno = pr.X_add_number - REG_P;
5118 else if (type != 'i'
5119 && pr.X_op == O_subtract
5120 && (pr1 = symbol_get_value_expression (pr.X_add_symbol))
5121 && pr1->X_op == O_register
5122 && pr1->X_add_number >= REG_P
5123 && pr1->X_add_number <= REG_P + 63
5124 && (pr2 = symbol_get_value_expression (pr.X_op_symbol))
5125 && pr2->X_op == O_register
5126 && pr2->X_add_number >= REG_P
5127 && pr2->X_add_number <= REG_P + 63)
5132 regno = pr1->X_add_number - REG_P;
5133 stop = pr2->X_add_number - REG_P;
5136 as_bad (_("Bad register range"));
5137 ignore_rest_of_line ();
5140 bits = ((bits << stop) << 1) - (bits << regno);
5141 count += stop - regno + 1;
5145 as_bad (_("Predicate register expected"));
5146 ignore_rest_of_line ();
5150 as_warn (_("Duplicate predicate register ignored"));
5161 clear_qp_mutex (mask);
5162 clear_qp_implies (mask, (valueT) 0);
5165 if (count != 2 || p1 == -1 || p2 == -1)
5166 as_bad (_("Predicate source and target required"));
5167 else if (p1 == 0 || p2 == 0)
5168 as_bad (_("Use of p0 is not valid in this context"));
5170 add_qp_imply (p1, p2);
5175 as_bad (_("At least two PR arguments expected"));
5180 as_bad (_("Use of p0 is not valid in this context"));
5183 add_qp_mutex (mask);
5186 /* note that we don't override any existing relations */
5189 as_bad (_("At least one PR argument expected"));
5194 fprintf (stderr, "Safe across calls: ");
5195 print_prmask (mask);
5196 fprintf (stderr, "\n");
5198 qp_safe_across_calls = mask;
5201 demand_empty_rest_of_line ();
5204 /* .entry label [, label [, ...]]
5205 Hint to DV code that the given labels are to be considered entry points.
5206 Otherwise, only global labels are considered entry points. */
5209 dot_entry (int dummy ATTRIBUTE_UNUSED)
5218 name = input_line_pointer;
5219 c = get_symbol_end ();
5220 symbolP = symbol_find_or_make (name);
5222 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (void *) symbolP);
5224 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
5227 *input_line_pointer = c;
5229 c = *input_line_pointer;
5232 input_line_pointer++;
5234 if (*input_line_pointer == '\n')
5240 demand_empty_rest_of_line ();
5243 /* .mem.offset offset, base
5244 "base" is used to distinguish between offsets from a different base. */
5247 dot_mem_offset (int dummy ATTRIBUTE_UNUSED)
5249 md.mem_offset.hint = 1;
5250 md.mem_offset.offset = get_absolute_expression ();
5251 if (*input_line_pointer != ',')
5253 as_bad (_("Comma expected"));
5254 ignore_rest_of_line ();
5257 ++input_line_pointer;
5258 md.mem_offset.base = get_absolute_expression ();
5259 demand_empty_rest_of_line ();
5262 /* ia64-specific pseudo-ops: */
5263 const pseudo_typeS md_pseudo_table[] =
5265 { "radix", dot_radix, 0 },
5266 { "lcomm", s_lcomm_bytes, 1 },
5267 { "loc", dot_loc, 0 },
5268 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
5269 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
5270 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
5271 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
5272 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
5273 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
5274 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
5275 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
5276 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
5277 { "proc", dot_proc, 0 },
5278 { "body", dot_body, 0 },
5279 { "prologue", dot_prologue, 0 },
5280 { "endp", dot_endp, 0 },
5282 { "fframe", dot_fframe, 0 },
5283 { "vframe", dot_vframe, 0 },
5284 { "vframesp", dot_vframesp, 0 },
5285 { "vframepsp", dot_vframesp, 1 },
5286 { "save", dot_save, 0 },
5287 { "restore", dot_restore, 0 },
5288 { "restorereg", dot_restorereg, 0 },
5289 { "restorereg.p", dot_restorereg, 1 },
5290 { "handlerdata", dot_handlerdata, 0 },
5291 { "unwentry", dot_unwentry, 0 },
5292 { "altrp", dot_altrp, 0 },
5293 { "savesp", dot_savemem, 0 },
5294 { "savepsp", dot_savemem, 1 },
5295 { "save.g", dot_saveg, 0 },
5296 { "save.f", dot_savef, 0 },
5297 { "save.b", dot_saveb, 0 },
5298 { "save.gf", dot_savegf, 0 },
5299 { "spill", dot_spill, 0 },
5300 { "spillreg", dot_spillreg, 0 },
5301 { "spillsp", dot_spillmem, 0 },
5302 { "spillpsp", dot_spillmem, 1 },
5303 { "spillreg.p", dot_spillreg, 1 },
5304 { "spillsp.p", dot_spillmem, ~0 },
5305 { "spillpsp.p", dot_spillmem, ~1 },
5306 { "label_state", dot_label_state, 0 },
5307 { "copy_state", dot_copy_state, 0 },
5308 { "unwabi", dot_unwabi, 0 },
5309 { "personality", dot_personality, 0 },
5310 { "mii", dot_template, 0x0 },
5311 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
5312 { "mlx", dot_template, 0x2 },
5313 { "mmi", dot_template, 0x4 },
5314 { "mfi", dot_template, 0x6 },
5315 { "mmf", dot_template, 0x7 },
5316 { "mib", dot_template, 0x8 },
5317 { "mbb", dot_template, 0x9 },
5318 { "bbb", dot_template, 0xb },
5319 { "mmb", dot_template, 0xc },
5320 { "mfb", dot_template, 0xe },
5321 { "align", dot_align, 0 },
5322 { "regstk", dot_regstk, 0 },
5323 { "rotr", dot_rot, DYNREG_GR },
5324 { "rotf", dot_rot, DYNREG_FR },
5325 { "rotp", dot_rot, DYNREG_PR },
5326 { "lsb", dot_byteorder, 0 },
5327 { "msb", dot_byteorder, 1 },
5328 { "psr", dot_psr, 0 },
5329 { "alias", dot_alias, 0 },
5330 { "secalias", dot_alias, 1 },
5331 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
5333 { "xdata1", dot_xdata, 1 },
5334 { "xdata2", dot_xdata, 2 },
5335 { "xdata4", dot_xdata, 4 },
5336 { "xdata8", dot_xdata, 8 },
5337 { "xdata16", dot_xdata, 16 },
5338 { "xreal4", dot_xfloat_cons, 'f' },
5339 { "xreal8", dot_xfloat_cons, 'd' },
5340 { "xreal10", dot_xfloat_cons, 'x' },
5341 { "xreal16", dot_xfloat_cons, 'X' },
5342 { "xstring", dot_xstringer, 8 + 0 },
5343 { "xstringz", dot_xstringer, 8 + 1 },
5345 /* unaligned versions: */
5346 { "xdata2.ua", dot_xdata_ua, 2 },
5347 { "xdata4.ua", dot_xdata_ua, 4 },
5348 { "xdata8.ua", dot_xdata_ua, 8 },
5349 { "xdata16.ua", dot_xdata_ua, 16 },
5350 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
5351 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
5352 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
5353 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
5355 /* annotations/DV checking support */
5356 { "entry", dot_entry, 0 },
5357 { "mem.offset", dot_mem_offset, 0 },
5358 { "pred.rel", dot_pred_rel, 0 },
5359 { "pred.rel.clear", dot_pred_rel, 'c' },
5360 { "pred.rel.imply", dot_pred_rel, 'i' },
5361 { "pred.rel.mutex", dot_pred_rel, 'm' },
5362 { "pred.safe_across_calls", dot_pred_rel, 's' },
5363 { "reg.val", dot_reg_val, 0 },
5364 { "serialize.data", dot_serialize, 0 },
5365 { "serialize.instruction", dot_serialize, 1 },
5366 { "auto", dot_dv_mode, 'a' },
5367 { "explicit", dot_dv_mode, 'e' },
5368 { "default", dot_dv_mode, 'd' },
5370 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
5371 IA-64 aligns data allocation pseudo-ops by default, so we have to
5372 tell it that these ones are supposed to be unaligned. Long term, this
5373 should be rewritten so that only IA-64 specific data allocation pseudo-ops
5374 are aligned by default. */
5375 {"2byte", stmt_cons_ua, 2},
5376 {"4byte", stmt_cons_ua, 4},
5377 {"8byte", stmt_cons_ua, 8},
5380 {"vms_common", obj_elf_vms_common, 0},
5386 static const struct pseudo_opcode
5389 void (*handler) (int);
5394 /* these are more like pseudo-ops, but don't start with a dot */
5395 { "data1", cons, 1 },
5396 { "data2", cons, 2 },
5397 { "data4", cons, 4 },
5398 { "data8", cons, 8 },
5399 { "data16", cons, 16 },
5400 { "real4", stmt_float_cons, 'f' },
5401 { "real8", stmt_float_cons, 'd' },
5402 { "real10", stmt_float_cons, 'x' },
5403 { "real16", stmt_float_cons, 'X' },
5404 { "string", stringer, 8 + 0 },
5405 { "stringz", stringer, 8 + 1 },
5407 /* unaligned versions: */
5408 { "data2.ua", stmt_cons_ua, 2 },
5409 { "data4.ua", stmt_cons_ua, 4 },
5410 { "data8.ua", stmt_cons_ua, 8 },
5411 { "data16.ua", stmt_cons_ua, 16 },
5412 { "real4.ua", float_cons, 'f' },
5413 { "real8.ua", float_cons, 'd' },
5414 { "real10.ua", float_cons, 'x' },
5415 { "real16.ua", float_cons, 'X' },
5418 /* Declare a register by creating a symbol for it and entering it in
5419 the symbol table. */
5422 declare_register (const char *name, unsigned int regnum)
5427 sym = symbol_create (name, reg_section, regnum, &zero_address_frag);
5429 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (void *) sym);
5431 as_fatal ("Inserting \"%s\" into register table failed: %s",
5438 declare_register_set (const char *prefix,
5439 unsigned int num_regs,
5440 unsigned int base_regnum)
5445 for (i = 0; i < num_regs; ++i)
5447 snprintf (name, sizeof (name), "%s%u", prefix, i);
5448 declare_register (name, base_regnum + i);
5453 operand_width (enum ia64_opnd opnd)
5455 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5456 unsigned int bits = 0;
5460 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5461 bits += odesc->field[i].bits;
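/* A descriptive note for operand_match below: it checks whether expression
   E can serve as operand RES_INDEX of IDESC, returning a match, an
   out-of-range indication (right operand kind, value does not fit), or a
   mismatch; for relocatable immediates it may instead queue a fixup on the
   current slot rather than range-check a constant.  */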
5466 static enum operand_match_result
5467 operand_match (const struct ia64_opcode *idesc, int res_index, expressionS *e)
5469 enum ia64_opnd opnd = idesc->operands[res_index];
5470 int bits, relocatable = 0;
5471 struct insn_fix *fix;
5478 case IA64_OPND_AR_CCV:
5479 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5480 return OPERAND_MATCH;
5483 case IA64_OPND_AR_CSD:
5484 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5485 return OPERAND_MATCH;
5488 case IA64_OPND_AR_PFS:
5489 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5490 return OPERAND_MATCH;
5494 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5495 return OPERAND_MATCH;
5499 if (e->X_op == O_register && e->X_add_number == REG_IP)
5500 return OPERAND_MATCH;
5504 if (e->X_op == O_register && e->X_add_number == REG_PR)
5505 return OPERAND_MATCH;
5508 case IA64_OPND_PR_ROT:
5509 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5510 return OPERAND_MATCH;
5514 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5515 return OPERAND_MATCH;
5518 case IA64_OPND_PSR_L:
5519 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5520 return OPERAND_MATCH;
5523 case IA64_OPND_PSR_UM:
5524 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5525 return OPERAND_MATCH;
5529 if (e->X_op == O_constant)
5531 if (e->X_add_number == 1)
5532 return OPERAND_MATCH;
5534 return OPERAND_OUT_OF_RANGE;
5539 if (e->X_op == O_constant)
5541 if (e->X_add_number == 8)
5542 return OPERAND_MATCH;
5544 return OPERAND_OUT_OF_RANGE;
5549 if (e->X_op == O_constant)
5551 if (e->X_add_number == 16)
5552 return OPERAND_MATCH;
5554 return OPERAND_OUT_OF_RANGE;
5558 /* register operands: */
5561 if (e->X_op == O_register && e->X_add_number >= REG_AR
5562 && e->X_add_number < REG_AR + 128)
5563 return OPERAND_MATCH;
5568 if (e->X_op == O_register && e->X_add_number >= REG_BR
5569 && e->X_add_number < REG_BR + 8)
5570 return OPERAND_MATCH;
5574 if (e->X_op == O_register && e->X_add_number >= REG_CR
5575 && e->X_add_number < REG_CR + 128)
5576 return OPERAND_MATCH;
5579 case IA64_OPND_DAHR3:
5580 if (e->X_op == O_register && e->X_add_number >= REG_DAHR
5581 && e->X_add_number < REG_DAHR + 8)
5582 return OPERAND_MATCH;
5589 if (e->X_op == O_register && e->X_add_number >= REG_FR
5590 && e->X_add_number < REG_FR + 128)
5591 return OPERAND_MATCH;
5596 if (e->X_op == O_register && e->X_add_number >= REG_P
5597 && e->X_add_number < REG_P + 64)
5598 return OPERAND_MATCH;
5604 if (e->X_op == O_register && e->X_add_number >= REG_GR
5605 && e->X_add_number < REG_GR + 128)
5606 return OPERAND_MATCH;
5609 case IA64_OPND_R3_2:
5610 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5612 if (e->X_add_number < REG_GR + 4)
5613 return OPERAND_MATCH;
5614 else if (e->X_add_number < REG_GR + 128)
5615 return OPERAND_OUT_OF_RANGE;
5619 /* indirect operands: */
5620 case IA64_OPND_CPUID_R3:
5621 case IA64_OPND_DBR_R3:
5622 case IA64_OPND_DTR_R3:
5623 case IA64_OPND_ITR_R3:
5624 case IA64_OPND_IBR_R3:
5625 case IA64_OPND_MSR_R3:
5626 case IA64_OPND_PKR_R3:
5627 case IA64_OPND_PMC_R3:
5628 case IA64_OPND_PMD_R3:
5629 case IA64_OPND_DAHR_R3:
5630 case IA64_OPND_RR_R3:
5631 if (e->X_op == O_index && e->X_op_symbol
5632 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5633 == opnd - IA64_OPND_CPUID_R3))
5634 return OPERAND_MATCH;
5638 if (e->X_op == O_index && !e->X_op_symbol)
5639 return OPERAND_MATCH;
5642 /* immediate operands: */
5643 case IA64_OPND_CNT2a:
5644 case IA64_OPND_LEN4:
5645 case IA64_OPND_LEN6:
5646 bits = operand_width (idesc->operands[res_index]);
5647 if (e->X_op == O_constant)
5649 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5650 return OPERAND_MATCH;
5652 return OPERAND_OUT_OF_RANGE;
5656 case IA64_OPND_CNT2b:
5657 if (e->X_op == O_constant)
5659 if ((bfd_vma) (e->X_add_number - 1) < 3)
5660 return OPERAND_MATCH;
5662 return OPERAND_OUT_OF_RANGE;
5666 case IA64_OPND_CNT2c:
5667 val = e->X_add_number;
5668 if (e->X_op == O_constant)
5670 if ((val == 0 || val == 7 || val == 15 || val == 16))
5671 return OPERAND_MATCH;
5673 return OPERAND_OUT_OF_RANGE;
5678 /* SOR must be an integer multiple of 8 */
5679 if (e->X_op == O_constant && e->X_add_number & 0x7)
5680 return OPERAND_OUT_OF_RANGE;
5683 if (e->X_op == O_constant)
5685 if ((bfd_vma) e->X_add_number <= 96)
5686 return OPERAND_MATCH;
5688 return OPERAND_OUT_OF_RANGE;
5692 case IA64_OPND_IMMU62:
5693 if (e->X_op == O_constant)
5695 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5696 return OPERAND_MATCH;
5698 return OPERAND_OUT_OF_RANGE;
5702 /* FIXME -- need 62-bit relocation type */
5703 as_bad (_("62-bit relocation not yet implemented"));
5707 case IA64_OPND_IMMU64:
5708 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5709 || e->X_op == O_subtract)
5711 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5712 fix->code = BFD_RELOC_IA64_IMM64;
5713 if (e->X_op != O_subtract)
5715 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5716 if (e->X_op == O_pseudo_fixup)
5720 fix->opnd = idesc->operands[res_index];
5723 ++CURR_SLOT.num_fixups;
5724 return OPERAND_MATCH;
5726 else if (e->X_op == O_constant)
5727 return OPERAND_MATCH;
5730 case IA64_OPND_IMMU5b:
5731 if (e->X_op == O_constant)
5733 val = e->X_add_number;
5734 if (val >= 32 && val <= 63)
5735 return OPERAND_MATCH;
5737 return OPERAND_OUT_OF_RANGE;
5741 case IA64_OPND_CCNT5:
5742 case IA64_OPND_CNT5:
5743 case IA64_OPND_CNT6:
5744 case IA64_OPND_CPOS6a:
5745 case IA64_OPND_CPOS6b:
5746 case IA64_OPND_CPOS6c:
5747 case IA64_OPND_IMMU2:
5748 case IA64_OPND_IMMU7a:
5749 case IA64_OPND_IMMU7b:
5750 case IA64_OPND_IMMU16:
5751 case IA64_OPND_IMMU19:
5752 case IA64_OPND_IMMU21:
5753 case IA64_OPND_IMMU24:
5754 case IA64_OPND_MBTYPE4:
5755 case IA64_OPND_MHTYPE8:
5756 case IA64_OPND_POS6:
5757 bits = operand_width (idesc->operands[res_index]);
5758 if (e->X_op == O_constant)
5760 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5761 return OPERAND_MATCH;
5763 return OPERAND_OUT_OF_RANGE;
5767 case IA64_OPND_IMMU9:
5768 bits = operand_width (idesc->operands[res_index]);
5769 if (e->X_op == O_constant)
5771 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5773 int lobits = e->X_add_number & 0x3;
5774 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5775 e->X_add_number |= (bfd_vma) 0x3;
5776 return OPERAND_MATCH;
5779 return OPERAND_OUT_OF_RANGE;
5783 case IA64_OPND_IMM44:
5784 /* least 16 bits must be zero */
5785 if ((e->X_add_number & 0xffff) != 0)
5786 /* XXX technically, this is wrong: we should not be issuing warning
5787 messages until we're sure this instruction pattern is going to be used. */
5789 as_warn (_("lower 16 bits of mask ignored"));
5791 if (e->X_op == O_constant)
5793 if (((e->X_add_number >= 0
5794 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5795 || (e->X_add_number < 0
5796 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
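/* The value fits in 44 bits; if bit 43 is set, sign-extend into the upper
   bits so that later processing sees the canonical 64-bit form. */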
5799 if (e->X_add_number >= 0
5800 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5802 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5804 return OPERAND_MATCH;
5807 return OPERAND_OUT_OF_RANGE;
5811 case IA64_OPND_IMM17:
5812 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5813 if (e->X_op == O_constant)
5815 if (((e->X_add_number >= 0
5816 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5817 || (e->X_add_number < 0
5818 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5821 if (e->X_add_number >= 0
5822 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5824 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5826 return OPERAND_MATCH;
5829 return OPERAND_OUT_OF_RANGE;
5833 case IA64_OPND_IMM14:
5834 case IA64_OPND_IMM22:
5836 case IA64_OPND_IMM1:
5837 case IA64_OPND_IMM8:
5838 case IA64_OPND_IMM8U4:
5839 case IA64_OPND_IMM8M1:
5840 case IA64_OPND_IMM8M1U4:
5841 case IA64_OPND_IMM8M1U8:
5842 case IA64_OPND_IMM9a:
5843 case IA64_OPND_IMM9b:
5844 bits = operand_width (idesc->operands[res_index]);
5845 if (relocatable && (e->X_op == O_symbol
5846 || e->X_op == O_subtract
5847 || e->X_op == O_pseudo_fixup))
5849 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5851 if (idesc->operands[res_index] == IA64_OPND_IMM14)
5852 fix->code = BFD_RELOC_IA64_IMM14;
5854 fix->code = BFD_RELOC_IA64_IMM22;
5856 if (e->X_op != O_subtract)
5858 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5859 if (e->X_op == O_pseudo_fixup)
5863 fix->opnd = idesc->operands[res_index];
5866 ++CURR_SLOT.num_fixups;
5867 return OPERAND_MATCH;
5869 else if (e->X_op != O_constant
5870 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5871 return OPERAND_MISMATCH;
5873 if (opnd == IA64_OPND_IMM8M1U4)
5875 /* Zero is not valid for unsigned compares that take an adjusted
5876 constant immediate range. */
5877 if (e->X_add_number == 0)
5878 return OPERAND_OUT_OF_RANGE;
5880 /* Sign-extend 32-bit unsigned numbers, so that the following range
5881 checks will work. */
5882 val = e->X_add_number;
5883 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5884 && ((val & ((bfd_vma) 1 << 31)) != 0))
5885 val = ((val << 32) >> 32);
5887 /* Check for 0x100000000. This is valid because
5888 0x100000000-1 is the same as ((uint32_t) -1). */
5889 if (val == ((bfd_signed_vma) 1 << 32))
5890 return OPERAND_MATCH;
5894 else if (opnd == IA64_OPND_IMM8M1U8)
5896 /* Zero is not valid for unsigned compares that take an adjusted
5897 constant immediate range. */
5898 if (e->X_add_number == 0)
5899 return OPERAND_OUT_OF_RANGE;
5901 /* Check for 0x10000000000000000. */
5902 if (e->X_op == O_big)
5904 if (generic_bignum[0] == 0
5905 && generic_bignum[1] == 0
5906 && generic_bignum[2] == 0
5907 && generic_bignum[3] == 0
5908 && generic_bignum[4] == 1)
5909 return OPERAND_MATCH;
5911 return OPERAND_OUT_OF_RANGE;
5914 val = e->X_add_number - 1;
5916 else if (opnd == IA64_OPND_IMM8M1)
5917 val = e->X_add_number - 1;
5918 else if (opnd == IA64_OPND_IMM8U4)
5920 /* Sign-extend 32-bit unsigned numbers, so that the following range
5921 checks will work. */
5922 val = e->X_add_number;
5923 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5924 && ((val & ((bfd_vma) 1 << 31)) != 0))
5925 val = ((val << 32) >> 32);
5928 val = e->X_add_number;
5930 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5931 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5932 return OPERAND_MATCH;
5934 return OPERAND_OUT_OF_RANGE;
5936 case IA64_OPND_INC3:
5937 /* +/- 1, 4, 8, 16 */
5938 val = e->X_add_number;
5941 if (e->X_op == O_constant)
5943 if ((val == 1 || val == 4 || val == 8 || val == 16))
5944 return OPERAND_MATCH;
5946 return OPERAND_OUT_OF_RANGE;
5950 case IA64_OPND_TGT25:
5951 case IA64_OPND_TGT25b:
5952 case IA64_OPND_TGT25c:
5953 case IA64_OPND_TGT64:
5954 if (e->X_op == O_symbol)
5956 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5957 if (opnd == IA64_OPND_TGT25)
5958 fix->code = BFD_RELOC_IA64_PCREL21F;
5959 else if (opnd == IA64_OPND_TGT25b)
5960 fix->code = BFD_RELOC_IA64_PCREL21M;
5961 else if (opnd == IA64_OPND_TGT25c)
5962 fix->code = BFD_RELOC_IA64_PCREL21B;
5963 else if (opnd == IA64_OPND_TGT64)
5964 fix->code = BFD_RELOC_IA64_PCREL60B;
5968 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5969 fix->opnd = idesc->operands[res_index];
5972 ++CURR_SLOT.num_fixups;
5973 return OPERAND_MATCH;
5975 case IA64_OPND_TAG13:
5976 case IA64_OPND_TAG13b:
5980 return OPERAND_MATCH;
5983 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5984 /* There are no external relocs for TAG13/TAG13b fields, so we
5985 create a dummy reloc. This will not live past md_apply_fix. */
5986 fix->code = BFD_RELOC_UNUSED;
5987 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5988 fix->opnd = idesc->operands[res_index];
5991 ++CURR_SLOT.num_fixups;
5992 return OPERAND_MATCH;
5999 case IA64_OPND_LDXMOV:
6000 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
6001 fix->code = BFD_RELOC_IA64_LDXMOV;
6002 fix->opnd = idesc->operands[res_index];
6005 ++CURR_SLOT.num_fixups;
6006 return OPERAND_MATCH;
6008 case IA64_OPND_STRD5b:
6009 if (e->X_op == O_constant)
6011 /* 5-bit signed scaled by 64 */
6012 if ((e->X_add_number <= ( 0xf << 6 ))
6013 && (e->X_add_number >= -( 0x10 << 6 )))
6016 /* Must be a multiple of 64 */
6017 if ((e->X_add_number & 0x3f) != 0)
6018 as_warn (_("stride must be a multiple of 64; lower 6 bits ignored"));
6020 e->X_add_number &= ~ 0x3f;
6021 return OPERAND_MATCH;
6024 return OPERAND_OUT_OF_RANGE;
6027 case IA64_OPND_CNT6a:
6028 if (e->X_op == O_constant)
6030 /* 6-bit unsigned biased by 1 -- count 0 is meaningless */
6031 if ((e->X_add_number <= 64)
6032 && (e->X_add_number > 0) )
6034 return OPERAND_MATCH;
6037 return OPERAND_OUT_OF_RANGE;
6044 return OPERAND_MISMATCH;
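/* Parse the next instruction operand into E. MORE, if nonzero, is an
   additional separator character (besides ',') that may legitimately follow
   the operand; when MORE is nonzero, a trailing ',' or MORE separator is
   skipped. Returns the separator character that was found. */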
6048 parse_operand (expressionS *e, int more)
6052 memset (e, 0, sizeof (*e));
6056 sep = *input_line_pointer;
6057 if (more && (sep == ',' || sep == more))
6058 ++input_line_pointer;
6063 parse_operand_and_eval (expressionS *e, int more)
6065 int sep = parse_operand (e, more);
6066 resolve_expression (e);
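/* Like parse_operand_and_eval, except that operands of the kinds listed
   below may end up as relocations, so their expressions are left unresolved
   to preserve the symbolic information; all other operands are resolved
   immediately. */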
6071 parse_operand_maybe_eval (expressionS *e, int more, enum ia64_opnd op)
6073 int sep = parse_operand (e, more);
6076 case IA64_OPND_IMM14:
6077 case IA64_OPND_IMM22:
6078 case IA64_OPND_IMMU64:
6079 case IA64_OPND_TGT25:
6080 case IA64_OPND_TGT25b:
6081 case IA64_OPND_TGT25c:
6082 case IA64_OPND_TGT64:
6083 case IA64_OPND_TAG13:
6084 case IA64_OPND_TAG13b:
6085 case IA64_OPND_LDXMOV:
6088 resolve_expression (e);
6094 /* Returns the next entry in the opcode table that matches the one in
6095 IDESC, and frees the entry in IDESC. If no matching entry is
6096 found, NULL is returned instead. */
6098 static struct ia64_opcode *
6099 get_next_opcode (struct ia64_opcode *idesc)
6101 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
6102 ia64_free_opcode (idesc);
6106 /* Parse the operands for the opcode and find the opcode variant that
6107 matches the specified operands, or NULL if no match is possible. */
6109 static struct ia64_opcode *
6110 parse_operands (struct ia64_opcode *idesc)
6112 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
6113 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
6116 enum ia64_opnd expected_operand = IA64_OPND_NIL;
6117 enum operand_match_result result;
6119 char *first_arg = 0, *end, *saved_input_pointer;
6122 gas_assert (strlen (idesc->name) <= 128);
6124 strcpy (mnemonic, idesc->name);
6125 if (idesc->operands[2] == IA64_OPND_SOF
6126 || idesc->operands[1] == IA64_OPND_SOF)
6128 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
6129 can't parse the first operand until we have parsed the
6130 remaining operands of the "alloc" instruction. */
6132 first_arg = input_line_pointer;
6133 end = strchr (input_line_pointer, '=');
6136 as_bad (_("Expected separator `='"));
6139 input_line_pointer = end + 1;
6146 if (i < NELEMS (CURR_SLOT.opnd))
6148 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + i, '=',
6149 idesc->operands[i]);
6150 if (CURR_SLOT.opnd[i].X_op == O_absent)
6157 sep = parse_operand (&dummy, '=');
6158 if (dummy.X_op == O_absent)
6164 if (sep != '=' && sep != ',')
6169 if (num_outputs > 0)
6170 as_bad (_("Duplicate equal sign (=) in instruction"));
6172 num_outputs = i + 1;
6177 as_bad (_("Illegal operand separator `%c'"), sep);
6181 if (idesc->operands[2] == IA64_OPND_SOF
6182 || idesc->operands[1] == IA64_OPND_SOF)
6184 /* Map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r.
6185 Note, however, that due to that mapping operand numbers in error
6186 messages for any of the constant operands will not be correct. */
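/* For example, "alloc loc0=ar.pfs,2,3,4,0" becomes sof = 2+3+4 = 9 and
   sol = 2+3 = 5, with the rotating register count passed through
   unchanged. */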
6187 know (strcmp (idesc->name, "alloc") == 0);
6188 /* The first operand hasn't been parsed/initialized, yet (but
6189 num_operands intentionally doesn't account for that). */
6190 i = num_operands > 4 ? 2 : 1;
6191 #define FORCE_CONST(n) (CURR_SLOT.opnd[n].X_op == O_constant \
6192 ? CURR_SLOT.opnd[n].X_add_number \
6194 sof = set_regstack (FORCE_CONST(i),
6197 FORCE_CONST(i + 3));
6200 /* now we can parse the first arg: */
6201 saved_input_pointer = input_line_pointer;
6202 input_line_pointer = first_arg;
6203 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + 0, '=',
6204 idesc->operands[0]);
6206 --num_outputs; /* force error */
6207 input_line_pointer = saved_input_pointer;
6209 CURR_SLOT.opnd[i].X_add_number = sof;
6210 if (CURR_SLOT.opnd[i + 1].X_op == O_constant
6211 && CURR_SLOT.opnd[i + 2].X_op == O_constant)
6212 CURR_SLOT.opnd[i + 1].X_add_number
6213 = sof - CURR_SLOT.opnd[i + 2].X_add_number;
6215 CURR_SLOT.opnd[i + 1].X_op = O_illegal;
6216 CURR_SLOT.opnd[i + 2] = CURR_SLOT.opnd[i + 3];
6219 highest_unmatched_operand = -4;
6220 curr_out_of_range_pos = -1;
6222 for (; idesc; idesc = get_next_opcode (idesc))
6224 if (num_outputs != idesc->num_outputs)
6225 continue; /* mismatch in # of outputs */
6226 if (highest_unmatched_operand < 0)
6227 highest_unmatched_operand |= 1;
6228 if (num_operands > NELEMS (idesc->operands)
6229 || (num_operands < NELEMS (idesc->operands)
6230 && idesc->operands[num_operands])
6231 || (num_operands > 0 && !idesc->operands[num_operands - 1]))
6232 continue; /* mismatch in number of arguments */
6233 if (highest_unmatched_operand < 0)
6234 highest_unmatched_operand |= 2;
6236 CURR_SLOT.num_fixups = 0;
6238 /* Try to match all operands. If we see an out-of-range operand,
6239 then continue trying to match the rest of the operands, since if
6240 the rest match, then this idesc will give the best error message. */
6242 out_of_range_pos = -1;
6243 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
6245 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
6246 if (result != OPERAND_MATCH)
6248 if (result != OPERAND_OUT_OF_RANGE)
6250 if (out_of_range_pos < 0)
6251 /* remember position of the first out-of-range operand: */
6252 out_of_range_pos = i;
6256 /* If we did not match all operands, or if at least one operand was
6257 out-of-range, then this idesc does not match. Keep track of which
6258 idesc matched the most operands before failing. If we have two
6259 idescs that failed at the same position, and one had an out-of-range
6260 operand, then prefer the out-of-range operand. Thus if we have
6261 "add r0=0x1000000,r1" we get an error saying the constant is out
6262 of range instead of an error saying that the constant should have been
6265 if (i != num_operands || out_of_range_pos >= 0)
6267 if (i > highest_unmatched_operand
6268 || (i == highest_unmatched_operand
6269 && out_of_range_pos > curr_out_of_range_pos))
6271 highest_unmatched_operand = i;
6272 if (out_of_range_pos >= 0)
6274 expected_operand = idesc->operands[out_of_range_pos];
6275 error_pos = out_of_range_pos;
6279 expected_operand = idesc->operands[i];
6282 curr_out_of_range_pos = out_of_range_pos;
6291 if (expected_operand)
6292 as_bad (_("Operand %u of `%s' should be %s"),
6293 error_pos + 1, mnemonic,
6294 elf64_ia64_operands[expected_operand].desc);
6295 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 1))
6296 as_bad (_("Wrong number of output operands"));
6297 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 2))
6298 as_bad (_("Wrong number of input operands"));
6300 as_bad (_("Operand mismatch"));
6304 /* Check that the instruction doesn't use
6305 - r0, f0, or f1 as output operands
6306 - the same predicate twice as output operands
6307 - r0 as address of a base update load or store
6308 - the same GR as output and address of a base update load
6309 - two even- or two odd-numbered FRs as output operands of a floating
6310 point parallel load.
6311 At most two (conflicting) output (or output-like) operands can exist,
6312 (floating point parallel loads have three outputs, but the base register,
6313 if updated, cannot conflict with the actual outputs). */
6315 for (i = 0; i < num_operands; ++i)
6320 switch (idesc->operands[i])
6325 if (i < num_outputs)
6327 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6330 reg1 = CURR_SLOT.opnd[i].X_add_number;
6332 reg2 = CURR_SLOT.opnd[i].X_add_number;
6337 if (i < num_outputs)
6340 reg1 = CURR_SLOT.opnd[i].X_add_number;
6342 reg2 = CURR_SLOT.opnd[i].X_add_number;
6349 if (i < num_outputs)
6351 if (CURR_SLOT.opnd[i].X_add_number >= REG_FR
6352 && CURR_SLOT.opnd[i].X_add_number <= REG_FR + 1)
6355 regno = CURR_SLOT.opnd[i].X_add_number - REG_FR;
6358 reg1 = CURR_SLOT.opnd[i].X_add_number;
6360 reg2 = CURR_SLOT.opnd[i].X_add_number;
6364 if (idesc->flags & IA64_OPCODE_POSTINC)
6366 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6369 reg1 = CURR_SLOT.opnd[i].X_add_number;
6371 reg2 = CURR_SLOT.opnd[i].X_add_number;
6382 as_warn (_("Invalid use of `%c%d' as output operand"), reg_class, regno);
6385 as_warn (_("Invalid use of `r%d' as base update address operand"), regno);
6391 if (reg1 >= REG_GR && reg1 <= REG_GR + 127)
6396 else if (reg1 >= REG_P && reg1 <= REG_P + 63)
6401 else if (reg1 >= REG_FR && reg1 <= REG_FR + 127)
6409 as_warn (_("Invalid duplicate use of `%c%d'"), reg_class, reg1);
6411 else if (((reg1 >= REG_FR && reg1 <= REG_FR + 31
6412 && reg2 >= REG_FR && reg2 <= REG_FR + 31)
6413 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6414 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127))
6415 && ! ((reg1 ^ reg2) & 1))
6416 as_warn (_("Invalid simultaneous use of `f%d' and `f%d'"),
6417 reg1 - REG_FR, reg2 - REG_FR);
6418 else if ((reg1 >= REG_FR && reg1 <= REG_FR + 31
6419 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127)
6420 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6421 && reg2 >= REG_FR && reg2 <= REG_FR + 31))
6422 as_warn (_("Dangerous simultaneous use of `f%d' and `f%d'"),
6423 reg1 - REG_FR, reg2 - REG_FR);
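/* Encode the instruction described by SLOT into machine words via INSNP.
   The long (X-unit) forms write their extra immediate bits through INSNP
   first and advance it, so those bits occupy the first of the two bundle
   slots the instruction uses. */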
6428 build_insn (struct slot *slot, bfd_vma *insnp)
6430 const struct ia64_operand *odesc, *o2desc;
6431 struct ia64_opcode *idesc = slot->idesc;
6437 insn = idesc->opcode | slot->qp_regno;
6439 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
6441 if (slot->opnd[i].X_op == O_register
6442 || slot->opnd[i].X_op == O_constant
6443 || slot->opnd[i].X_op == O_index)
6444 val = slot->opnd[i].X_add_number;
6445 else if (slot->opnd[i].X_op == O_big)
6447 /* This must be the value 0x10000000000000000. */
6448 gas_assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
6454 switch (idesc->operands[i])
6456 case IA64_OPND_IMMU64:
6457 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
6458 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
6459 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
6460 | (((val >> 63) & 0x1) << 36));
6463 case IA64_OPND_IMMU62:
6464 val &= 0x3fffffffffffffffULL;
6465 if (val != slot->opnd[i].X_add_number)
6466 as_warn (_("Value truncated to 62 bits"));
6467 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
6468 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
6471 case IA64_OPND_TGT64:
6473 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
6474 insn |= ((((val >> 59) & 0x1) << 36)
6475 | (((val >> 0) & 0xfffff) << 13));
6491 case IA64_OPND_DAHR3:
6510 case IA64_OPND_R3_2:
6511 case IA64_OPND_CPUID_R3:
6512 case IA64_OPND_DBR_R3:
6513 case IA64_OPND_DTR_R3:
6514 case IA64_OPND_ITR_R3:
6515 case IA64_OPND_IBR_R3:
6517 case IA64_OPND_MSR_R3:
6518 case IA64_OPND_PKR_R3:
6519 case IA64_OPND_PMC_R3:
6520 case IA64_OPND_PMD_R3:
6521 case IA64_OPND_DAHR_R3:
6522 case IA64_OPND_RR_R3:
6530 odesc = elf64_ia64_operands + idesc->operands[i];
6531 err = (*odesc->insert) (odesc, val, &insn);
6533 as_bad_where (slot->src_file, slot->src_line,
6534 _("Bad operand value: %s"), err);
6535 if (idesc->flags & IA64_OPCODE_PSEUDO)
6537 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6538 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6540 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6541 (*o2desc->insert) (o2desc, val, &insn);
6543 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6544 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6545 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6547 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6548 (*o2desc->insert) (o2desc, 64 - val, &insn);
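/* Emit the next bundle: choose a template (the user-specified one if any,
   otherwise the precomputed best match), fill up to three queued slots from
   md.slot[], pad unused slots with NOPs, resolve the labels, unwind records,
   and fixups attached to each slot, and write the 16-byte bundle out
   little-endian. */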
6556 emit_one_bundle (void)
6558 int manual_bundling_off = 0, manual_bundling = 0;
6559 enum ia64_unit required_unit, insn_unit = 0;
6560 enum ia64_insn_type type[3], insn_type;
6561 unsigned int template_val, orig_template;
6562 bfd_vma insn[3] = { -1, -1, -1 };
6563 struct ia64_opcode *idesc;
6564 int end_of_insn_group = 0, user_template = -1;
6565 int n, i, j, first, curr, last_slot;
6566 bfd_vma t0 = 0, t1 = 0;
6567 struct label_fix *lfix;
6568 bfd_boolean mark_label;
6569 struct insn_fix *ifix;
6575 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6576 know (first >= 0 && first < NUM_SLOTS);
6577 n = MIN (3, md.num_slots_in_use);
6579 /* Determine template: use user_template if specified, best match otherwise. */
6582 if (md.slot[first].user_template >= 0)
6583 user_template = template_val = md.slot[first].user_template;
6586 /* Auto select appropriate template. */
6587 memset (type, 0, sizeof (type));
6589 for (i = 0; i < n; ++i)
6591 if (md.slot[curr].label_fixups && i != 0)
6593 type[i] = md.slot[curr].idesc->type;
6594 curr = (curr + 1) % NUM_SLOTS;
6596 template_val = best_template[type[0]][type[1]][type[2]];
6599 /* initialize instructions with appropriate nops: */
6600 for (i = 0; i < 3; ++i)
6601 insn[i] = nop[ia64_templ_desc[template_val].exec_unit[i]];
6605 /* Check to see if this bundle is at an offset that is a multiple of 16 bytes
6606 from the start of the frag. */
6607 addr_mod = frag_now_fix () & 15;
6608 if (frag_now->has_code && frag_now->insn_addr != addr_mod)
6609 as_bad (_("instruction address is not a multiple of 16"));
6610 frag_now->insn_addr = addr_mod;
6611 frag_now->has_code = 1;
6613 /* now fill in slots with as many insns as possible: */
6615 idesc = md.slot[curr].idesc;
6616 end_of_insn_group = 0;
6618 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6620 /* If we have unwind records, we may need to update some now. */
6621 unw_rec_list *ptr = md.slot[curr].unwind_record;
6622 unw_rec_list *end_ptr = NULL;
6626 /* Find the last prologue/body record in the list for the current
6627 insn, and set the slot number for all records up to that point.
6628 This needs to be done now, because prologue/body records refer to
6629 the current point, not the point after the instruction has been
6630 issued. This matters because there may have been nops emitted
6631 meanwhile. Any non-prologue non-body record followed by a
6632 prologue/body record must also refer to the current point. */
6633 unw_rec_list *last_ptr;
6635 for (j = 1; end_ptr == NULL && j < md.num_slots_in_use; ++j)
6636 end_ptr = md.slot[(curr + j) % NUM_SLOTS].unwind_record;
6637 for (last_ptr = NULL; ptr != end_ptr; ptr = ptr->next)
6638 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6639 || ptr->r.type == body)
6643 /* Make last_ptr point one after the last prologue/body record. */
6645 last_ptr = last_ptr->next;
6646 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
6649 ptr->slot_number = (unsigned long) f + i;
6650 ptr->slot_frag = frag_now;
6652 /* Remove the initialized records, so that we won't accidentally
6653 update them again if we insert a nop and continue. */
6654 md.slot[curr].unwind_record = last_ptr;
6658 manual_bundling_off = md.slot[curr].manual_bundling_off;
6659 if (md.slot[curr].manual_bundling_on)
6662 manual_bundling = 1;
6664 break; /* Need to start a new bundle. */
6667 /* If this instruction specifies a template, then it must be the first
6668 instruction of a bundle. */
6669 if (curr != first && md.slot[curr].user_template >= 0)
6672 if (idesc->flags & IA64_OPCODE_SLOT2)
6674 if (manual_bundling && !manual_bundling_off)
6676 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6677 _("`%s' must be last in bundle"), idesc->name);
6679 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6683 if (idesc->flags & IA64_OPCODE_LAST)
6686 unsigned int required_template;
6688 /* If we need a stop bit after an M slot, our only choice is
6689 template 5 (M;;MI). If we need a stop bit after a B
6690 slot, our only choice is to place it at the end of the
6691 bundle, because the only available templates are MIB,
6692 MBB, BBB, MMB, and MFB. We don't handle anything other
6693 than M and B slots because these are the only kind of
6694 instructions that can have the IA64_OPCODE_LAST bit set. */
6695 required_template = template_val;
6696 switch (idesc->type)
6700 required_template = 5;
6708 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6709 _("Internal error: don't know how to force %s to end of instruction group"),
6715 && (i > required_slot
6716 || (required_slot == 2 && !manual_bundling_off)
6717 || (user_template >= 0
6718 /* Changing from MMI to M;MI is OK. */
6719 && (template_val ^ required_template) > 1)))
6721 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6722 _("`%s' must be last in instruction group"),
6724 if (i < 2 && required_slot == 2 && !manual_bundling_off)
6725 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6727 if (required_slot < i)
6728 /* Can't fit this instruction. */
6732 if (required_template != template_val)
6734 /* If we switch the template, we need to reset the NOPs
6735 after slot i. The slot-types of the instructions ahead
6736 of i never change, so we don't need to worry about
6737 changing NOPs in front of this slot. */
6738 for (j = i; j < 3; ++j)
6739 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6741 /* We just picked a template that includes the stop bit in the
6742 middle, so we don't need another one emitted later. */
6743 md.slot[curr].end_of_insn_group = 0;
6745 template_val = required_template;
6747 if (curr != first && md.slot[curr].label_fixups)
6749 if (manual_bundling)
6751 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6752 _("Label must be first in a bundle"));
6753 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6755 /* This insn must go into the first slot of a bundle. */
6759 if (end_of_insn_group && md.num_slots_in_use >= 1)
6761 /* We need an instruction group boundary in the middle of a
6762 bundle. See if we can switch to another template with
6763 an appropriate boundary. */
6765 orig_template = template_val;
6766 if (i == 1 && (user_template == 4
6767 || (user_template < 0
6768 && (ia64_templ_desc[template_val].exec_unit[0]
6772 end_of_insn_group = 0;
6774 else if (i == 2 && (user_template == 0
6775 || (user_template < 0
6776 && (ia64_templ_desc[template_val].exec_unit[1]
6778 /* This test makes sure we don't switch the template if
6779 the next instruction is one that needs to be first in
6780 an instruction group. Since all those instructions are
6781 in the M group, there is no way such an instruction can
6782 fit in this bundle even if we switch the template. The
6783 reason we have to check for this is that otherwise we
6784 may end up generating "MI;;I M.." which has the deadly
6785 effect that the second M instruction is no longer the
6786 first in the group! --davidm 99/12/16 */
6787 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6790 end_of_insn_group = 0;
6793 && user_template == 0
6794 && !(idesc->flags & IA64_OPCODE_FIRST))
6795 /* Use the next slot. */
6797 else if (curr != first)
6798 /* can't fit this insn */
6801 if (template_val != orig_template)
6802 /* if we switch the template, we need to reset the NOPs
6803 after slot i. The slot-types of the instructions ahead
6804 of i never change, so we don't need to worry about
6805 changing NOPs in front of this slot. */
6806 for (j = i; j < 3; ++j)
6807 insn[j] = nop[ia64_templ_desc[template_val].exec_unit[j]];
6809 required_unit = ia64_templ_desc[template_val].exec_unit[i];
6811 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
6812 if (idesc->type == IA64_TYPE_DYN)
6814 enum ia64_opnd opnd1, opnd2;
6816 if ((strcmp (idesc->name, "nop") == 0)
6817 || (strcmp (idesc->name, "break") == 0))
6818 insn_unit = required_unit;
6819 else if (strcmp (idesc->name, "hint") == 0)
6821 insn_unit = required_unit;
6822 if (required_unit == IA64_UNIT_B)
6828 case hint_b_warning:
6829 as_warn (_("hint in B unit may be treated as nop"));
6832 /* When manual bundling is off and there is no
6833 user template, we choose a different unit so
6834 that hint won't go into the current slot. We
6835 will fill the current bundle with nops and
6836 try to put hint into the next bundle. */
6837 if (!manual_bundling && user_template < 0)
6838 insn_unit = IA64_UNIT_I;
6840 as_bad (_("hint in B unit can't be used"));
6845 else if (strcmp (idesc->name, "chk.s") == 0
6846 || strcmp (idesc->name, "mov") == 0)
6848 insn_unit = IA64_UNIT_M;
6849 if (required_unit == IA64_UNIT_I
6850 || (required_unit == IA64_UNIT_F && template_val == 6))
6851 insn_unit = IA64_UNIT_I;
6854 as_fatal (_("emit_one_bundle: unexpected dynamic op"));
6856 snprintf (mnemonic, sizeof (mnemonic), "%s.%c",
6857 idesc->name, "?imbfxx"[insn_unit]);
6858 opnd1 = idesc->operands[0];
6859 opnd2 = idesc->operands[1];
6860 ia64_free_opcode (idesc);
6861 idesc = ia64_find_opcode (mnemonic);
6862 /* moves to/from ARs have collisions */
6863 if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3)
6865 while (idesc != NULL
6866 && (idesc->operands[0] != opnd1
6867 || idesc->operands[1] != opnd2))
6868 idesc = get_next_opcode (idesc);
6870 md.slot[curr].idesc = idesc;
6874 insn_type = idesc->type;
6875 insn_unit = IA64_UNIT_NIL;
6879 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6880 insn_unit = required_unit;
6882 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6883 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6884 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6885 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6886 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6891 if (insn_unit != required_unit)
6892 continue; /* Try next slot. */
6894 /* Now is a good time to fix up the labels for this insn. */
6896 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6898 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6899 symbol_set_frag (lfix->sym, frag_now);
6900 mark_label |= lfix->dw2_mark_labels;
6902 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6904 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6905 symbol_set_frag (lfix->sym, frag_now);
6908 if (debug_type == DEBUG_DWARF2
6909 || md.slot[curr].loc_directive_seen
6912 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6914 md.slot[curr].loc_directive_seen = 0;
6916 md.slot[curr].debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
6918 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6921 build_insn (md.slot + curr, insn + i);
6923 ptr = md.slot[curr].unwind_record;
6926 /* Set slot numbers for all remaining unwind records belonging to the
6927 current insn. There cannot be any prologue/body unwind records here. */
6929 for (; ptr != end_ptr; ptr = ptr->next)
6931 ptr->slot_number = (unsigned long) f + i;
6932 ptr->slot_frag = frag_now;
6934 md.slot[curr].unwind_record = NULL;
6937 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6939 ifix = md.slot[curr].fixup + j;
6940 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6941 &ifix->expr, ifix->is_pcrel, ifix->code);
6942 fix->tc_fix_data.opnd = ifix->opnd;
6943 fix->fx_file = md.slot[curr].src_file;
6944 fix->fx_line = md.slot[curr].src_line;
6947 end_of_insn_group = md.slot[curr].end_of_insn_group;
6949 /* This adjustment to "i" must occur after the fix, otherwise the fix
6950 is assigned to the wrong slot, and the VMS linker complains. */
6951 if (required_unit == IA64_UNIT_L)
6954 /* skip one slot for long/X-unit instructions */
6957 --md.num_slots_in_use;
6961 ia64_free_opcode (md.slot[curr].idesc);
6962 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6963 md.slot[curr].user_template = -1;
6965 if (manual_bundling_off)
6967 manual_bundling = 0;
6970 curr = (curr + 1) % NUM_SLOTS;
6971 idesc = md.slot[curr].idesc;
6974 /* A user template was specified, but the first following instruction did
6975 not fit. This can happen with or without manual bundling. */
6976 if (md.num_slots_in_use > 0 && last_slot < 0)
6978 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6979 _("`%s' does not fit into %s template"),
6980 idesc->name, ia64_templ_desc[template_val].name);
6981 /* Drop first insn so we don't livelock. */
6982 --md.num_slots_in_use;
6983 know (curr == first);
6984 ia64_free_opcode (md.slot[curr].idesc);
6985 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6986 md.slot[curr].user_template = -1;
6988 else if (manual_bundling > 0)
6990 if (md.num_slots_in_use > 0)
6993 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6994 _("`%s' does not fit into bundle"), idesc->name);
6999 if (template_val == 2)
7001 else if (last_slot == 0)
7002 where = "slots 2 or 3";
7005 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
7006 _("`%s' can't go in %s of %s template"),
7007 idesc->name, where, ia64_templ_desc[template_val].name);
7011 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
7012 _("Missing '}' at end of file"));
7015 know (md.num_slots_in_use < NUM_SLOTS);
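/* Pack the 128-bit bundle: bits 0..4 are the template field (bit 0 marks a
   stop at the end of the bundle), and the three 41-bit instruction slots
   occupy bits 5..45, 46..86, and 87..127. Slot 1 straddles the two 64-bit
   words, hence the 18/23-bit split below. */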
7017 t0 = end_of_insn_group | (template_val << 1) | (insn[0] << 5) | (insn[1] << 46);
7018 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
7020 number_to_chars_littleendian (f + 0, t0, 8);
7021 number_to_chars_littleendian (f + 8, t1, 8);
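/* Handle the machine-dependent command line option C (with argument ARG
   where applicable). Returns nonzero if the option was recognized. */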
7025 md_parse_option (int c, char *arg)
7030 /* Switches from the Intel assembler. */
7032 if (strcmp (arg, "ilp64") == 0
7033 || strcmp (arg, "lp64") == 0
7034 || strcmp (arg, "p64") == 0)
7036 md.flags |= EF_IA_64_ABI64;
7038 else if (strcmp (arg, "ilp32") == 0)
7040 md.flags &= ~EF_IA_64_ABI64;
7042 else if (strcmp (arg, "le") == 0)
7044 md.flags &= ~EF_IA_64_BE;
7045 default_big_endian = 0;
7047 else if (strcmp (arg, "be") == 0)
7049 md.flags |= EF_IA_64_BE;
7050 default_big_endian = 1;
7052 else if (strncmp (arg, "unwind-check=", 13) == 0)
7055 if (strcmp (arg, "warning") == 0)
7056 md.unwind_check = unwind_check_warning;
7057 else if (strcmp (arg, "error") == 0)
7058 md.unwind_check = unwind_check_error;
7062 else if (strncmp (arg, "hint.b=", 7) == 0)
7065 if (strcmp (arg, "ok") == 0)
7066 md.hint_b = hint_b_ok;
7067 else if (strcmp (arg, "warning") == 0)
7068 md.hint_b = hint_b_warning;
7069 else if (strcmp (arg, "error") == 0)
7070 md.hint_b = hint_b_error;
7074 else if (strncmp (arg, "tune=", 5) == 0)
7077 if (strcmp (arg, "itanium1") == 0)
7079 else if (strcmp (arg, "itanium2") == 0)
7089 if (strcmp (arg, "so") == 0)
7091 /* Suppress signon message. */
7093 else if (strcmp (arg, "pi") == 0)
7095 /* Reject privileged instructions. FIXME */
7097 else if (strcmp (arg, "us") == 0)
7099 /* Allow union of signed and unsigned range. FIXME */
7101 else if (strcmp (arg, "close_fcalls") == 0)
7103 /* Do not resolve global function calls. */
7110 /* temp[="prefix"] Insert temporary labels into the object file
7111 symbol table prefixed by "prefix".
7112 Default prefix is ":temp:". */
7117 /* indirect=<tgt> Assume unannotated indirect branches behavior
7118 according to <tgt> --
7119 exit: branch out from the current context (default)
7120 labels: all labels in context may be branch targets. */
7122 if (strncmp (arg, "indirect=", 9) != 0)
7127 /* -X conflicts with an ignored option, use -x instead */
7129 if (!arg || strcmp (arg, "explicit") == 0)
7131 /* set default mode to explicit */
7132 md.default_explicit_mode = 1;
7135 else if (strcmp (arg, "auto") == 0)
7137 md.default_explicit_mode = 0;
7139 else if (strcmp (arg, "none") == 0)
7143 else if (strcmp (arg, "debug") == 0)
7147 else if (strcmp (arg, "debugx") == 0)
7149 md.default_explicit_mode = 1;
7152 else if (strcmp (arg, "debugn") == 0)
7159 as_bad (_("Unrecognized option '-x%s'"), arg);
7164 /* nops Print nops statistics. */
7167 /* GNU specific switches for gcc. */
7168 case OPTION_MCONSTANT_GP:
7169 md.flags |= EF_IA_64_CONS_GP;
7172 case OPTION_MAUTO_PIC:
7173 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
7184 md_show_usage (FILE *stream)
7188 --mconstant-gp mark output file as using the constant-GP model\n\
7189 (sets ELF header flag EF_IA_64_CONS_GP)\n\
7190 --mauto-pic mark output file as using the constant-GP model\n\
7191 without function descriptors (sets ELF header flag\n\
7192 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
7193 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
7194 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
7195 -mtune=[itanium1|itanium2]\n\
7196 tune for a specific CPU (default -mtune=itanium2)\n\
7197 -munwind-check=[warning|error]\n\
7198 unwind directive check (default -munwind-check=warning)\n\
7199 -mhint.b=[ok|warning|error]\n\
7200 hint.b check (default -mhint.b=error)\n\
7201 -x | -xexplicit turn on dependency violation checking\n"), stream);
7202 /* Note for translators: "automagically" can be translated as "automatically" here. */
7204 -xauto automagically remove dependency violations (default)\n\
7205 -xnone turn off dependency violation checking\n\
7206 -xdebug debug dependency violation checker\n\
7207 -xdebugn debug dependency violation checker but turn off\n\
7208 dependency violation checking\n\
7209 -xdebugx debug dependency violation checker and turn on\n\
7210 dependency violation checking\n"),
7215 ia64_after_parse_args (void)
7217 if (debug_type == DEBUG_STABS)
7218 as_fatal (_("--gstabs is not supported for ia64"));
7221 /* Return true if TYPE fits in TEMPL at SLOT. */
7224 match (int templ, int type, int slot)
7226 enum ia64_unit unit;
7229 unit = ia64_templ_desc[templ].exec_unit[slot];
7232 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
7234 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
7236 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
7237 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
7238 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
7239 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
7240 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
7241 default: result = 0; break;
7246 /* For Itanium 1, add a bit of extra goodness if a nop of type F or B would fit
7247 in TEMPL at SLOT. For Itanium 2, add a bit of extra goodness if a nop of
7248 type M or I would fit in TEMPL at SLOT. */
7251 extra_goodness (int templ, int slot)
7256 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
7258 else if (slot == 2 && match (templ, IA64_TYPE_B, slot))
7264 if (match (templ, IA64_TYPE_M, slot)
7265 || match (templ, IA64_TYPE_I, slot))
7266 /* Favor M- and I-unit NOPs. We definitely want to avoid
7267 F-unit and B-unit NOPs, which may cause split-issue or
7268 less-than-optimal branch prediction. */
7279 /* This function is called once, at assembler startup time. It sets
7280 up all the tables, etc. that the MD part of the assembler will need
7281 that can be determined before arguments are parsed. */
7285 int i, j, k, t, goodness, best, ok;
7290 md.explicit_mode = md.default_explicit_mode;
7292 bfd_set_section_alignment (stdoutput, text_section, 4);
7294 /* Make sure function pointers get initialized. */
7295 target_big_endian = -1;
7296 dot_byteorder (default_big_endian);
7298 alias_hash = hash_new ();
7299 alias_name_hash = hash_new ();
7300 secalias_hash = hash_new ();
7301 secalias_name_hash = hash_new ();
7303 pseudo_func[FUNC_DTP_MODULE].u.sym =
7304 symbol_new (".<dtpmod>", undefined_section, FUNC_DTP_MODULE,
7305 &zero_address_frag);
7307 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
7308 symbol_new (".<dtprel>", undefined_section, FUNC_DTP_RELATIVE,
7309 &zero_address_frag);
7311 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
7312 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE,
7313 &zero_address_frag);
7315 pseudo_func[FUNC_GP_RELATIVE].u.sym =
7316 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE,
7317 &zero_address_frag);
7319 pseudo_func[FUNC_LT_RELATIVE].u.sym =
7320 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE,
7321 &zero_address_frag);
7323 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
7324 symbol_new (".<ltoffx>", undefined_section, FUNC_LT_RELATIVE_X,
7325 &zero_address_frag);
7327 pseudo_func[FUNC_PC_RELATIVE].u.sym =
7328 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE,
7329 &zero_address_frag);
7331 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
7332 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE,
7333 &zero_address_frag);
7335 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
7336 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE,
7337 &zero_address_frag);
7339 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
7340 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE,
7341 &zero_address_frag);
7343 pseudo_func[FUNC_TP_RELATIVE].u.sym =
7344 symbol_new (".<tprel>", undefined_section, FUNC_TP_RELATIVE,
7345 &zero_address_frag);
7347 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
7348 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE,
7349 &zero_address_frag);
7351 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
7352 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE,
7353 &zero_address_frag);
7355 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
7356 symbol_new (".<ltoff.dtpmod>", undefined_section, FUNC_LT_DTP_MODULE,
7357 &zero_address_frag);
7359 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
7360 symbol_new (".<ltoff.dptrel>", undefined_section, FUNC_LT_DTP_RELATIVE,
7361 &zero_address_frag);
7363 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
7364 symbol_new (".<ltoff.tprel>", undefined_section, FUNC_LT_TP_RELATIVE,
7365 &zero_address_frag);
7367 pseudo_func[FUNC_IPLT_RELOC].u.sym =
7368 symbol_new (".<iplt>", undefined_section, FUNC_IPLT_RELOC,
7369 &zero_address_frag);
7372 pseudo_func[FUNC_SLOTCOUNT_RELOC].u.sym =
7373 symbol_new (".<slotcount>", undefined_section, FUNC_SLOTCOUNT_RELOC,
7374 &zero_address_frag);
7377 if (md.tune != itanium1)
7379 /* Convert MFI NOP bundles into MMI NOP bundles. */
7381 le_nop_stop[0] = 0x9;
7384 /* Compute the table of best templates. We compute goodness as a
7385 base 4 value, in which each match counts for 3. Match-failures
7386 result in NOPs and we use extra_goodness() to pick the execution
7387 units that are best suited for issuing the NOP. */
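/* For example, an M, I, I sequence fully matches an MII template and scores
   3 + 3 + 3, whereas a template that can hold only the first two insns
   scores at most 3 + 3 plus the extra_goodness () bonus for the NOP slot. */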
7388 for (i = 0; i < IA64_NUM_TYPES; ++i)
7389 for (j = 0; j < IA64_NUM_TYPES; ++j)
7390 for (k = 0; k < IA64_NUM_TYPES; ++k)
7393 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
7396 if (match (t, i, 0))
7398 if (match (t, j, 1))
7400 if ((t == 2 && j == IA64_TYPE_X) || match (t, k, 2))
7401 goodness = 3 + 3 + 3;
7403 goodness = 3 + 3 + extra_goodness (t, 2);
7405 else if (match (t, j, 2))
7406 goodness = 3 + 3 + extra_goodness (t, 1);
7410 goodness += extra_goodness (t, 1);
7411 goodness += extra_goodness (t, 2);
7414 else if (match (t, i, 1))
7416 if ((t == 2 && i == IA64_TYPE_X) || match (t, j, 2))
7419 goodness = 3 + extra_goodness (t, 2);
7421 else if (match (t, i, 2))
7422 goodness = 3 + extra_goodness (t, 1);
7424 if (goodness > best)
7427 best_template[i][j][k] = t;
7432 #ifdef DEBUG_TEMPLATES
7433 /* For debugging changes to the best_template calculations. We don't care
7434 about combinations with invalid instructions, so start the loops at 1. */
7435 for (i = 0; i < IA64_NUM_TYPES; ++i)
7436 for (j = 0; j < IA64_NUM_TYPES; ++j)
7437 for (k = 0; k < IA64_NUM_TYPES; ++k)
7439 char type_letter[IA64_NUM_TYPES] = { 'n', 'a', 'i', 'm', 'b', 'f',
7441 fprintf (stderr, "%c%c%c %s\n", type_letter[i], type_letter[j],
7443 ia64_templ_desc[best_template[i][j][k]].name);
7447 for (i = 0; i < NUM_SLOTS; ++i)
7448 md.slot[i].user_template = -1;
7450 md.pseudo_hash = hash_new ();
7451 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
7453 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
7454 (void *) (pseudo_opcode + i));
7456 as_fatal (_("ia64.md_begin: can't hash `%s': %s"),
7457 pseudo_opcode[i].name, err);
7460 md.reg_hash = hash_new ();
7461 md.dynreg_hash = hash_new ();
7462 md.const_hash = hash_new ();
7463 md.entry_hash = hash_new ();
7465 /* general registers: */
7466 declare_register_set ("r", 128, REG_GR);
7467 declare_register ("gp", REG_GR + 1);
7468 declare_register ("sp", REG_GR + 12);
7469 declare_register ("tp", REG_GR + 13);
7470 declare_register_set ("ret", 4, REG_GR + 8);
7472 /* floating point registers: */
7473 declare_register_set ("f", 128, REG_FR);
7474 declare_register_set ("farg", 8, REG_FR + 8);
7475 declare_register_set ("fret", 8, REG_FR + 8);
7477 /* branch registers: */
7478 declare_register_set ("b", 8, REG_BR);
7479 declare_register ("rp", REG_BR + 0);
7481 /* predicate registers: */
7482 declare_register_set ("p", 64, REG_P);
7483 declare_register ("pr", REG_PR);
7484 declare_register ("pr.rot", REG_PR_ROT);
7486 /* application registers: */
7487 declare_register_set ("ar", 128, REG_AR);
7488 for (i = 0; i < NELEMS (ar); ++i)
7489 declare_register (ar[i].name, REG_AR + ar[i].regnum);
7491 /* control registers: */
7492 declare_register_set ("cr", 128, REG_CR);
7493 for (i = 0; i < NELEMS (cr); ++i)
7494 declare_register (cr[i].name, REG_CR + cr[i].regnum);
7496 /* dahr registers: */
7497 declare_register_set ("dahr", 8, REG_DAHR);
7499 declare_register ("ip", REG_IP);
7500 declare_register ("cfm", REG_CFM);
7501 declare_register ("psr", REG_PSR);
7502 declare_register ("psr.l", REG_PSR_L);
7503 declare_register ("psr.um", REG_PSR_UM);
7505 for (i = 0; i < NELEMS (indirect_reg); ++i)
7507 unsigned int regnum = indirect_reg[i].regnum;
7509 md.indregsym[regnum - IND_CPUID] = declare_register (indirect_reg[i].name, regnum);
7512 /* pseudo-registers used to specify unwind info: */
7513 declare_register ("psp", REG_PSP);
7515 for (i = 0; i < NELEMS (const_bits); ++i)
7517 err = hash_insert (md.const_hash, const_bits[i].name,
7518 (void *) (const_bits + i));
7520 as_fatal (_("Inserting \"%s\" into constant hash table failed: %s"),
7524 /* Set the architecture and machine depending on defaults and command line options. */
7526 if (md.flags & EF_IA_64_ABI64)
7527 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
7529 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
7532 as_warn (_("Could not set architecture and machine"));
7534 /* Set the pointer size and pointer shift size depending on md.flags */
7536 if (md.flags & EF_IA_64_ABI64)
7538 md.pointer_size = 8; /* pointers are 8 bytes */
7539 md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
7543 md.pointer_size = 4; /* pointers are 4 bytes */
7544 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
7547 md.mem_offset.hint = 0;
7550 md.entry_labels = NULL;
7553 /* Set the default options in md. Cannot do this in md_begin because
7554 that is called after md_parse_option which is where we set the
7555 options in md based on command line options. */
7558 ia64_init (int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED)
7560 md.flags = MD_FLAGS_DEFAULT;
7562 /* Don't turn on dependency checking for VMS; it doesn't work. */
7565 /* FIXME: We should change it to unwind_check_error someday. */
7566 md.unwind_check = unwind_check_warning;
7567 md.hint_b = hint_b_error;
7571 /* Return a string for the target object file format. */
7574 ia64_target_format (void)
7576 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
7578 if (md.flags & EF_IA_64_BE)
7580 if (md.flags & EF_IA_64_ABI64)
7581 #if defined(TE_AIX50)
7582 return "elf64-ia64-aix-big";
7583 #elif defined(TE_HPUX)
7584 return "elf64-ia64-hpux-big";
7586 return "elf64-ia64-big";
7589 #if defined(TE_AIX50)
7590 return "elf32-ia64-aix-big";
7591 #elif defined(TE_HPUX)
7592 return "elf32-ia64-hpux-big";
7594 return "elf32-ia64-big";
7599 if (md.flags & EF_IA_64_ABI64)
7600 #if defined (TE_AIX50)
7601 return "elf64-ia64-aix-little";
7602 #elif defined (TE_VMS)
7604 md.flags |= EF_IA_64_ARCHVER_1;
7605 return "elf64-ia64-vms";
7608 return "elf64-ia64-little";
7612 return "elf32-ia64-aix-little";
7614 return "elf32-ia64-little";
7619 return "unknown-format";
7623 ia64_end_of_source (void)
7625 /* terminate insn group upon reaching end of file: */
7626 insn_group_break (1, 0, 0);
7628 /* emit slots we haven't written yet: */
7629 ia64_flush_insns ();
7631 bfd_set_private_flags (stdoutput, md.flags);
7633 md.mem_offset.hint = 0;
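/* Hook run at the start of each statement. Diagnoses a qualifying predicate
   that was not followed by an instruction, handles an explicit stop (";;"),
   and tracks the '{' and '}' braces that open and close a manual bundle. */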
7637 ia64_start_line (void)
7642 /* Make sure we don't reference input_line_pointer[-1] when that's not valid. */
7648 if (md.qp.X_op == O_register)
7649 as_bad (_("qualifying predicate not followed by instruction"));
7650 md.qp.X_op = O_absent;
7652 if (ignore_input ())
7655 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
7657 if (md.detect_dv && !md.explicit_mode)
7664 as_warn (_("Explicit stops are ignored in auto mode"));
7668 insn_group_break (1, 0, 0);
7670 else if (input_line_pointer[-1] == '{')
7672 if (md.manual_bundling)
7673 as_warn (_("Found '{' when manual bundling is already turned on"));
7675 CURR_SLOT.manual_bundling_on = 1;
7676 md.manual_bundling = 1;
7678 /* Bundling is only acceptable in explicit mode
7679 or when in default automatic mode. */
7680 if (md.detect_dv && !md.explicit_mode)
7682 if (!md.mode_explicitly_set
7683 && !md.default_explicit_mode)
7686 as_warn (_("Found '{' after explicit switch to automatic mode"));
7689 else if (input_line_pointer[-1] == '}')
7691 if (!md.manual_bundling)
7692 as_warn (_("Found '}' when manual bundling is off"));
7694 PREV_SLOT.manual_bundling_off = 1;
7695 md.manual_bundling = 0;
7697 /* switch back to automatic mode, if applicable */
7700 && !md.mode_explicitly_set
7701 && !md.default_explicit_mode)
7706 /* This is a hook for ia64_frob_label, so that it can distinguish tags from labels. */
7708 static int defining_tag = 0;
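/* Hook for line-starting characters that read.c does not handle itself:
   '(' introduces a qualifying predicate for the following instruction and
   '[' introduces a tag or label definition. Returns nonzero if the
   character was handled. */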
7711 ia64_unrecognized_line (int ch)
7716 expression_and_evaluate (&md.qp);
7717 if (*input_line_pointer++ != ')')
7719 as_bad (_("Expected ')'"));
7722 if (md.qp.X_op != O_register)
7724 as_bad (_("Qualifying predicate expected"));
7727 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
7729 as_bad (_("Predicate register expected"));
7741 if (md.qp.X_op == O_register)
7743 as_bad (_("Tag must come before qualifying predicate."));
7747 /* This implements just enough of read_a_source_file in read.c to
7748 recognize labels. */
7749 if (is_name_beginner (*input_line_pointer))
7751 s = input_line_pointer;
7752 c = get_symbol_end ();
7754 else if (LOCAL_LABELS_FB
7755 && ISDIGIT (*input_line_pointer))
7758 while (ISDIGIT (*input_line_pointer))
7759 temp = (temp * 10) + *input_line_pointer++ - '0';
7760 fb_label_instance_inc (temp);
7761 s = fb_label_name (temp, 0);
7762 c = *input_line_pointer;
7771 /* Put ':' back for error messages' sake. */
7772 *input_line_pointer++ = ':';
7773 as_bad (_("Expected ':'"));
7780 /* Put ':' back for error messages' sake. */
7781 *input_line_pointer++ = ':';
7782 if (*input_line_pointer++ != ']')
7784 as_bad (_("Expected ']'"));
7789 as_bad (_("Tag name expected"));
7799 /* Not a valid line. */
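/* Hook invoked whenever a label (or tag) is defined. Code labels and tags
   cannot be given their final value until the enclosing bundle is laid out,
   so they are queued on the current slot and fixed up by emit_one_bundle;
   the names of code entry points are also recorded. */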
7804 ia64_frob_label (struct symbol *sym)
7806 struct label_fix *fix;
7808 /* Tags need special handling since they are not bundle breaks like labels. */
7812 fix = obstack_alloc (¬es, sizeof (*fix));
7814 fix->next = CURR_SLOT.tag_fixups;
7815 fix->dw2_mark_labels = FALSE;
7816 CURR_SLOT.tag_fixups = fix;
7821 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7823 md.last_text_seg = now_seg;
7824 fix = obstack_alloc (¬es, sizeof (*fix));
7826 fix->next = CURR_SLOT.label_fixups;
7827 fix->dw2_mark_labels = dwarf2_loc_mark_labels;
7828 CURR_SLOT.label_fixups = fix;
7830 /* Keep track of how many code entry points we've seen. */
7831 if (md.path == md.maxpaths)
7834 md.entry_labels = (const char **)
7835 xrealloc ((void *) md.entry_labels,
7836 md.maxpaths * sizeof (char *));
7838 md.entry_labels[md.path++] = S_GET_NAME (sym);
7843 /* The HP-UX linker will give unresolved symbol errors for symbols
7844 that are declared but unused. This routine removes declared,
7845 unused symbols from an object. */
7847 ia64_frob_symbol (struct symbol *sym)
7849 if ((S_GET_SEGMENT (sym) == bfd_und_section_ptr && ! symbol_used_p (sym) &&
7850 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT)
7851 || (S_GET_SEGMENT (sym) == bfd_abs_section_ptr
7852 && ! S_IS_EXTERNAL (sym)))
7859 ia64_flush_pending_output (void)
7861 if (!md.keep_pending_output
7862 && bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7864 /* ??? This causes many unnecessary stop bits to be emitted.
7865 Unfortunately, it isn't clear if it is safe to remove this. */
7866 insn_group_break (1, 0, 0);
7867 ia64_flush_insns ();
7871 /* Do ia64-specific expression optimization. All that's done here is
7872 to transform index expressions that are either due to the indexing
7873 of rotating registers or due to the indexing of indirect register sets. */
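/* For example, indexing a ".rotr"-declared register set folds to the
   corresponding plain register, while an indirect access such as "dbr[r2]"
   becomes an O_index expression whose X_op_symbol identifies the register
   file. */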
7876 ia64_optimize_expr (expressionS *l, operatorT op, expressionS *r)
7880 resolve_expression (l);
7881 if (l->X_op == O_register)
7883 unsigned num_regs = l->X_add_number >> 16;
7885 resolve_expression (r);
7888 /* Left side is a .rotX-allocated register. */
7889 if (r->X_op != O_constant)
7891 as_bad (_("Rotating register index must be a non-negative constant"));
7892 r->X_add_number = 0;
7894 else if ((valueT) r->X_add_number >= num_regs)
7896 as_bad (_("Index out of range 0..%u"), num_regs - 1);
7897 r->X_add_number = 0;
7899 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
7902 else if (l->X_add_number >= IND_CPUID && l->X_add_number <= IND_RR)
7904 if (r->X_op != O_register
7905 || r->X_add_number < REG_GR
7906 || r->X_add_number > REG_GR + 127)
7908 as_bad (_("Indirect register index must be a general register"));
7909 r->X_add_number = REG_GR;
7912 l->X_op_symbol = md.indregsym[l->X_add_number - IND_CPUID];
7913 l->X_add_number = r->X_add_number;
7917 as_bad (_("Index can only be applied to rotating or indirect registers"));
7918 /* Fall back to a register whose use has as few side effects as possible,
7919 to minimize subsequent error messages. */
7920 l->X_op = O_register;
7921 l->X_add_number = REG_GR + 3;
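/* Symbol-name hook for the expression parser. Recognizes relocation
   pseudo-functions written with a leading '@' (such as @gprel), register
   and constant names, the inN/locN/outN stack registers, and rotating
   register sets, filling in E accordingly. Returns nonzero if NAME was
   handled. */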
7926 ia64_parse_name (char *name, expressionS *e, char *nextcharP)
7928 struct const_desc *cdesc;
7929 struct dynreg *dr = 0;
7936 enum pseudo_type pseudo_type = PSEUDO_FUNC_NONE;
7938 /* Find what relocation pseudo-function we're dealing with. */
7939 for (idx = 0; idx < NELEMS (pseudo_func); ++idx)
7940 if (pseudo_func[idx].name
7941 && pseudo_func[idx].name[0] == name[1]
7942 && strcmp (pseudo_func[idx].name + 1, name + 2) == 0)
7944 pseudo_type = pseudo_func[idx].type;
7947 switch (pseudo_type)
7949 case PSEUDO_FUNC_RELOC:
7950 end = input_line_pointer;
7951 if (*nextcharP != '(')
7953 as_bad (_("Expected '('"));
7957 ++input_line_pointer;
7959 if (*input_line_pointer != ')')
7961 as_bad (_("Missing ')'"));
7965 ++input_line_pointer;
7967 if (idx == FUNC_SLOTCOUNT_RELOC)
7969 /* @slotcount can accept any expression. Canonicalize. */
7970 e->X_add_symbol = make_expr_symbol (e);
7972 e->X_add_number = 0;
7975 if (e->X_op != O_symbol)
7977 if (e->X_op != O_pseudo_fixup)
7979 as_bad (_("Not a symbolic expression"));
7982 if (idx != FUNC_LT_RELATIVE)
7984 as_bad (_("Illegal combination of relocation functions"));
7987 switch (S_GET_VALUE (e->X_op_symbol))
7989 case FUNC_FPTR_RELATIVE:
7990 idx = FUNC_LT_FPTR_RELATIVE; break;
7991 case FUNC_DTP_MODULE:
7992 idx = FUNC_LT_DTP_MODULE; break;
7993 case FUNC_DTP_RELATIVE:
7994 idx = FUNC_LT_DTP_RELATIVE; break;
7995 case FUNC_TP_RELATIVE:
7996 idx = FUNC_LT_TP_RELATIVE; break;
7998 as_bad (_("Illegal combination of relocation functions"));
8002 /* Make sure gas doesn't get rid of local symbols that are used in relocs. */
8004 e->X_op = O_pseudo_fixup;
8005 e->X_op_symbol = pseudo_func[idx].u.sym;
8007 *nextcharP = *input_line_pointer;
8010 case PSEUDO_FUNC_CONST:
8011 e->X_op = O_constant;
8012 e->X_add_number = pseudo_func[idx].u.ival;
8015 case PSEUDO_FUNC_REG:
8016 e->X_op = O_register;
8017 e->X_add_number = pseudo_func[idx].u.ival;
8026 /* first see if NAME is a known register name: */
8027 sym = hash_find (md.reg_hash, name);
8030 e->X_op = O_register;
8031 e->X_add_number = S_GET_VALUE (sym);
8035 cdesc = hash_find (md.const_hash, name);
8038 e->X_op = O_constant;
8039 e->X_add_number = cdesc->value;
8043 /* check for inN, locN, or outN: */
8048 if (name[1] == 'n' && ISDIGIT (name[2]))
8056 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
8064 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
8075 /* Ignore register numbers with leading zeroes, except zero itself. */
8076 if (dr && (name[idx] != '0' || name[idx + 1] == '\0'))
8078 unsigned long regnum;
8080 /* The name is inN, locN, or outN; parse the register number. */
8081 regnum = strtoul (name + idx, &end, 10);
8082 if (end > name + idx && *end == '\0' && regnum < 96)
8084 if (regnum >= dr->num_regs)
8087 as_bad (_("No current frame"));
8089 as_bad (_("Register number out of range 0..%u"),
8093 e->X_op = O_register;
8094 e->X_add_number = dr->base + regnum;
8099 end = alloca (strlen (name) + 1);
8101 name = ia64_canonicalize_symbol_name (end);
8102 if ((dr = hash_find (md.dynreg_hash, name)))
8104 /* We've got ourselves the name of a rotating register set.
8105 Store the base register number in the low 16 bits of
8106 X_add_number and the size of the register set in the top 16 bits. */
8108 e->X_op = O_register;
8109 e->X_add_number = dr->base | (dr->num_regs << 16);
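/* Worked example (frame sizes hypothetical): after

        alloc r34 = ar.pfs, 2, 3, 1, 0

   in0-in1 name r32-r33, loc0-loc2 name r34-r36 and out0 names r37, so the
   code above maps e.g. "loc2" to dr->base + 2 (r36).  A name declared with
   .rotr is instead found in dynreg_hash and has its base and set size
   packed into X_add_number as described above.  */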
8115 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
8118 ia64_canonicalize_symbol_name (char *name)
8120 size_t len = strlen (name), full = len;
8122 while (len > 0 && name[len - 1] == '#')
8127 as_bad (_("Standalone `#' is illegal"));
8129 else if (len < full - 1)
8130 as_warn (_("Redundant `#' suffix operators"));
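/* Example (symbol name chosen arbitrarily): writing "foo#" forces "foo" to
   be treated as a symbol even when the same name denotes a register or
   register set; the '#' suffix itself is stripped here.  */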
8135 /* Return true if idesc is a conditional branch instruction. This excludes
8136 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
8137 because they always read/write resources regardless of the value of the
8138 qualifying predicate. br.ia must always use p0, and hence is always
8139 taken. Thus this function returns true for branches which can fall
8140 through, and which use no resources if they do fall through. */
8143 is_conditional_branch (struct ia64_opcode *idesc)
8145 /* br is a conditional branch. Everything that starts with br. except
8146 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
8147 Everything that starts with brl is a conditional branch. */
8148 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
8149 && (idesc->name[2] == '\0'
8150 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
8151 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
8152 || idesc->name[2] == 'l'
8153 /* br.cond, br.call, br.clr */
8154 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
8155 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
8156 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
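/* Worked examples for the test above: a bare "br", "br.cond.sptk",
   "br.call.sptk" and "brl.cond.sptk" are all classified as conditional
   branches, while "br.ia", "br.cloop", "br.ctop", "br.cexit", "br.wtop"
   and "br.wexit" are not.  */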
8159 /* Return whether the given opcode is a taken branch. If there's any doubt, returns zero. */
8163 is_taken_branch (struct ia64_opcode *idesc)
8165 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
8166 || strncmp (idesc->name, "br.ia", 5) == 0);
8169 /* Return whether the given opcode is an interruption or rfi. If there's any
8170 doubt, returns zero. */
8173 is_interruption_or_rfi (struct ia64_opcode *idesc)
8175 if (strcmp (idesc->name, "rfi") == 0)
8180 /* Returns the index of the given dependency in the opcode's list of chks, or
8181 -1 if there is no dependency. */
8184 depends_on (int depind, struct ia64_opcode *idesc)
8187 const struct ia64_opcode_dependency *dep = idesc->dependencies;
8188 for (i = 0; i < dep->nchks; i++)
8190 if (depind == DEP (dep->chks[i]))
8196 /* Determine a set of specific resources used for a particular resource
8197 class. Returns the number of specific resources identified. For those
8198 cases which are not determinable statically, the resource returned is marked nonspecific.
8201 Meanings of value in 'NOTE':
8202 1) only read/write when the register number is explicitly encoded in the insn.
8204 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
8205 accesses CFM when qualifying predicate is in the rotating region.
8206 3) general register value is used to specify an indirect register; not
8207 determinable statically.
8208 4) only read the given resource when bits 7:0 of the indirect index
8209 register value do not match the register number of the resource; not
8210 determinable statically.
8211 5) all rules are implementation specific.
8212 6) only when both the index specified by the reader and the index specified
8213 by the writer have the same value in bits 63:61; not determinable
8215 7) only access the specified resource when the corresponding mask bit is set.
8217 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
8218 only read when these insns reference FR2-31
8219 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
8220 written when these insns write FR32-127
8221 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the insn.
8223 11) The target predicates are written independently of PR[qp], but source
8224 registers are only read if PR[qp] is true. Since the state of PR[qp]
8225 cannot statically be determined, all source registers are marked used.
8226 12) This insn only reads the specified predicate register when that
8227 register is the PR[qp].
8228 13) This reference to ld-c only applies to the GR whose value is loaded
8229 with data returned from memory, not the post-incremented address register.
8230 14) The RSE resource includes the implementation-specific RSE internal
8231 state resources. At least one (and possibly more) of these resources are
8232 read by each instruction listed in IC:rse-readers. At least one (and
8233 possibly more) of these resources are written by each insn listed in IC:rse-writers.
8235 15+16) Represents reserved instructions, which the assembler does not generate.
8237 17) CR[TPR] has a RAW dependency only between mov-to-CR-TPR and
8238 mov-to-PSR-l or ssm instructions that set PSR.i, PSR.pp or PSR.up.
8240 Memory resources (i.e. locations in memory) are *not* marked or tracked by
8241 this code; there are no dependency violations based on memory access.
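/* Added example of the kind of violation this machinery detects (register
   numbers chosen arbitrarily):

        mov r4 = r5
        add r6 = r4, r7      // RAW hazard: r4 is read in the same group
        ;;                   // a stop here would remove the dependency

   specify_resource() below reduces each such case to a list of specific
   resources (here GR4) that the dependency checker can compare against.  */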
8244 #define MAX_SPECS 256
8249 specify_resource (const struct ia64_dependency *dep,
8250 struct ia64_opcode *idesc,
8251 /* is this a DV chk or a DV reg? */
8253 /* returned specific resources */
8254 struct rsrc specs[MAX_SPECS],
8255 /* resource note for this insn's usage */
8257 /* which execution path to examine */
8265 if (dep->mode == IA64_DV_WAW
8266 || (dep->mode == IA64_DV_RAW && type == DV_REG)
8267 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
8270 /* template for any resources we identify */
8271 tmpl.dependency = dep;
8273 tmpl.insn_srlz = tmpl.data_srlz = 0;
8274 tmpl.qp_regno = CURR_SLOT.qp_regno;
8275 tmpl.link_to_qp_branch = 1;
8276 tmpl.mem_offset.hint = 0;
8277 tmpl.mem_offset.offset = 0;
8278 tmpl.mem_offset.base = 0;
8281 tmpl.cmp_type = CMP_NONE;
8288 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
8289 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
8290 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
8292 /* we don't need to track these */
8293 if (dep->semantics == IA64_DVS_NONE)
8296 switch (dep->specifier)
8301 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8303 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8304 if (regno >= 0 && regno <= 7)
8306 specs[count] = tmpl;
8307 specs[count++].index = regno;
8313 for (i = 0; i < 8; i++)
8315 specs[count] = tmpl;
8316 specs[count++].index = i;
8325 case IA64_RS_AR_UNAT:
8326 /* This is a mov =AR or mov AR= instruction. */
8327 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8329 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8330 if (regno == AR_UNAT)
8332 specs[count++] = tmpl;
8337 /* This is a spill/fill, or other instruction that modifies the UNAT register. */
8340 /* Unless we can determine the specific bits used, mark the whole
8341 thing; bits 8:3 of the memory address indicate the bit used in
8342 UNAT. The .mem.offset hint may be used to eliminate a small
8343 subset of conflicts. */
8344 specs[count] = tmpl;
8345 if (md.mem_offset.hint)
8348 fprintf (stderr, " Using hint for spill/fill\n");
8349 /* The index isn't actually used, just set it to something
8350 approximating the bit index. */
8351 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
8352 specs[count].mem_offset.hint = 1;
8353 specs[count].mem_offset.offset = md.mem_offset.offset;
8354 specs[count++].mem_offset.base = md.mem_offset.base;
8358 specs[count++].specific = 0;
8366 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8368 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8369 if ((regno >= 8 && regno <= 15)
8370 || (regno >= 20 && regno <= 23)
8371 || (regno >= 31 && regno <= 39)
8372 || (regno >= 41 && regno <= 47)
8373 || (regno >= 67 && regno <= 111))
8375 specs[count] = tmpl;
8376 specs[count++].index = regno;
8389 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8391 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8392 if ((regno >= 48 && regno <= 63)
8393 || (regno >= 112 && regno <= 127))
8395 specs[count] = tmpl;
8396 specs[count++].index = regno;
8402 for (i = 48; i < 64; i++)
8404 specs[count] = tmpl;
8405 specs[count++].index = i;
8407 for (i = 112; i < 128; i++)
8409 specs[count] = tmpl;
8410 specs[count++].index = i;
8428 for (i = 0; i < idesc->num_outputs; i++)
8429 if (idesc->operands[i] == IA64_OPND_B1
8430 || idesc->operands[i] == IA64_OPND_B2)
8432 specs[count] = tmpl;
8433 specs[count++].index =
8434 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8439 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8440 if (idesc->operands[i] == IA64_OPND_B1
8441 || idesc->operands[i] == IA64_OPND_B2)
8443 specs[count] = tmpl;
8444 specs[count++].index =
8445 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8451 case IA64_RS_CPUID: /* four or more registers */
8454 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
8456 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8457 if (regno >= 0 && regno < NELEMS (gr_values)
8460 specs[count] = tmpl;
8461 specs[count++].index = gr_values[regno].value & 0xFF;
8465 specs[count] = tmpl;
8466 specs[count++].specific = 0;
8476 case IA64_RS_DBR: /* four or more registers */
8479 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
8481 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8482 if (regno >= 0 && regno < NELEMS (gr_values)
8485 specs[count] = tmpl;
8486 specs[count++].index = gr_values[regno].value & 0xFF;
8490 specs[count] = tmpl;
8491 specs[count++].specific = 0;
8495 else if (note == 0 && !rsrc_write)
8497 specs[count] = tmpl;
8498 specs[count++].specific = 0;
8506 case IA64_RS_IBR: /* four or more registers */
8509 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
8511 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8512 if (regno >= 0 && regno < NELEMS (gr_values)
8515 specs[count] = tmpl;
8516 specs[count++].index = gr_values[regno].value & 0xFF;
8520 specs[count] = tmpl;
8521 specs[count++].specific = 0;
8534 /* These are implementation specific. Force all references to
8535 conflict with all other references. */
8536 specs[count] = tmpl;
8537 specs[count++].specific = 0;
8545 case IA64_RS_PKR: /* 16 or more registers */
8546 if (note == 3 || note == 4)
8548 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
8550 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8551 if (regno >= 0 && regno < NELEMS (gr_values)
8556 specs[count] = tmpl;
8557 specs[count++].index = gr_values[regno].value & 0xFF;
8560 for (i = 0; i < NELEMS (gr_values); i++)
8562 /* Uses all registers *except* the one in R3. */
8563 if ((unsigned)i != (gr_values[regno].value & 0xFF))
8565 specs[count] = tmpl;
8566 specs[count++].index = i;
8572 specs[count] = tmpl;
8573 specs[count++].specific = 0;
8580 specs[count] = tmpl;
8581 specs[count++].specific = 0;
8585 case IA64_RS_PMC: /* four or more registers */
8588 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
8589 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
8592 int reg_index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
8594 int regno = CURR_SLOT.opnd[reg_index].X_add_number - REG_GR;
8595 if (regno >= 0 && regno < NELEMS (gr_values)
8598 specs[count] = tmpl;
8599 specs[count++].index = gr_values[regno].value & 0xFF;
8603 specs[count] = tmpl;
8604 specs[count++].specific = 0;
8614 case IA64_RS_PMD: /* four or more registers */
8617 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
8619 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8620 if (regno >= 0 && regno < NELEMS (gr_values)
8623 specs[count] = tmpl;
8624 specs[count++].index = gr_values[regno].value & 0xFF;
8628 specs[count] = tmpl;
8629 specs[count++].specific = 0;
8639 case IA64_RS_RR: /* eight registers */
8642 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
8644 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8645 if (regno >= 0 && regno < NELEMS (gr_values)
8648 specs[count] = tmpl;
8649 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
8653 specs[count] = tmpl;
8654 specs[count++].specific = 0;
8658 else if (note == 0 && !rsrc_write)
8660 specs[count] = tmpl;
8661 specs[count++].specific = 0;
8669 case IA64_RS_CR_IRR:
8672 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
8673 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
8675 && idesc->operands[1] == IA64_OPND_CR3
8678 for (i = 0; i < 4; i++)
8680 specs[count] = tmpl;
8681 specs[count++].index = CR_IRR0 + i;
8687 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8688 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8690 && regno <= CR_IRR3)
8692 specs[count] = tmpl;
8693 specs[count++].index = regno;
8702 case IA64_RS_CR_IIB:
8709 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8710 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8711 && (regno == CR_IIB0 || regno == CR_IIB1))
8713 specs[count] = tmpl;
8714 specs[count++].index = regno;
8719 case IA64_RS_CR_LRR:
8726 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8727 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8728 && (regno == CR_LRR0 || regno == CR_LRR1))
8730 specs[count] = tmpl;
8731 specs[count++].index = regno;
8739 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8741 specs[count] = tmpl;
8742 specs[count++].index =
8743 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8755 if (idesc->operands[!rsrc_write] == IA64_OPND_DAHR3)
8757 specs[count] = tmpl;
8758 specs[count++].index =
8759 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_DAHR;
8774 else if (rsrc_write)
8776 if (dep->specifier == IA64_RS_FRb
8777 && idesc->operands[0] == IA64_OPND_F1)
8779 specs[count] = tmpl;
8780 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
8785 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8787 if (idesc->operands[i] == IA64_OPND_F2
8788 || idesc->operands[i] == IA64_OPND_F3
8789 || idesc->operands[i] == IA64_OPND_F4)
8791 specs[count] = tmpl;
8792 specs[count++].index =
8793 CURR_SLOT.opnd[i].X_add_number - REG_FR;
8802 /* This reference applies only to the GR whose value is loaded with
8803 data returned from memory. */
8804 specs[count] = tmpl;
8805 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8811 for (i = 0; i < idesc->num_outputs; i++)
8812 if (idesc->operands[i] == IA64_OPND_R1
8813 || idesc->operands[i] == IA64_OPND_R2
8814 || idesc->operands[i] == IA64_OPND_R3)
8816 specs[count] = tmpl;
8817 specs[count++].index =
8818 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8820 if (idesc->flags & IA64_OPCODE_POSTINC)
8821 for (i = 0; i < NELEMS (idesc->operands); i++)
8822 if (idesc->operands[i] == IA64_OPND_MR3)
8824 specs[count] = tmpl;
8825 specs[count++].index =
8826 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8831 /* Look for anything that reads a GR. */
8832 for (i = 0; i < NELEMS (idesc->operands); i++)
8834 if (idesc->operands[i] == IA64_OPND_MR3
8835 || idesc->operands[i] == IA64_OPND_CPUID_R3
8836 || idesc->operands[i] == IA64_OPND_DBR_R3
8837 || idesc->operands[i] == IA64_OPND_IBR_R3
8838 || idesc->operands[i] == IA64_OPND_MSR_R3
8839 || idesc->operands[i] == IA64_OPND_PKR_R3
8840 || idesc->operands[i] == IA64_OPND_PMC_R3
8841 || idesc->operands[i] == IA64_OPND_PMD_R3
8842 || idesc->operands[i] == IA64_OPND_DAHR_R3
8843 || idesc->operands[i] == IA64_OPND_RR_R3
8844 || ((i >= idesc->num_outputs)
8845 && (idesc->operands[i] == IA64_OPND_R1
8846 || idesc->operands[i] == IA64_OPND_R2
8847 || idesc->operands[i] == IA64_OPND_R3
8848 /* addl source register. */
8849 || idesc->operands[i] == IA64_OPND_R3_2)))
8851 specs[count] = tmpl;
8852 specs[count++].index =
8853 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8864 /* This is the same as IA64_RS_PRr, except that the register range is
8865 from 1 - 15, and there are no rotating register reads/writes here. */
8869 for (i = 1; i < 16; i++)
8871 specs[count] = tmpl;
8872 specs[count++].index = i;
8878 /* Mark only those registers indicated by the mask. */
8881 mask = CURR_SLOT.opnd[2].X_add_number;
8882 for (i = 1; i < 16; i++)
8883 if (mask & ((valueT) 1 << i))
8885 specs[count] = tmpl;
8886 specs[count++].index = i;
8894 else if (note == 11) /* note 11 implies note 1 as well */
8898 for (i = 0; i < idesc->num_outputs; i++)
8900 if (idesc->operands[i] == IA64_OPND_P1
8901 || idesc->operands[i] == IA64_OPND_P2)
8903 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8904 if (regno >= 1 && regno < 16)
8906 specs[count] = tmpl;
8907 specs[count++].index = regno;
8917 else if (note == 12)
8919 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8921 specs[count] = tmpl;
8922 specs[count++].index = CURR_SLOT.qp_regno;
8929 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8930 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8931 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8932 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8934 if ((idesc->operands[0] == IA64_OPND_P1
8935 || idesc->operands[0] == IA64_OPND_P2)
8936 && p1 >= 1 && p1 < 16)
8938 specs[count] = tmpl;
8939 specs[count].cmp_type =
8940 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8941 specs[count++].index = p1;
8943 if ((idesc->operands[1] == IA64_OPND_P1
8944 || idesc->operands[1] == IA64_OPND_P2)
8945 && p2 >= 1 && p2 < 16)
8947 specs[count] = tmpl;
8948 specs[count].cmp_type =
8949 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8950 specs[count++].index = p2;
8955 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8957 specs[count] = tmpl;
8958 specs[count++].index = CURR_SLOT.qp_regno;
8960 if (idesc->operands[1] == IA64_OPND_PR)
8962 for (i = 1; i < 16; i++)
8964 specs[count] = tmpl;
8965 specs[count++].index = i;
8976 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8977 simplified cases of this. */
8981 for (i = 16; i < 63; i++)
8983 specs[count] = tmpl;
8984 specs[count++].index = i;
8990 /* Mark only those registers indicated by the mask. */
8992 && idesc->operands[0] == IA64_OPND_PR)
8994 mask = CURR_SLOT.opnd[2].X_add_number;
8995 if (mask & ((valueT) 1 << 16))
8996 for (i = 16; i < 63; i++)
8998 specs[count] = tmpl;
8999 specs[count++].index = i;
9003 && idesc->operands[0] == IA64_OPND_PR_ROT)
9005 for (i = 16; i < 63; i++)
9007 specs[count] = tmpl;
9008 specs[count++].index = i;
9016 else if (note == 11) /* note 11 implies note 1 as well */
9020 for (i = 0; i < idesc->num_outputs; i++)
9022 if (idesc->operands[i] == IA64_OPND_P1
9023 || idesc->operands[i] == IA64_OPND_P2)
9025 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9026 if (regno >= 16 && regno < 63)
9028 specs[count] = tmpl;
9029 specs[count++].index = regno;
9039 else if (note == 12)
9041 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
9043 specs[count] = tmpl;
9044 specs[count++].index = CURR_SLOT.qp_regno;
9051 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9052 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9053 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9054 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9056 if ((idesc->operands[0] == IA64_OPND_P1
9057 || idesc->operands[0] == IA64_OPND_P2)
9058 && p1 >= 16 && p1 < 63)
9060 specs[count] = tmpl;
9061 specs[count].cmp_type =
9062 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9063 specs[count++].index = p1;
9065 if ((idesc->operands[1] == IA64_OPND_P1
9066 || idesc->operands[1] == IA64_OPND_P2)
9067 && p2 >= 16 && p2 < 63)
9069 specs[count] = tmpl;
9070 specs[count].cmp_type =
9071 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9072 specs[count++].index = p2;
9077 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
9079 specs[count] = tmpl;
9080 specs[count++].index = CURR_SLOT.qp_regno;
9082 if (idesc->operands[1] == IA64_OPND_PR)
9084 for (i = 16; i < 63; i++)
9086 specs[count] = tmpl;
9087 specs[count++].index = i;
9099 /* Verify that the instruction is using the PSR bit indicated in dep->regindex. */
9103 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
9105 if (dep->regindex < 6)
9107 specs[count++] = tmpl;
9110 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
9112 if (dep->regindex < 32
9113 || dep->regindex == 35
9114 || dep->regindex == 36
9115 || (!rsrc_write && dep->regindex == PSR_CPL))
9117 specs[count++] = tmpl;
9120 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
9122 if (dep->regindex < 32
9123 || dep->regindex == 35
9124 || dep->regindex == 36
9125 || (rsrc_write && dep->regindex == PSR_CPL))
9127 specs[count++] = tmpl;
9132 /* Several PSR bits have very specific dependencies. */
9133 switch (dep->regindex)
9136 specs[count++] = tmpl;
9141 specs[count++] = tmpl;
9145 /* Only certain CR accesses use PSR.ic */
9146 if (idesc->operands[0] == IA64_OPND_CR3
9147 || idesc->operands[1] == IA64_OPND_CR3)
9150 ((idesc->operands[0] == IA64_OPND_CR3)
9153 CURR_SLOT.opnd[reg_index].X_add_number - REG_CR;
9170 specs[count++] = tmpl;
9179 specs[count++] = tmpl;
9183 /* Only some AR accesses use cpl */
9184 if (idesc->operands[0] == IA64_OPND_AR3
9185 || idesc->operands[1] == IA64_OPND_AR3)
9188 ((idesc->operands[0] == IA64_OPND_AR3)
9191 CURR_SLOT.opnd[reg_index].X_add_number - REG_AR;
9198 && regno <= AR_K7))))
9200 specs[count++] = tmpl;
9205 specs[count++] = tmpl;
9215 if (idesc->operands[0] == IA64_OPND_IMMU24)
9217 mask = CURR_SLOT.opnd[0].X_add_number;
9223 if (mask & ((valueT) 1 << dep->regindex))
9225 specs[count++] = tmpl;
9230 int min = dep->regindex == PSR_DFL ? 2 : 32;
9231 int max = dep->regindex == PSR_DFL ? 31 : 127;
9232 /* dfh is read on FR32-127; dfl is read on FR2-31 */
9233 for (i = 0; i < NELEMS (idesc->operands); i++)
9235 if (idesc->operands[i] == IA64_OPND_F1
9236 || idesc->operands[i] == IA64_OPND_F2
9237 || idesc->operands[i] == IA64_OPND_F3
9238 || idesc->operands[i] == IA64_OPND_F4)
9240 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9241 if (reg >= min && reg <= max)
9243 specs[count++] = tmpl;
9250 int min = dep->regindex == PSR_MFL ? 2 : 32;
9251 int max = dep->regindex == PSR_MFL ? 31 : 127;
9252 /* mfh is read on writes to FR32-127; mfl is read on writes to FR2-31. */
9254 for (i = 0; i < idesc->num_outputs; i++)
9256 if (idesc->operands[i] == IA64_OPND_F1)
9258 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9259 if (reg >= min && reg <= max)
9261 specs[count++] = tmpl;
9266 else if (note == 10)
9268 for (i = 0; i < NELEMS (idesc->operands); i++)
9270 if (idesc->operands[i] == IA64_OPND_R1
9271 || idesc->operands[i] == IA64_OPND_R2
9272 || idesc->operands[i] == IA64_OPND_R3)
9274 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9275 if (regno >= 16 && regno <= 31)
9277 specs[count++] = tmpl;
9288 case IA64_RS_AR_FPSR:
9289 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
9291 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9292 if (regno == AR_FPSR)
9294 specs[count++] = tmpl;
9299 specs[count++] = tmpl;
9304 /* Handle all AR[REG] resources */
9305 if (note == 0 || note == 1)
9307 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9308 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
9309 && regno == dep->regindex)
9311 specs[count++] = tmpl;
9313 /* other AR[REG] resources may be affected by AR accesses */
9314 else if (idesc->operands[0] == IA64_OPND_AR3)
9317 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
9318 switch (dep->regindex)
9324 if (regno == AR_BSPSTORE)
9326 specs[count++] = tmpl;
9330 (regno == AR_BSPSTORE
9331 || regno == AR_RNAT))
9333 specs[count++] = tmpl;
9338 else if (idesc->operands[1] == IA64_OPND_AR3)
9341 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
9342 switch (dep->regindex)
9347 if (regno == AR_BSPSTORE || regno == AR_RNAT)
9349 specs[count++] = tmpl;
9356 specs[count++] = tmpl;
9366 /* Handle all CR[REG] resources.
9367 ??? FIXME: The rule 17 isn't really handled correctly. */
9368 if (note == 0 || note == 1 || note == 17)
9370 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
9372 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
9373 if (regno == dep->regindex)
9375 specs[count++] = tmpl;
9377 else if (!rsrc_write)
9379 /* Reads from CR[IVR] affect other resources. */
9380 if (regno == CR_IVR)
9382 if ((dep->regindex >= CR_IRR0
9383 && dep->regindex <= CR_IRR3)
9384 || dep->regindex == CR_TPR)
9386 specs[count++] = tmpl;
9393 specs[count++] = tmpl;
9402 case IA64_RS_INSERVICE:
9403 /* look for write of EOI (67) or read of IVR (65) */
9404 if ((idesc->operands[0] == IA64_OPND_CR3
9405 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
9406 || (idesc->operands[1] == IA64_OPND_CR3
9407 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
9409 specs[count++] = tmpl;
9416 specs[count++] = tmpl;
9427 specs[count++] = tmpl;
9431 /* Check if any of the registers accessed are in the rotating region.
9432 mov to/from pr accesses CFM only when qp_regno is in the rotating region. */
9434 for (i = 0; i < NELEMS (idesc->operands); i++)
9436 if (idesc->operands[i] == IA64_OPND_R1
9437 || idesc->operands[i] == IA64_OPND_R2
9438 || idesc->operands[i] == IA64_OPND_R3)
9440 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9441 /* Assumes that md.rot.num_regs is always valid */
9442 if (md.rot.num_regs > 0
9444 && num < 31 + md.rot.num_regs)
9446 specs[count] = tmpl;
9447 specs[count++].specific = 0;
9450 else if (idesc->operands[i] == IA64_OPND_F1
9451 || idesc->operands[i] == IA64_OPND_F2
9452 || idesc->operands[i] == IA64_OPND_F3
9453 || idesc->operands[i] == IA64_OPND_F4)
9455 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9458 specs[count] = tmpl;
9459 specs[count++].specific = 0;
9462 else if (idesc->operands[i] == IA64_OPND_P1
9463 || idesc->operands[i] == IA64_OPND_P2)
9465 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
9468 specs[count] = tmpl;
9469 specs[count++].specific = 0;
9473 if (CURR_SLOT.qp_regno > 15)
9475 specs[count] = tmpl;
9476 specs[count++].specific = 0;
9481 /* This is the same as IA64_RS_PRr, except simplified to account for
9482 the fact that there is only one register. */
9486 specs[count++] = tmpl;
9491 if (idesc->operands[2] == IA64_OPND_IMM17)
9492 mask = CURR_SLOT.opnd[2].X_add_number;
9493 if (mask & ((valueT) 1 << 63))
9494 specs[count++] = tmpl;
9496 else if (note == 11)
9498 if ((idesc->operands[0] == IA64_OPND_P1
9499 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
9500 || (idesc->operands[1] == IA64_OPND_P2
9501 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
9503 specs[count++] = tmpl;
9506 else if (note == 12)
9508 if (CURR_SLOT.qp_regno == 63)
9510 specs[count++] = tmpl;
9517 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9518 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9519 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9520 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9523 && (idesc->operands[0] == IA64_OPND_P1
9524 || idesc->operands[0] == IA64_OPND_P2))
9526 specs[count] = tmpl;
9527 specs[count++].cmp_type =
9528 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9531 && (idesc->operands[1] == IA64_OPND_P1
9532 || idesc->operands[1] == IA64_OPND_P2))
9534 specs[count] = tmpl;
9535 specs[count++].cmp_type =
9536 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9541 if (CURR_SLOT.qp_regno == 63)
9543 specs[count++] = tmpl;
9554 /* FIXME we can identify some individual RSE written resources, but RSE
9555 read resources have not yet been completely identified, so for now
9556 treat RSE as a single resource */
9557 if (strncmp (idesc->name, "mov", 3) == 0)
9561 if (idesc->operands[0] == IA64_OPND_AR3
9562 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
9564 specs[count++] = tmpl;
9569 if (idesc->operands[0] == IA64_OPND_AR3)
9571 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
9572 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
9574 specs[count++] = tmpl;
9577 else if (idesc->operands[1] == IA64_OPND_AR3)
9579 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
9580 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
9581 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
9583 specs[count++] = tmpl;
9590 specs[count++] = tmpl;
9595 /* FIXME -- do any of these need to be non-specific? */
9596 specs[count++] = tmpl;
9600 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
9607 /* Clear branch flags on marked resources. This breaks the link between the
9608 QP of the marking instruction and a subsequent branch on the same QP. */
9611 clear_qp_branch_flag (valueT mask)
9614 for (i = 0; i < regdepslen; i++)
9616 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
9617 if ((bit & mask) != 0)
9619 regdeps[i].link_to_qp_branch = 0;
9624 /* MASK contains 2 and only 2 PRs which are mutually exclusive. Remove
9625 any mutexes which contain one of the PRs and create new ones when needed. */
9629 update_qp_mutex (valueT mask)
9635 while (i < qp_mutexeslen)
9637 if ((qp_mutexes[i].prmask & mask) != 0)
9639 /* If it destroys and creates the same mutex, do nothing. */
9640 if (qp_mutexes[i].prmask == mask
9641 && qp_mutexes[i].path == md.path)
9652 fprintf (stderr, " Clearing mutex relation");
9653 print_prmask (qp_mutexes[i].prmask);
9654 fprintf (stderr, "\n");
9657 /* Deal with an old mutex containing 3 or more PRs only if the new
9658 mutex is on the same execution path as it.
9660 FIXME: The 3+ mutex support is incomplete.
9661 dot_pred_rel () may be a better place to fix it. */
9662 if (qp_mutexes[i].path == md.path)
9664 /* If it is a proper subset of the mutex, create a new mutex. */
9667 && (qp_mutexes[i].prmask & mask) == mask)
9670 qp_mutexes[i].prmask &= ~mask;
9671 if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
9673 /* Modify the mutex if there is more than one PR left. */
9681 /* Remove the mutex. */
9682 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9690 add_qp_mutex (mask);
9695 /* Remove any mutexes which contain any of the PRs indicated in the mask.
9697 Any change to a PR clears the mutex relations which include that PR. */
9700 clear_qp_mutex (valueT mask)
9705 while (i < qp_mutexeslen)
9707 if ((qp_mutexes[i].prmask & mask) != 0)
9711 fprintf (stderr, " Clearing mutex relation");
9712 print_prmask (qp_mutexes[i].prmask);
9713 fprintf (stderr, "\n");
9715 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9722 /* Clear implies relations which contain PRs in the given masks.
9723 P1_MASK indicates the source of the implies relation, while P2_MASK
9724 indicates the implied PR. */
9727 clear_qp_implies (valueT p1_mask, valueT p2_mask)
9732 while (i < qp_implieslen)
9734 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
9735 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
9738 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
9739 qp_implies[i].p1, qp_implies[i].p2);
9740 qp_implies[i] = qp_implies[--qp_implieslen];
9747 /* Add the PRs specified to the list of implied relations. */
9750 add_qp_imply (int p1, int p2)
9756 /* p0 is not meaningful here. */
9757 if (p1 == 0 || p2 == 0)
9763 /* If it exists already, ignore it. */
9764 for (i = 0; i < qp_implieslen; i++)
9766 if (qp_implies[i].p1 == p1
9767 && qp_implies[i].p2 == p2
9768 && qp_implies[i].path == md.path
9769 && !qp_implies[i].p2_branched)
9773 if (qp_implieslen == qp_impliestotlen)
9775 qp_impliestotlen += 20;
9776 qp_implies = (struct qp_imply *)
9777 xrealloc ((void *) qp_implies,
9778 qp_impliestotlen * sizeof (struct qp_imply));
9781 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
9782 qp_implies[qp_implieslen].p1 = p1;
9783 qp_implies[qp_implieslen].p2 = p2;
9784 qp_implies[qp_implieslen].path = md.path;
9785 qp_implies[qp_implieslen++].p2_branched = 0;
9787 /* Add in the implied transitive relations; for everything that p2 implies,
9788 make p1 imply that, too; for everything that implies p1, make it imply p2 as well. */
9790 for (i = 0; i < qp_implieslen; i++)
9792 if (qp_implies[i].p1 == p2)
9793 add_qp_imply (p1, qp_implies[i].p2);
9794 if (qp_implies[i].p2 == p1)
9795 add_qp_imply (qp_implies[i].p1, p2);
9797 /* Add in mutex relations implied by this implies relation; for each mutex
9798 relation containing p2, duplicate it and replace p2 with p1. */
9799 bit = (valueT) 1 << p1;
9800 mask = (valueT) 1 << p2;
9801 for (i = 0; i < qp_mutexeslen; i++)
9803 if (qp_mutexes[i].prmask & mask)
9804 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
9808 /* Add the PRs specified in the mask to the mutex list; this means that only
9809 one of the PRs can be true at any time. PR0 should never be included in the mask. */
9813 add_qp_mutex (valueT mask)
9818 if (qp_mutexeslen == qp_mutexestotlen)
9820 qp_mutexestotlen += 20;
9821 qp_mutexes = (struct qpmutex *)
9822 xrealloc ((void *) qp_mutexes,
9823 qp_mutexestotlen * sizeof (struct qpmutex));
9827 fprintf (stderr, " Registering mutex on");
9828 print_prmask (mask);
9829 fprintf (stderr, "\n");
9831 qp_mutexes[qp_mutexeslen].path = md.path;
9832 qp_mutexes[qp_mutexeslen++].prmask = mask;
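/* Illustrative example (predicates and registers chosen arbitrarily):

        cmp.eq p6, p7 = r8, r9          // makes {p6,p7} a mutex pair
   (p6) cmp.eq.unc p8, p9 = r10, r11    // adds mutex {p8,p9} and records
                                        // that p8 and p9 each imply p6

   These recorded relations are what later lets apparently conflicting
   uses under p6 and p7 (or p8 and p9) pass without a DV warning.  */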
9836 has_suffix_p (const char *name, const char *suffix)
9838 size_t namelen = strlen (name);
9839 size_t sufflen = strlen (suffix);
9841 if (namelen <= sufflen)
9843 return strcmp (name + namelen - sufflen, suffix) == 0;
9847 clear_register_values (void)
9851 fprintf (stderr, " Clearing register values\n");
9852 for (i = 1; i < NELEMS (gr_values); i++)
9853 gr_values[i].known = 0;
9856 /* Keep track of register values/changes which affect DV tracking.
9858 optimization note: should add a flag to classes of insns where otherwise we
9859 have to examine a group of strings to identify them. */
9862 note_register_values (struct ia64_opcode *idesc)
9864 valueT qp_changemask = 0;
9867 /* Invalidate values for registers being written to. */
9868 for (i = 0; i < idesc->num_outputs; i++)
9870 if (idesc->operands[i] == IA64_OPND_R1
9871 || idesc->operands[i] == IA64_OPND_R2
9872 || idesc->operands[i] == IA64_OPND_R3)
9874 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9875 if (regno > 0 && regno < NELEMS (gr_values))
9876 gr_values[regno].known = 0;
9878 else if (idesc->operands[i] == IA64_OPND_R3_2)
9880 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9881 if (regno > 0 && regno < 4)
9882 gr_values[regno].known = 0;
9884 else if (idesc->operands[i] == IA64_OPND_P1
9885 || idesc->operands[i] == IA64_OPND_P2)
9887 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9888 qp_changemask |= (valueT) 1 << regno;
9890 else if (idesc->operands[i] == IA64_OPND_PR)
9892 if (idesc->operands[2] & (valueT) 0x10000)
9893 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
9895 qp_changemask = idesc->operands[2];
9898 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
9900 if (idesc->operands[1] & ((valueT) 1 << 43))
9901 qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
9903 qp_changemask = idesc->operands[1];
9904 qp_changemask &= ~(valueT) 0xFFFF;
9909 /* Always clear qp branch flags on any PR change. */
9910 /* FIXME there may be exceptions for certain compares. */
9911 clear_qp_branch_flag (qp_changemask);
9913 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
9914 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
9916 qp_changemask |= ~(valueT) 0xFFFF;
9917 if (strcmp (idesc->name, "clrrrb.pr") != 0)
9919 for (i = 32; i < 32 + md.rot.num_regs; i++)
9920 gr_values[i].known = 0;
9922 clear_qp_mutex (qp_changemask);
9923 clear_qp_implies (qp_changemask, qp_changemask);
9925 /* After a call, all register values are undefined, except those marked as "safe across calls". */
9927 else if (strncmp (idesc->name, "br.call", 6) == 0
9928 || strncmp (idesc->name, "brl.call", 7) == 0)
9930 /* FIXME keep GR values which are marked as "safe_across_calls" */
9931 clear_register_values ();
9932 clear_qp_mutex (~qp_safe_across_calls);
9933 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
9934 clear_qp_branch_flag (~qp_safe_across_calls);
9936 else if (is_interruption_or_rfi (idesc)
9937 || is_taken_branch (idesc))
9939 clear_register_values ();
9940 clear_qp_mutex (~(valueT) 0);
9941 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
9943 /* Look for mutex and implies relations. */
9944 else if ((idesc->operands[0] == IA64_OPND_P1
9945 || idesc->operands[0] == IA64_OPND_P2)
9946 && (idesc->operands[1] == IA64_OPND_P1
9947 || idesc->operands[1] == IA64_OPND_P2))
9949 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9950 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9951 valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
9952 valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;
9954 /* If both PRs are PR0, we can't really do anything. */
9955 if (p1 == 0 && p2 == 0)
9958 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
9960 /* In general, clear mutexes and implies which include P1 or P2,
9961 with the following exceptions. */
9962 else if (has_suffix_p (idesc->name, ".or.andcm")
9963 || has_suffix_p (idesc->name, ".and.orcm"))
9965 clear_qp_implies (p2mask, p1mask);
9967 else if (has_suffix_p (idesc->name, ".andcm")
9968 || has_suffix_p (idesc->name, ".and"))
9970 clear_qp_implies (0, p1mask | p2mask);
9972 else if (has_suffix_p (idesc->name, ".orcm")
9973 || has_suffix_p (idesc->name, ".or"))
9975 clear_qp_mutex (p1mask | p2mask);
9976 clear_qp_implies (p1mask | p2mask, 0);
9982 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9984 /* If one of the PRs is PR0, we call clear_qp_mutex. */
9985 if (p1 == 0 || p2 == 0)
9986 clear_qp_mutex (p1mask | p2mask);
9988 added = update_qp_mutex (p1mask | p2mask);
9990 if (CURR_SLOT.qp_regno == 0
9991 || has_suffix_p (idesc->name, ".unc"))
9993 if (added == 0 && p1 && p2)
9994 add_qp_mutex (p1mask | p2mask);
9995 if (CURR_SLOT.qp_regno != 0)
9998 add_qp_imply (p1, CURR_SLOT.qp_regno);
10000 add_qp_imply (p2, CURR_SLOT.qp_regno);
10005 /* Look for mov imm insns into GRs. */
10006 else if (idesc->operands[0] == IA64_OPND_R1
10007 && (idesc->operands[1] == IA64_OPND_IMM22
10008 || idesc->operands[1] == IA64_OPND_IMMU64)
10009 && CURR_SLOT.opnd[1].X_op == O_constant
10010 && (strcmp (idesc->name, "mov") == 0
10011 || strcmp (idesc->name, "movl") == 0))
10013 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
10014 if (regno > 0 && regno < NELEMS (gr_values))
10016 gr_values[regno].known = 1;
10017 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
10018 gr_values[regno].path = md.path;
10021 fprintf (stderr, " Know gr%d = ", regno);
10022 fprintf_vma (stderr, gr_values[regno].value);
10023 fputs ("\n", stderr);
10027 /* Look for dep.z imm insns. */
10028 else if (idesc->operands[0] == IA64_OPND_R1
10029 && idesc->operands[1] == IA64_OPND_IMM8
10030 && strcmp (idesc->name, "dep.z") == 0)
10032 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
10033 if (regno > 0 && regno < NELEMS (gr_values))
10035 valueT value = CURR_SLOT.opnd[1].X_add_number;
10037 if (CURR_SLOT.opnd[3].X_add_number < 64)
10038 value &= ((valueT)1 << CURR_SLOT.opnd[3].X_add_number) - 1;
10039 value <<= CURR_SLOT.opnd[2].X_add_number;
10040 gr_values[regno].known = 1;
10041 gr_values[regno].value = value;
10042 gr_values[regno].path = md.path;
10045 fprintf (stderr, " Know gr%d = ", regno);
10046 fprintf_vma (stderr, gr_values[regno].value);
10047 fputs ("\n", stderr);
10053 clear_qp_mutex (qp_changemask);
10054 clear_qp_implies (qp_changemask, qp_changemask);
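/* Added example of why GR values are worth tracking (register numbers and
   the constant are arbitrary):

        mov r4 = 2
        ;;
        mov dbr[r4] = r8     // indirect access resolves to DBR[2]

   Because r4 is known to hold 2, the DBR case in specify_resource() can
   record the specific DBR instead of a nonspecific "any DBR" conflict.  */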
10058 /* Return whether the given predicate registers are currently mutex. */
10061 qp_mutex (int p1, int p2, int path)
10068 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
10069 for (i = 0; i < qp_mutexeslen; i++)
10071 if (qp_mutexes[i].path >= path
10072 && (qp_mutexes[i].prmask & mask) == mask)
10079 /* Return whether the given resource is in the given insn's list of chks.
10080 Return 1 if the conflict is absolutely determined, 2 if it's a potential conflict, 0 if there is no conflict. */
10084 resources_match (struct rsrc *rs,
10085 struct ia64_opcode *idesc,
10090 struct rsrc specs[MAX_SPECS];
10093 /* If the marked resource's qp_regno and the given qp_regno are mutex,
10094 we don't need to check. One exception is note 11, which indicates that
10095 target predicates are written regardless of PR[qp]. */
10096 if (qp_mutex (rs->qp_regno, qp_regno, path)
10100 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
10101 while (count-- > 0)
10103 /* UNAT checking is a bit more specific than other resources */
10104 if (rs->dependency->specifier == IA64_RS_AR_UNAT
10105 && specs[count].mem_offset.hint
10106 && rs->mem_offset.hint)
10108 if (rs->mem_offset.base == specs[count].mem_offset.base)
10110 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
10111 ((specs[count].mem_offset.offset >> 3) & 0x3F))
10118 /* Skip apparent PR write conflicts where both writes are an AND or both
10119 writes are an OR. */
10120 if (rs->dependency->specifier == IA64_RS_PR
10121 || rs->dependency->specifier == IA64_RS_PRr
10122 || rs->dependency->specifier == IA64_RS_PR63)
10124 if (specs[count].cmp_type != CMP_NONE
10125 && specs[count].cmp_type == rs->cmp_type)
10128 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
10129 dv_mode[rs->dependency->mode],
10130 rs->dependency->specifier != IA64_RS_PR63 ?
10131 specs[count].index : 63);
10136 " %s on parallel compare conflict %s vs %s on PR%d\n",
10137 dv_mode[rs->dependency->mode],
10138 dv_cmp_type[rs->cmp_type],
10139 dv_cmp_type[specs[count].cmp_type],
10140 rs->dependency->specifier != IA64_RS_PR63 ?
10141 specs[count].index : 63);
10145 /* If either resource is not specific, conservatively assume a conflict
10147 if (!specs[count].specific || !rs->specific)
10149 else if (specs[count].index == rs->index)
10156 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
10157 insert a stop to create the break. Update all resource dependencies
10158 appropriately. If QP_REGNO is non-zero, only apply the break to resources
10159 which use the same QP_REGNO and have the link_to_qp_branch flag set.
10160 If SAVE_CURRENT is non-zero, don't affect resources marked by the current instruction. */
10164 insn_group_break (int insert_stop, int qp_regno, int save_current)
10168 if (insert_stop && md.num_slots_in_use > 0)
10169 PREV_SLOT.end_of_insn_group = 1;
10173 fprintf (stderr, " Insn group break%s",
10174 (insert_stop ? " (w/stop)" : ""));
10176 fprintf (stderr, " effective for QP=%d", qp_regno);
10177 fprintf (stderr, "\n");
10181 while (i < regdepslen)
10183 const struct ia64_dependency *dep = regdeps[i].dependency;
10186 && regdeps[i].qp_regno != qp_regno)
10193 && CURR_SLOT.src_file == regdeps[i].file
10194 && CURR_SLOT.src_line == regdeps[i].line)
10200 /* clear dependencies which are automatically cleared by a stop, or
10201 those that have reached the appropriate state of insn serialization */
10202 if (dep->semantics == IA64_DVS_IMPLIED
10203 || dep->semantics == IA64_DVS_IMPLIEDF
10204 || regdeps[i].insn_srlz == STATE_SRLZ)
10206 print_dependency ("Removing", i);
10207 regdeps[i] = regdeps[--regdepslen];
10211 if (dep->semantics == IA64_DVS_DATA
10212 || dep->semantics == IA64_DVS_INSTR
10213 || dep->semantics == IA64_DVS_SPECIFIC)
10215 if (regdeps[i].insn_srlz == STATE_NONE)
10216 regdeps[i].insn_srlz = STATE_STOP;
10217 if (regdeps[i].data_srlz == STATE_NONE)
10218 regdeps[i].data_srlz = STATE_STOP;
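/* In assembly source an instruction group break is simply a stop bit,
   e.g. (registers arbitrary):

        mov r4 = r5
        ;;                   // stop: ends the group, clearing IMPLIED deps
        add r6 = r4, r7      // no longer a RAW violation on r4

   which is what insn_group_break(1, ...) records above.  */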
10225 /* Add the given resource usage spec to the list of active dependencies. */
10228 mark_resource (struct ia64_opcode *idesc ATTRIBUTE_UNUSED,
10229 const struct ia64_dependency *dep ATTRIBUTE_UNUSED,
10234 if (regdepslen == regdepstotlen)
10236 regdepstotlen += 20;
10237 regdeps = (struct rsrc *)
10238 xrealloc ((void *) regdeps,
10239 regdepstotlen * sizeof (struct rsrc));
10242 regdeps[regdepslen] = *spec;
10243 regdeps[regdepslen].depind = depind;
10244 regdeps[regdepslen].path = path;
10245 regdeps[regdepslen].file = CURR_SLOT.src_file;
10246 regdeps[regdepslen].line = CURR_SLOT.src_line;
10248 print_dependency ("Adding", regdepslen);
10254 print_dependency (const char *action, int depind)
10258 fprintf (stderr, " %s %s '%s'",
10259 action, dv_mode[(regdeps[depind].dependency)->mode],
10260 (regdeps[depind].dependency)->name);
10261 if (regdeps[depind].specific && regdeps[depind].index >= 0)
10262 fprintf (stderr, " (%d)", regdeps[depind].index);
10263 if (regdeps[depind].mem_offset.hint)
10265 fputs (" ", stderr);
10266 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
10267 fputs ("+", stderr);
10268 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
10270 fprintf (stderr, "\n");
10275 instruction_serialization (void)
10279 fprintf (stderr, " Instruction serialization\n");
10280 for (i = 0; i < regdepslen; i++)
10281 if (regdeps[i].insn_srlz == STATE_STOP)
10282 regdeps[i].insn_srlz = STATE_SRLZ;
10286 data_serialization (void)
10290 fprintf (stderr, " Data serialization\n");
10291 while (i < regdepslen)
10293 if (regdeps[i].data_srlz == STATE_STOP
10294 /* Note: as of 991210, all "other" dependencies are cleared by a
10295 data serialization. This might change with new tables */
10296 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
10298 print_dependency ("Removing", i);
10299 regdeps[i] = regdeps[--regdepslen];
10306 /* Insert stops and serializations as needed to avoid DVs. */
10309 remove_marked_resource (struct rsrc *rs)
10311 switch (rs->dependency->semantics)
10313 case IA64_DVS_SPECIFIC:
10315 fprintf (stderr, "Implementation-specific, assume worst case...\n");
10316 /* ...fall through... */
10317 case IA64_DVS_INSTR:
10319 fprintf (stderr, "Inserting instr serialization\n");
10320 if (rs->insn_srlz < STATE_STOP)
10321 insn_group_break (1, 0, 0);
10322 if (rs->insn_srlz < STATE_SRLZ)
10324 struct slot oldslot = CURR_SLOT;
10325 /* Manually jam a srlz.i insn into the stream */
10326 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10327 CURR_SLOT.user_template = -1;
10328 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
10329 instruction_serialization ();
10330 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10331 if (++md.num_slots_in_use >= NUM_SLOTS)
10332 emit_one_bundle ();
10333 CURR_SLOT = oldslot;
10335 insn_group_break (1, 0, 0);
10337 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
10338 "other" types of DV are eliminated
10339 by a data serialization */
10340 case IA64_DVS_DATA:
10342 fprintf (stderr, "Inserting data serialization\n");
10343 if (rs->data_srlz < STATE_STOP)
10344 insn_group_break (1, 0, 0);
10346 struct slot oldslot = CURR_SLOT;
10347 /* Manually jam a srlz.d insn into the stream */
10348 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10349 CURR_SLOT.user_template = -1;
10350 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
10351 data_serialization ();
10352 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10353 if (++md.num_slots_in_use >= NUM_SLOTS)
10354 emit_one_bundle ();
10355 CURR_SLOT = oldslot;
10358 case IA64_DVS_IMPLIED:
10359 case IA64_DVS_IMPLIEDF:
10361 fprintf (stderr, "Inserting stop\n");
10362 insn_group_break (1, 0, 0);
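/* Sketch of the automatic fix-ups above.  For an IA64_DVS_INSTR resource
   the assembler effectively rewrites

        <writer> ; <reader>

   into

        <writer>
        ;;
        srlz.i
        ;;
        <reader>

   while for IA64_DVS_DATA/OTHER it inserts a stop followed by a
   manufactured "srlz.d" slot, and a plain stop suffices for the
   IMPLIED/IMPLIEDF cases.  */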
10369 /* Check the resources used by the given opcode against the current dependency list.
10372 The check is run once for each execution path encountered. In this case,
10373 a unique execution path is the sequence of instructions following a code
10374 entry point, e.g. the following has three execution paths, one starting
10375 at L0, one at L1, and one at L2.
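/* Schematic example of such entry points (labels and instructions are
   hypothetical):

        L0:     mov r4 = r5
        L1:     mov r6 = r7
        L2:     mov r8 = r9
                br.ret.sptk.many b0

   Entering at L0 executes all four instructions, entering at L2 only the
   last two, so each path keeps its own view of marked resources.  */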
10384 check_dependencies (struct ia64_opcode *idesc)
10386 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10390 /* Note that the number of marked resources may change within the
10391 loop if in auto mode. */
10393 while (i < regdepslen)
10395 struct rsrc *rs = ®deps[i];
10396 const struct ia64_dependency *dep = rs->dependency;
10399 int start_over = 0;
10401 if (dep->semantics == IA64_DVS_NONE
10402 || (chkind = depends_on (rs->depind, idesc)) == -1)
10408 note = NOTE (opdeps->chks[chkind]);
10410 /* Check this resource against each execution path seen thus far. */
10411 for (path = 0; path <= md.path; path++)
10415 /* If the dependency wasn't on the path being checked, ignore it. */
10416 if (rs->path < path)
10419 /* If the QP for this insn implies a QP which has branched, don't
10420 bother checking. Ed. NOTE: I don't think this check is terribly
10421 useful; what's the point of generating code which will only be
10422 reached if its QP is zero?
10423 This code was specifically inserted to handle the following code,
10424 based on notes from Intel's DV checking code, where p1 implies p2.
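/* Schematic version of that situation (predicates and insns hypothetical),
   with p1 known to imply p2, e.g. via a .pred.rel annotation:

        (p2) br.cond.sptk elsewhere    // p2 is recorded as having branched
        (p1) st8 [r8] = r9             // only reached when p1 is 0, so any
                                       // apparent conflict is a false DV

   which is the case the qp_implies[].p2_branched test below filters out.  */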
10430 if (CURR_SLOT.qp_regno != 0)
10434 for (implies = 0; implies < qp_implieslen; implies++)
10436 if (qp_implies[implies].path >= path
10437 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
10438 && qp_implies[implies].p2_branched)
10448 if ((matchtype = resources_match (rs, idesc, note,
10449 CURR_SLOT.qp_regno, path)) != 0)
10452 char pathmsg[256] = "";
10453 char indexmsg[256] = "";
10454 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
10457 snprintf (pathmsg, sizeof (pathmsg),
10458 " when entry is at label '%s'",
10459 md.entry_labels[path - 1]);
10460 if (matchtype == 1 && rs->index >= 0)
10461 snprintf (indexmsg, sizeof (indexmsg),
10462 ", specific resource number is %d",
10464 snprintf (msg, sizeof (msg),
10465 "Use of '%s' %s %s dependency '%s' (%s)%s%s",
10467 (certain ? "violates" : "may violate"),
10468 dv_mode[dep->mode], dep->name,
10469 dv_sem[dep->semantics],
10470 pathmsg, indexmsg);
10472 if (md.explicit_mode)
10474 as_warn ("%s", msg);
10475 if (path < md.path)
10476 as_warn (_("Only the first path encountering the conflict is reported"));
10477 as_warn_where (rs->file, rs->line,
10478 _("This is the location of the conflicting usage"));
10479 /* Don't bother checking other paths, to avoid duplicating
10480 the same warning */
10486 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
10488 remove_marked_resource (rs);
10490 /* since the set of dependencies has changed, start over */
10491 /* FIXME -- since we're removing dvs as we go, we
10492 probably don't really need to start over... */
10505 /* Register new dependencies based on the given opcode. */
10508 mark_resources (struct ia64_opcode *idesc)
10511 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10512 int add_only_qp_reads = 0;
10514 /* A conditional branch only uses its resources if it is taken; if it is
10515 taken, we stop following that path. The other branch types effectively
10516 *always* write their resources. If it's not taken, register only QP reads. */
10518 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
10520 add_only_qp_reads = 1;
10524 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
10526 for (i = 0; i < opdeps->nregs; i++)
10528 const struct ia64_dependency *dep;
10529 struct rsrc specs[MAX_SPECS];
10534 dep = ia64_find_dependency (opdeps->regs[i]);
10535 note = NOTE (opdeps->regs[i]);
10537 if (add_only_qp_reads
10538 && !(dep->mode == IA64_DV_WAR
10539 && (dep->specifier == IA64_RS_PR
10540 || dep->specifier == IA64_RS_PRr
10541 || dep->specifier == IA64_RS_PR63)))
10544 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
10546 while (count-- > 0)
10548 mark_resource (idesc, dep, &specs[count],
10549 DEP (opdeps->regs[i]), md.path);
10552 /* The execution path may affect register values, which may in turn
10553 affect which indirect-access resources are accessed. */
10554 switch (dep->specifier)
10558 case IA64_RS_CPUID:
10566 for (path = 0; path < md.path; path++)
10568 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
10569 while (count-- > 0)
10570 mark_resource (idesc, dep, &specs[count],
10571 DEP (opdeps->regs[i]), path);
10578 /* Remove dependencies when they no longer apply. */
10581 update_dependencies (struct ia64_opcode *idesc)
10585 if (strcmp (idesc->name, "srlz.i") == 0)
10587 instruction_serialization ();
10589 else if (strcmp (idesc->name, "srlz.d") == 0)
10591 data_serialization ();
10593 else if (is_interruption_or_rfi (idesc)
10594 || is_taken_branch (idesc))
10596 /* Although technically the taken branch doesn't clear dependencies
10597 which require a srlz.[id], we don't follow the branch; the next
10598 instruction is assumed to start with a clean slate. */
10602 else if (is_conditional_branch (idesc)
10603 && CURR_SLOT.qp_regno != 0)
10605 int is_call = strstr (idesc->name, ".call") != NULL;
10607 for (i = 0; i < qp_implieslen; i++)
10609 /* If the conditional branch's predicate is implied by the predicate
10610 in an existing dependency, remove that dependency. */
10611 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
10614 /* Note that this implied predicate takes a branch so that if
10615 a later insn generates a DV but its predicate implies this
10616 one, we can avoid the false DV warning. */
10617 qp_implies[i].p2_branched = 1;
10618 while (depind < regdepslen)
10620 if (regdeps[depind].qp_regno == qp_implies[i].p1)
10622 print_dependency ("Removing", depind);
10623 regdeps[depind] = regdeps[--regdepslen];
10630 /* Any marked resources which have this same predicate should be
10631 cleared, provided that the QP hasn't been modified between the
10632 marking instruction and the branch. */
10635 insn_group_break (0, CURR_SLOT.qp_regno, 1);
10640 while (i < regdepslen)
10642 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
10643 && regdeps[i].link_to_qp_branch
10644 && (regdeps[i].file != CURR_SLOT.src_file
10645 || regdeps[i].line != CURR_SLOT.src_line))
10647 /* Treat like a taken branch */
10648 print_dependency ("Removing", i);
10649 regdeps[i] = regdeps[--regdepslen];
10658 /* Examine the current instruction for dependency violations. */
10661 check_dv (struct ia64_opcode *idesc)
10665 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
10666 idesc->name, CURR_SLOT.src_line,
10667 idesc->dependencies->nchks,
10668 idesc->dependencies->nregs);
10671 /* Look through the list of currently marked resources; if the current
10672 instruction has the dependency in its chks list which uses that resource,
10673 check against the specific resources used. */
10674 check_dependencies (idesc);
10676 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
10677 then add them to the list of marked resources. */
10678 mark_resources (idesc);
10680 /* There are several types of dependency semantics, and each has its own
10681 requirements for being cleared
10683 Instruction serialization (insns separated by interruption, rfi, or
10684 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
10686 Data serialization (instruction serialization, or writer + srlz.d +
10687 reader, where writer and srlz.d are in separate groups) clears
10688 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
10689 always be the case).
10691 Instruction group break (groups separated by stop, taken branch,
10692 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
10694 update_dependencies (idesc);
10696 /* Sometimes, knowing a register value allows us to avoid giving a false DV
10697 warning. Keep track of as many as possible that are useful. */
10698 note_register_values (idesc);
10700 /* We don't need or want this anymore. */
10701 md.mem_offset.hint = 0;
10706 /* Translate one line of assembly. Pseudo ops and labels do not show here. */
10709 md_assemble (char *str)
10711 char *saved_input_line_pointer, *mnemonic;
10712 const struct pseudo_opcode *pdesc;
10713 struct ia64_opcode *idesc;
10714 unsigned char qp_regno;
10715 unsigned int flags;
10718 saved_input_line_pointer = input_line_pointer;
10719 input_line_pointer = str;
10721 /* extract the opcode (mnemonic): */
10723 mnemonic = input_line_pointer;
10724 ch = get_symbol_end ();
10725 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic);
10728 *input_line_pointer = ch;
10729 (*pdesc->handler) (pdesc->arg);
10733 /* Find the instruction descriptor matching the arguments. */
10735 idesc = ia64_find_opcode (mnemonic);
10736 *input_line_pointer = ch;
10739 as_bad (_("Unknown opcode `%s'"), mnemonic);
10743 idesc = parse_operands (idesc);
10747 /* Handle the dynamic ops we can handle now: */
10748 if (idesc->type == IA64_TYPE_DYN)
10750 if (strcmp (idesc->name, "add") == 0)
10752 if (CURR_SLOT.opnd[2].X_op == O_register
10753 && CURR_SLOT.opnd[2].X_add_number < 4)
10757 ia64_free_opcode (idesc);
10758 idesc = ia64_find_opcode (mnemonic);
10760 else if (strcmp (idesc->name, "mov") == 0)
10762 enum ia64_opnd opnd1, opnd2;
10765 opnd1 = idesc->operands[0];
10766 opnd2 = idesc->operands[1];
10767 if (opnd1 == IA64_OPND_AR3)
10769 else if (opnd2 == IA64_OPND_AR3)
10773 if (CURR_SLOT.opnd[rop].X_op == O_register)
10775 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10776 mnemonic = "mov.i";
10777 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10778 mnemonic = "mov.m";
10786 ia64_free_opcode (idesc);
10787 idesc = ia64_find_opcode (mnemonic);
10788 while (idesc != NULL
10789 && (idesc->operands[0] != opnd1
10790 || idesc->operands[1] != opnd2))
10791 idesc = get_next_opcode (idesc);
10795 else if (strcmp (idesc->name, "mov.i") == 0
10796 || strcmp (idesc->name, "mov.m") == 0)
10798 enum ia64_opnd opnd1, opnd2;
10801 opnd1 = idesc->operands[0];
10802 opnd2 = idesc->operands[1];
10803 if (opnd1 == IA64_OPND_AR3)
10805 else if (opnd2 == IA64_OPND_AR3)
10809 if (CURR_SLOT.opnd[rop].X_op == O_register)
10812 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10814 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10816 if (unit != 'a' && unit != idesc->name [4])
10817 as_bad (_("AR %d can only be accessed by %c-unit"),
10818 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR),
10822 else if (strcmp (idesc->name, "hint.b") == 0)
10828 case hint_b_warning:
10829 as_warn (_("hint.b may be treated as nop"));
10832 as_bad (_("hint.b shouldn't be used"));
10838 if (md.qp.X_op == O_register)
10840 qp_regno = md.qp.X_add_number - REG_P;
10841 md.qp.X_op = O_absent;
10844 flags = idesc->flags;
10846 if ((flags & IA64_OPCODE_FIRST) != 0)
10848 /* The alignment frag has to end with a stop bit only if the
10849 next instruction after the alignment directive has to be
10850 the first instruction in an instruction group. */
10853 while (align_frag->fr_type != rs_align_code)
10855 align_frag = align_frag->fr_next;
10859 /* align_frag can be NULL if there are directives in between. */
10861 if (align_frag && align_frag->fr_next == frag_now)
10862 align_frag->tc_frag_data = 1;
10865 insn_group_break (1, 0, 0);
10869 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
10871 as_bad (_("`%s' cannot be predicated"), idesc->name);
10875 /* Build the instruction. */
10876 CURR_SLOT.qp_regno = qp_regno;
10877 CURR_SLOT.idesc = idesc;
10878 as_where (&CURR_SLOT.src_file, &CURR_SLOT.src_line);
10879 dwarf2_where (&CURR_SLOT.debug_line);
10880 dwarf2_consume_line_info ();
10882 /* Add unwind entries, if there are any. */
10883 if (unwind.current_entry)
10885 CURR_SLOT.unwind_record = unwind.current_entry;
10886 unwind.current_entry = NULL;
10888 if (unwind.pending_saves)
10890 if (unwind.pending_saves->next)
10892 /* Attach the next pending save to the next slot so that its
10893 slot number will get set correctly. */
10894 add_unwind_entry (unwind.pending_saves->next, NOT_A_CHAR);
10895 unwind.pending_saves = &unwind.pending_saves->next->r.record.p;
10898 unwind.pending_saves = NULL;
10900 if (unwind.proc_pending.sym && S_IS_DEFINED (unwind.proc_pending.sym))
10903 /* Check for dependency violations. */
10907 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10908 if (++md.num_slots_in_use >= NUM_SLOTS)
10909 emit_one_bundle ();
10911 if ((flags & IA64_OPCODE_LAST) != 0)
10912 insn_group_break (1, 0, 0);
10914 md.last_text_seg = now_seg;
10917 input_line_pointer = saved_input_line_pointer;
10920 /* Called when symbol NAME cannot be found in the symbol table.
10921 Should be used for dynamic valued symbols only. */
10924 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
10929 /* Called for any expression that cannot be recognized. When the
10930 function is called, `input_line_pointer' will point to the start of the expression. */
10934 md_operand (expressionS *e)
10936 switch (*input_line_pointer)
10939 ++input_line_pointer;
10940 expression_and_evaluate (e);
10941 if (*input_line_pointer != ']')
10943 as_bad (_("Closing bracket missing"));
10948 if (e->X_op != O_register
10949 || e->X_add_number < REG_GR
10950 || e->X_add_number > REG_GR + 127)
10952 as_bad (_("Index must be a general register"));
10953 e->X_add_number = REG_GR;
10956 ++input_line_pointer;
10967 ignore_rest_of_line ();
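/* For reference, the bracketed index parsed above is used for indirect
   register file accesses (an assembly sketch, not part of this file):

       mov  r16 = cpuid[r17]    // r17 indexes the cpuid register file
       mov  pmc[r18] = r19      // likewise for pmc, pmd, dbr, ibr, ...

   The index inside the brackets must be a general register.  */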
10970 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10971 a section symbol plus some offset. For relocs involving @fptr()
10972 directives, we don't want such adjustments since we need to have the
10973 original symbol's name in the reloc. */
10975 ia64_fix_adjustable (fixS *fix)
10977 /* Prevent all adjustments to global symbols */
10978 if (S_IS_EXTERNAL (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10981 switch (fix->fx_r_type)
10983 case BFD_RELOC_IA64_FPTR64I:
10984 case BFD_RELOC_IA64_FPTR32MSB:
10985 case BFD_RELOC_IA64_FPTR32LSB:
10986 case BFD_RELOC_IA64_FPTR64MSB:
10987 case BFD_RELOC_IA64_FPTR64LSB:
10988 case BFD_RELOC_IA64_LTOFF_FPTR22:
10989 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10999 ia64_force_relocation (fixS *fix)
11001 switch (fix->fx_r_type)
11003 case BFD_RELOC_IA64_FPTR64I:
11004 case BFD_RELOC_IA64_FPTR32MSB:
11005 case BFD_RELOC_IA64_FPTR32LSB:
11006 case BFD_RELOC_IA64_FPTR64MSB:
11007 case BFD_RELOC_IA64_FPTR64LSB:
11009 case BFD_RELOC_IA64_LTOFF22:
11010 case BFD_RELOC_IA64_LTOFF64I:
11011 case BFD_RELOC_IA64_LTOFF_FPTR22:
11012 case BFD_RELOC_IA64_LTOFF_FPTR64I:
11013 case BFD_RELOC_IA64_PLTOFF22:
11014 case BFD_RELOC_IA64_PLTOFF64I:
11015 case BFD_RELOC_IA64_PLTOFF64MSB:
11016 case BFD_RELOC_IA64_PLTOFF64LSB:
11018 case BFD_RELOC_IA64_LTOFF22X:
11019 case BFD_RELOC_IA64_LDXMOV:
11026 return generic_force_reloc (fix);
11029 /* Decide which point a pc-relative relocation is relative to,
11030 relative to the pc-relative fixup. Er, relatively speaking. */
11032 ia64_pcrel_from_section (fixS *fix, segT sec)
11034 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
11036 if (bfd_get_section_flags (stdoutput, sec) & SEC_CODE)
11043 /* Used to emit section-relative relocs for the dwarf2 debug data. */
11045 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
11049 exp.X_op = O_pseudo_fixup;
11050 exp.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
11051 exp.X_add_number = 0;
11052 exp.X_add_symbol = symbol;
11053 emit_expr (&exp, size);
11056 /* This is called whenever some data item (not an instruction) needs a
11057 fixup. We pick the right reloc code depending on the byte order
11058 currently in effect. */
11060 ia64_cons_fix_new (fragS *f, int where, int nbytes, expressionS *exp,
11061 bfd_reloc_code_real_type code)
11067 /* There are no relocs for 8 and 16 bit quantities, but we allow
11068 them here since they will work fine as long as the expression
11069 is fully defined at the end of the pass over the source file. */
11070 case 1: code = BFD_RELOC_8; break;
11071 case 2: code = BFD_RELOC_16; break;
11073 if (target_big_endian)
11074 code = BFD_RELOC_IA64_DIR32MSB;
11076 code = BFD_RELOC_IA64_DIR32LSB;
11080 /* In 32-bit mode, data8 could mean function descriptors too. */
11081 if (exp->X_op == O_pseudo_fixup
11082 && exp->X_op_symbol
11083 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
11084 && !(md.flags & EF_IA_64_ABI64))
11086 if (target_big_endian)
11087 code = BFD_RELOC_IA64_IPLTMSB;
11089 code = BFD_RELOC_IA64_IPLTLSB;
11090 exp->X_op = O_symbol;
11095 if (target_big_endian)
11096 code = BFD_RELOC_IA64_DIR64MSB;
11098 code = BFD_RELOC_IA64_DIR64LSB;
11103 if (exp->X_op == O_pseudo_fixup
11104 && exp->X_op_symbol
11105 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
11107 if (target_big_endian)
11108 code = BFD_RELOC_IA64_IPLTMSB;
11110 code = BFD_RELOC_IA64_IPLTLSB;
11111 exp->X_op = O_symbol;
11117 as_bad (_("Unsupported fixup size %d"), nbytes);
11118 ignore_rest_of_line ();
11122 if (exp->X_op == O_pseudo_fixup)
11124 exp->X_op = O_symbol;
11125 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
11126 /* ??? If code unchanged, unsupported. */
11129 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
11130 /* We need to store the byte order in effect in case we're going
11131 to fix an 8 or 16 bit relocation (for which there are no real
11132 relocs available). See md_apply_fix(). */
11133 fix->tc_fix_data.bigendian = target_big_endian;
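/* Worked example (a sketch): on a little-endian target,

       data4  @secrel(sym)

   first gets BFD_RELOC_IA64_DIR32LSB from the size switch above, and
   then, because the expression is an O_pseudo_fixup using
   FUNC_SEC_RELATIVE, ia64_gen_real_reloc_type () below rewrites that
   into BFD_RELOC_IA64_SECREL32LSB.  */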
11136 /* Return the actual relocation we wish to associate with the pseudo
11137 reloc described by SYM and R_TYPE. SYM should be one of the
11138 symbols in the pseudo_func array, or NULL. */
11140 static bfd_reloc_code_real_type
11141 ia64_gen_real_reloc_type (struct symbol *sym, bfd_reloc_code_real_type r_type)
11143 bfd_reloc_code_real_type newr = 0;
11144 const char *type = NULL, *suffix = "";
11151 switch (S_GET_VALUE (sym))
11153 case FUNC_FPTR_RELATIVE:
11156 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_FPTR64I; break;
11157 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_FPTR32MSB; break;
11158 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_FPTR32LSB; break;
11159 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_FPTR64MSB; break;
11160 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_FPTR64LSB; break;
11161 default: type = "FPTR"; break;
11165 case FUNC_GP_RELATIVE:
11168 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_GPREL22; break;
11169 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_GPREL64I; break;
11170 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_GPREL32MSB; break;
11171 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_GPREL32LSB; break;
11172 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_GPREL64MSB; break;
11173 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_GPREL64LSB; break;
11174 default: type = "GPREL"; break;
11178 case FUNC_LT_RELATIVE:
11181 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22; break;
11182 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_LTOFF64I; break;
11183 default: type = "LTOFF"; break;
11187 case FUNC_LT_RELATIVE_X:
11190 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22X; break;
11191 default: type = "LTOFF"; suffix = "X"; break;
11195 case FUNC_PC_RELATIVE:
11198 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PCREL22; break;
11199 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PCREL64I; break;
11200 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_PCREL32MSB; break;
11201 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_PCREL32LSB; break;
11202 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PCREL64MSB; break;
11203 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PCREL64LSB; break;
11204 default: type = "PCREL"; break;
11208 case FUNC_PLT_RELATIVE:
11211 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PLTOFF22; break;
11212 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PLTOFF64I; break;
11213 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PLTOFF64MSB;break;
11214 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PLTOFF64LSB;break;
11215 default: type = "PLTOFF"; break;
11219 case FUNC_SEC_RELATIVE:
11222 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SECREL32MSB;break;
11223 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SECREL32LSB;break;
11224 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SECREL64MSB;break;
11225 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SECREL64LSB;break;
11226 default: type = "SECREL"; break;
11230 case FUNC_SEG_RELATIVE:
11233 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SEGREL32MSB;break;
11234 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SEGREL32LSB;break;
11235 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SEGREL64MSB;break;
11236 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SEGREL64LSB;break;
11237 default: type = "SEGREL"; break;
11241 case FUNC_LTV_RELATIVE:
11244 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_LTV32MSB; break;
11245 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_LTV32LSB; break;
11246 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_LTV64MSB; break;
11247 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_LTV64LSB; break;
11248 default: type = "LTV"; break;
11252 case FUNC_LT_FPTR_RELATIVE:
11255 case BFD_RELOC_IA64_IMM22:
11256 newr = BFD_RELOC_IA64_LTOFF_FPTR22; break;
11257 case BFD_RELOC_IA64_IMM64:
11258 newr = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
11259 case BFD_RELOC_IA64_DIR32MSB:
11260 newr = BFD_RELOC_IA64_LTOFF_FPTR32MSB; break;
11261 case BFD_RELOC_IA64_DIR32LSB:
11262 newr = BFD_RELOC_IA64_LTOFF_FPTR32LSB; break;
11263 case BFD_RELOC_IA64_DIR64MSB:
11264 newr = BFD_RELOC_IA64_LTOFF_FPTR64MSB; break;
11265 case BFD_RELOC_IA64_DIR64LSB:
11266 newr = BFD_RELOC_IA64_LTOFF_FPTR64LSB; break;
11268 type = "LTOFF_FPTR"; break;
11272 case FUNC_TP_RELATIVE:
11275 case BFD_RELOC_IA64_IMM14: newr = BFD_RELOC_IA64_TPREL14; break;
11276 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_TPREL22; break;
11277 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_TPREL64I; break;
11278 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_TPREL64MSB; break;
11279 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_TPREL64LSB; break;
11280 default: type = "TPREL"; break;
11284 case FUNC_LT_TP_RELATIVE:
11287 case BFD_RELOC_IA64_IMM22:
11288 newr = BFD_RELOC_IA64_LTOFF_TPREL22; break;
11290 type = "LTOFF_TPREL"; break;
11294 case FUNC_DTP_MODULE:
11297 case BFD_RELOC_IA64_DIR64MSB:
11298 newr = BFD_RELOC_IA64_DTPMOD64MSB; break;
11299 case BFD_RELOC_IA64_DIR64LSB:
11300 newr = BFD_RELOC_IA64_DTPMOD64LSB; break;
11302 type = "DTPMOD"; break;
11306 case FUNC_LT_DTP_MODULE:
11309 case BFD_RELOC_IA64_IMM22:
11310 newr = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
11312 type = "LTOFF_DTPMOD"; break;
11316 case FUNC_DTP_RELATIVE:
11319 case BFD_RELOC_IA64_DIR32MSB:
11320 newr = BFD_RELOC_IA64_DTPREL32MSB; break;
11321 case BFD_RELOC_IA64_DIR32LSB:
11322 newr = BFD_RELOC_IA64_DTPREL32LSB; break;
11323 case BFD_RELOC_IA64_DIR64MSB:
11324 newr = BFD_RELOC_IA64_DTPREL64MSB; break;
11325 case BFD_RELOC_IA64_DIR64LSB:
11326 newr = BFD_RELOC_IA64_DTPREL64LSB; break;
11327 case BFD_RELOC_IA64_IMM14:
11328 newr = BFD_RELOC_IA64_DTPREL14; break;
11329 case BFD_RELOC_IA64_IMM22:
11330 newr = BFD_RELOC_IA64_DTPREL22; break;
11331 case BFD_RELOC_IA64_IMM64:
11332 newr = BFD_RELOC_IA64_DTPREL64I; break;
11334 type = "DTPREL"; break;
11338 case FUNC_LT_DTP_RELATIVE:
11341 case BFD_RELOC_IA64_IMM22:
11342 newr = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
11344 type = "LTOFF_DTPREL"; break;
11348 case FUNC_IPLT_RELOC:
11351 case BFD_RELOC_IA64_IPLTMSB: return r_type;
11352 case BFD_RELOC_IA64_IPLTLSB: return r_type;
11353 default: type = "IPLT"; break;
11358 case FUNC_SLOTCOUNT_RELOC:
11359 return DUMMY_RELOC_IA64_SLOTCOUNT;
11376 case BFD_RELOC_IA64_DIR32MSB: width = 32; suffix = "MSB"; break;
11377 case BFD_RELOC_IA64_DIR32LSB: width = 32; suffix = "LSB"; break;
11378 case BFD_RELOC_IA64_DIR64MSB: width = 64; suffix = "MSB"; break;
11379 case BFD_RELOC_IA64_DIR64LSB: width = 64; suffix = "LSB"; break;
11380 case BFD_RELOC_UNUSED: width = 13; break;
11381 case BFD_RELOC_IA64_IMM14: width = 14; break;
11382 case BFD_RELOC_IA64_IMM22: width = 22; break;
11383 case BFD_RELOC_IA64_IMM64: width = 64; suffix = "I"; break;
11387 /* This should be an error, but since there was previously no
11388 diagnostic here, don't make it fail for now. */
11389 as_warn (_("Cannot express %s%d%s relocation"), type, width, suffix);
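/* Worked example (a sketch): the usual GOT access sequence

       addl r15 = @ltoff(sym), gp

   creates a 22-bit immediate fixup (BFD_RELOC_IA64_IMM22) against the
   @ltoff pseudo function (FUNC_LT_RELATIVE), which the switch above
   maps to BFD_RELOC_IA64_LTOFF22.  */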
11394 /* Here is where we generate the appropriate reloc for pseudo relocation
11397 ia64_validate_fix (fixS *fix)
11399 switch (fix->fx_r_type)
11401 case BFD_RELOC_IA64_FPTR64I:
11402 case BFD_RELOC_IA64_FPTR32MSB:
11403 case BFD_RELOC_IA64_FPTR64LSB:
11404 case BFD_RELOC_IA64_LTOFF_FPTR22:
11405 case BFD_RELOC_IA64_LTOFF_FPTR64I:
11406 if (fix->fx_offset != 0)
11407 as_bad_where (fix->fx_file, fix->fx_line,
11408 _("No addend allowed in @fptr() relocation"));
11416 fix_insn (fixS *fix, const struct ia64_operand *odesc, valueT value)
11418 bfd_vma insn[3], t0, t1, control_bits;
11423 slot = fix->fx_where & 0x3;
11424 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
11426 /* Bundles are always in little-endian byte order */
11427 t0 = bfd_getl64 (fixpos);
11428 t1 = bfd_getl64 (fixpos + 8);
11429 control_bits = t0 & 0x1f;
11430 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
11431 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
11432 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
11435 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
11437 insn[1] = (value >> 22) & 0x1ffffffffffLL;
11438 insn[2] |= (((value & 0x7f) << 13)
11439 | (((value >> 7) & 0x1ff) << 27)
11440 | (((value >> 16) & 0x1f) << 22)
11441 | (((value >> 21) & 0x1) << 21)
11442 | (((value >> 63) & 0x1) << 36));
11444 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
11446 if (value & ~0x3fffffffffffffffULL)
11447 err = _("integer operand out of range");
11448 insn[1] = (value >> 21) & 0x1ffffffffffLL;
11449 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
11451 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
11454 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
11455 insn[2] |= ((((value >> 59) & 0x1) << 36)
11456 | (((value >> 0) & 0xfffff) << 13));
11459 err = (*odesc->insert) (odesc, value, insn + slot);
11462 as_bad_where (fix->fx_file, fix->fx_line, "%s", err);
11464 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
11465 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
11466 number_to_chars_littleendian (fixpos + 0, t0, 8);
11467 number_to_chars_littleendian (fixpos + 8, t1, 8);
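/* Illustrative sketch, not referenced elsewhere in this file: how the
   two little-endian 64-bit words of a bundle split into the 5 template
   bits and three 41-bit instruction slots, mirroring the extraction in
   fix_insn () above.  The helper name is hypothetical.  */

static void
ia64_unpack_bundle_sketch (bfd_vma t0, bfd_vma t1,
                           bfd_vma *tmpl, bfd_vma slot[3])
{
  *tmpl = t0 & 0x1f;                                          /* bits 0-4 */
  slot[0] = (t0 >> 5) & 0x1ffffffffffLL;                      /* bits 5-45 */
  slot[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18); /* bits 46-86 */
  slot[2] = (t1 >> 23) & 0x1ffffffffffLL;                     /* bits 87-127 */
}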
11470 /* Attempt to simplify or even eliminate a fixup. The return value is
11471 ignored; perhaps it was once meaningful, but now it is historical.
11472 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
11474 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry for it. */
11478 md_apply_fix (fixS *fix, valueT *valP, segT seg ATTRIBUTE_UNUSED)
11481 valueT value = *valP;
11483 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
11487 switch (fix->fx_r_type)
11489 case BFD_RELOC_IA64_PCREL21B: break;
11490 case BFD_RELOC_IA64_PCREL21BI: break;
11491 case BFD_RELOC_IA64_PCREL21F: break;
11492 case BFD_RELOC_IA64_PCREL21M: break;
11493 case BFD_RELOC_IA64_PCREL60B: break;
11494 case BFD_RELOC_IA64_PCREL22: break;
11495 case BFD_RELOC_IA64_PCREL64I: break;
11496 case BFD_RELOC_IA64_PCREL32MSB: break;
11497 case BFD_RELOC_IA64_PCREL32LSB: break;
11498 case BFD_RELOC_IA64_PCREL64MSB: break;
11499 case BFD_RELOC_IA64_PCREL64LSB: break;
11501 fix->fx_r_type = ia64_gen_real_reloc_type (pseudo_func[FUNC_PC_RELATIVE].u.sym,
11508 switch ((unsigned) fix->fx_r_type)
11510 case BFD_RELOC_UNUSED:
11511 /* This must be a TAG13 or TAG13b operand. There are no external
11512 relocs defined for them, so we must give an error. */
11513 as_bad_where (fix->fx_file, fix->fx_line,
11514 _("%s must have a constant value"),
11515 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
11519 case BFD_RELOC_IA64_TPREL14:
11520 case BFD_RELOC_IA64_TPREL22:
11521 case BFD_RELOC_IA64_TPREL64I:
11522 case BFD_RELOC_IA64_LTOFF_TPREL22:
11523 case BFD_RELOC_IA64_LTOFF_DTPMOD22:
11524 case BFD_RELOC_IA64_DTPREL14:
11525 case BFD_RELOC_IA64_DTPREL22:
11526 case BFD_RELOC_IA64_DTPREL64I:
11527 case BFD_RELOC_IA64_LTOFF_DTPREL22:
11528 S_SET_THREAD_LOCAL (fix->fx_addsy);
11532 case DUMMY_RELOC_IA64_SLOTCOUNT:
11533 as_bad_where (fix->fx_file, fix->fx_line,
11534 _("cannot resolve @slotcount parameter"));
11543 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
11546 if (fix->fx_r_type == DUMMY_RELOC_IA64_SLOTCOUNT)
11548 /* For @slotcount, convert an address difference into a slot count. */
11552 v = (value >> 4) * 3;
11553 switch (value & 0x0f)
11567 as_bad (_("invalid @slotcount value"));
11573 if (fix->tc_fix_data.bigendian)
11574 number_to_chars_bigendian (fixpos, value, fix->fx_size);
11576 number_to_chars_littleendian (fixpos, value, fix->fx_size);
11581 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
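/* Worked example for the @slotcount conversion above (a sketch,
   assuming three instruction slots per 16-byte bundle with the slot
   number carried in the low nibble of the address-like value):

       value = 0x32  ->  (0x32 >> 4) * 3 + 2  =  9 + 2  =  11 slots
       value = 0x20  ->  (0x20 >> 4) * 3 + 0  =  6 slots
 */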
11586 /* Generate the BFD reloc to be stuck in the object file from the
11587 fixup used internally in the assembler. */
11590 tc_gen_reloc (asection *sec ATTRIBUTE_UNUSED, fixS *fixp)
11594 reloc = xmalloc (sizeof (*reloc));
11595 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
11596 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
11597 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
11598 reloc->addend = fixp->fx_offset;
11599 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
11603 as_bad_where (fixp->fx_file, fixp->fx_line,
11604 _("Cannot represent %s relocation in object file"),
11605 bfd_get_reloc_code_name (fixp->fx_r_type));
11612 /* Turn a string in input_line_pointer into a floating point constant
11613 of type TYPE, and store the appropriate bytes in *LIT. The number
11614 of LITTLENUMS emitted is stored in *SIZE. An error message is
11615 returned, or NULL on OK. */
11617 #define MAX_LITTLENUMS 5
11620 md_atof (int type, char *lit, int *size)
11622 LITTLENUM_TYPE words[MAX_LITTLENUMS];
11652 return _("Unrecognized or unsupported floating point constant");
11654 t = atof_ieee (input_line_pointer, type, words);
11656 input_line_pointer = t;
11658 (*ia64_float_to_chars) (lit, words, prec);
11662 /* It is a 10-byte floating point value with 6 bytes of padding. */
11663 memset (&lit [10], 0, 6);
11664 *size = 8 * sizeof (LITTLENUM_TYPE);
11667 *size = prec * sizeof (LITTLENUM_TYPE);
11672 /* Handle ia64 specific semantics of the align directive. */
11675 ia64_md_do_align (int n ATTRIBUTE_UNUSED,
11676 const char *fill ATTRIBUTE_UNUSED,
11677 int len ATTRIBUTE_UNUSED,
11678 int max ATTRIBUTE_UNUSED)
11680 if (subseg_text_p (now_seg))
11681 ia64_flush_insns ();
11684 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
11685 of an rs_align_code fragment. */
11688 ia64_handle_align (fragS *fragp)
11692 const unsigned char *nop_type;
11694 if (fragp->fr_type != rs_align_code)
11697 /* Check if this frag has to end with a stop bit. */
11698 nop_type = fragp->tc_frag_data ? le_nop_stop : le_nop;
11700 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
11701 p = fragp->fr_literal + fragp->fr_fix;
11703 /* If no padding is needed, we check whether we need a stop bit. */
11704 if (!bytes && fragp->tc_frag_data)
11706 if (fragp->fr_fix < 16)
11708 /* FIXME: It won't work with
11710 alloc r32=ar.pfs,1,2,4,0
11714 as_bad_where (fragp->fr_file, fragp->fr_line,
11715 _("Can't add stop bit to mark end of instruction group"));
11718 /* Bundles are always in little-endian byte order. Make sure
11719 the previous bundle has the stop bit. */
11723 /* Make sure we are on a 16-byte boundary, in case someone has been
11724 putting data into a text section. */
11727 int fix = bytes & 15;
11728 memset (p, 0, fix);
11731 fragp->fr_fix += fix;
11734 /* Instruction bundles are always little-endian. */
11735 memcpy (p, nop_type, 16);
11736 fragp->fr_var = 16;
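/* Example of what the code above arranges for

       .text
       .align 32

   when the frag does not already end on a bundle boundary: up to 15
   bytes of zero padding (added to fr_fix) to reach a 16-byte boundary,
   then one full 16-byte nop bundle as the repeating fill pattern
   (fr_var == 16), which write.c replicates until the requested
   alignment is reached.  */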
11740 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,
11745 number_to_chars_bigendian (lit, (long) (*words++),
11746 sizeof (LITTLENUM_TYPE));
11747 lit += sizeof (LITTLENUM_TYPE);
11752 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,
11757 number_to_chars_littleendian (lit, (long) (words[prec]),
11758 sizeof (LITTLENUM_TYPE));
11759 lit += sizeof (LITTLENUM_TYPE);
11764 ia64_elf_section_change_hook (void)
11766 if (elf_section_type (now_seg) == SHT_IA_64_UNWIND
11767 && elf_linked_to_section (now_seg) == NULL)
11768 elf_linked_to_section (now_seg) = text_section;
11769 dot_byteorder (-1);
11772 /* Check if a label should be made global. */
11774 ia64_check_label (symbolS *label)
11776 if (*input_line_pointer == ':')
11778 S_SET_EXTERNAL (label);
11779 input_line_pointer++;
11783 /* Used to remember where .alias and .secalias directives are seen. We
11784 will rename symbol and section names when we are about to output
11785 the relocatable file. */
11788 char *file; /* The file where the directive is seen. */
11789 unsigned int line; /* The line number the directive is at. */
11790 const char *name; /* The original name of the symbol. */
11793 /* Called for .alias and .secalias directives. If SECTION is 1, it is
11794 .secalias. Otherwise, it is .alias. */
11796 dot_alias (int section)
11798 char *name, *alias;
11802 const char *error_string;
11805 struct hash_control *ahash, *nhash;
11808 name = input_line_pointer;
11809 delim = get_symbol_end ();
11810 end_name = input_line_pointer;
11813 if (name == end_name)
11815 as_bad (_("expected symbol name"));
11816 ignore_rest_of_line ();
11820 SKIP_WHITESPACE ();
11822 if (*input_line_pointer != ',')
11825 as_bad (_("expected comma after \"%s\""), name);
11827 ignore_rest_of_line ();
11831 input_line_pointer++;
11833 ia64_canonicalize_symbol_name (name);
11835 /* We call demand_copy_C_string to check if the alias string is valid.
11836 There should be a closing `"' and no `\0' in the string. */
11837 alias = demand_copy_C_string (&len);
11840 ignore_rest_of_line ();
11844 /* Make a copy of name string. */
11845 len = strlen (name) + 1;
11846 obstack_grow (¬es, name, len);
11847 name = obstack_finish (¬es);
11852 ahash = secalias_hash;
11853 nhash = secalias_name_hash;
11858 ahash = alias_hash;
11859 nhash = alias_name_hash;
11862 /* Check if alias has been used before. */
11863 h = (struct alias *) hash_find (ahash, alias);
11866 if (strcmp (h->name, name))
11867 as_bad (_("`%s' is already the alias of %s `%s'"),
11868 alias, kind, h->name);
11872 /* Check if name already has an alias. */
11873 a = (const char *) hash_find (nhash, name);
11876 if (strcmp (a, alias))
11877 as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a);
11881 h = (struct alias *) xmalloc (sizeof (struct alias));
11882 as_where (&h->file, &h->line);
11885 error_string = hash_jam (ahash, alias, (void *) h);
11888 as_fatal (_("inserting \"%s\" into %s alias hash table failed: %s"),
11889 alias, kind, error_string);
11893 error_string = hash_jam (nhash, name, (void *) alias);
11896 as_fatal (_("inserting \"%s\" into %s name hash table failed: %s"),
11897 alias, kind, error_string);
11899 obstack_free (¬es, name);
11900 obstack_free (¬es, alias);
11903 demand_empty_rest_of_line ();
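/* For reference, the directives handled here look like (the names are
   made up for the example):

       .alias     foo,   "bar"       // emit symbol foo under the name "bar"
       .secalias  .sfoo, ".sbar"     // emit section .sfoo under the name ".sbar"

   The second operand must be a quoted string.  */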
11906 /* Rename the original symbol name to its alias. */
11908 do_alias (const char *alias, void *value)
11910 struct alias *h = (struct alias *) value;
11911 symbolS *sym = symbol_find (h->name);
11916 /* On VMS, .alias is used extensively to alias CRTL functions to the
11917 same names with a decc$ prefix. Sometimes a function gets optimized
11918 away and a warning results, which should be suppressed. */
11919 if (strncmp (alias, "decc$", 5) != 0)
11921 as_warn_where (h->file, h->line,
11922 _("symbol `%s' aliased to `%s' is not used"),
11926 S_SET_NAME (sym, (char *) alias);
11929 /* Called from write_object_file. */
11931 ia64_adjust_symtab (void)
11933 hash_traverse (alias_hash, do_alias);
11936 /* Rename the original section name to its alias. */
11938 do_secalias (const char *alias, void *value)
11940 struct alias *h = (struct alias *) value;
11941 segT sec = bfd_get_section_by_name (stdoutput, h->name);
11944 as_warn_where (h->file, h->line,
11945 _("section `%s' aliased to `%s' is not used"),
11951 /* Called from write_object_file. */
11953 ia64_frob_file (void)
11955 hash_traverse (secalias_hash, do_secalias);
11959 #define NT_VMS_MHD 1
11960 #define NT_VMS_LNM 2
11962 /* Integrity VMS 8.x identifies its ELF modules with a standard ELF .note section. */
11965 /* Manufacture a VMS-like time string. */
11967 get_vms_time (char *Now)
11973 pnt = ctime (&timeb);
11979 sprintf (Now, "%2s-%3s-%s %s", pnt + 8, pnt + 4, pnt + 20, pnt + 11);
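/* For reference: ctime () returns a fixed-layout string such as
   "Wed Jun 30 21:49:08 1993\n", so the offsets used above pick out the
   day of month (+8), the month name (+4), the year (+20) and the time
   of day (+11) to build a VMS-style "dd-Mmm-yyyy hh:mm:ss" string.  */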
11983 ia64_vms_note (void)
11986 asection *seg = now_seg;
11987 subsegT subseg = now_subseg;
11988 asection *secp = NULL;
11993 /* Create the .note section. */
11995 secp = subseg_new (".note", 0);
11996 bfd_set_section_flags (stdoutput,
11998 SEC_HAS_CONTENTS | SEC_READONLY);
12000 /* Module header note (MHD). */
12001 bname = xstrdup (lbasename (out_file_name));
12002 if ((p = strrchr (bname, '.')))
12005 /* VMS note header is 24 bytes long. */
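/* For reference, those 24 bytes form the usual ELF note header, written
   here as three 8-byte little-endian fields (a sketch; the field names
   are the conventional ones and are not used elsewhere in this file):

       namesz = 8                     // strlen ("IPF/VMS") + 1
       descsz = 40 + strlen (bname)   // size of the descriptor that follows
       type   = NT_VMS_MHD
 */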
12006 p = frag_more (8 + 8 + 8);
12007 number_to_chars_littleendian (p + 0, 8, 8);
12008 number_to_chars_littleendian (p + 8, 40 + strlen (bname), 8);
12009 number_to_chars_littleendian (p + 16, NT_VMS_MHD, 8);
12012 strcpy (p, "IPF/VMS");
12014 p = frag_more (17 + 17 + strlen (bname) + 1 + 5);
12016 strcpy (p + 17, "24-FEB-2005 15:00");
12019 p += strlen (bname) + 1;
12021 strcpy (p, "V1.0");
12023 frag_align (3, 0, 0);
12025 /* Language processor name note. */
12026 sprintf (buf, "GNU assembler version %s (%s) using BFD version %s",
12027 VERSION, TARGET_ALIAS, BFD_VERSION_STRING);
12029 p = frag_more (8 + 8 + 8);
12030 number_to_chars_littleendian (p + 0, 8, 8);
12031 number_to_chars_littleendian (p + 8, strlen (buf) + 1, 8);
12032 number_to_chars_littleendian (p + 16, NT_VMS_LNM, 8);
12035 strcpy (p, "IPF/VMS");
12037 p = frag_more (strlen (buf) + 1);
12040 frag_align (3, 0, 0);
12042 secp = subseg_new (".vms_display_name_info", 0);
12043 bfd_set_section_flags (stdoutput,
12045 SEC_HAS_CONTENTS | SEC_READONLY);
12047 /* This symbol name should be passed on the command line and should
12048 vary according to the source language. */
12049 sym = symbol_new ("__gnat_vms_display_name@gnat_demangler_rtl",
12050 absolute_section, 0, &zero_address_frag);
12051 symbol_table_insert (sym);
12052 symbol_get_bfdsym (sym)->flags |= BSF_DEBUGGING | BSF_DYNAMIC;
12055 /* Format 3 of VMS demangler Spec. */
12056 number_to_chars_littleendian (p, 3, 4);
12059 /* Placeholder for the symbol table index of the above symbol. */
12060 number_to_chars_littleendian (p, -1, 4);
12062 frag_align (3, 0, 0);
12064 /* We probably can't restore the current segment, for there likely
12065 isn't one yet... */
12067 subseg_set (seg, subseg);
12070 #endif /* TE_VMS */