1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004
3 Free Software Foundation, Inc.
4 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
6 This file is part of GAS, the GNU Assembler.
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to
20 the Free Software Foundation, 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
35 - labels are wrong if automatic alignment is introduced
36 (e.g., check out the second real10 definition in test-data.s)
38 <reg>.safe_across_calls and any other DV-related directives I don't
39 have documentation for.
40 verify mod-sched-brs reads/writes are checked/marked (and other
46 #include "safe-ctype.h"
47 #include "dwarf2dbg.h"
50 #include "opcode/ia64.h"
54 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
55 #define MIN(a,b) ((a) < (b) ? (a) : (b))
58 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
59 #define CURR_SLOT md.slot[md.curr_slot]
61 #define O_pseudo_fixup (O_max + 1)
65 /* IA-64 ABI section pseudo-ops. */
66 SPECIAL_SECTION_BSS = 0,
68 SPECIAL_SECTION_SDATA,
69 SPECIAL_SECTION_RODATA,
70 SPECIAL_SECTION_COMMENT,
71 SPECIAL_SECTION_UNWIND,
72 SPECIAL_SECTION_UNWIND_INFO,
73 /* HPUX specific section pseudo-ops. */
74 SPECIAL_SECTION_INIT_ARRAY,
75 SPECIAL_SECTION_FINI_ARRAY,
92 FUNC_LT_FPTR_RELATIVE,
102 REG_FR = (REG_GR + 128),
103 REG_AR = (REG_FR + 128),
104 REG_CR = (REG_AR + 128),
105 REG_P = (REG_CR + 128),
106 REG_BR = (REG_P + 64),
107 REG_IP = (REG_BR + 8),
114 /* The following are pseudo-registers for use by gas only. */
126 /* The following pseudo-registers are used for unwind directives only: */
134 DYNREG_GR = 0, /* dynamic general purpose register */
135 DYNREG_FR, /* dynamic floating point register */
136 DYNREG_PR, /* dynamic predicate register */
140 enum operand_match_result
143 OPERAND_OUT_OF_RANGE,
147 /* On the ia64, we can't know the address of a text label until the
148 instructions are packed into a bundle. To handle this, we keep
149 track of the list of labels that appear in front of each
153 struct label_fix *next;
157 /* This is the endianness of the current section. */
158 extern int target_big_endian;
160 /* This is the default endianness. */
161 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN;
163 void (*ia64_number_to_chars) PARAMS ((char *, valueT, int));
165 static void ia64_float_to_chars_bigendian
166 PARAMS ((char *, LITTLENUM_TYPE *, int));
167 static void ia64_float_to_chars_littleendian
168 PARAMS ((char *, LITTLENUM_TYPE *, int));
169 static void (*ia64_float_to_chars)
170 PARAMS ((char *, LITTLENUM_TYPE *, int));
172 static struct hash_control *alias_hash;
173 static struct hash_control *alias_name_hash;
174 static struct hash_control *secalias_hash;
175 static struct hash_control *secalias_name_hash;
177 /* Characters which always start a comment. */
178 const char comment_chars[] = "";
180 /* Characters which start a comment at the beginning of a line. */
181 const char line_comment_chars[] = "#";
183 /* Characters which may be used to separate multiple commands on a
185 const char line_separator_chars[] = ";";
187 /* Characters which are used to indicate an exponent in a floating
189 const char EXP_CHARS[] = "eE";
191 /* Characters which mean that a number is a floating point constant,
193 const char FLT_CHARS[] = "rRsSfFdDxXpP";
195 /* ia64-specific option processing: */
197 const char *md_shortopts = "m:N:x::";
199 struct option md_longopts[] =
201 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
202 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
203 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
204 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
207 size_t md_longopts_size = sizeof (md_longopts);
211 struct hash_control *pseudo_hash; /* pseudo opcode hash table */
212 struct hash_control *reg_hash; /* register name hash table */
213 struct hash_control *dynreg_hash; /* dynamic register hash table */
214 struct hash_control *const_hash; /* constant hash table */
215 struct hash_control *entry_hash; /* code entry hint hash table */
217 symbolS *regsym[REG_NUM];
219 /* If X_op is != O_absent, the register name for the instruction's
220 qualifying predicate. If absent, p0 is assumed for instructions
221 that can be predicated. */
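/* Illustrative note (added, not part of the original source): in IA-64
   assembly a qualifying predicate is written in parentheses before the
   mnemonic, e.g.

	(p7)	add r8 = r9, r10	// executes only if p7 is 1
		add r8 = r9, r10	// no predicate written; p0 (always 1) assumed

   so when no "(pN)" prefix is parsed, the expression stays O_absent and
   p0 is used.  */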
228 explicit_mode : 1, /* which mode we're in */
229 default_explicit_mode : 1, /* which mode is the default */
230 mode_explicitly_set : 1, /* was the current mode explicitly set? */
232 keep_pending_output : 1;
234 /* Each bundle consists of up to three instructions. We keep
235 track of the four most recent instructions so we can correctly set
236 the end_of_insn_group for the last instruction in a bundle. */
238 int num_slots_in_use;
242 end_of_insn_group : 1,
243 manual_bundling_on : 1,
244 manual_bundling_off : 1,
245 loc_directive_seen : 1;
246 signed char user_template; /* user-selected template, if any */
247 unsigned char qp_regno; /* qualifying predicate */
248 /* This duplicates a good fraction of "struct fix" but we
249 can't use a "struct fix" instead since we can't call
250 fix_new_exp() until we know the address of the instruction. */
254 bfd_reloc_code_real_type code;
255 enum ia64_opnd opnd; /* type of operand in need of fix */
256 unsigned int is_pcrel : 1; /* is operand pc-relative? */
257 expressionS expr; /* the value to be inserted */
259 fixup[2]; /* at most two fixups per insn */
260 struct ia64_opcode *idesc;
261 struct label_fix *label_fixups;
262 struct label_fix *tag_fixups;
263 struct unw_rec_list *unwind_record; /* Unwind directive. */
266 unsigned int src_line;
267 struct dwarf2_line_info debug_line;
275 struct dynreg *next; /* next dynamic register */
277 unsigned short base; /* the base register number */
278 unsigned short num_regs; /* # of registers in this set */
280 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
282 flagword flags; /* ELF-header flags */
285 unsigned hint:1; /* is this hint currently valid? */
286 bfd_vma offset; /* mem.offset offset */
287 bfd_vma base; /* mem.offset base */
290 int path; /* number of alt. entry points seen */
291 const char **entry_labels; /* labels of all alternate paths in
292 the current DV-checking block. */
293 int maxpaths; /* size currently allocated for
295 /* Support for hardware errata workarounds. */
297 /* Record data about the last three insn groups. */
300 /* B-step workaround.
301 For each predicate register, this is set if the corresponding insn
302 group conditionally sets this register with one of the affected
305 /* B-step workaround.
306 For each general register, this is set if the corresponding insn
307 a) is conditional on one of the predicate registers for which
308 P_REG_SET is 1 in the corresponding entry of the previous group,
309 b) sets this general register with one of the affected
311 int g_reg_set_conditionally[128];
315 int pointer_size; /* size in bytes of a pointer */
316 int pointer_size_shift; /* shift size of a pointer for alignment */
320 /* application registers: */
326 #define AR_BSPSTORE 18
341 {"ar.k0", 0}, {"ar.k1", 1}, {"ar.k2", 2}, {"ar.k3", 3},
342 {"ar.k4", 4}, {"ar.k5", 5}, {"ar.k6", 6}, {"ar.k7", 7},
343 {"ar.rsc", 16}, {"ar.bsp", 17},
344 {"ar.bspstore", 18}, {"ar.rnat", 19},
345 {"ar.fcr", 21}, {"ar.eflag", 24},
346 {"ar.csd", 25}, {"ar.ssd", 26},
347 {"ar.cflg", 27}, {"ar.fsr", 28},
348 {"ar.fir", 29}, {"ar.fdr", 30},
349 {"ar.ccv", 32}, {"ar.unat", 36},
350 {"ar.fpsr", 40}, {"ar.itc", 44},
351 {"ar.pfs", 64}, {"ar.lc", 65},
372 /* control registers: */
414 static const struct const_desc
421 /* PSR constant masks: */
424 {"psr.be", ((valueT) 1) << 1},
425 {"psr.up", ((valueT) 1) << 2},
426 {"psr.ac", ((valueT) 1) << 3},
427 {"psr.mfl", ((valueT) 1) << 4},
428 {"psr.mfh", ((valueT) 1) << 5},
430 {"psr.ic", ((valueT) 1) << 13},
431 {"psr.i", ((valueT) 1) << 14},
432 {"psr.pk", ((valueT) 1) << 15},
434 {"psr.dt", ((valueT) 1) << 17},
435 {"psr.dfl", ((valueT) 1) << 18},
436 {"psr.dfh", ((valueT) 1) << 19},
437 {"psr.sp", ((valueT) 1) << 20},
438 {"psr.pp", ((valueT) 1) << 21},
439 {"psr.di", ((valueT) 1) << 22},
440 {"psr.si", ((valueT) 1) << 23},
441 {"psr.db", ((valueT) 1) << 24},
442 {"psr.lp", ((valueT) 1) << 25},
443 {"psr.tb", ((valueT) 1) << 26},
444 {"psr.rt", ((valueT) 1) << 27},
445 /* 28-31: reserved */
446 /* 32-33: cpl (current privilege level) */
447 {"psr.is", ((valueT) 1) << 34},
448 {"psr.mc", ((valueT) 1) << 35},
449 {"psr.it", ((valueT) 1) << 36},
450 {"psr.id", ((valueT) 1) << 37},
451 {"psr.da", ((valueT) 1) << 38},
452 {"psr.dd", ((valueT) 1) << 39},
453 {"psr.ss", ((valueT) 1) << 40},
454 /* 41-42: ri (restart instruction) */
455 {"psr.ed", ((valueT) 1) << 43},
456 {"psr.bn", ((valueT) 1) << 44},
459 /* indirect register-sets/memory: */
468 { "CPUID", IND_CPUID },
469 { "cpuid", IND_CPUID },
481 /* Pseudo functions used to indicate relocation types (these functions
482 start with an at sign (@)). */
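/* For illustration (added, not part of the original source): these pseudo
   functions appear in instruction operands to request a particular
   relocation; assuming a symbol `foo', typical uses are

	addl	r14 = @gprel(foo), gp		// gp-relative offset of foo
	addl	r15 = @ltoff(foo), gp ;;	// offset of foo's literal slot
	ld8	r15 = [r15]			// load the address of foo

   Each name below maps roughly onto the corresponding R_IA64_* relocation
   family.  */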
504 /* reloc pseudo functions (these must come first!): */
505 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
506 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
507 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
508 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
509 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
510 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
511 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
512 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
513 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
514 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
515 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
516 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
517 { "", 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
518 { "", 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
519 { "", 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
520 { "", 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
521 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
523 /* mbtype4 constants: */
524 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
525 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
526 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
527 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
528 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
530 /* fclass constants: */
531 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
532 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
533 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
534 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
535 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
536 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
537 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
538 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
539 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
541 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
543 /* hint constants: */
544 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
546 /* unwind-related constants: */
547 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } },
548 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } },
549 { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */
550 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_LINUX } },
551 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } },
552 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } },
553 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } },
555 /* unwind-related registers: */
556 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
559 /* 41-bit nop opcodes (one per unit): */
560 static const bfd_vma nop[IA64_NUM_UNITS] =
562 0x0000000000LL, /* NIL => break 0 */
563 0x0008000000LL, /* I-unit nop */
564 0x0008000000LL, /* M-unit nop */
565 0x4000000000LL, /* B-unit nop */
566 0x0008000000LL, /* F-unit nop */
567 0x0008000000LL, /* L-"unit" nop */
568 0x0008000000LL, /* X-unit nop */
571 /* Can't be `const' as it's passed to input routines (which have the
572 habit of setting temporary sentinels). */
573 static char special_section_name[][20] =
575 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
576 {".IA_64.unwind"}, {".IA_64.unwind_info"},
577 {".init_array"}, {".fini_array"}
580 /* The best template for a particular sequence of up to three
582 #define N IA64_NUM_TYPES
583 static unsigned char best_template[N][N][N];
586 /* Resource dependencies currently in effect */
588 int depind; /* dependency index */
589 const struct ia64_dependency *dependency; /* actual dependency */
590 unsigned specific:1, /* is this a specific bit/regno? */
591 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
592 int index; /* specific regno/bit within dependency */
593 int note; /* optional qualifying note (0 if none) */
597 int insn_srlz; /* current insn serialization state */
598 int data_srlz; /* current data serialization state */
599 int qp_regno; /* qualifying predicate for this usage */
600 char *file; /* what file marked this dependency */
601 unsigned int line; /* what line marked this dependency */
602 struct mem_offset mem_offset; /* optional memory offset hint */
603 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
604 int path; /* corresponding code entry index */
606 static int regdepslen = 0;
607 static int regdepstotlen = 0;
608 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
609 static const char *dv_sem[] = { "none", "implied", "impliedf",
610 "data", "instr", "specific", "stop", "other" };
611 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
613 /* Current state of PR mutexation */
614 static struct qpmutex {
617 } *qp_mutexes = NULL; /* QP mutex bitmasks */
618 static int qp_mutexeslen = 0;
619 static int qp_mutexestotlen = 0;
620 static valueT qp_safe_across_calls = 0;
622 /* Current state of PR implications */
623 static struct qp_imply {
626 unsigned p2_branched:1;
628 } *qp_implies = NULL;
629 static int qp_implieslen = 0;
630 static int qp_impliestotlen = 0;
632 /* Keep track of static GR values so that indirect register usage can
633 sometimes be tracked. */
638 } gr_values[128] = {{ 1, 0, 0 }};
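/* Sketch of why this matters (an illustration, not original text): the DV
   checker can only tell which indirect register file entry is touched when
   the index GR holds a known constant, e.g.

	mov	r3 = 6 ;;
	mov	pmc[r3] = r9	// known to write pmc[6], not "some pmc"

   gr_values[] remembers such constants; a later write of an unknown value
   to r3 invalidates the entry again.  */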
640 /* Remember the alignment frag. */
641 static fragS *align_frag;
643 /* These are the routines required to output the various types of
646 /* A slot_number is a frag address plus the slot index (0-2). We use the
647 frag address here so that if there is a section switch in the middle of
648 a function, then instructions emitted to a different section are not
649 counted. Since there may be more than one frag for a function, this
650 means we also need to keep track of which frag this address belongs to
651 so we can compute inter-frag distances. This also nicely solves the
652 problem with nops emitted for align directives, which can't easily be
653 counted, but can easily be derived from frag sizes. */
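/* A concrete illustration (added, not original text): bundles are 16 bytes
   and hold three instruction slots, so a "slot_number" here is the byte
   address at which the bundle is being assembled with the slot index (0-2)
   added in.  slot_index () below turns the difference of two such values
   into an instruction-slot count, using ">> 4" for whole bundles and
   "& 0x3" for the slot within a bundle.  */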
655 typedef struct unw_rec_list {
657 unsigned long slot_number;
659 unsigned long next_slot_number;
660 fragS *next_slot_frag;
661 struct unw_rec_list *next;
664 #define SLOT_NUM_NOT_SET (unsigned)-1
666 /* Linked list of saved prologue counts. A very poor
667 implementation of a map from label numbers to prologue counts. */
668 typedef struct label_prologue_count
670 struct label_prologue_count *next;
671 unsigned long label_number;
672 unsigned int prologue_count;
673 } label_prologue_count;
677 /* Maintain a list of unwind entries for the current function. */
681 /* Any unwind entries that should be attached to the current slot
682 that an insn is being constructed for. */
683 unw_rec_list *current_entry;
685 /* These are used to create the unwind table entry for this function. */
688 symbolS *info; /* pointer to unwind info */
689 symbolS *personality_routine;
691 subsegT saved_text_subseg;
692 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
694 /* TRUE if processing unwind directives in a prologue region. */
697 unsigned int prologue_count; /* number of .prologues seen so far */
698 /* Prologue counts at previous .label_state directives. */
699 struct label_prologue_count * saved_prologue_counts;
702 /* The input value is a negated offset from psp, and specifies an address
703 psp - offset. The encoded value E is chosen so that the address equals
704 psp + 16 - (4 * E); thus we must add 16 and divide by 4 to get the encoded value. */
706 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4)
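/* Worked example (added for clarity, not original text): a spill at
   psp - 32 is passed in as OFFSET = 32 and encodes to (32 + 16) / 4 = 12;
   decoding gives psp + 16 - 4 * 12 = psp - 32, as required.  */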
708 typedef void (*vbyte_func) PARAMS ((int, char *, char *));
710 /* Forward declarations: */
711 static void set_section PARAMS ((char *name));
712 static unsigned int set_regstack PARAMS ((unsigned int, unsigned int,
713 unsigned int, unsigned int));
714 static void dot_align (int);
715 static void dot_radix PARAMS ((int));
716 static void dot_special_section PARAMS ((int));
717 static void dot_proc PARAMS ((int));
718 static void dot_fframe PARAMS ((int));
719 static void dot_vframe PARAMS ((int));
720 static void dot_vframesp PARAMS ((int));
721 static void dot_vframepsp PARAMS ((int));
722 static void dot_save PARAMS ((int));
723 static void dot_restore PARAMS ((int));
724 static void dot_restorereg PARAMS ((int));
725 static void dot_restorereg_p PARAMS ((int));
726 static void dot_handlerdata PARAMS ((int));
727 static void dot_unwentry PARAMS ((int));
728 static void dot_altrp PARAMS ((int));
729 static void dot_savemem PARAMS ((int));
730 static void dot_saveg PARAMS ((int));
731 static void dot_savef PARAMS ((int));
732 static void dot_saveb PARAMS ((int));
733 static void dot_savegf PARAMS ((int));
734 static void dot_spill PARAMS ((int));
735 static void dot_spillreg PARAMS ((int));
736 static void dot_spillmem PARAMS ((int));
737 static void dot_spillreg_p PARAMS ((int));
738 static void dot_spillmem_p PARAMS ((int));
739 static void dot_label_state PARAMS ((int));
740 static void dot_copy_state PARAMS ((int));
741 static void dot_unwabi PARAMS ((int));
742 static void dot_personality PARAMS ((int));
743 static void dot_body PARAMS ((int));
744 static void dot_prologue PARAMS ((int));
745 static void dot_endp PARAMS ((int));
746 static void dot_template PARAMS ((int));
747 static void dot_regstk PARAMS ((int));
748 static void dot_rot PARAMS ((int));
749 static void dot_byteorder PARAMS ((int));
750 static void dot_psr PARAMS ((int));
751 static void dot_alias PARAMS ((int));
752 static void dot_ln PARAMS ((int));
753 static char *parse_section_name PARAMS ((void));
754 static void dot_xdata PARAMS ((int));
755 static void stmt_float_cons PARAMS ((int));
756 static void stmt_cons_ua PARAMS ((int));
757 static void dot_xfloat_cons PARAMS ((int));
758 static void dot_xstringer PARAMS ((int));
759 static void dot_xdata_ua PARAMS ((int));
760 static void dot_xfloat_cons_ua PARAMS ((int));
761 static void print_prmask PARAMS ((valueT mask));
762 static void dot_pred_rel PARAMS ((int));
763 static void dot_reg_val PARAMS ((int));
764 static void dot_serialize PARAMS ((int));
765 static void dot_dv_mode PARAMS ((int));
766 static void dot_entry PARAMS ((int));
767 static void dot_mem_offset PARAMS ((int));
768 static void add_unwind_entry PARAMS((unw_rec_list *ptr));
769 static symbolS *declare_register PARAMS ((const char *name, int regnum));
770 static void declare_register_set PARAMS ((const char *, int, int));
771 static unsigned int operand_width PARAMS ((enum ia64_opnd));
772 static enum operand_match_result operand_match PARAMS ((const struct ia64_opcode *idesc,
775 static int parse_operand PARAMS ((expressionS *e));
776 static struct ia64_opcode * parse_operands PARAMS ((struct ia64_opcode *));
777 static int errata_nop_necessary_p PARAMS ((struct slot *, enum ia64_unit));
778 static void build_insn PARAMS ((struct slot *, bfd_vma *));
779 static void emit_one_bundle PARAMS ((void));
780 static void fix_insn PARAMS ((fixS *, const struct ia64_operand *, valueT));
781 static bfd_reloc_code_real_type ia64_gen_real_reloc_type PARAMS ((struct symbol *sym,
782 bfd_reloc_code_real_type r_type));
783 static void insn_group_break PARAMS ((int, int, int));
784 static void mark_resource PARAMS ((struct ia64_opcode *, const struct ia64_dependency *,
785 struct rsrc *, int depind, int path));
786 static void add_qp_mutex PARAMS((valueT mask));
787 static void add_qp_imply PARAMS((int p1, int p2));
788 static void clear_qp_branch_flag PARAMS((valueT mask));
789 static void clear_qp_mutex PARAMS((valueT mask));
790 static void clear_qp_implies PARAMS((valueT p1_mask, valueT p2_mask));
791 static int has_suffix_p PARAMS((const char *, const char *));
792 static void clear_register_values PARAMS ((void));
793 static void print_dependency PARAMS ((const char *action, int depind));
794 static void instruction_serialization PARAMS ((void));
795 static void data_serialization PARAMS ((void));
796 static void remove_marked_resource PARAMS ((struct rsrc *));
797 static int is_conditional_branch PARAMS ((struct ia64_opcode *));
798 static int is_taken_branch PARAMS ((struct ia64_opcode *));
799 static int is_interruption_or_rfi PARAMS ((struct ia64_opcode *));
800 static int depends_on PARAMS ((int, struct ia64_opcode *));
801 static int specify_resource PARAMS ((const struct ia64_dependency *,
802 struct ia64_opcode *, int, struct rsrc [], int, int));
803 static int check_dv PARAMS((struct ia64_opcode *idesc));
804 static void check_dependencies PARAMS((struct ia64_opcode *));
805 static void mark_resources PARAMS((struct ia64_opcode *));
806 static void update_dependencies PARAMS((struct ia64_opcode *));
807 static void note_register_values PARAMS((struct ia64_opcode *));
808 static int qp_mutex PARAMS ((int, int, int));
809 static int resources_match PARAMS ((struct rsrc *, struct ia64_opcode *, int, int, int));
810 static void output_vbyte_mem PARAMS ((int, char *, char *));
811 static void count_output PARAMS ((int, char *, char *));
812 static void output_R1_format PARAMS ((vbyte_func, unw_record_type, int));
813 static void output_R2_format PARAMS ((vbyte_func, int, int, unsigned long));
814 static void output_R3_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
815 static void output_P1_format PARAMS ((vbyte_func, int));
816 static void output_P2_format PARAMS ((vbyte_func, int, int));
817 static void output_P3_format PARAMS ((vbyte_func, unw_record_type, int));
818 static void output_P4_format PARAMS ((vbyte_func, unsigned char *, unsigned long));
819 static void output_P5_format PARAMS ((vbyte_func, int, unsigned long));
820 static void output_P6_format PARAMS ((vbyte_func, unw_record_type, int));
821 static void output_P7_format PARAMS ((vbyte_func, unw_record_type, unsigned long, unsigned long));
822 static void output_P8_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
823 static void output_P9_format PARAMS ((vbyte_func, int, int));
824 static void output_P10_format PARAMS ((vbyte_func, int, int));
825 static void output_B1_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
826 static void output_B2_format PARAMS ((vbyte_func, unsigned long, unsigned long));
827 static void output_B3_format PARAMS ((vbyte_func, unsigned long, unsigned long));
828 static void output_B4_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
829 static char format_ab_reg PARAMS ((int, int));
830 static void output_X1_format PARAMS ((vbyte_func, unw_record_type, int, int, unsigned long,
832 static void output_X2_format PARAMS ((vbyte_func, int, int, int, int, int, unsigned long));
833 static void output_X3_format PARAMS ((vbyte_func, unw_record_type, int, int, int, unsigned long,
835 static void output_X4_format PARAMS ((vbyte_func, int, int, int, int, int, int, unsigned long));
836 static unw_rec_list *output_endp PARAMS ((void));
837 static unw_rec_list *output_prologue PARAMS ((void));
838 static unw_rec_list *output_prologue_gr PARAMS ((unsigned int, unsigned int));
839 static unw_rec_list *output_body PARAMS ((void));
840 static unw_rec_list *output_mem_stack_f PARAMS ((unsigned int));
841 static unw_rec_list *output_mem_stack_v PARAMS ((void));
842 static unw_rec_list *output_psp_gr PARAMS ((unsigned int));
843 static unw_rec_list *output_psp_sprel PARAMS ((unsigned int));
844 static unw_rec_list *output_rp_when PARAMS ((void));
845 static unw_rec_list *output_rp_gr PARAMS ((unsigned int));
846 static unw_rec_list *output_rp_br PARAMS ((unsigned int));
847 static unw_rec_list *output_rp_psprel PARAMS ((unsigned int));
848 static unw_rec_list *output_rp_sprel PARAMS ((unsigned int));
849 static unw_rec_list *output_pfs_when PARAMS ((void));
850 static unw_rec_list *output_pfs_gr PARAMS ((unsigned int));
851 static unw_rec_list *output_pfs_psprel PARAMS ((unsigned int));
852 static unw_rec_list *output_pfs_sprel PARAMS ((unsigned int));
853 static unw_rec_list *output_preds_when PARAMS ((void));
854 static unw_rec_list *output_preds_gr PARAMS ((unsigned int));
855 static unw_rec_list *output_preds_psprel PARAMS ((unsigned int));
856 static unw_rec_list *output_preds_sprel PARAMS ((unsigned int));
857 static unw_rec_list *output_fr_mem PARAMS ((unsigned int));
858 static unw_rec_list *output_frgr_mem PARAMS ((unsigned int, unsigned int));
859 static unw_rec_list *output_gr_gr PARAMS ((unsigned int, unsigned int));
860 static unw_rec_list *output_gr_mem PARAMS ((unsigned int));
861 static unw_rec_list *output_br_mem PARAMS ((unsigned int));
862 static unw_rec_list *output_br_gr PARAMS ((unsigned int, unsigned int));
863 static unw_rec_list *output_spill_base PARAMS ((unsigned int));
864 static unw_rec_list *output_unat_when PARAMS ((void));
865 static unw_rec_list *output_unat_gr PARAMS ((unsigned int));
866 static unw_rec_list *output_unat_psprel PARAMS ((unsigned int));
867 static unw_rec_list *output_unat_sprel PARAMS ((unsigned int));
868 static unw_rec_list *output_lc_when PARAMS ((void));
869 static unw_rec_list *output_lc_gr PARAMS ((unsigned int));
870 static unw_rec_list *output_lc_psprel PARAMS ((unsigned int));
871 static unw_rec_list *output_lc_sprel PARAMS ((unsigned int));
872 static unw_rec_list *output_fpsr_when PARAMS ((void));
873 static unw_rec_list *output_fpsr_gr PARAMS ((unsigned int));
874 static unw_rec_list *output_fpsr_psprel PARAMS ((unsigned int));
875 static unw_rec_list *output_fpsr_sprel PARAMS ((unsigned int));
876 static unw_rec_list *output_priunat_when_gr PARAMS ((void));
877 static unw_rec_list *output_priunat_when_mem PARAMS ((void));
878 static unw_rec_list *output_priunat_gr PARAMS ((unsigned int));
879 static unw_rec_list *output_priunat_psprel PARAMS ((unsigned int));
880 static unw_rec_list *output_priunat_sprel PARAMS ((unsigned int));
881 static unw_rec_list *output_bsp_when PARAMS ((void));
882 static unw_rec_list *output_bsp_gr PARAMS ((unsigned int));
883 static unw_rec_list *output_bsp_psprel PARAMS ((unsigned int));
884 static unw_rec_list *output_bsp_sprel PARAMS ((unsigned int));
885 static unw_rec_list *output_bspstore_when PARAMS ((void));
886 static unw_rec_list *output_bspstore_gr PARAMS ((unsigned int));
887 static unw_rec_list *output_bspstore_psprel PARAMS ((unsigned int));
888 static unw_rec_list *output_bspstore_sprel PARAMS ((unsigned int));
889 static unw_rec_list *output_rnat_when PARAMS ((void));
890 static unw_rec_list *output_rnat_gr PARAMS ((unsigned int));
891 static unw_rec_list *output_rnat_psprel PARAMS ((unsigned int));
892 static unw_rec_list *output_rnat_sprel PARAMS ((unsigned int));
893 static unw_rec_list *output_unwabi PARAMS ((unsigned long, unsigned long));
894 static unw_rec_list *output_epilogue PARAMS ((unsigned long));
895 static unw_rec_list *output_label_state PARAMS ((unsigned long));
896 static unw_rec_list *output_copy_state PARAMS ((unsigned long));
897 static unw_rec_list *output_spill_psprel PARAMS ((unsigned int, unsigned int, unsigned int));
898 static unw_rec_list *output_spill_sprel PARAMS ((unsigned int, unsigned int, unsigned int));
899 static unw_rec_list *output_spill_psprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
901 static unw_rec_list *output_spill_sprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
903 static unw_rec_list *output_spill_reg PARAMS ((unsigned int, unsigned int, unsigned int,
905 static unw_rec_list *output_spill_reg_p PARAMS ((unsigned int, unsigned int, unsigned int,
906 unsigned int, unsigned int));
907 static void process_one_record PARAMS ((unw_rec_list *, vbyte_func));
908 static void process_unw_records PARAMS ((unw_rec_list *, vbyte_func));
909 static int calc_record_size PARAMS ((unw_rec_list *));
910 static void set_imask PARAMS ((unw_rec_list *, unsigned long, unsigned long, unsigned int));
911 static unsigned long slot_index PARAMS ((unsigned long, fragS *,
912 unsigned long, fragS *,
914 static unw_rec_list *optimize_unw_records PARAMS ((unw_rec_list *));
915 static void fixup_unw_records PARAMS ((unw_rec_list *, int));
916 static int convert_expr_to_ab_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
917 static int convert_expr_to_xy_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
918 static unsigned int get_saved_prologue_count PARAMS ((unsigned long));
919 static void save_prologue_count PARAMS ((unsigned long, unsigned int));
920 static void free_saved_prologue_counts PARAMS ((void));
922 /* Determine if application register REGNUM resides only in the integer
923 unit (as opposed to the memory unit). */
925 ar_is_only_in_integer_unit (int reg)
928 return reg >= 64 && reg <= 111;
931 /* Determine if application register REGNUM resides only in the memory
932 unit (as opposed to the integer unit). */
934 ar_is_only_in_memory_unit (int reg)
937 return reg >= 0 && reg <= 47;
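/* Example of what the two predicates above report (illustration only):
   ar.bsp (17) and ar.unat (36) fall in 0..47 and are treated as M-unit
   only, while ar.pfs (64) and ar.lc (65) fall in 64..111 and are treated
   as I-unit only; any other AR number is treated as reachable from either
   unit as far as this code is concerned.  */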
940 /* Switch to section NAME and create section if necessary. It's
941 rather ugly that we have to manipulate input_line_pointer but I
942 don't see any other way to accomplish the same thing without
943 changing obj-elf.c (which may be the Right Thing, in the end). */
948 char *saved_input_line_pointer;
950 saved_input_line_pointer = input_line_pointer;
951 input_line_pointer = name;
953 input_line_pointer = saved_input_line_pointer;
956 /* Map 's' to SHF_IA_64_SHORT. */
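/* Usage illustration (added, not original): in a .section directive the
   's' attribute character requests SHF_IA_64_SHORT, e.g.

	.section .sdata.foo, "aws", @progbits

   marks the section as short (gp-relative addressable) data.  */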
959 ia64_elf_section_letter (letter, ptr_msg)
964 return SHF_IA_64_SHORT;
965 else if (letter == 'o')
966 return SHF_LINK_ORDER;
968 *ptr_msg = _("Bad .section directive: want a,o,s,w,x,M,S,G,T in string");
972 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
975 ia64_elf_section_flags (flags, attr, type)
977 int attr, type ATTRIBUTE_UNUSED;
979 if (attr & SHF_IA_64_SHORT)
980 flags |= SEC_SMALL_DATA;
985 ia64_elf_section_type (str, len)
989 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
991 if (STREQ (ELF_STRING_ia64_unwind_info))
994 if (STREQ (ELF_STRING_ia64_unwind_info_once))
997 if (STREQ (ELF_STRING_ia64_unwind))
998 return SHT_IA_64_UNWIND;
1000 if (STREQ (ELF_STRING_ia64_unwind_once))
1001 return SHT_IA_64_UNWIND;
1003 if (STREQ ("unwind"))
1004 return SHT_IA_64_UNWIND;
1011 set_regstack (ins, locs, outs, rots)
1012 unsigned int ins, locs, outs, rots;
1014 /* Size of frame. */
1017 sof = ins + locs + outs;
1020 as_bad ("Size of frame exceeds maximum of 96 registers");
1025 as_warn ("Size of rotating registers exceeds frame size");
1028 md.in.base = REG_GR + 32;
1029 md.loc.base = md.in.base + ins;
1030 md.out.base = md.loc.base + locs;
1032 md.in.num_regs = ins;
1033 md.loc.num_regs = locs;
1034 md.out.num_regs = outs;
1035 md.rot.num_regs = rots;
1042 struct label_fix *lfix;
1044 subsegT saved_subseg;
1047 if (!md.last_text_seg)
1050 saved_seg = now_seg;
1051 saved_subseg = now_subseg;
1053 subseg_set (md.last_text_seg, 0);
1055 while (md.num_slots_in_use > 0)
1056 emit_one_bundle (); /* force out queued instructions */
1058 /* In case there are labels following the last instruction, resolve
1060 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
1062 S_SET_VALUE (lfix->sym, frag_now_fix ());
1063 symbol_set_frag (lfix->sym, frag_now);
1065 CURR_SLOT.label_fixups = 0;
1066 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
1068 S_SET_VALUE (lfix->sym, frag_now_fix ());
1069 symbol_set_frag (lfix->sym, frag_now);
1071 CURR_SLOT.tag_fixups = 0;
1073 /* In case there are unwind directives following the last instruction,
1074 resolve those now. We only handle prologue, body, and endp directives
1075 here. Give an error for others. */
1076 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
1078 switch (ptr->r.type)
1084 ptr->slot_number = (unsigned long) frag_more (0);
1085 ptr->slot_frag = frag_now;
1088 /* Allow any record which doesn't have a "t" field (i.e.,
1089 doesn't relate to a particular instruction). */
1105 as_bad (_("Unwind directive not followed by an instruction."));
1109 unwind.current_entry = NULL;
1111 subseg_set (saved_seg, saved_subseg);
1113 if (md.qp.X_op == O_register)
1114 as_bad ("qualifying predicate not followed by instruction");
1118 ia64_do_align (int nbytes)
1120 char *saved_input_line_pointer = input_line_pointer;
1122 input_line_pointer = "";
1123 s_align_bytes (nbytes);
1124 input_line_pointer = saved_input_line_pointer;
1128 ia64_cons_align (nbytes)
1133 char *saved_input_line_pointer = input_line_pointer;
1134 input_line_pointer = "";
1135 s_align_bytes (nbytes);
1136 input_line_pointer = saved_input_line_pointer;
1140 /* Output COUNT bytes to a memory location. */
1141 static unsigned char *vbyte_mem_ptr = NULL;
1144 output_vbyte_mem (count, ptr, comment)
1147 char *comment ATTRIBUTE_UNUSED;
1150 if (vbyte_mem_ptr == NULL)
1155 for (x = 0; x < count; x++)
1156 *(vbyte_mem_ptr++) = ptr[x];
1159 /* Count the number of bytes required for records. */
1160 static int vbyte_count = 0;
1162 count_output (count, ptr, comment)
1164 char *ptr ATTRIBUTE_UNUSED;
1165 char *comment ATTRIBUTE_UNUSED;
1167 vbyte_count += count;
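/* Sketch of how the two vbyte_func callbacks are combined (an illustration
   of the scheme, not code copied from this file):

	vbyte_count = 0;
	process_unw_records (list, count_output);	// pass 1: size only
	vbyte_mem_ptr = (unsigned char *) frag_more (vbyte_count);
	process_unw_records (list, output_vbyte_mem);	// pass 2: emit bytes

   calc_record_size () below wraps the first pass.  */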
1171 output_R1_format (f, rtype, rlen)
1173 unw_record_type rtype;
1180 output_R3_format (f, rtype, rlen);
1186 else if (rtype != prologue)
1187 as_bad ("record type is not valid");
1189 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1190 (*f) (1, &byte, NULL);
1194 output_R2_format (f, mask, grsave, rlen)
1201 mask = (mask & 0x0f);
1202 grsave = (grsave & 0x7f);
1204 bytes[0] = (UNW_R2 | (mask >> 1));
1205 bytes[1] = (((mask & 0x01) << 7) | grsave);
1206 count += output_leb128 (bytes + 2, rlen, 0);
1207 (*f) (count, bytes, NULL);
1211 output_R3_format (f, rtype, rlen)
1213 unw_record_type rtype;
1220 output_R1_format (f, rtype, rlen);
1226 else if (rtype != prologue)
1227 as_bad ("record type is not valid");
1228 bytes[0] = (UNW_R3 | r);
1229 count = output_leb128 (bytes + 1, rlen, 0);
1230 (*f) (count + 1, bytes, NULL);
1234 output_P1_format (f, brmask)
1239 byte = UNW_P1 | (brmask & 0x1f);
1240 (*f) (1, &byte, NULL);
1244 output_P2_format (f, brmask, gr)
1250 brmask = (brmask & 0x1f);
1251 bytes[0] = UNW_P2 | (brmask >> 1);
1252 bytes[1] = (((brmask & 1) << 7) | gr);
1253 (*f) (2, bytes, NULL);
1257 output_P3_format (f, rtype, reg)
1259 unw_record_type rtype;
1304 as_bad ("Invalid record type for P3 format.");
1306 bytes[0] = (UNW_P3 | (r >> 1));
1307 bytes[1] = (((r & 1) << 7) | reg);
1308 (*f) (2, bytes, NULL);
1312 output_P4_format (f, imask, imask_size)
1314 unsigned char *imask;
1315 unsigned long imask_size;
1318 (*f) (imask_size, imask, NULL);
1322 output_P5_format (f, grmask, frmask)
1325 unsigned long frmask;
1328 grmask = (grmask & 0x0f);
1331 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1332 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1333 bytes[3] = (frmask & 0x000000ff);
1334 (*f) (4, bytes, NULL);
1338 output_P6_format (f, rtype, rmask)
1340 unw_record_type rtype;
1346 if (rtype == gr_mem)
1348 else if (rtype != fr_mem)
1349 as_bad ("Invalid record type for format P6");
1350 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1351 (*f) (1, &byte, NULL);
1355 output_P7_format (f, rtype, w1, w2)
1357 unw_record_type rtype;
1364 count += output_leb128 (bytes + 1, w1, 0);
1369 count += output_leb128 (bytes + count, w2 >> 4, 0);
1419 bytes[0] = (UNW_P7 | r);
1420 (*f) (count, bytes, NULL);
1424 output_P8_format (f, rtype, t)
1426 unw_record_type rtype;
1465 case bspstore_psprel:
1468 case bspstore_sprel:
1480 case priunat_when_gr:
1483 case priunat_psprel:
1489 case priunat_when_mem:
1496 count += output_leb128 (bytes + 2, t, 0);
1497 (*f) (count, bytes, NULL);
1501 output_P9_format (f, grmask, gr)
1508 bytes[1] = (grmask & 0x0f);
1509 bytes[2] = (gr & 0x7f);
1510 (*f) (3, bytes, NULL);
1514 output_P10_format (f, abi, context)
1521 bytes[1] = (abi & 0xff);
1522 bytes[2] = (context & 0xff);
1523 (*f) (3, bytes, NULL);
1527 output_B1_format (f, rtype, label)
1529 unw_record_type rtype;
1530 unsigned long label;
1536 output_B4_format (f, rtype, label);
1539 if (rtype == copy_state)
1541 else if (rtype != label_state)
1542 as_bad ("Invalid record type for format B1");
1544 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1545 (*f) (1, &byte, NULL);
1549 output_B2_format (f, ecount, t)
1551 unsigned long ecount;
1558 output_B3_format (f, ecount, t);
1561 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1562 count += output_leb128 (bytes + 1, t, 0);
1563 (*f) (count, bytes, NULL);
1567 output_B3_format (f, ecount, t)
1569 unsigned long ecount;
1576 output_B2_format (f, ecount, t);
1580 count += output_leb128 (bytes + 1, t, 0);
1581 count += output_leb128 (bytes + count, ecount, 0);
1582 (*f) (count, bytes, NULL);
1586 output_B4_format (f, rtype, label)
1588 unw_record_type rtype;
1589 unsigned long label;
1596 output_B1_format (f, rtype, label);
1600 if (rtype == copy_state)
1602 else if (rtype != label_state)
1603 as_bad ("Invalid record type for format B4");
1605 bytes[0] = (UNW_B4 | (r << 3));
1606 count += output_leb128 (bytes + 1, label, 0);
1607 (*f) (count, bytes, NULL);
1611 format_ab_reg (ab, reg)
1618 ret = (ab << 5) | reg;
1623 output_X1_format (f, rtype, ab, reg, t, w1)
1625 unw_record_type rtype;
1635 if (rtype == spill_sprel)
1637 else if (rtype != spill_psprel)
1638 as_bad ("Invalid record type for format X1");
1639 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1640 count += output_leb128 (bytes + 2, t, 0);
1641 count += output_leb128 (bytes + count, w1, 0);
1642 (*f) (count, bytes, NULL);
1646 output_X2_format (f, ab, reg, x, y, treg, t)
1655 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1656 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1657 count += output_leb128 (bytes + 3, t, 0);
1658 (*f) (count, bytes, NULL);
1662 output_X3_format (f, rtype, qp, ab, reg, t, w1)
1664 unw_record_type rtype;
1675 if (rtype == spill_sprel_p)
1677 else if (rtype != spill_psprel_p)
1678 as_bad ("Invalid record type for format X3");
1679 bytes[1] = ((r << 7) | (qp & 0x3f));
1680 bytes[2] = format_ab_reg (ab, reg);
1681 count += output_leb128 (bytes + 3, t, 0);
1682 count += output_leb128 (bytes + count, w1, 0);
1683 (*f) (count, bytes, NULL);
1687 output_X4_format (f, qp, ab, reg, x, y, treg, t)
1697 bytes[1] = (qp & 0x3f);
1698 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1699 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1700 count += output_leb128 (bytes + 4, t, 0);
1701 (*f) (count, bytes, NULL);
1704 /* This function allocates a record list structure and initializes its fields. */
1706 static unw_rec_list *
1707 alloc_record (unw_record_type t)
1710 ptr = xmalloc (sizeof (*ptr));
1712 ptr->slot_number = SLOT_NUM_NOT_SET;
1714 ptr->next_slot_number = 0;
1715 ptr->next_slot_frag = 0;
1719 /* Dummy unwind record used for calculating the length of the last prologue or
1722 static unw_rec_list *
1725 unw_rec_list *ptr = alloc_record (endp);
1729 static unw_rec_list *
1732 unw_rec_list *ptr = alloc_record (prologue);
1733 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1737 static unw_rec_list *
1738 output_prologue_gr (saved_mask, reg)
1739 unsigned int saved_mask;
1742 unw_rec_list *ptr = alloc_record (prologue_gr);
1743 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1744 ptr->r.record.r.grmask = saved_mask;
1745 ptr->r.record.r.grsave = reg;
1749 static unw_rec_list *
1752 unw_rec_list *ptr = alloc_record (body);
1756 static unw_rec_list *
1757 output_mem_stack_f (size)
1760 unw_rec_list *ptr = alloc_record (mem_stack_f);
1761 ptr->r.record.p.size = size;
1765 static unw_rec_list *
1766 output_mem_stack_v ()
1768 unw_rec_list *ptr = alloc_record (mem_stack_v);
1772 static unw_rec_list *
1776 unw_rec_list *ptr = alloc_record (psp_gr);
1777 ptr->r.record.p.gr = gr;
1781 static unw_rec_list *
1782 output_psp_sprel (offset)
1783 unsigned int offset;
1785 unw_rec_list *ptr = alloc_record (psp_sprel);
1786 ptr->r.record.p.spoff = offset / 4;
1790 static unw_rec_list *
1793 unw_rec_list *ptr = alloc_record (rp_when);
1797 static unw_rec_list *
1801 unw_rec_list *ptr = alloc_record (rp_gr);
1802 ptr->r.record.p.gr = gr;
1806 static unw_rec_list *
1810 unw_rec_list *ptr = alloc_record (rp_br);
1811 ptr->r.record.p.br = br;
1815 static unw_rec_list *
1816 output_rp_psprel (offset)
1817 unsigned int offset;
1819 unw_rec_list *ptr = alloc_record (rp_psprel);
1820 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1824 static unw_rec_list *
1825 output_rp_sprel (offset)
1826 unsigned int offset;
1828 unw_rec_list *ptr = alloc_record (rp_sprel);
1829 ptr->r.record.p.spoff = offset / 4;
1833 static unw_rec_list *
1836 unw_rec_list *ptr = alloc_record (pfs_when);
1840 static unw_rec_list *
1844 unw_rec_list *ptr = alloc_record (pfs_gr);
1845 ptr->r.record.p.gr = gr;
1849 static unw_rec_list *
1850 output_pfs_psprel (offset)
1851 unsigned int offset;
1853 unw_rec_list *ptr = alloc_record (pfs_psprel);
1854 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1858 static unw_rec_list *
1859 output_pfs_sprel (offset)
1860 unsigned int offset;
1862 unw_rec_list *ptr = alloc_record (pfs_sprel);
1863 ptr->r.record.p.spoff = offset / 4;
1867 static unw_rec_list *
1868 output_preds_when ()
1870 unw_rec_list *ptr = alloc_record (preds_when);
1874 static unw_rec_list *
1875 output_preds_gr (gr)
1878 unw_rec_list *ptr = alloc_record (preds_gr);
1879 ptr->r.record.p.gr = gr;
1883 static unw_rec_list *
1884 output_preds_psprel (offset)
1885 unsigned int offset;
1887 unw_rec_list *ptr = alloc_record (preds_psprel);
1888 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1892 static unw_rec_list *
1893 output_preds_sprel (offset)
1894 unsigned int offset;
1896 unw_rec_list *ptr = alloc_record (preds_sprel);
1897 ptr->r.record.p.spoff = offset / 4;
1901 static unw_rec_list *
1902 output_fr_mem (mask)
1905 unw_rec_list *ptr = alloc_record (fr_mem);
1906 ptr->r.record.p.rmask = mask;
1910 static unw_rec_list *
1911 output_frgr_mem (gr_mask, fr_mask)
1912 unsigned int gr_mask;
1913 unsigned int fr_mask;
1915 unw_rec_list *ptr = alloc_record (frgr_mem);
1916 ptr->r.record.p.grmask = gr_mask;
1917 ptr->r.record.p.frmask = fr_mask;
1921 static unw_rec_list *
1922 output_gr_gr (mask, reg)
1926 unw_rec_list *ptr = alloc_record (gr_gr);
1927 ptr->r.record.p.grmask = mask;
1928 ptr->r.record.p.gr = reg;
1932 static unw_rec_list *
1933 output_gr_mem (mask)
1936 unw_rec_list *ptr = alloc_record (gr_mem);
1937 ptr->r.record.p.rmask = mask;
1941 static unw_rec_list *
1942 output_br_mem (unsigned int mask)
1944 unw_rec_list *ptr = alloc_record (br_mem);
1945 ptr->r.record.p.brmask = mask;
1949 static unw_rec_list *
1950 output_br_gr (save_mask, reg)
1951 unsigned int save_mask;
1954 unw_rec_list *ptr = alloc_record (br_gr);
1955 ptr->r.record.p.brmask = save_mask;
1956 ptr->r.record.p.gr = reg;
1960 static unw_rec_list *
1961 output_spill_base (offset)
1962 unsigned int offset;
1964 unw_rec_list *ptr = alloc_record (spill_base);
1965 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1969 static unw_rec_list *
1972 unw_rec_list *ptr = alloc_record (unat_when);
1976 static unw_rec_list *
1980 unw_rec_list *ptr = alloc_record (unat_gr);
1981 ptr->r.record.p.gr = gr;
1985 static unw_rec_list *
1986 output_unat_psprel (offset)
1987 unsigned int offset;
1989 unw_rec_list *ptr = alloc_record (unat_psprel);
1990 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
1994 static unw_rec_list *
1995 output_unat_sprel (offset)
1996 unsigned int offset;
1998 unw_rec_list *ptr = alloc_record (unat_sprel);
1999 ptr->r.record.p.spoff = offset / 4;
2003 static unw_rec_list *
2006 unw_rec_list *ptr = alloc_record (lc_when);
2010 static unw_rec_list *
2014 unw_rec_list *ptr = alloc_record (lc_gr);
2015 ptr->r.record.p.gr = gr;
2019 static unw_rec_list *
2020 output_lc_psprel (offset)
2021 unsigned int offset;
2023 unw_rec_list *ptr = alloc_record (lc_psprel);
2024 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2028 static unw_rec_list *
2029 output_lc_sprel (offset)
2030 unsigned int offset;
2032 unw_rec_list *ptr = alloc_record (lc_sprel);
2033 ptr->r.record.p.spoff = offset / 4;
2037 static unw_rec_list *
2040 unw_rec_list *ptr = alloc_record (fpsr_when);
2044 static unw_rec_list *
2048 unw_rec_list *ptr = alloc_record (fpsr_gr);
2049 ptr->r.record.p.gr = gr;
2053 static unw_rec_list *
2054 output_fpsr_psprel (offset)
2055 unsigned int offset;
2057 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2058 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2062 static unw_rec_list *
2063 output_fpsr_sprel (offset)
2064 unsigned int offset;
2066 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2067 ptr->r.record.p.spoff = offset / 4;
2071 static unw_rec_list *
2072 output_priunat_when_gr ()
2074 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2078 static unw_rec_list *
2079 output_priunat_when_mem ()
2081 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2085 static unw_rec_list *
2086 output_priunat_gr (gr)
2089 unw_rec_list *ptr = alloc_record (priunat_gr);
2090 ptr->r.record.p.gr = gr;
2094 static unw_rec_list *
2095 output_priunat_psprel (offset)
2096 unsigned int offset;
2098 unw_rec_list *ptr = alloc_record (priunat_psprel);
2099 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2103 static unw_rec_list *
2104 output_priunat_sprel (offset)
2105 unsigned int offset;
2107 unw_rec_list *ptr = alloc_record (priunat_sprel);
2108 ptr->r.record.p.spoff = offset / 4;
2112 static unw_rec_list *
2115 unw_rec_list *ptr = alloc_record (bsp_when);
2119 static unw_rec_list *
2123 unw_rec_list *ptr = alloc_record (bsp_gr);
2124 ptr->r.record.p.gr = gr;
2128 static unw_rec_list *
2129 output_bsp_psprel (offset)
2130 unsigned int offset;
2132 unw_rec_list *ptr = alloc_record (bsp_psprel);
2133 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2137 static unw_rec_list *
2138 output_bsp_sprel (offset)
2139 unsigned int offset;
2141 unw_rec_list *ptr = alloc_record (bsp_sprel);
2142 ptr->r.record.p.spoff = offset / 4;
2146 static unw_rec_list *
2147 output_bspstore_when ()
2149 unw_rec_list *ptr = alloc_record (bspstore_when);
2153 static unw_rec_list *
2154 output_bspstore_gr (gr)
2157 unw_rec_list *ptr = alloc_record (bspstore_gr);
2158 ptr->r.record.p.gr = gr;
2162 static unw_rec_list *
2163 output_bspstore_psprel (offset)
2164 unsigned int offset;
2166 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2167 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2171 static unw_rec_list *
2172 output_bspstore_sprel (offset)
2173 unsigned int offset;
2175 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2176 ptr->r.record.p.spoff = offset / 4;
2180 static unw_rec_list *
2183 unw_rec_list *ptr = alloc_record (rnat_when);
2187 static unw_rec_list *
2191 unw_rec_list *ptr = alloc_record (rnat_gr);
2192 ptr->r.record.p.gr = gr;
2196 static unw_rec_list *
2197 output_rnat_psprel (offset)
2198 unsigned int offset;
2200 unw_rec_list *ptr = alloc_record (rnat_psprel);
2201 ptr->r.record.p.pspoff = ENCODED_PSP_OFFSET (offset);
2205 static unw_rec_list *
2206 output_rnat_sprel (offset)
2207 unsigned int offset;
2209 unw_rec_list *ptr = alloc_record (rnat_sprel);
2210 ptr->r.record.p.spoff = offset / 4;
2214 static unw_rec_list *
2215 output_unwabi (abi, context)
2217 unsigned long context;
2219 unw_rec_list *ptr = alloc_record (unwabi);
2220 ptr->r.record.p.abi = abi;
2221 ptr->r.record.p.context = context;
2225 static unw_rec_list *
2226 output_epilogue (unsigned long ecount)
2228 unw_rec_list *ptr = alloc_record (epilogue);
2229 ptr->r.record.b.ecount = ecount;
2233 static unw_rec_list *
2234 output_label_state (unsigned long label)
2236 unw_rec_list *ptr = alloc_record (label_state);
2237 ptr->r.record.b.label = label;
2241 static unw_rec_list *
2242 output_copy_state (unsigned long label)
2244 unw_rec_list *ptr = alloc_record (copy_state);
2245 ptr->r.record.b.label = label;
2249 static unw_rec_list *
2250 output_spill_psprel (ab, reg, offset)
2253 unsigned int offset;
2255 unw_rec_list *ptr = alloc_record (spill_psprel);
2256 ptr->r.record.x.ab = ab;
2257 ptr->r.record.x.reg = reg;
2258 ptr->r.record.x.pspoff = ENCODED_PSP_OFFSET (offset);
2262 static unw_rec_list *
2263 output_spill_sprel (ab, reg, offset)
2266 unsigned int offset;
2268 unw_rec_list *ptr = alloc_record (spill_sprel);
2269 ptr->r.record.x.ab = ab;
2270 ptr->r.record.x.reg = reg;
2271 ptr->r.record.x.spoff = offset / 4;
2275 static unw_rec_list *
2276 output_spill_psprel_p (ab, reg, offset, predicate)
2279 unsigned int offset;
2280 unsigned int predicate;
2282 unw_rec_list *ptr = alloc_record (spill_psprel_p);
2283 ptr->r.record.x.ab = ab;
2284 ptr->r.record.x.reg = reg;
2285 ptr->r.record.x.pspoff = ENCODED_PSP_OFFSET (offset);
2286 ptr->r.record.x.qp = predicate;
2290 static unw_rec_list *
2291 output_spill_sprel_p (ab, reg, offset, predicate)
2294 unsigned int offset;
2295 unsigned int predicate;
2297 unw_rec_list *ptr = alloc_record (spill_sprel_p);
2298 ptr->r.record.x.ab = ab;
2299 ptr->r.record.x.reg = reg;
2300 ptr->r.record.x.spoff = offset / 4;
2301 ptr->r.record.x.qp = predicate;
2305 static unw_rec_list *
2306 output_spill_reg (ab, reg, targ_reg, xy)
2309 unsigned int targ_reg;
2312 unw_rec_list *ptr = alloc_record (spill_reg);
2313 ptr->r.record.x.ab = ab;
2314 ptr->r.record.x.reg = reg;
2315 ptr->r.record.x.treg = targ_reg;
2316 ptr->r.record.x.xy = xy;
2320 static unw_rec_list *
2321 output_spill_reg_p (ab, reg, targ_reg, xy, predicate)
2324 unsigned int targ_reg;
2326 unsigned int predicate;
2328 unw_rec_list *ptr = alloc_record (spill_reg_p);
2329 ptr->r.record.x.ab = ab;
2330 ptr->r.record.x.reg = reg;
2331 ptr->r.record.x.treg = targ_reg;
2332 ptr->r.record.x.xy = xy;
2333 ptr->r.record.x.qp = predicate;
2337 /* Given a unw_rec_list record, output it in the correct format with the
2338 specified function. */
2341 process_one_record (ptr, f)
2345 unsigned long fr_mask, gr_mask;
2347 switch (ptr->r.type)
2349 /* This is a dummy record that takes up no space in the output. */
2357 /* These are taken care of by prologue/prologue_gr. */
2362 if (ptr->r.type == prologue_gr)
2363 output_R2_format (f, ptr->r.record.r.grmask,
2364 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2366 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2368 /* Output descriptor(s) for union of register spills (if any). */
2369 gr_mask = ptr->r.record.r.mask.gr_mem;
2370 fr_mask = ptr->r.record.r.mask.fr_mem;
2373 if ((fr_mask & ~0xfUL) == 0)
2374 output_P6_format (f, fr_mem, fr_mask);
2377 output_P5_format (f, gr_mask, fr_mask);
2382 output_P6_format (f, gr_mem, gr_mask);
2383 if (ptr->r.record.r.mask.br_mem)
2384 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2386 /* output imask descriptor if necessary: */
2387 if (ptr->r.record.r.mask.i)
2388 output_P4_format (f, ptr->r.record.r.mask.i,
2389 ptr->r.record.r.imask_size);
2393 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2397 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2398 ptr->r.record.p.size);
2411 output_P3_format (f, ptr->r.type, ptr->r.record.p.gr);
2414 output_P3_format (f, rp_br, ptr->r.record.p.br);
2417 output_P7_format (f, psp_sprel, ptr->r.record.p.spoff, 0);
2425 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2434 output_P7_format (f, ptr->r.type, ptr->r.record.p.pspoff, 0);
2444 case bspstore_sprel:
2446 output_P8_format (f, ptr->r.type, ptr->r.record.p.spoff);
2449 output_P9_format (f, ptr->r.record.p.grmask, ptr->r.record.p.gr);
2452 output_P2_format (f, ptr->r.record.p.brmask, ptr->r.record.p.gr);
2455 as_bad ("spill_mask record unimplemented.");
2457 case priunat_when_gr:
2458 case priunat_when_mem:
2462 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2464 case priunat_psprel:
2466 case bspstore_psprel:
2468 output_P8_format (f, ptr->r.type, ptr->r.record.p.pspoff);
2471 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2474 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2478 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2481 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2482 ptr->r.record.x.reg, ptr->r.record.x.t,
2483 ptr->r.record.x.pspoff);
2486 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2487 ptr->r.record.x.reg, ptr->r.record.x.t,
2488 ptr->r.record.x.spoff);
2491 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2492 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2493 ptr->r.record.x.treg, ptr->r.record.x.t);
2495 case spill_psprel_p:
2496 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2497 ptr->r.record.x.ab, ptr->r.record.x.reg,
2498 ptr->r.record.x.t, ptr->r.record.x.pspoff);
2501 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2502 ptr->r.record.x.ab, ptr->r.record.x.reg,
2503 ptr->r.record.x.t, ptr->r.record.x.spoff);
2506 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2507 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2508 ptr->r.record.x.xy, ptr->r.record.x.treg,
2512 as_bad ("record type not valid");
2517 /* Given a list of unw_rec_list records, process all of them with
2518 the specified function. */
2520 process_unw_records (list, f)
2525 for (ptr = list; ptr; ptr = ptr->next)
2526 process_one_record (ptr, f);
2529 /* Determine the size of a record list in bytes. */
2531 calc_record_size (list)
2535 process_unw_records (list, count_output);
2539 /* Update IMASK bitmask to reflect the fact that one or more registers
2540 of type TYPE are saved starting at instruction with index T. If N
2541 bits are set in REGMASK, it is assumed that instructions T through
2542 T+N-1 save these registers.
2546 1: instruction saves next fp reg
2547 2: instruction saves next general reg
2548 3: instruction saves next branch reg */
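/* Example of the packing (added for clarity): each instruction of the
   region gets a 2-bit field in the imask, four fields per byte, most
   significant field first.  Saving two FP registers (TYPE 1) at
   instruction indices t and t+1 therefore ORs the value 1 into two
   consecutive 2-bit fields; a branch-register save (TYPE 3) at t+2
   writes 3 into the next field.  */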
2550 set_imask (region, regmask, t, type)
2551 unw_rec_list *region;
2552 unsigned long regmask;
2556 unsigned char *imask;
2557 unsigned long imask_size;
2561 imask = region->r.record.r.mask.i;
2562 imask_size = region->r.record.r.imask_size;
2565 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2566 imask = xmalloc (imask_size);
2567 memset (imask, 0, imask_size);
2569 region->r.record.r.imask_size = imask_size;
2570 region->r.record.r.mask.i = imask;
2574 pos = 2 * (3 - t % 4);
2577 if (i >= imask_size)
2579 as_bad ("Ignoring attempt to spill beyond end of region");
2583 imask[i] |= (type & 0x3) << pos;
2585 regmask &= (regmask - 1);
2595 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2596 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2597 containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates
2601 slot_index (slot_addr, slot_frag, first_addr, first_frag, before_relax)
2602 unsigned long slot_addr;
2604 unsigned long first_addr;
2608 unsigned long index = 0;
2610 /* First time we are called, the initial address and frag are invalid. */
2611 if (first_addr == 0)
2614 /* If the two addresses are in different frags, then we need to add in
2615 the remaining size of this frag, and then the entire size of intermediate
2617 while (slot_frag != first_frag)
2619 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2623 /* We can get the final addresses only during and after
2625 if (first_frag->fr_next && first_frag->fr_next->fr_address)
2626 index += 3 * ((first_frag->fr_next->fr_address
2627 - first_frag->fr_address
2628 - first_frag->fr_fix) >> 4);
2631 /* We don't know what the final addresses will be. We try our
2632 best to estimate. */
2633 switch (first_frag->fr_type)
2639 as_fatal ("only constant space allocation is supported");
2645 /* Take alignment into account. Assume the worst case
2646 before relaxation. */
2647 index += 3 * ((1 << first_frag->fr_offset) >> 4);
2651 if (first_frag->fr_symbol)
2653 as_fatal ("only constant offsets are supported");
2657 index += 3 * (first_frag->fr_offset >> 4);
2661 /* Add in the full size of the frag converted to instruction slots. */
2662 index += 3 * (first_frag->fr_fix >> 4);
2663 /* Subtract away the initial part before first_addr. */
2664 index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2665 + ((first_addr & 0x3) - (start_addr & 0x3)));
2667 /* Move to the beginning of the next frag. */
2668 first_frag = first_frag->fr_next;
2669 first_addr = (unsigned long) &first_frag->fr_literal;
2672 /* Add in the used part of the last frag. */
2673 index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2674 + ((slot_addr & 0x3) - (first_addr & 0x3)));
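/* A small worked example of the arithmetic above (illustrative only):
   slot "addresses" carry the bundle address in their upper bits and the
   slot number (0..2) in the low two bits, and every 16-byte bundle holds
   three instruction slots.  Going from slot 1 of one bundle to slot 0 of
   the next bundle therefore gives 3 * 1 + (0 - 1) = 2 slots.  */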
2678 /* Optimize unwind record directives. */
2680 static unw_rec_list *
2681 optimize_unw_records (list)
2687 /* If the only unwind record is ".prologue" or ".prologue" followed
2688 by ".body", then we can optimize the unwind directives away. */
2689 if (list->r.type == prologue
2690 && (list->next->r.type == endp
2691 || (list->next->r.type == body && list->next->next->r.type == endp)))
2697 /* Given a complete record list, process any records which have
2698 unresolved fields (i.e., length counts for a prologue). After
2699 this has been run, all necessary information should be available
2700 within each record to generate an image. */
2703 fixup_unw_records (list, before_relax)
2707 unw_rec_list *ptr, *region = 0;
2708 unsigned long first_addr = 0, rlen = 0, t;
2709 fragS *first_frag = 0;
2711 for (ptr = list; ptr; ptr = ptr->next)
2713 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2714 as_bad ("Insn slot not set in unwind record.");
2715 t = slot_index (ptr->slot_number, ptr->slot_frag,
2716 first_addr, first_frag, before_relax);
2717 switch (ptr->r.type)
2725 unsigned long last_addr = 0;
2726 fragS *last_frag = NULL;
2728 first_addr = ptr->slot_number;
2729 first_frag = ptr->slot_frag;
2730 /* Find either the next body/prologue start, or the end of
2731 the function, and determine the size of the region. */
2732 for (last = ptr->next; last != NULL; last = last->next)
2733 if (last->r.type == prologue || last->r.type == prologue_gr
2734 || last->r.type == body || last->r.type == endp)
2736 last_addr = last->slot_number;
2737 last_frag = last->slot_frag;
2740 size = slot_index (last_addr, last_frag, first_addr, first_frag,
2742 rlen = ptr->r.record.r.rlen = size;
2743 if (ptr->r.type == body)
2744 /* End of region. */
2752 ptr->r.record.b.t = rlen - 1 - t;
2754 /* This happens when a memory-stack-less procedure uses a
2755 ".restore sp" directive at the end of a region to pop the frame state. */
2757 ptr->r.record.b.t = 0;
2768 case priunat_when_gr:
2769 case priunat_when_mem:
2773 ptr->r.record.p.t = t;
2781 case spill_psprel_p:
2782 ptr->r.record.x.t = t;
2788 as_bad ("frgr_mem record before region record!\n");
2791 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2792 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2793 set_imask (region, ptr->r.record.p.frmask, t, 1);
2794 set_imask (region, ptr->r.record.p.grmask, t, 2);
2799 as_bad ("fr_mem record before region record!\n");
2802 region->r.record.r.mask.fr_mem |= ptr->r.record.p.rmask;
2803 set_imask (region, ptr->r.record.p.rmask, t, 1);
2808 as_bad ("gr_mem record before region record!\n");
2811 region->r.record.r.mask.gr_mem |= ptr->r.record.p.rmask;
2812 set_imask (region, ptr->r.record.p.rmask, t, 2);
2817 as_bad ("br_mem record before region record!\n");
2820 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2821 set_imask (region, ptr->r.record.p.brmask, t, 3);
2827 as_bad ("gr_gr record before region record!\n");
2830 set_imask (region, ptr->r.record.p.grmask, t, 2);
2835 as_bad ("br_gr record before region record!\n");
2838 set_imask (region, ptr->r.record.p.brmask, t, 3);
2847 /* Estimate the size of a frag before relaxing. We only have one type of frag
2848 to handle here, which is the unwind info frag. */
2851 ia64_estimate_size_before_relax (fragS *frag,
2852 asection *segtype ATTRIBUTE_UNUSED)
2857 /* ??? This code is identical to the first part of ia64_convert_frag. */
2858 list = (unw_rec_list *) frag->fr_opcode;
2859 fixup_unw_records (list, 0);
2861 len = calc_record_size (list);
2862 /* pad to pointer-size boundary. */
2863 pad = len % md.pointer_size;
2865 len += md.pointer_size - pad;
2866 /* Add 8 for the header + a pointer for the personality offset. */
2867 size = len + 8 + md.pointer_size;
2869 /* fr_var carries the max_chars that we created the fragment with.
2870 We must, of course, have allocated enough memory earlier. */
2871 assert (frag->fr_var >= size);
2873 return frag->fr_fix + size;
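/* For example (illustrative, assuming the LP64 case where md.pointer_size
   is 8): a 17-byte record list is padded to 24 bytes, so the frag grows by
   size = 24 + 8 + 8 = 40 bytes -- the descriptors, the 8-byte header, and
   the trailing personality pointer slot.  */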
2876 /* This function converts a rs_machine_dependent variant frag into a
2877 normal fill frag with the unwind image from the record list. */
2879 ia64_convert_frag (fragS *frag)
2885 /* ??? This code is identical to ia64_estimate_size_before_relax. */
2886 list = (unw_rec_list *) frag->fr_opcode;
2887 fixup_unw_records (list, 0);
2889 len = calc_record_size (list);
2890 /* pad to pointer-size boundary. */
2891 pad = len % md.pointer_size;
2893 len += md.pointer_size - pad;
2894 /* Add 8 for the header + a pointer for the personality offset. */
2895 size = len + 8 + md.pointer_size;
2897 /* fr_var carries the max_chars that we created the fragment with.
2898 We must, of course, have allocated enough memory earlier. */
2899 assert (frag->fr_var >= size);
2901 /* Initialize the header area. fr_offset is initialized with
2902 unwind.personality_routine. */
2903 if (frag->fr_offset)
2905 if (md.flags & EF_IA_64_ABI64)
2906 flag_value = (bfd_vma) 3 << 32;
2908 /* 32-bit unwind info block. */
2909 flag_value = (bfd_vma) 0x1003 << 32;
2914 md_number_to_chars (frag->fr_literal,
2915 (((bfd_vma) 1 << 48) /* Version. */
2916 | flag_value /* U & E handler flags. */
2917 | (len / md.pointer_size)), /* Length. */
2920 /* Skip the header. */
2921 vbyte_mem_ptr = frag->fr_literal + 8;
2922 process_unw_records (list, output_vbyte_mem);
2924 /* Fill the padding bytes with zeros. */
2926 md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0,
2927 md.pointer_size - pad);
2929 frag->fr_fix += size;
2930 frag->fr_type = rs_fill;
2932 frag->fr_offset = 0;
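/* Illustration of the header word written above (assuming LP64 with a
   personality routine and len = 24): the value is
   ((bfd_vma) 1 << 48) | ((bfd_vma) 3 << 32) | (24 / 8), i.e. version 1,
   the U and E handler flag bits, and a length of three 8-byte words.  */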
2936 convert_expr_to_ab_reg (e, ab, regp)
2943 if (e->X_op != O_register)
2946 reg = e->X_add_number;
2947 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
2950 *regp = reg - REG_GR;
2952 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
2953 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
2956 *regp = reg - REG_FR;
2958 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
2961 *regp = reg - REG_BR;
2968 case REG_PR: *regp = 0; break;
2969 case REG_PSP: *regp = 1; break;
2970 case REG_PRIUNAT: *regp = 2; break;
2971 case REG_BR + 0: *regp = 3; break;
2972 case REG_AR + AR_BSP: *regp = 4; break;
2973 case REG_AR + AR_BSPSTORE: *regp = 5; break;
2974 case REG_AR + AR_RNAT: *regp = 6; break;
2975 case REG_AR + AR_UNAT: *regp = 7; break;
2976 case REG_AR + AR_FPSR: *regp = 8; break;
2977 case REG_AR + AR_PFS: *regp = 9; break;
2978 case REG_AR + AR_LC: *regp = 10; break;
2988 convert_expr_to_xy_reg (e, xy, regp)
2995 if (e->X_op != O_register)
2998 reg = e->X_add_number;
3000 if (/* reg >= REG_GR && */ reg <= (REG_GR + 127))
3003 *regp = reg - REG_GR;
3005 else if (reg >= REG_FR && reg <= (REG_FR + 127))
3008 *regp = reg - REG_FR;
3010 else if (reg >= REG_BR && reg <= (REG_BR + 7))
3013 *regp = reg - REG_BR;
3023 /* The current frag is an alignment frag. */
3024 align_frag = frag_now;
3025 s_align_bytes (arg);
3030 int dummy ATTRIBUTE_UNUSED;
3035 radix = *input_line_pointer++;
3037 if (radix != 'C' && !is_end_of_line[(unsigned char) radix])
3039 as_bad ("Radix `%c' unsupported", *input_line_pointer);
3040 ignore_rest_of_line ();
3045 /* Helper function for .loc directives. If the assembler is not generating
3046 line number info, then we need to remember which instructions have a .loc
3047 directive, and only call dwarf2_gen_line_info for those instructions. */
3052 CURR_SLOT.loc_directive_seen = 1;
3053 dwarf2_directive_loc (x);
3056 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
3058 dot_special_section (which)
3061 set_section ((char *) special_section_name[which]);
3065 add_unwind_entry (ptr)
3069 unwind.tail->next = ptr;
3074 /* The current entry can in fact be a chain of unwind entries. */
3075 if (unwind.current_entry == NULL)
3076 unwind.current_entry = ptr;
3081 int dummy ATTRIBUTE_UNUSED;
3087 if (e.X_op != O_constant)
3088 as_bad ("Operand to .fframe must be a constant");
3090 add_unwind_entry (output_mem_stack_f (e.X_add_number));
3095 int dummy ATTRIBUTE_UNUSED;
3101 reg = e.X_add_number - REG_GR;
3102 if (e.X_op == O_register && reg < 128)
3104 add_unwind_entry (output_mem_stack_v ());
3105 if (! (unwind.prologue_mask & 2))
3106 add_unwind_entry (output_psp_gr (reg));
3109 as_bad ("First operand to .vframe must be a general register");
3113 dot_vframesp (dummy)
3114 int dummy ATTRIBUTE_UNUSED;
3119 if (e.X_op == O_constant)
3121 add_unwind_entry (output_mem_stack_v ());
3122 add_unwind_entry (output_psp_sprel (e.X_add_number));
3125 as_bad ("Operand to .vframesp must be a constant (sp-relative offset)");
3129 dot_vframepsp (dummy)
3130 int dummy ATTRIBUTE_UNUSED;
3135 if (e.X_op == O_constant)
3137 add_unwind_entry (output_mem_stack_v ());
3138 add_unwind_entry (output_psp_sprel (e.X_add_number));
3141 as_bad ("Operand to .vframepsp must be a constant (psp-relative offset)");
3146 int dummy ATTRIBUTE_UNUSED;
3152 sep = parse_operand (&e1);
3154 as_bad ("No second operand to .save");
3155 sep = parse_operand (&e2);
3157 reg1 = e1.X_add_number;
3158 reg2 = e2.X_add_number - REG_GR;
3160 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
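/* Typical input handled here (the operand choice is only an example):

	.save ar.pfs, r33
	.save rp, r32

   i.e. the first operand names the preserved resource and the second the
   general register it was copied to.  */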
3161 if (e1.X_op == O_register)
3163 if (e2.X_op == O_register && reg2 >= 0 && reg2 < 128)
3167 case REG_AR + AR_BSP:
3168 add_unwind_entry (output_bsp_when ());
3169 add_unwind_entry (output_bsp_gr (reg2));
3171 case REG_AR + AR_BSPSTORE:
3172 add_unwind_entry (output_bspstore_when ());
3173 add_unwind_entry (output_bspstore_gr (reg2));
3175 case REG_AR + AR_RNAT:
3176 add_unwind_entry (output_rnat_when ());
3177 add_unwind_entry (output_rnat_gr (reg2));
3179 case REG_AR + AR_UNAT:
3180 add_unwind_entry (output_unat_when ());
3181 add_unwind_entry (output_unat_gr (reg2));
3183 case REG_AR + AR_FPSR:
3184 add_unwind_entry (output_fpsr_when ());
3185 add_unwind_entry (output_fpsr_gr (reg2));
3187 case REG_AR + AR_PFS:
3188 add_unwind_entry (output_pfs_when ());
3189 if (! (unwind.prologue_mask & 4))
3190 add_unwind_entry (output_pfs_gr (reg2));
3192 case REG_AR + AR_LC:
3193 add_unwind_entry (output_lc_when ());
3194 add_unwind_entry (output_lc_gr (reg2));
3197 add_unwind_entry (output_rp_when ());
3198 if (! (unwind.prologue_mask & 8))
3199 add_unwind_entry (output_rp_gr (reg2));
3202 add_unwind_entry (output_preds_when ());
3203 if (! (unwind.prologue_mask & 1))
3204 add_unwind_entry (output_preds_gr (reg2));
3207 add_unwind_entry (output_priunat_when_gr ());
3208 add_unwind_entry (output_priunat_gr (reg2));
3211 as_bad ("First operand not a valid register");
3215 as_bad ("Second operand not a valid register");
3218 as_bad ("First operand not a register");
3223 int dummy ATTRIBUTE_UNUSED;
3226 unsigned long ecount; /* # of _additional_ regions to pop */
3229 sep = parse_operand (&e1);
3230 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3232 as_bad ("First operand to .restore must be stack pointer (sp)");
3238 parse_operand (&e2);
3239 if (e2.X_op != O_constant || e2.X_add_number < 0)
3241 as_bad ("Second operand to .restore must be a constant >= 0");
3244 ecount = e2.X_add_number;
3247 ecount = unwind.prologue_count - 1;
3249 if (ecount >= unwind.prologue_count)
3251 as_bad ("Epilogue count of %lu exceeds number of nested prologues (%u)",
3252 ecount + 1, unwind.prologue_count);
3256 add_unwind_entry (output_epilogue (ecount));
3258 if (ecount < unwind.prologue_count)
3259 unwind.prologue_count -= ecount + 1;
3261 unwind.prologue_count = 0;
3265 dot_restorereg (dummy)
3266 int dummy ATTRIBUTE_UNUSED;
3268 unsigned int ab, reg;
3273 if (!convert_expr_to_ab_reg (&e, &ab, &reg))
3275 as_bad ("First operand to .restorereg must be a preserved register");
3278 add_unwind_entry (output_spill_reg (ab, reg, 0, 0));
3282 dot_restorereg_p (dummy)
3283 int dummy ATTRIBUTE_UNUSED;
3285 unsigned int qp, ab, reg;
3289 sep = parse_operand (&e1);
3292 as_bad ("No second operand to .restorereg.p");
3296 parse_operand (&e2);
3298 qp = e1.X_add_number - REG_P;
3299 if (e1.X_op != O_register || qp > 63)
3301 as_bad ("First operand to .restorereg.p must be a predicate");
3305 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3307 as_bad ("Second operand to .restorereg.p must be a preserved register");
3310 add_unwind_entry (output_spill_reg_p (ab, reg, 0, 0, qp));
3313 static char *special_linkonce_name[] =
3315 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
3319 start_unwind_section (const segT text_seg, int sec_index, int linkonce_empty)
3322 /* Use a slightly ugly scheme to derive the unwind section names from
3323 the text section name:
3325 text sect. unwind table sect.
3326 name: name: comments:
3327 ---------- ----------------- --------------------------------
3329 .text.foo .IA_64.unwind.text.foo
3330 .foo .IA_64.unwind.foo
3332 .gnu.linkonce.ia64unw.foo
3333 _info .IA_64.unwind_info gas issues error message (ditto)
3334 _infoFOO .IA_64.unwind_infoFOO gas issues error message (ditto)
3336 This mapping is done so that:
3338 (a) An object file with unwind info only in .text will use
3339 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3340 This follows the letter of the ABI and also ensures backwards
3341 compatibility with older toolchains.
3343 (b) An object file with unwind info in multiple text sections
3344 will use separate unwind sections for each text section.
3345 This allows us to properly set the "sh_info" and "sh_link"
3346 fields in SHT_IA_64_UNWIND as required by the ABI and also
3347 lets GNU ld support programs with multiple segments
3348 containing unwind info (as might be the case for certain
3349 embedded applications).
3351 (c) An error is issued if there would be a name clash. */
3354 const char *text_name, *sec_text_name;
3356 const char *prefix = special_section_name [sec_index];
3358 size_t prefix_len, suffix_len, sec_name_len;
3360 sec_text_name = segment_name (text_seg);
3361 text_name = sec_text_name;
3362 if (strncmp (text_name, "_info", 5) == 0)
3364 as_bad ("Illegal section name `%s' (causes unwind section name clash)",
3366 ignore_rest_of_line ();
3369 if (strcmp (text_name, ".text") == 0)
3372 /* Build the unwind section name by appending the (possibly stripped)
3373 text section name to the unwind prefix. */
3375 if (strncmp (text_name, ".gnu.linkonce.t.",
3376 sizeof (".gnu.linkonce.t.") - 1) == 0)
3378 prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND];
3379 suffix += sizeof (".gnu.linkonce.t.") - 1;
3381 else if (linkonce_empty)
3384 prefix_len = strlen (prefix);
3385 suffix_len = strlen (suffix);
3386 sec_name_len = prefix_len + suffix_len;
3387 sec_name = alloca (sec_name_len + 1);
3388 memcpy (sec_name, prefix, prefix_len);
3389 memcpy (sec_name + prefix_len, suffix, suffix_len);
3390 sec_name [sec_name_len] = '\0';
3392 /* Handle COMDAT group. */
3393 if (suffix == text_name && (text_seg->flags & SEC_LINK_ONCE) != 0)
3396 size_t len, group_name_len;
3397 const char *group_name = elf_group_name (text_seg);
3399 if (group_name == NULL)
3401 as_bad ("Group section `%s' has no group signature",
3403 ignore_rest_of_line ();
3406 /* We have to construct a fake section directive. */
3407 group_name_len = strlen (group_name);
3409 + 16 /* ,"aG",@progbits, */
3410 + group_name_len /* ,group_name */
3413 section = alloca (len + 1);
3414 memcpy (section, sec_name, sec_name_len);
3415 memcpy (section + sec_name_len, ",\"aG\",@progbits,", 16);
3416 memcpy (section + sec_name_len + 16, group_name, group_name_len);
3417 memcpy (section + len - 7, ",comdat", 7);
3418 section [len] = '\0';
3419 set_section (section);
3423 set_section (sec_name);
3424 bfd_set_section_flags (stdoutput, now_seg,
3425 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3428 elf_linked_to_section (now_seg) = text_seg;
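/* As an illustration of the COMDAT path above (section and group names are
   hypothetical): for a grouped text section .text.foo in group "foo", the
   constructed directive string for the unwind table section would be

	.IA_64.unwind.text.foo,"aG",@progbits,foo,comdat

   which set_section () then treats like an ordinary .section argument.  */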
3432 generate_unwind_image (const segT text_seg)
3437 /* Mark the end of the unwind info, so that we can compute the size of the
3438 last unwind region. */
3439 add_unwind_entry (output_endp ());
3441 /* Force out pending instructions, to make sure all unwind records have
3442 a valid slot_number field. */
3443 ia64_flush_insns ();
3445 /* Generate the unwind record. */
3446 list = optimize_unw_records (unwind.list);
3447 fixup_unw_records (list, 1);
3448 size = calc_record_size (list);
3450 if (size > 0 || unwind.force_unwind_entry)
3452 unwind.force_unwind_entry = 0;
3453 /* pad to pointer-size boundary. */
3454 pad = size % md.pointer_size;
3456 size += md.pointer_size - pad;
3457 /* Add 8 for the header + a pointer for the personality offset. */
3459 size += 8 + md.pointer_size;
3462 /* If there are unwind records, switch sections, and output the info. */
3466 bfd_reloc_code_real_type reloc;
3468 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO, 0);
3470 /* Make sure the section has 4 byte alignment for ILP32 and
3471 8 byte alignment for LP64. */
3472 frag_align (md.pointer_size_shift, 0, 0);
3473 record_alignment (now_seg, md.pointer_size_shift);
3475 /* Set expression which points to start of unwind descriptor area. */
3476 unwind.info = expr_build_dot ();
3478 frag_var (rs_machine_dependent, size, size, 0, 0,
3479 (offsetT) (long) unwind.personality_routine,
3482 /* Add the personality address to the image. */
3483 if (unwind.personality_routine != 0)
3485 exp.X_op = O_symbol;
3486 exp.X_add_symbol = unwind.personality_routine;
3487 exp.X_add_number = 0;
3489 if (md.flags & EF_IA_64_BE)
3491 if (md.flags & EF_IA_64_ABI64)
3492 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
3494 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
3498 if (md.flags & EF_IA_64_ABI64)
3499 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
3501 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
3504 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
3505 md.pointer_size, &exp, 0, reloc);
3506 unwind.personality_routine = 0;
3510 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO, 1);
3512 free_saved_prologue_counts ();
3513 unwind.list = unwind.tail = unwind.current_entry = NULL;
3517 dot_handlerdata (dummy)
3518 int dummy ATTRIBUTE_UNUSED;
3520 unwind.force_unwind_entry = 1;
3522 /* Remember which segment we're in so we can switch back after .endp */
3523 unwind.saved_text_seg = now_seg;
3524 unwind.saved_text_subseg = now_subseg;
3526 /* Generate unwind info into unwind-info section and then leave that
3527 section as the currently active one so dataXX directives go into
3528 the language specific data area of the unwind info block. */
3529 generate_unwind_image (now_seg);
3530 demand_empty_rest_of_line ();
3534 dot_unwentry (dummy)
3535 int dummy ATTRIBUTE_UNUSED;
3537 unwind.force_unwind_entry = 1;
3538 demand_empty_rest_of_line ();
3543 int dummy ATTRIBUTE_UNUSED;
3549 reg = e.X_add_number - REG_BR;
3550 if (e.X_op == O_register && reg < 8)
3551 add_unwind_entry (output_rp_br (reg));
3553 as_bad ("First operand not a valid branch register");
3557 dot_savemem (psprel)
3564 sep = parse_operand (&e1);
3566 as_bad ("No second operand to .save%ssp", psprel ? "p" : "");
3567 sep = parse_operand (&e2);
3569 reg1 = e1.X_add_number;
3570 val = e2.X_add_number;
3572 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3573 if (e1.X_op == O_register)
3575 if (e2.X_op == O_constant)
3579 case REG_AR + AR_BSP:
3580 add_unwind_entry (output_bsp_when ());
3581 add_unwind_entry ((psprel
3583 : output_bsp_sprel) (val));
3585 case REG_AR + AR_BSPSTORE:
3586 add_unwind_entry (output_bspstore_when ());
3587 add_unwind_entry ((psprel
3588 ? output_bspstore_psprel
3589 : output_bspstore_sprel) (val));
3591 case REG_AR + AR_RNAT:
3592 add_unwind_entry (output_rnat_when ());
3593 add_unwind_entry ((psprel
3594 ? output_rnat_psprel
3595 : output_rnat_sprel) (val));
3597 case REG_AR + AR_UNAT:
3598 add_unwind_entry (output_unat_when ());
3599 add_unwind_entry ((psprel
3600 ? output_unat_psprel
3601 : output_unat_sprel) (val));
3603 case REG_AR + AR_FPSR:
3604 add_unwind_entry (output_fpsr_when ());
3605 add_unwind_entry ((psprel
3606 ? output_fpsr_psprel
3607 : output_fpsr_sprel) (val));
3609 case REG_AR + AR_PFS:
3610 add_unwind_entry (output_pfs_when ());
3611 add_unwind_entry ((psprel
3613 : output_pfs_sprel) (val));
3615 case REG_AR + AR_LC:
3616 add_unwind_entry (output_lc_when ());
3617 add_unwind_entry ((psprel
3619 : output_lc_sprel) (val));
3622 add_unwind_entry (output_rp_when ());
3623 add_unwind_entry ((psprel
3625 : output_rp_sprel) (val));
3628 add_unwind_entry (output_preds_when ());
3629 add_unwind_entry ((psprel
3630 ? output_preds_psprel
3631 : output_preds_sprel) (val));
3634 add_unwind_entry (output_priunat_when_mem ());
3635 add_unwind_entry ((psprel
3636 ? output_priunat_psprel
3637 : output_priunat_sprel) (val));
3640 as_bad ("First operand not a valid register");
3644 as_bad ("Second operand not a valid constant");
3647 as_bad ("First operand not a register");
3652 int dummy ATTRIBUTE_UNUSED;
3656 sep = parse_operand (&e1);
3658 parse_operand (&e2);
3660 if (e1.X_op != O_constant)
3661 as_bad ("First operand to .save.g must be a constant.");
3664 int grmask = e1.X_add_number;
3666 add_unwind_entry (output_gr_mem (grmask));
3669 int reg = e2.X_add_number - REG_GR;
3670 if (e2.X_op == O_register && reg >= 0 && reg < 128)
3671 add_unwind_entry (output_gr_gr (grmask, reg));
3673 as_bad ("Second operand is an invalid register.");
3680 int dummy ATTRIBUTE_UNUSED;
3684 sep = parse_operand (&e1);
3686 if (e1.X_op != O_constant)
3687 as_bad ("Operand to .save.f must be a constant.");
3689 add_unwind_entry (output_fr_mem (e1.X_add_number));
3694 int dummy ATTRIBUTE_UNUSED;
3701 sep = parse_operand (&e1);
3702 if (e1.X_op != O_constant)
3704 as_bad ("First operand to .save.b must be a constant.");
3707 brmask = e1.X_add_number;
3711 sep = parse_operand (&e2);
3712 reg = e2.X_add_number - REG_GR;
3713 if (e2.X_op != O_register || reg > 127)
3715 as_bad ("Second operand to .save.b must be a general register.");
3718 add_unwind_entry (output_br_gr (brmask, e2.X_add_number));
3721 add_unwind_entry (output_br_mem (brmask));
3723 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3724 demand_empty_rest_of_line ();
3729 int dummy ATTRIBUTE_UNUSED;
3733 sep = parse_operand (&e1);
3735 parse_operand (&e2);
3737 if (e1.X_op != O_constant || sep != ',' || e2.X_op != O_constant)
3738 as_bad ("Both operands of .save.gf must be constants.");
3741 int grmask = e1.X_add_number;
3742 int frmask = e2.X_add_number;
3743 add_unwind_entry (output_frgr_mem (grmask, frmask));
3749 int dummy ATTRIBUTE_UNUSED;
3754 sep = parse_operand (&e);
3755 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3756 demand_empty_rest_of_line ();
3758 if (e.X_op != O_constant)
3759 as_bad ("Operand to .spill must be a constant");
3761 add_unwind_entry (output_spill_base (e.X_add_number));
3765 dot_spillreg (dummy)
3766 int dummy ATTRIBUTE_UNUSED;
3768 int sep, ab, xy, reg, treg;
3771 sep = parse_operand (&e1);
3774 as_bad ("No second operand to .spillreg");
3778 parse_operand (&e2);
3780 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3782 as_bad ("First operand to .spillreg must be a preserved register");
3786 if (!convert_expr_to_xy_reg (&e2, &xy, &treg))
3788 as_bad ("Second operand to .spillreg must be a register");
3792 add_unwind_entry (output_spill_reg (ab, reg, treg, xy));
3796 dot_spillmem (psprel)
3802 sep = parse_operand (&e1);
3805 as_bad ("Second operand missing");
3809 parse_operand (&e2);
3811 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3813 as_bad ("First operand to .spill%s must be a preserved register",
3814 psprel ? "psp" : "sp");
3818 if (e2.X_op != O_constant)
3820 as_bad ("Second operand to .spill%s must be a constant",
3821 psprel ? "psp" : "sp");
3826 add_unwind_entry (output_spill_psprel (ab, reg, e2.X_add_number));
3828 add_unwind_entry (output_spill_sprel (ab, reg, e2.X_add_number));
3832 dot_spillreg_p (dummy)
3833 int dummy ATTRIBUTE_UNUSED;
3835 int sep, ab, xy, reg, treg;
3836 expressionS e1, e2, e3;
3839 sep = parse_operand (&e1);
3842 as_bad ("No second and third operand to .spillreg.p");
3846 sep = parse_operand (&e2);
3849 as_bad ("No third operand to .spillreg.p");
3853 parse_operand (&e3);
3855 qp = e1.X_add_number - REG_P;
3857 if (e1.X_op != O_register || qp > 63)
3859 as_bad ("First operand to .spillreg.p must be a predicate");
3863 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3865 as_bad ("Second operand to .spillreg.p must be a preserved register");
3869 if (!convert_expr_to_xy_reg (&e3, &xy, &treg))
3871 as_bad ("Third operand to .spillreg.p must be a register");
3875 add_unwind_entry (output_spill_reg_p (ab, reg, treg, xy, qp));
3879 dot_spillmem_p (psprel)
3882 expressionS e1, e2, e3;
3886 sep = parse_operand (&e1);
3889 as_bad ("Second operand missing");
3893 parse_operand (&e2);
3896 as_bad ("Second operand missing");
3900 parse_operand (&e3);
3902 qp = e1.X_add_number - REG_P;
3903 if (e1.X_op != O_register || qp > 63)
3905 as_bad ("First operand to .spill%s_p must be a predicate",
3906 psprel ? "psp" : "sp");
3910 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3912 as_bad ("Second operand to .spill%s_p must be a preserved register",
3913 psprel ? "psp" : "sp");
3917 if (e3.X_op != O_constant)
3919 as_bad ("Third operand to .spill%s_p must be a constant",
3920 psprel ? "psp" : "sp");
3925 add_unwind_entry (output_spill_psprel_p (ab, reg, e3.X_add_number, qp));
3927 add_unwind_entry (output_spill_sprel_p (ab, reg, e3.X_add_number, qp));
3931 get_saved_prologue_count (lbl)
3934 label_prologue_count *lpc = unwind.saved_prologue_counts;
3936 while (lpc != NULL && lpc->label_number != lbl)
3940 return lpc->prologue_count;
3942 as_bad ("Missing .label_state %ld", lbl);
3947 save_prologue_count (lbl, count)
3951 label_prologue_count *lpc = unwind.saved_prologue_counts;
3953 while (lpc != NULL && lpc->label_number != lbl)
3957 lpc->prologue_count = count;
3960 label_prologue_count *new_lpc = xmalloc (sizeof (* new_lpc));
3962 new_lpc->next = unwind.saved_prologue_counts;
3963 new_lpc->label_number = lbl;
3964 new_lpc->prologue_count = count;
3965 unwind.saved_prologue_counts = new_lpc;
3970 free_saved_prologue_counts ()
3972 label_prologue_count *lpc = unwind.saved_prologue_counts;
3973 label_prologue_count *next;
3982 unwind.saved_prologue_counts = NULL;
3986 dot_label_state (dummy)
3987 int dummy ATTRIBUTE_UNUSED;
3992 if (e.X_op != O_constant)
3994 as_bad ("Operand to .label_state must be a constant");
3997 add_unwind_entry (output_label_state (e.X_add_number));
3998 save_prologue_count (e.X_add_number, unwind.prologue_count);
4002 dot_copy_state (dummy)
4003 int dummy ATTRIBUTE_UNUSED;
4008 if (e.X_op != O_constant)
4010 as_bad ("Operand to .copy_state must be a constant");
4013 add_unwind_entry (output_copy_state (e.X_add_number));
4014 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
4019 int dummy ATTRIBUTE_UNUSED;
4024 sep = parse_operand (&e1);
4027 as_bad ("Second operand to .unwabi missing");
4030 sep = parse_operand (&e2);
4031 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
4032 demand_empty_rest_of_line ();
4034 if (e1.X_op != O_constant)
4036 as_bad ("First operand to .unwabi must be a constant");
4040 if (e2.X_op != O_constant)
4042 as_bad ("Second operand to .unwabi must be a constant");
4046 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number));
4050 dot_personality (dummy)
4051 int dummy ATTRIBUTE_UNUSED;
4055 name = input_line_pointer;
4056 c = get_symbol_end ();
4057 p = input_line_pointer;
4058 unwind.personality_routine = symbol_find_or_make (name);
4059 unwind.force_unwind_entry = 1;
4062 demand_empty_rest_of_line ();
4067 int dummy ATTRIBUTE_UNUSED;
4072 unwind.proc_start = expr_build_dot ();
4073 /* Parse names of main and alternate entry points and mark them as
4074 function symbols: */
4078 name = input_line_pointer;
4079 c = get_symbol_end ();
4080 p = input_line_pointer;
4081 sym = symbol_find_or_make (name);
4082 if (unwind.proc_start == 0)
4084 unwind.proc_start = sym;
4086 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
4089 if (*input_line_pointer != ',')
4091 ++input_line_pointer;
4093 demand_empty_rest_of_line ();
4096 unwind.prologue_count = 0;
4097 unwind.list = unwind.tail = unwind.current_entry = NULL;
4098 unwind.personality_routine = 0;
4103 int dummy ATTRIBUTE_UNUSED;
4105 unwind.prologue = 0;
4106 unwind.prologue_mask = 0;
4108 add_unwind_entry (output_body ());
4109 demand_empty_rest_of_line ();
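/* Example inputs for the .prologue directive below (operands are only
   illustrative):

	.prologue
	.prologue 12, 32

   The bare form emits a plain prologue record; the two-operand form also
   records a save mask and a grsave operand, which may be given either as a
   constant or as a general register name.  */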
4113 dot_prologue (dummy)
4114 int dummy ATTRIBUTE_UNUSED;
4117 int mask = 0, grsave = 0;
4119 if (!is_it_end_of_statement ())
4122 sep = parse_operand (&e1);
4124 as_bad ("No second operand to .prologue");
4125 sep = parse_operand (&e2);
4126 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
4127 demand_empty_rest_of_line ();
4129 if (e1.X_op == O_constant)
4131 mask = e1.X_add_number;
4133 if (e2.X_op == O_constant)
4134 grsave = e2.X_add_number;
4135 else if (e2.X_op == O_register
4136 && (grsave = e2.X_add_number - REG_GR) < 128)
4139 as_bad ("Second operand not a constant or general register");
4141 add_unwind_entry (output_prologue_gr (mask, grsave));
4144 as_bad ("First operand not a constant");
4147 add_unwind_entry (output_prologue ());
4149 unwind.prologue = 1;
4150 unwind.prologue_mask = mask;
4151 ++unwind.prologue_count;
4156 int dummy ATTRIBUTE_UNUSED;
4160 int bytes_per_address;
4163 subsegT saved_subseg;
4167 if (unwind.saved_text_seg)
4169 saved_seg = unwind.saved_text_seg;
4170 saved_subseg = unwind.saved_text_subseg;
4171 unwind.saved_text_seg = NULL;
4175 saved_seg = now_seg;
4176 saved_subseg = now_subseg;
4179 insn_group_break (1, 0, 0);
4181 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4183 generate_unwind_image (saved_seg);
4185 if (unwind.info || unwind.force_unwind_entry)
4187 subseg_set (md.last_text_seg, 0);
4188 unwind.proc_end = expr_build_dot ();
4190 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND, 0);
4192 /* Make sure that section has 4 byte alignment for ILP32 and
4193 8 byte alignment for LP64. */
4194 record_alignment (now_seg, md.pointer_size_shift);
4196 /* Need space for 3 pointers for procedure start, procedure end, and unwind info. */
4198 ptr = frag_more (3 * md.pointer_size);
4199 where = frag_now_fix () - (3 * md.pointer_size);
4200 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
4202 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4203 e.X_op = O_pseudo_fixup;
4204 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4206 e.X_add_symbol = unwind.proc_start;
4207 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e);
4209 e.X_op = O_pseudo_fixup;
4210 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4212 e.X_add_symbol = unwind.proc_end;
4213 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4214 bytes_per_address, &e);
4218 e.X_op = O_pseudo_fixup;
4219 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4221 e.X_add_symbol = unwind.info;
4222 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4223 bytes_per_address, &e);
4226 md_number_to_chars (ptr + (bytes_per_address * 2), 0,
4231 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND, 1);
4233 subseg_set (saved_seg, saved_subseg);
4235 /* Parse names of main and alternate entry points and set symbol sizes. */
4239 name = input_line_pointer;
4240 c = get_symbol_end ();
4241 p = input_line_pointer;
4242 sym = symbol_find (name);
4243 if (sym && unwind.proc_start
4244 && (symbol_get_bfdsym (sym)->flags & BSF_FUNCTION)
4245 && S_GET_SIZE (sym) == 0 && symbol_get_obj (sym)->size == NULL)
4247 fragS *fr = symbol_get_frag (unwind.proc_start);
4248 fragS *frag = symbol_get_frag (sym);
4250 /* Check whether the function label is at or beyond last .proc directive. */
4252 while (fr && fr != frag)
4256 if (frag == frag_now && SEG_NORMAL (now_seg))
4257 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4260 symbol_get_obj (sym)->size =
4261 (expressionS *) xmalloc (sizeof (expressionS));
4262 symbol_get_obj (sym)->size->X_op = O_subtract;
4263 symbol_get_obj (sym)->size->X_add_symbol
4264 = symbol_new (FAKE_LABEL_NAME, now_seg,
4265 frag_now_fix (), frag_now);
4266 symbol_get_obj (sym)->size->X_op_symbol = sym;
4267 symbol_get_obj (sym)->size->X_add_number = 0;
4273 if (*input_line_pointer != ',')
4275 ++input_line_pointer;
4277 demand_empty_rest_of_line ();
4278 unwind.proc_start = unwind.proc_end = unwind.info = 0;
4282 dot_template (template)
4285 CURR_SLOT.user_template = template;
4290 int dummy ATTRIBUTE_UNUSED;
4292 int ins, locs, outs, rots;
4294 if (is_it_end_of_statement ())
4295 ins = locs = outs = rots = 0;
4298 ins = get_absolute_expression ();
4299 if (*input_line_pointer++ != ',')
4301 locs = get_absolute_expression ();
4302 if (*input_line_pointer++ != ',')
4304 outs = get_absolute_expression ();
4305 if (*input_line_pointer++ != ',')
4307 rots = get_absolute_expression ();
4309 set_regstack (ins, locs, outs, rots);
4313 as_bad ("Comma expected");
4314 ignore_rest_of_line ();
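/* Example of the input parsed above (counts are illustrative):

	.regstk 2, 4, 1, 0

   i.e. ins, locs, outs and rots as passed to set_regstack (); a bare
   .regstk resets all four counts to zero.  */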
4321 unsigned num_regs, num_alloced = 0;
4322 struct dynreg **drpp, *dr;
4323 int ch, base_reg = 0;
4329 case DYNREG_GR: base_reg = REG_GR + 32; break;
4330 case DYNREG_FR: base_reg = REG_FR + 32; break;
4331 case DYNREG_PR: base_reg = REG_P + 16; break;
4335 /* First, remove existing names from hash table. */
4336 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4338 hash_delete (md.dynreg_hash, dr->name);
4342 drpp = &md.dynreg[type];
4345 start = input_line_pointer;
4346 ch = get_symbol_end ();
4347 *input_line_pointer = ch;
4348 len = (input_line_pointer - start);
4351 if (*input_line_pointer != '[')
4353 as_bad ("Expected '['");
4356 ++input_line_pointer; /* skip '[' */
4358 num_regs = get_absolute_expression ();
4360 if (*input_line_pointer++ != ']')
4362 as_bad ("Expected ']'");
4367 num_alloced += num_regs;
4371 if (num_alloced > md.rot.num_regs)
4373 as_bad ("Used more than the declared %d rotating registers",
4379 if (num_alloced > 96)
4381 as_bad ("Used more than the available 96 rotating registers");
4386 if (num_alloced > 48)
4388 as_bad ("Used more than the available 48 rotating registers");
4397 name = obstack_alloc (&notes, len + 1);
4398 memcpy (name, start, len);
4403 *drpp = obstack_alloc (&notes, sizeof (*dr));
4404 memset (*drpp, 0, sizeof (*dr));
4409 dr->num_regs = num_regs;
4410 dr->base = base_reg;
4412 base_reg += num_regs;
4414 if (hash_insert (md.dynreg_hash, name, dr))
4416 as_bad ("Attempt to redefine register set `%s'", name);
4420 if (*input_line_pointer != ',')
4422 ++input_line_pointer; /* skip comma */
4425 demand_empty_rest_of_line ();
4429 ignore_rest_of_line ();
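/* Example of the declarations parsed above (names and counts are
   illustrative):

	.rotr in[4], out[4]
	.rotp p[2]

   Each NAME[N] entry reserves N consecutive rotating registers -- starting
   at r32, f32 or p16 depending on the directive -- and enters NAME into
   md.dynreg_hash for later lookup.  */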
4433 dot_byteorder (byteorder)
4436 segment_info_type *seginfo = seg_info (now_seg);
4438 if (byteorder == -1)
4440 if (seginfo->tc_segment_info_data.endian == 0)
4441 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4442 byteorder = seginfo->tc_segment_info_data.endian == 1;
4445 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4447 if (target_big_endian != byteorder)
4449 target_big_endian = byteorder;
4450 if (target_big_endian)
4452 ia64_number_to_chars = number_to_chars_bigendian;
4453 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4457 ia64_number_to_chars = number_to_chars_littleendian;
4458 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4465 int dummy ATTRIBUTE_UNUSED;
4472 option = input_line_pointer;
4473 ch = get_symbol_end ();
4474 if (strcmp (option, "lsb") == 0)
4475 md.flags &= ~EF_IA_64_BE;
4476 else if (strcmp (option, "msb") == 0)
4477 md.flags |= EF_IA_64_BE;
4478 else if (strcmp (option, "abi32") == 0)
4479 md.flags &= ~EF_IA_64_ABI64;
4480 else if (strcmp (option, "abi64") == 0)
4481 md.flags |= EF_IA_64_ABI64;
4483 as_bad ("Unknown psr option `%s'", option);
4484 *input_line_pointer = ch;
4487 if (*input_line_pointer != ',')
4490 ++input_line_pointer;
4493 demand_empty_rest_of_line ();
4498 int dummy ATTRIBUTE_UNUSED;
4500 new_logical_line (0, get_absolute_expression ());
4501 demand_empty_rest_of_line ();
4505 parse_section_name ()
4511 if (*input_line_pointer != '"')
4513 as_bad ("Missing section name");
4514 ignore_rest_of_line ();
4517 name = demand_copy_C_string (&len);
4520 ignore_rest_of_line ();
4524 if (*input_line_pointer != ',')
4526 as_bad ("Comma expected after section name");
4527 ignore_rest_of_line ();
4530 ++input_line_pointer; /* skip comma */
4538 char *name = parse_section_name ();
4542 md.keep_pending_output = 1;
4545 obj_elf_previous (0);
4546 md.keep_pending_output = 0;
4549 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4552 stmt_float_cons (kind)
4573 ia64_do_align (alignment);
4581 int saved_auto_align = md.auto_align;
4585 md.auto_align = saved_auto_align;
4589 dot_xfloat_cons (kind)
4592 char *name = parse_section_name ();
4596 md.keep_pending_output = 1;
4598 stmt_float_cons (kind);
4599 obj_elf_previous (0);
4600 md.keep_pending_output = 0;
4604 dot_xstringer (zero)
4607 char *name = parse_section_name ();
4611 md.keep_pending_output = 1;
4614 obj_elf_previous (0);
4615 md.keep_pending_output = 0;
4622 int saved_auto_align = md.auto_align;
4623 char *name = parse_section_name ();
4627 md.keep_pending_output = 1;
4631 md.auto_align = saved_auto_align;
4632 obj_elf_previous (0);
4633 md.keep_pending_output = 0;
4637 dot_xfloat_cons_ua (kind)
4640 int saved_auto_align = md.auto_align;
4641 char *name = parse_section_name ();
4645 md.keep_pending_output = 1;
4648 stmt_float_cons (kind);
4649 md.auto_align = saved_auto_align;
4650 obj_elf_previous (0);
4651 md.keep_pending_output = 0;
4654 /* .reg.val <regname>,value */
4658 int dummy ATTRIBUTE_UNUSED;
4663 if (reg.X_op != O_register)
4665 as_bad (_("Register name expected"));
4666 ignore_rest_of_line ();
4668 else if (*input_line_pointer++ != ',')
4670 as_bad (_("Comma expected"));
4671 ignore_rest_of_line ();
4675 valueT value = get_absolute_expression ();
4676 int regno = reg.X_add_number;
4677 if (regno < REG_GR || regno >= REG_GR + 128)
4678 as_warn (_("Register value annotation ignored"));
4681 gr_values[regno - REG_GR].known = 1;
4682 gr_values[regno - REG_GR].value = value;
4683 gr_values[regno - REG_GR].path = md.path;
4686 demand_empty_rest_of_line ();
4691 /* .serialize.data, .serialize.instruction */
4694 dot_serialize (type)
4697 insn_group_break (0, 0, 0);
4699 instruction_serialization ();
4701 data_serialization ();
4702 insn_group_break (0, 0, 0);
4703 demand_empty_rest_of_line ();
4706 /* Select dv checking mode (.auto, .explicit, .default).
4711 A stop is inserted when changing modes. */
4718 if (md.manual_bundling)
4719 as_warn (_("Directive invalid within a bundle"));
4721 if (type == 'E' || type == 'A')
4722 md.mode_explicitly_set = 0;
4724 md.mode_explicitly_set = 1;
4731 if (md.explicit_mode)
4732 insn_group_break (1, 0, 0);
4733 md.explicit_mode = 0;
4737 if (!md.explicit_mode)
4738 insn_group_break (1, 0, 0);
4739 md.explicit_mode = 1;
4743 if (md.explicit_mode != md.default_explicit_mode)
4744 insn_group_break (1, 0, 0);
4745 md.explicit_mode = md.default_explicit_mode;
4746 md.mode_explicitly_set = 0;
4757 for (regno = 0; regno < 64; regno++)
4759 if (mask & ((valueT) 1 << regno))
4761 fprintf (stderr, "%s p%d", comma, regno);
4768 /* .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear")
4769 .pred.rel.imply p1, p2 (also .pred.rel "imply")
4770 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex")
4771 .pred.safe_across_calls p1 [, p2 [,...]] */
4780 int p1 = -1, p2 = -1;
4784 if (*input_line_pointer != '"')
4786 as_bad (_("Missing predicate relation type"));
4787 ignore_rest_of_line ();
4793 char *form = demand_copy_C_string (&len);
4794 if (strcmp (form, "mutex") == 0)
4796 else if (strcmp (form, "clear") == 0)
4798 else if (strcmp (form, "imply") == 0)
4802 as_bad (_("Unrecognized predicate relation type"));
4803 ignore_rest_of_line ();
4807 if (*input_line_pointer == ',')
4808 ++input_line_pointer;
4818 if (TOUPPER (*input_line_pointer) != 'P'
4819 || (regno = atoi (++input_line_pointer)) < 0
4822 as_bad (_("Predicate register expected"));
4823 ignore_rest_of_line ();
4826 while (ISDIGIT (*input_line_pointer))
4827 ++input_line_pointer;
4834 as_warn (_("Duplicate predicate register ignored"));
4837 /* See if it's a range. */
4838 if (*input_line_pointer == '-')
4841 ++input_line_pointer;
4843 if (TOUPPER (*input_line_pointer) != 'P'
4844 || (regno = atoi (++input_line_pointer)) < 0
4847 as_bad (_("Predicate register expected"));
4848 ignore_rest_of_line ();
4851 while (ISDIGIT (*input_line_pointer))
4852 ++input_line_pointer;
4856 as_bad (_("Bad register range"));
4857 ignore_rest_of_line ();
4868 if (*input_line_pointer != ',')
4870 ++input_line_pointer;
4879 clear_qp_mutex (mask);
4880 clear_qp_implies (mask, (valueT) 0);
4883 if (count != 2 || p1 == -1 || p2 == -1)
4884 as_bad (_("Predicate source and target required"));
4885 else if (p1 == 0 || p2 == 0)
4886 as_bad (_("Use of p0 is not valid in this context"));
4888 add_qp_imply (p1, p2);
4893 as_bad (_("At least two PR arguments expected"));
4898 as_bad (_("Use of p0 is not valid in this context"));
4901 add_qp_mutex (mask);
4904 /* note that we don't override any existing relations */
4907 as_bad (_("At least one PR argument expected"));
4912 fprintf (stderr, "Safe across calls: ");
4913 print_prmask (mask);
4914 fprintf (stderr, "\n");
4916 qp_safe_across_calls = mask;
4919 demand_empty_rest_of_line ();
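/* Example inputs for the forms listed above (predicate numbers are
   illustrative):

	.pred.rel "mutex", p6, p7
	.pred.rel.imply p1, p2
	.pred.safe_across_calls p1-p5, p16-p63

   Ranges such as p16-p63 are accepted by the parsing loop above.  */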
4922 /* .entry label [, label [, ...]]
4923 Hint to DV code that the given labels are to be considered entry points.
4924 Otherwise, only global labels are considered entry points. */
4928 int dummy ATTRIBUTE_UNUSED;
4937 name = input_line_pointer;
4938 c = get_symbol_end ();
4939 symbolP = symbol_find_or_make (name);
4941 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (PTR) symbolP);
4943 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
4946 *input_line_pointer = c;
4948 c = *input_line_pointer;
4951 input_line_pointer++;
4953 if (*input_line_pointer == '\n')
4959 demand_empty_rest_of_line ();
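/* Example (label names are illustrative):

	.entry alt_entry1, alt_entry2

   marks the named labels as entry points for the DV checks, as described
   in the comment above.  */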
4962 /* .mem.offset offset, base
4963 "base" is used to distinguish between offsets from a different base. */
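/* Example (values are illustrative):

	.mem.offset 16, 0

   stores offset 16 and base 0 into md.mem_offset as a hint for the
   dependency-violation checks.  */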
4966 dot_mem_offset (dummy)
4967 int dummy ATTRIBUTE_UNUSED;
4969 md.mem_offset.hint = 1;
4970 md.mem_offset.offset = get_absolute_expression ();
4971 if (*input_line_pointer != ',')
4973 as_bad (_("Comma expected"));
4974 ignore_rest_of_line ();
4977 ++input_line_pointer;
4978 md.mem_offset.base = get_absolute_expression ();
4979 demand_empty_rest_of_line ();
4982 /* ia64-specific pseudo-ops: */
4983 const pseudo_typeS md_pseudo_table[] =
4985 { "radix", dot_radix, 0 },
4986 { "lcomm", s_lcomm_bytes, 1 },
4987 { "loc", dot_loc, 0 },
4988 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
4989 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
4990 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
4991 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
4992 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
4993 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
4994 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
4995 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
4996 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
4997 { "proc", dot_proc, 0 },
4998 { "body", dot_body, 0 },
4999 { "prologue", dot_prologue, 0 },
5000 { "endp", dot_endp, 0 },
5002 { "fframe", dot_fframe, 0 },
5003 { "vframe", dot_vframe, 0 },
5004 { "vframesp", dot_vframesp, 0 },
5005 { "vframepsp", dot_vframepsp, 0 },
5006 { "save", dot_save, 0 },
5007 { "restore", dot_restore, 0 },
5008 { "restorereg", dot_restorereg, 0 },
5009 { "restorereg.p", dot_restorereg_p, 0 },
5010 { "handlerdata", dot_handlerdata, 0 },
5011 { "unwentry", dot_unwentry, 0 },
5012 { "altrp", dot_altrp, 0 },
5013 { "savesp", dot_savemem, 0 },
5014 { "savepsp", dot_savemem, 1 },
5015 { "save.g", dot_saveg, 0 },
5016 { "save.f", dot_savef, 0 },
5017 { "save.b", dot_saveb, 0 },
5018 { "save.gf", dot_savegf, 0 },
5019 { "spill", dot_spill, 0 },
5020 { "spillreg", dot_spillreg, 0 },
5021 { "spillsp", dot_spillmem, 0 },
5022 { "spillpsp", dot_spillmem, 1 },
5023 { "spillreg.p", dot_spillreg_p, 0 },
5024 { "spillsp.p", dot_spillmem_p, 0 },
5025 { "spillpsp.p", dot_spillmem_p, 1 },
5026 { "label_state", dot_label_state, 0 },
5027 { "copy_state", dot_copy_state, 0 },
5028 { "unwabi", dot_unwabi, 0 },
5029 { "personality", dot_personality, 0 },
5031 { "estate", dot_estate, 0 },
5033 { "mii", dot_template, 0x0 },
5034 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
5035 { "mlx", dot_template, 0x2 },
5036 { "mmi", dot_template, 0x4 },
5037 { "mfi", dot_template, 0x6 },
5038 { "mmf", dot_template, 0x7 },
5039 { "mib", dot_template, 0x8 },
5040 { "mbb", dot_template, 0x9 },
5041 { "bbb", dot_template, 0xb },
5042 { "mmb", dot_template, 0xc },
5043 { "mfb", dot_template, 0xe },
5045 { "lb", dot_scope, 0 },
5046 { "le", dot_scope, 1 },
5048 { "align", dot_align, 0 },
5049 { "regstk", dot_regstk, 0 },
5050 { "rotr", dot_rot, DYNREG_GR },
5051 { "rotf", dot_rot, DYNREG_FR },
5052 { "rotp", dot_rot, DYNREG_PR },
5053 { "lsb", dot_byteorder, 0 },
5054 { "msb", dot_byteorder, 1 },
5055 { "psr", dot_psr, 0 },
5056 { "alias", dot_alias, 0 },
5057 { "secalias", dot_alias, 1 },
5058 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
5060 { "xdata1", dot_xdata, 1 },
5061 { "xdata2", dot_xdata, 2 },
5062 { "xdata4", dot_xdata, 4 },
5063 { "xdata8", dot_xdata, 8 },
5064 { "xreal4", dot_xfloat_cons, 'f' },
5065 { "xreal8", dot_xfloat_cons, 'd' },
5066 { "xreal10", dot_xfloat_cons, 'x' },
5067 { "xreal16", dot_xfloat_cons, 'X' },
5068 { "xstring", dot_xstringer, 0 },
5069 { "xstringz", dot_xstringer, 1 },
5071 /* unaligned versions: */
5072 { "xdata2.ua", dot_xdata_ua, 2 },
5073 { "xdata4.ua", dot_xdata_ua, 4 },
5074 { "xdata8.ua", dot_xdata_ua, 8 },
5075 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
5076 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
5077 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
5078 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
5080 /* annotations/DV checking support */
5081 { "entry", dot_entry, 0 },
5082 { "mem.offset", dot_mem_offset, 0 },
5083 { "pred.rel", dot_pred_rel, 0 },
5084 { "pred.rel.clear", dot_pred_rel, 'c' },
5085 { "pred.rel.imply", dot_pred_rel, 'i' },
5086 { "pred.rel.mutex", dot_pred_rel, 'm' },
5087 { "pred.safe_across_calls", dot_pred_rel, 's' },
5088 { "reg.val", dot_reg_val, 0 },
5089 { "serialize.data", dot_serialize, 0 },
5090 { "serialize.instruction", dot_serialize, 1 },
5091 { "auto", dot_dv_mode, 'a' },
5092 { "explicit", dot_dv_mode, 'e' },
5093 { "default", dot_dv_mode, 'd' },
5095 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
5096 IA-64 aligns data allocation pseudo-ops by default, so we have to
5097 tell it that these ones are supposed to be unaligned. Long term,
5098 should rewrite so that only IA-64 specific data allocation pseudo-ops
5099 are aligned by default. */
5100 {"2byte", stmt_cons_ua, 2},
5101 {"4byte", stmt_cons_ua, 4},
5102 {"8byte", stmt_cons_ua, 8},
5107 static const struct pseudo_opcode
5110 void (*handler) (int);
5115 /* these are more like pseudo-ops, but don't start with a dot */
5116 { "data1", cons, 1 },
5117 { "data2", cons, 2 },
5118 { "data4", cons, 4 },
5119 { "data8", cons, 8 },
5120 { "data16", cons, 16 },
5121 { "real4", stmt_float_cons, 'f' },
5122 { "real8", stmt_float_cons, 'd' },
5123 { "real10", stmt_float_cons, 'x' },
5124 { "real16", stmt_float_cons, 'X' },
5125 { "string", stringer, 0 },
5126 { "stringz", stringer, 1 },
5128 /* unaligned versions: */
5129 { "data2.ua", stmt_cons_ua, 2 },
5130 { "data4.ua", stmt_cons_ua, 4 },
5131 { "data8.ua", stmt_cons_ua, 8 },
5132 { "data16.ua", stmt_cons_ua, 16 },
5133 { "real4.ua", float_cons, 'f' },
5134 { "real8.ua", float_cons, 'd' },
5135 { "real10.ua", float_cons, 'x' },
5136 { "real16.ua", float_cons, 'X' },
5139 /* Declare a register by creating a symbol for it and entering it in
5140 the symbol table. */
5143 declare_register (name, regnum)
5150 sym = symbol_new (name, reg_section, regnum, &zero_address_frag);
5152 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (PTR) sym);
5154 as_fatal ("Inserting \"%s\" into register table failed: %s",
5161 declare_register_set (prefix, num_regs, base_regnum)
5169 for (i = 0; i < num_regs; ++i)
5171 sprintf (name, "%s%u", prefix, i);
5172 declare_register (name, base_regnum + i);
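/* For instance (an illustrative call, not made here),
   declare_register_set ("r", 128, REG_GR) would create the symbols
   r0..r127 with values REG_GR..REG_GR + 127 in the register section.  */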
5177 operand_width (opnd)
5178 enum ia64_opnd opnd;
5180 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5181 unsigned int bits = 0;
5185 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5186 bits += odesc->field[i].bits;
5191 static enum operand_match_result
5192 operand_match (idesc, index, e)
5193 const struct ia64_opcode *idesc;
5197 enum ia64_opnd opnd = idesc->operands[index];
5198 int bits, relocatable = 0;
5199 struct insn_fix *fix;
5206 case IA64_OPND_AR_CCV:
5207 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5208 return OPERAND_MATCH;
5211 case IA64_OPND_AR_CSD:
5212 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5213 return OPERAND_MATCH;
5216 case IA64_OPND_AR_PFS:
5217 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5218 return OPERAND_MATCH;
5222 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5223 return OPERAND_MATCH;
5227 if (e->X_op == O_register && e->X_add_number == REG_IP)
5228 return OPERAND_MATCH;
5232 if (e->X_op == O_register && e->X_add_number == REG_PR)
5233 return OPERAND_MATCH;
5236 case IA64_OPND_PR_ROT:
5237 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5238 return OPERAND_MATCH;
5242 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5243 return OPERAND_MATCH;
5246 case IA64_OPND_PSR_L:
5247 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5248 return OPERAND_MATCH;
5251 case IA64_OPND_PSR_UM:
5252 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5253 return OPERAND_MATCH;
5257 if (e->X_op == O_constant)
5259 if (e->X_add_number == 1)
5260 return OPERAND_MATCH;
5262 return OPERAND_OUT_OF_RANGE;
5267 if (e->X_op == O_constant)
5269 if (e->X_add_number == 8)
5270 return OPERAND_MATCH;
5272 return OPERAND_OUT_OF_RANGE;
5277 if (e->X_op == O_constant)
5279 if (e->X_add_number == 16)
5280 return OPERAND_MATCH;
5282 return OPERAND_OUT_OF_RANGE;
5286 /* register operands: */
5289 if (e->X_op == O_register && e->X_add_number >= REG_AR
5290 && e->X_add_number < REG_AR + 128)
5291 return OPERAND_MATCH;
5296 if (e->X_op == O_register && e->X_add_number >= REG_BR
5297 && e->X_add_number < REG_BR + 8)
5298 return OPERAND_MATCH;
5302 if (e->X_op == O_register && e->X_add_number >= REG_CR
5303 && e->X_add_number < REG_CR + 128)
5304 return OPERAND_MATCH;
5311 if (e->X_op == O_register && e->X_add_number >= REG_FR
5312 && e->X_add_number < REG_FR + 128)
5313 return OPERAND_MATCH;
5318 if (e->X_op == O_register && e->X_add_number >= REG_P
5319 && e->X_add_number < REG_P + 64)
5320 return OPERAND_MATCH;
5326 if (e->X_op == O_register && e->X_add_number >= REG_GR
5327 && e->X_add_number < REG_GR + 128)
5328 return OPERAND_MATCH;
5331 case IA64_OPND_R3_2:
5332 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5334 if (e->X_add_number < REG_GR + 4)
5335 return OPERAND_MATCH;
5336 else if (e->X_add_number < REG_GR + 128)
5337 return OPERAND_OUT_OF_RANGE;
5341 /* indirect operands: */
5342 case IA64_OPND_CPUID_R3:
5343 case IA64_OPND_DBR_R3:
5344 case IA64_OPND_DTR_R3:
5345 case IA64_OPND_ITR_R3:
5346 case IA64_OPND_IBR_R3:
5347 case IA64_OPND_MSR_R3:
5348 case IA64_OPND_PKR_R3:
5349 case IA64_OPND_PMC_R3:
5350 case IA64_OPND_PMD_R3:
5351 case IA64_OPND_RR_R3:
5352 if (e->X_op == O_index && e->X_op_symbol
5353 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5354 == opnd - IA64_OPND_CPUID_R3))
5355 return OPERAND_MATCH;
5359 if (e->X_op == O_index && !e->X_op_symbol)
5360 return OPERAND_MATCH;
5363 /* immediate operands: */
5364 case IA64_OPND_CNT2a:
5365 case IA64_OPND_LEN4:
5366 case IA64_OPND_LEN6:
5367 bits = operand_width (idesc->operands[index]);
5368 if (e->X_op == O_constant)
5370 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5371 return OPERAND_MATCH;
5373 return OPERAND_OUT_OF_RANGE;
5377 case IA64_OPND_CNT2b:
5378 if (e->X_op == O_constant)
5380 if ((bfd_vma) (e->X_add_number - 1) < 3)
5381 return OPERAND_MATCH;
5383 return OPERAND_OUT_OF_RANGE;
5387 case IA64_OPND_CNT2c:
5388 val = e->X_add_number;
5389 if (e->X_op == O_constant)
5391 if ((val == 0 || val == 7 || val == 15 || val == 16))
5392 return OPERAND_MATCH;
5394 return OPERAND_OUT_OF_RANGE;
5399 /* SOR must be an integer multiple of 8 */
5400 if (e->X_op == O_constant && e->X_add_number & 0x7)
5401 return OPERAND_OUT_OF_RANGE;
5404 if (e->X_op == O_constant)
5406 if ((bfd_vma) e->X_add_number <= 96)
5407 return OPERAND_MATCH;
5409 return OPERAND_OUT_OF_RANGE;
5413 case IA64_OPND_IMMU62:
5414 if (e->X_op == O_constant)
5416 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5417 return OPERAND_MATCH;
5419 return OPERAND_OUT_OF_RANGE;
5423 /* FIXME -- need 62-bit relocation type */
5424 as_bad (_("62-bit relocation not yet implemented"));
5428 case IA64_OPND_IMMU64:
5429 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5430 || e->X_op == O_subtract)
5432 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5433 fix->code = BFD_RELOC_IA64_IMM64;
5434 if (e->X_op != O_subtract)
5436 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5437 if (e->X_op == O_pseudo_fixup)
5441 fix->opnd = idesc->operands[index];
5444 ++CURR_SLOT.num_fixups;
5445 return OPERAND_MATCH;
5447 else if (e->X_op == O_constant)
5448 return OPERAND_MATCH;
5451 case IA64_OPND_CCNT5:
5452 case IA64_OPND_CNT5:
5453 case IA64_OPND_CNT6:
5454 case IA64_OPND_CPOS6a:
5455 case IA64_OPND_CPOS6b:
5456 case IA64_OPND_CPOS6c:
5457 case IA64_OPND_IMMU2:
5458 case IA64_OPND_IMMU7a:
5459 case IA64_OPND_IMMU7b:
5460 case IA64_OPND_IMMU21:
5461 case IA64_OPND_IMMU24:
5462 case IA64_OPND_MBTYPE4:
5463 case IA64_OPND_MHTYPE8:
5464 case IA64_OPND_POS6:
5465 bits = operand_width (idesc->operands[index]);
5466 if (e->X_op == O_constant)
5468 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5469 return OPERAND_MATCH;
5471 return OPERAND_OUT_OF_RANGE;
5475 case IA64_OPND_IMMU9:
5476 bits = operand_width (idesc->operands[index]);
5477 if (e->X_op == O_constant)
5479 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5481 int lobits = e->X_add_number & 0x3;
5482 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5483 e->X_add_number |= (bfd_vma) 0x3;
5484 return OPERAND_MATCH;
5487 return OPERAND_OUT_OF_RANGE;
5491 case IA64_OPND_IMM44:
5492 /* The least significant 16 bits must be zero. */
5493 if ((e->X_add_number & 0xffff) != 0)
5494 /* XXX technically, this is wrong: we should not be issuing warning
5495 messages until we're sure this instruction pattern is going to be used. */
5497 as_warn (_("lower 16 bits of mask ignored"));
5499 if (e->X_op == O_constant)
5501 if (((e->X_add_number >= 0
5502 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5503 || (e->X_add_number < 0
5504 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5507 if (e->X_add_number >= 0
5508 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5510 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5512 return OPERAND_MATCH;
5515 return OPERAND_OUT_OF_RANGE;
5519 case IA64_OPND_IMM17:
5520 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5521 if (e->X_op == O_constant)
5523 if (((e->X_add_number >= 0
5524 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5525 || (e->X_add_number < 0
5526 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5529 if (e->X_add_number >= 0
5530 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5532 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5534 return OPERAND_MATCH;
5537 return OPERAND_OUT_OF_RANGE;
5541 case IA64_OPND_IMM14:
5542 case IA64_OPND_IMM22:
5544 case IA64_OPND_IMM1:
5545 case IA64_OPND_IMM8:
5546 case IA64_OPND_IMM8U4:
5547 case IA64_OPND_IMM8M1:
5548 case IA64_OPND_IMM8M1U4:
5549 case IA64_OPND_IMM8M1U8:
5550 case IA64_OPND_IMM9a:
5551 case IA64_OPND_IMM9b:
5552 bits = operand_width (idesc->operands[index]);
5553 if (relocatable && (e->X_op == O_symbol
5554 || e->X_op == O_subtract
5555 || e->X_op == O_pseudo_fixup))
5557 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5559 if (idesc->operands[index] == IA64_OPND_IMM14)
5560 fix->code = BFD_RELOC_IA64_IMM14;
5562 fix->code = BFD_RELOC_IA64_IMM22;
5564 if (e->X_op != O_subtract)
5566 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5567 if (e->X_op == O_pseudo_fixup)
5571 fix->opnd = idesc->operands[index];
5574 ++CURR_SLOT.num_fixups;
5575 return OPERAND_MATCH;
5577 else if (e->X_op != O_constant
5578 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5579 return OPERAND_MISMATCH;
5581 if (opnd == IA64_OPND_IMM8M1U4)
5583 /* Zero is not valid for unsigned compares that take an adjusted
5584 constant immediate range. */
5585 if (e->X_add_number == 0)
5586 return OPERAND_OUT_OF_RANGE;
5588 /* Sign-extend 32-bit unsigned numbers, so that the following range
5589 checks will work. */
5590 val = e->X_add_number;
5591 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5592 && ((val & ((bfd_vma) 1 << 31)) != 0))
5593 val = ((val << 32) >> 32);
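/* Worked example: an operand of 0xffffffff has no bits set above bit 31
   and has bit 31 set, so the shift pair above turns it into -1; the
   signed range check further below then sees a small negative value
   rather than a huge unsigned one.  */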
5595 /* Check for 0x100000000. This is valid because
5596 0x100000000-1 is the same as ((uint32_t) -1). */
5597 if (val == ((bfd_signed_vma) 1 << 32))
5598 return OPERAND_MATCH;
5602 else if (opnd == IA64_OPND_IMM8M1U8)
5604 /* Zero is not valid for unsigned compares that take an adjusted
5605 constant immediate range. */
5606 if (e->X_add_number == 0)
5607 return OPERAND_OUT_OF_RANGE;
5609 /* Check for 0x10000000000000000. */
5610 if (e->X_op == O_big)
5612 if (generic_bignum[0] == 0
5613 && generic_bignum[1] == 0
5614 && generic_bignum[2] == 0
5615 && generic_bignum[3] == 0
5616 && generic_bignum[4] == 1)
5617 return OPERAND_MATCH;
5619 return OPERAND_OUT_OF_RANGE;
5622 val = e->X_add_number - 1;
5624 else if (opnd == IA64_OPND_IMM8M1)
5625 val = e->X_add_number - 1;
5626 else if (opnd == IA64_OPND_IMM8U4)
5628 /* Sign-extend 32-bit unsigned numbers, so that the following range
5629 checks will work. */
5630 val = e->X_add_number;
5631 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5632 && ((val & ((bfd_vma) 1 << 31)) != 0))
5633 val = ((val << 32) >> 32);
5636 val = e->X_add_number;
5638 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5639 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5640 return OPERAND_MATCH;
5642 return OPERAND_OUT_OF_RANGE;
5644 case IA64_OPND_INC3:
5645 /* +/- 1, 4, 8, 16 */
5646 val = e->X_add_number;
5649 if (e->X_op == O_constant)
5651 if ((val == 1 || val == 4 || val == 8 || val == 16))
5652 return OPERAND_MATCH;
5654 return OPERAND_OUT_OF_RANGE;
5658 case IA64_OPND_TGT25:
5659 case IA64_OPND_TGT25b:
5660 case IA64_OPND_TGT25c:
5661 case IA64_OPND_TGT64:
5662 if (e->X_op == O_symbol)
5664 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5665 if (opnd == IA64_OPND_TGT25)
5666 fix->code = BFD_RELOC_IA64_PCREL21F;
5667 else if (opnd == IA64_OPND_TGT25b)
5668 fix->code = BFD_RELOC_IA64_PCREL21M;
5669 else if (opnd == IA64_OPND_TGT25c)
5670 fix->code = BFD_RELOC_IA64_PCREL21B;
5671 else if (opnd == IA64_OPND_TGT64)
5672 fix->code = BFD_RELOC_IA64_PCREL60B;
5676 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5677 fix->opnd = idesc->operands[index];
5680 ++CURR_SLOT.num_fixups;
5681 return OPERAND_MATCH;
5683 case IA64_OPND_TAG13:
5684 case IA64_OPND_TAG13b:
5688 return OPERAND_MATCH;
5691 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5692 /* There are no external relocs for TAG13/TAG13b fields, so we
5693 create a dummy reloc. This will not live past md_apply_fix3. */
5694 fix->code = BFD_RELOC_UNUSED;
5695 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5696 fix->opnd = idesc->operands[index];
5699 ++CURR_SLOT.num_fixups;
5700 return OPERAND_MATCH;
5707 case IA64_OPND_LDXMOV:
5708 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5709 fix->code = BFD_RELOC_IA64_LDXMOV;
5710 fix->opnd = idesc->operands[index];
5713 ++CURR_SLOT.num_fixups;
5714 return OPERAND_MATCH;
5719 return OPERAND_MISMATCH;
5728 memset (e, 0, sizeof (*e));
5731 if (*input_line_pointer != '}')
5733 sep = *input_line_pointer++;
5737 if (!md.manual_bundling)
5738 as_warn ("Found '}' when manual bundling is off");
5740 CURR_SLOT.manual_bundling_off = 1;
5741 md.manual_bundling = 0;
5747 /* Returns the next entry in the opcode table that matches the one in
5748 IDESC, and frees the entry in IDESC. If no matching entry is
5749 found, NULL is returned instead. */
5751 static struct ia64_opcode *
5752 get_next_opcode (struct ia64_opcode *idesc)
5754 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
5755 ia64_free_opcode (idesc);
5759 /* Parse the operands for the opcode and find the opcode variant that
5760 matches the specified operands, or NULL if no match is possible. */
5762 static struct ia64_opcode *
5763 parse_operands (idesc)
5764 struct ia64_opcode *idesc;
5766 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
5767 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
5768 enum ia64_opnd expected_operand = IA64_OPND_NIL;
5769 enum operand_match_result result;
5771 char *first_arg = 0, *end, *saved_input_pointer;
5774 assert (strlen (idesc->name) <= 128);
5776 strcpy (mnemonic, idesc->name);
5777 if (idesc->operands[2] == IA64_OPND_SOF)
5779 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
5780 can't parse the first operand until we have parsed the
5781 remaining operands of the "alloc" instruction. */
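/* For instance, in "alloc loc0=ar.pfs,0,1,0,0" the name "loc0" only
   becomes valid once the frame sizes given by the remaining operands
   have been applied, which is why the text of the first operand is
   saved here and re-parsed below, after set_regstack has been called.  */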
5783 first_arg = input_line_pointer;
5784 end = strchr (input_line_pointer, '=');
5787 as_bad ("Expected separator `='");
5790 input_line_pointer = end + 1;
5795 for (; i < NELEMS (CURR_SLOT.opnd); ++i)
5797 sep = parse_operand (CURR_SLOT.opnd + i);
5798 if (CURR_SLOT.opnd[i].X_op == O_absent)
5803 if (sep != '=' && sep != ',')
5808 if (num_outputs > 0)
5809 as_bad ("Duplicate equal sign (=) in instruction");
5811 num_outputs = i + 1;
5816 as_bad ("Illegal operand separator `%c'", sep);
5820 if (idesc->operands[2] == IA64_OPND_SOF)
5822 /* map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r */
5823 know (strcmp (idesc->name, "alloc") == 0);
5824 if (num_operands == 5 /* first_arg not included in this count! */
5825 && CURR_SLOT.opnd[2].X_op == O_constant
5826 && CURR_SLOT.opnd[3].X_op == O_constant
5827 && CURR_SLOT.opnd[4].X_op == O_constant
5828 && CURR_SLOT.opnd[5].X_op == O_constant)
5830 sof = set_regstack (CURR_SLOT.opnd[2].X_add_number,
5831 CURR_SLOT.opnd[3].X_add_number,
5832 CURR_SLOT.opnd[4].X_add_number,
5833 CURR_SLOT.opnd[5].X_add_number);
5835 /* now we can parse the first arg: */
5836 saved_input_pointer = input_line_pointer;
5837 input_line_pointer = first_arg;
5838 sep = parse_operand (CURR_SLOT.opnd + 0);
5840 --num_outputs; /* force error */
5841 input_line_pointer = saved_input_pointer;
5843 CURR_SLOT.opnd[2].X_add_number = sof;
5844 CURR_SLOT.opnd[3].X_add_number
5845 = sof - CURR_SLOT.opnd[4].X_add_number;
5846 CURR_SLOT.opnd[4] = CURR_SLOT.opnd[5];
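/* Rough worked example of the mapping above: for
   "alloc r32=ar.pfs,2,3,4,0" (ins=2, locals=3, outs=4, rotating=0) the
   operands become 2+3+4 = 9 for the frame size, 9-4 = 5 for ins+locals,
   and 0 for the rotating portion.  */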
5850 highest_unmatched_operand = 0;
5851 curr_out_of_range_pos = -1;
5853 expected_operand = idesc->operands[0];
5854 for (; idesc; idesc = get_next_opcode (idesc))
5856 if (num_outputs != idesc->num_outputs)
5857 continue; /* mismatch in # of outputs */
5859 CURR_SLOT.num_fixups = 0;
5861 /* Try to match all operands. If we see an out-of-range operand,
5862 then continue trying to match the rest of the operands, since if
5863 the rest match, then this idesc will give the best error message. */
5865 out_of_range_pos = -1;
5866 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
5868 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
5869 if (result != OPERAND_MATCH)
5871 if (result != OPERAND_OUT_OF_RANGE)
5873 if (out_of_range_pos < 0)
5874 /* remember position of the first out-of-range operand: */
5875 out_of_range_pos = i;
5879 /* If we did not match all operands, or if at least one operand was
5880 out-of-range, then this idesc does not match. Keep track of which
5881 idesc matched the most operands before failing. If we have two
5882 idescs that failed at the same position, and one had an out-of-range
5883 operand, then prefer the out-of-range operand. Thus if we have
5884 "add r0=0x1000000,r1" we get an error saying the constant is out
5885 of range instead of an error saying that the constant should have been
5888 if (i != num_operands || out_of_range_pos >= 0)
5890 if (i > highest_unmatched_operand
5891 || (i == highest_unmatched_operand
5892 && out_of_range_pos > curr_out_of_range_pos))
5894 highest_unmatched_operand = i;
5895 if (out_of_range_pos >= 0)
5897 expected_operand = idesc->operands[out_of_range_pos];
5898 error_pos = out_of_range_pos;
5902 expected_operand = idesc->operands[i];
5905 curr_out_of_range_pos = out_of_range_pos;
5910 if (num_operands < NELEMS (idesc->operands)
5911 && idesc->operands[num_operands])
5912 continue; /* mismatch in number of arguments */
5918 if (expected_operand)
5919 as_bad ("Operand %u of `%s' should be %s",
5920 error_pos + 1, mnemonic,
5921 elf64_ia64_operands[expected_operand].desc);
5923 as_bad ("Operand mismatch");
5929 /* Keep track of state necessary to determine whether a NOP is necessary
5930 to avoid an erratum in A and B step Itanium chips, and return 1 if we
5931 detect a case where additional NOPs may be necessary. */
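/* Rough illustration of the sequence being checked for (register numbers
   are arbitrary): an F-unit instruction such as "fcmp.eq p6,p7=f2,f3"
   sets a predicate; a later M-unit instruction qualified by it, e.g.
   "(p6) ld8 r8=[r9]", conditionally writes a GR; and a third instruction
   such as "ld8 r10=[r8]" then uses that GR as an address.  The three
   tests below track exactly these three roles.  */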
5933 errata_nop_necessary_p (slot, insn_unit)
5935 enum ia64_unit insn_unit;
5938 struct group *this_group = md.last_groups + md.group_idx;
5939 struct group *prev_group = md.last_groups + (md.group_idx + 2) % 3;
5940 struct ia64_opcode *idesc = slot->idesc;
5942 /* Test whether this could be the first insn in a problematic sequence. */
5943 if (insn_unit == IA64_UNIT_F)
5945 for (i = 0; i < idesc->num_outputs; i++)
5946 if (idesc->operands[i] == IA64_OPND_P1
5947 || idesc->operands[i] == IA64_OPND_P2)
5949 int regno = slot->opnd[i].X_add_number - REG_P;
5950 /* Ignore invalid operands; they generate errors elsewhere. */
5953 this_group->p_reg_set[regno] = 1;
5957 /* Test whether this could be the second insn in a problematic sequence. */
5958 if (insn_unit == IA64_UNIT_M && slot->qp_regno > 0
5959 && prev_group->p_reg_set[slot->qp_regno])
5961 for (i = 0; i < idesc->num_outputs; i++)
5962 if (idesc->operands[i] == IA64_OPND_R1
5963 || idesc->operands[i] == IA64_OPND_R2
5964 || idesc->operands[i] == IA64_OPND_R3)
5966 int regno = slot->opnd[i].X_add_number - REG_GR;
5967 /* Ignore invalid operands; they generate errors elsewhere. */
5970 if (strncmp (idesc->name, "add", 3) != 0
5971 && strncmp (idesc->name, "sub", 3) != 0
5972 && strncmp (idesc->name, "shladd", 6) != 0
5973 && (idesc->flags & IA64_OPCODE_POSTINC) == 0)
5974 this_group->g_reg_set_conditionally[regno] = 1;
5978 /* Test whether this could be the third insn in a problematic sequence. */
5979 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; i++)
5981 if (/* For fc, ptc, ptr, tak, thash, tpa, ttag, and probe. */
5982 idesc->operands[i] == IA64_OPND_R3
5983 /* For mov indirect. */
5984 || idesc->operands[i] == IA64_OPND_RR_R3
5985 || idesc->operands[i] == IA64_OPND_DBR_R3
5986 || idesc->operands[i] == IA64_OPND_IBR_R3
5987 || idesc->operands[i] == IA64_OPND_PKR_R3
5988 || idesc->operands[i] == IA64_OPND_PMC_R3
5989 || idesc->operands[i] == IA64_OPND_PMD_R3
5990 || idesc->operands[i] == IA64_OPND_MSR_R3
5991 || idesc->operands[i] == IA64_OPND_CPUID_R3
5993 || idesc->operands[i] == IA64_OPND_ITR_R3
5994 || idesc->operands[i] == IA64_OPND_DTR_R3
5995 /* Normal memory addresses (load, store, xchg, cmpxchg, etc.). */
5996 || idesc->operands[i] == IA64_OPND_MR3)
5998 int regno = slot->opnd[i].X_add_number - REG_GR;
5999 /* Ignore invalid operands; they generate errors elsewhere. */
6002 if (idesc->operands[i] == IA64_OPND_R3)
6004 if (strcmp (idesc->name, "fc") != 0
6005 && strcmp (idesc->name, "tak") != 0
6006 && strcmp (idesc->name, "thash") != 0
6007 && strcmp (idesc->name, "tpa") != 0
6008 && strcmp (idesc->name, "ttag") != 0
6009 && strncmp (idesc->name, "ptr", 3) != 0
6010 && strncmp (idesc->name, "ptc", 3) != 0
6011 && strncmp (idesc->name, "probe", 5) != 0)
6014 if (prev_group->g_reg_set_conditionally[regno])
6022 build_insn (slot, insnp)
6026 const struct ia64_operand *odesc, *o2desc;
6027 struct ia64_opcode *idesc = slot->idesc;
6028 bfd_signed_vma insn, val;
6032 insn = idesc->opcode | slot->qp_regno;
6034 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
6036 if (slot->opnd[i].X_op == O_register
6037 || slot->opnd[i].X_op == O_constant
6038 || slot->opnd[i].X_op == O_index)
6039 val = slot->opnd[i].X_add_number;
6040 else if (slot->opnd[i].X_op == O_big)
6042 /* This must be the value 0x10000000000000000. */
6043 assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
6049 switch (idesc->operands[i])
6051 case IA64_OPND_IMMU64:
6052 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
6053 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
6054 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
6055 | (((val >> 63) & 0x1) << 36));
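/* That is, for the 64-bit "movl" immediate (IMMU64), bits 62:22 of the
   value are placed in the adjoining L slot and the remaining fields are
   scattered into this instruction word: bits 6:0 at bit 13, bits 15:7 at
   bit 27, bits 20:16 at bit 22, bit 21 at bit 21, and bit 63 at bit 36.  */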
6058 case IA64_OPND_IMMU62:
6059 val &= 0x3fffffffffffffffULL;
6060 if (val != slot->opnd[i].X_add_number)
6061 as_warn (_("Value truncated to 62 bits"));
6062 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
6063 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
6066 case IA64_OPND_TGT64:
6068 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
6069 insn |= ((((val >> 59) & 0x1) << 36)
6070 | (((val >> 0) & 0xfffff) << 13));
6101 case IA64_OPND_R3_2:
6102 case IA64_OPND_CPUID_R3:
6103 case IA64_OPND_DBR_R3:
6104 case IA64_OPND_DTR_R3:
6105 case IA64_OPND_ITR_R3:
6106 case IA64_OPND_IBR_R3:
6108 case IA64_OPND_MSR_R3:
6109 case IA64_OPND_PKR_R3:
6110 case IA64_OPND_PMC_R3:
6111 case IA64_OPND_PMD_R3:
6112 case IA64_OPND_RR_R3:
6120 odesc = elf64_ia64_operands + idesc->operands[i];
6121 err = (*odesc->insert) (odesc, val, &insn);
6123 as_bad_where (slot->src_file, slot->src_line,
6124 "Bad operand value: %s", err);
6125 if (idesc->flags & IA64_OPCODE_PSEUDO)
6127 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6128 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6130 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6131 (*o2desc->insert) (o2desc, val, &insn);
6133 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6134 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6135 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6137 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6138 (*o2desc->insert) (o2desc, 64 - val, &insn);
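/* That is, for pseudo-ops flagged with IA64_OPCODE_LEN_EQ_64MCNT the
   6-bit length field is derived from the given bit count: a count of 10
   in the CPOS6/POS6 operand, for example, yields a length field of
   64 - 10 = 54.  */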
6148 unsigned int manual_bundling_on = 0, manual_bundling_off = 0;
6149 unsigned int manual_bundling = 0;
6150 enum ia64_unit required_unit, insn_unit = 0;
6151 enum ia64_insn_type type[3], insn_type;
6152 unsigned int template, orig_template;
6153 bfd_vma insn[3] = { -1, -1, -1 };
6154 struct ia64_opcode *idesc;
6155 int end_of_insn_group = 0, user_template = -1;
6156 int n, i, j, first, curr;
6157 unw_rec_list *ptr, *last_ptr, *end_ptr;
6158 bfd_vma t0 = 0, t1 = 0;
6159 struct label_fix *lfix;
6160 struct insn_fix *ifix;
6166 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6167 know (first >= 0 && first < NUM_SLOTS);
6168 n = MIN (3, md.num_slots_in_use);
6170 /* Determine template: use user_template if specified, best match
6173 if (md.slot[first].user_template >= 0)
6174 user_template = template = md.slot[first].user_template;
6177 /* Auto select appropriate template. */
6178 memset (type, 0, sizeof (type));
6180 for (i = 0; i < n; ++i)
6182 if (md.slot[curr].label_fixups && i != 0)
6184 type[i] = md.slot[curr].idesc->type;
6185 curr = (curr + 1) % NUM_SLOTS;
6187 template = best_template[type[0]][type[1]][type[2]];
6190 /* initialize instructions with appropriate nops: */
6191 for (i = 0; i < 3; ++i)
6192 insn[i] = nop[ia64_templ_desc[template].exec_unit[i]];
6196 /* Check to see if this bundle is at an offset that is a multiple of 16 bytes
6197 from the start of the frag. */
6198 addr_mod = frag_now_fix () & 15;
6199 if (frag_now->has_code && frag_now->insn_addr != addr_mod)
6200 as_bad (_("instruction address is not a multiple of 16"));
6201 frag_now->insn_addr = addr_mod;
6202 frag_now->has_code = 1;
6204 /* now fill in slots with as many insns as possible: */
6206 idesc = md.slot[curr].idesc;
6207 end_of_insn_group = 0;
6208 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6210 /* If we have unwind records, we may need to update some now. */
6211 ptr = md.slot[curr].unwind_record;
6214 /* Find the last prologue/body record in the list for the current
6215 insn, and set the slot number for all records up to that point.
6216 This needs to be done now, because prologue/body records refer to
6217 the current point, not the point after the instruction has been
6218 issued. This matters because there may have been nops emitted
6219 meanwhile. Any non-prologue non-body record followed by a
6220 prologue/body record must also refer to the current point. */
6222 end_ptr = md.slot[(curr + 1) % NUM_SLOTS].unwind_record;
6223 for (; ptr != end_ptr; ptr = ptr->next)
6224 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6225 || ptr->r.type == body)
6229 /* Make last_ptr point one after the last prologue/body
6231 last_ptr = last_ptr->next;
6232 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
6235 ptr->slot_number = (unsigned long) f + i;
6236 ptr->slot_frag = frag_now;
6238 /* Remove the initialized records, so that we won't accidentally
6239 update them again if we insert a nop and continue. */
6240 md.slot[curr].unwind_record = last_ptr;
6244 if (idesc->flags & IA64_OPCODE_SLOT2)
6246 if (manual_bundling && i != 2)
6247 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6248 "`%s' must be last in bundle", idesc->name);
6252 if (idesc->flags & IA64_OPCODE_LAST)
6255 unsigned int required_template;
6257 /* If we need a stop bit after an M slot, our only choice is
6258 template 5 (M;;MI). If we need a stop bit after a B
6259 slot, our only choice is to place it at the end of the
6260 bundle, because the only available templates are MIB,
6261 MBB, BBB, MMB, and MFB. We don't handle anything other
6262 than M and B slots because these are the only kind of
6263 instructions that can have the IA64_OPCODE_LAST bit set. */
6264 required_template = template;
6265 switch (idesc->type)
6269 required_template = 5;
6277 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6278 "Internal error: don't know how to force %s to end"
6279 "of instruction group", idesc->name);
6283 if (manual_bundling && i != required_slot)
6284 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6285 "`%s' must be last in instruction group",
6287 if (required_slot < i)
6288 /* Can't fit this instruction. */
6292 if (required_template != template)
6294 /* If we switch the template, we need to reset the NOPs
6295 after slot i. The slot-types of the instructions ahead
6296 of i never change, so we don't need to worry about
6297 changing NOPs in front of this slot. */
6298 for (j = i; j < 3; ++j)
6299 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6301 template = required_template;
6303 if (curr != first && md.slot[curr].label_fixups)
6305 if (manual_bundling_on)
6306 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6307 "Label must be first in a bundle");
6308 /* This insn must go into the first slot of a bundle. */
6312 manual_bundling_on = md.slot[curr].manual_bundling_on;
6313 manual_bundling_off = md.slot[curr].manual_bundling_off;
6315 if (manual_bundling_on)
6318 manual_bundling = 1;
6320 break; /* need to start a new bundle */
6323 if (end_of_insn_group && md.num_slots_in_use >= 1)
6325 /* We need an instruction group boundary in the middle of a
6326 bundle. See if we can switch to another template with
6327 an appropriate boundary. */
6329 orig_template = template;
6330 if (i == 1 && (user_template == 4
6331 || (user_template < 0
6332 && (ia64_templ_desc[template].exec_unit[0]
6336 end_of_insn_group = 0;
6338 else if (i == 2 && (user_template == 0
6339 || (user_template < 0
6340 && (ia64_templ_desc[template].exec_unit[1]
6342 /* This test makes sure we don't switch the template if
6343 the next instruction is one that needs to be first in
6344 an instruction group. Since all those instructions are
6345 in the M group, there is no way such an instruction can
6346 fit in this bundle even if we switch the template. The
6347 reason we have to check for this is that otherwise we
6348 may end up generating "MI;;I M.." which has the deadly
6349 effect that the second M instruction is no longer the
6350 first in the bundle! --davidm 99/12/16 */
6351 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6354 end_of_insn_group = 0;
6356 else if (curr != first)
6357 /* can't fit this insn */
6360 if (template != orig_template)
6361 /* if we switch the template, we need to reset the NOPs
6362 after slot i. The slot-types of the instructions ahead
6363 of i never change, so we don't need to worry about
6364 changing NOPs in front of this slot. */
6365 for (j = i; j < 3; ++j)
6366 insn[j] = nop[ia64_templ_desc[template].exec_unit[j]];
6368 required_unit = ia64_templ_desc[template].exec_unit[i];
6370 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
6371 if (idesc->type == IA64_TYPE_DYN)
6373 if ((strcmp (idesc->name, "nop") == 0)
6374 || (strcmp (idesc->name, "hint") == 0)
6375 || (strcmp (idesc->name, "break") == 0))
6376 insn_unit = required_unit;
6377 else if (strcmp (idesc->name, "chk.s") == 0)
6379 insn_unit = IA64_UNIT_M;
6380 if (required_unit == IA64_UNIT_I)
6381 insn_unit = IA64_UNIT_I;
6384 as_fatal ("emit_one_bundle: unexpected dynamic op");
6386 sprintf (mnemonic, "%s.%c", idesc->name, "?imbf??"[insn_unit]);
6387 ia64_free_opcode (idesc);
6388 md.slot[curr].idesc = idesc = ia64_find_opcode (mnemonic);
6390 know (!idesc->next); /* no resolved dynamic ops have collisions */
6395 insn_type = idesc->type;
6396 insn_unit = IA64_UNIT_NIL;
6400 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6401 insn_unit = required_unit;
6403 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6404 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6405 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6406 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6407 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6412 if (insn_unit != required_unit)
6414 if (required_unit == IA64_UNIT_L
6415 && insn_unit == IA64_UNIT_I
6416 && !(idesc->flags & IA64_OPCODE_X_IN_MLX))
6418 /* we got ourselves an MLX template but the current
6419 instruction isn't an X-unit, or an I-unit instruction
6420 that can go into the X slot of an MLX template. Duh. */
6421 if (md.num_slots_in_use >= NUM_SLOTS)
6423 as_bad_where (md.slot[curr].src_file,
6424 md.slot[curr].src_line,
6425 "`%s' can't go in X slot of "
6426 "MLX template", idesc->name);
6427 /* drop this insn so we don't livelock: */
6428 --md.num_slots_in_use;
6432 continue; /* try next slot */
6435 if (debug_type == DEBUG_DWARF2 || md.slot[curr].loc_directive_seen)
6437 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6439 md.slot[curr].loc_directive_seen = 0;
6440 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6443 if (errata_nop_necessary_p (md.slot + curr, insn_unit))
6444 as_warn (_("Additional NOP may be necessary to workaround Itanium processor A/B step errata"));
6446 build_insn (md.slot + curr, insn + i);
6448 ptr = md.slot[curr].unwind_record;
6451 /* Set slot numbers for all remaining unwind records belonging to the
6452 current insn. There can not be any prologue/body unwind records
6454 end_ptr = md.slot[(curr + 1) % NUM_SLOTS].unwind_record;
6455 for (; ptr != end_ptr; ptr = ptr->next)
6457 ptr->slot_number = (unsigned long) f + i;
6458 ptr->slot_frag = frag_now;
6460 md.slot[curr].unwind_record = NULL;
6463 if (required_unit == IA64_UNIT_L)
6466 /* skip one slot for long/X-unit instructions */
6469 --md.num_slots_in_use;
6471 /* now is a good time to fix up the labels for this insn: */
6472 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6474 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6475 symbol_set_frag (lfix->sym, frag_now);
6477 /* and fix up the tags also. */
6478 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6480 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6481 symbol_set_frag (lfix->sym, frag_now);
6484 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6486 ifix = md.slot[curr].fixup + j;
6487 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6488 &ifix->expr, ifix->is_pcrel, ifix->code);
6489 fix->tc_fix_data.opnd = ifix->opnd;
6490 fix->fx_plt = (fix->fx_r_type == BFD_RELOC_IA64_PLTOFF22);
6491 fix->fx_file = md.slot[curr].src_file;
6492 fix->fx_line = md.slot[curr].src_line;
6495 end_of_insn_group = md.slot[curr].end_of_insn_group;
6497 if (end_of_insn_group)
6499 md.group_idx = (md.group_idx + 1) % 3;
6500 memset (md.last_groups + md.group_idx, 0, sizeof md.last_groups[0]);
6504 ia64_free_opcode (md.slot[curr].idesc);
6505 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6506 md.slot[curr].user_template = -1;
6508 if (manual_bundling_off)
6510 manual_bundling = 0;
6513 curr = (curr + 1) % NUM_SLOTS;
6514 idesc = md.slot[curr].idesc;
6516 if (manual_bundling)
6518 if (md.num_slots_in_use > 0)
6520 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6521 "`%s' does not fit into %s template",
6522 idesc->name, ia64_templ_desc[template].name);
6523 --md.num_slots_in_use;
6526 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6527 "Missing '}' at end of file");
6529 know (md.num_slots_in_use < NUM_SLOTS);
6531 t0 = end_of_insn_group | (template << 1) | (insn[0] << 5) | (insn[1] << 46);
6532 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6534 number_to_chars_littleendian (f + 0, t0, 8);
6535 number_to_chars_littleendian (f + 8, t1, 8);
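/* Layout of the 128-bit bundle just written (as two little-endian 64-bit
   words): bits 4:0 hold the template number shifted left by one with the
   stop bit in bit 0, bits 45:5 hold slot 0, bits 86:46 hold slot 1
   (split 18/23 across t0 and t1), and bits 127:87 hold slot 2.  */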
6539 unwind.list->next_slot_number = (unsigned long) f + 16;
6540 unwind.list->next_slot_frag = frag_now;
6545 md_parse_option (c, arg)
6552 /* Switches from the Intel assembler. */
6554 if (strcmp (arg, "ilp64") == 0
6555 || strcmp (arg, "lp64") == 0
6556 || strcmp (arg, "p64") == 0)
6558 md.flags |= EF_IA_64_ABI64;
6560 else if (strcmp (arg, "ilp32") == 0)
6562 md.flags &= ~EF_IA_64_ABI64;
6564 else if (strcmp (arg, "le") == 0)
6566 md.flags &= ~EF_IA_64_BE;
6567 default_big_endian = 0;
6569 else if (strcmp (arg, "be") == 0)
6571 md.flags |= EF_IA_64_BE;
6572 default_big_endian = 1;
6579 if (strcmp (arg, "so") == 0)
6581 /* Suppress signon message. */
6583 else if (strcmp (arg, "pi") == 0)
6585 /* Reject privileged instructions. FIXME */
6587 else if (strcmp (arg, "us") == 0)
6589 /* Allow union of signed and unsigned range. FIXME */
6591 else if (strcmp (arg, "close_fcalls") == 0)
6593 /* Do not resolve global function calls. */
6600 /* temp[="prefix"] Insert temporary labels into the object file
6601 symbol table prefixed by "prefix".
6602 Default prefix is ":temp:".
6607 /* indirect=<tgt> Assume unannotated indirect branches behavior
6608 according to <tgt> --
6609 exit: branch out from the current context (default)
6610 labels: all labels in context may be branch targets
6612 if (strncmp (arg, "indirect=", 9) != 0)
6617 /* -X conflicts with an ignored option, use -x instead */
6619 if (!arg || strcmp (arg, "explicit") == 0)
6621 /* set default mode to explicit */
6622 md.default_explicit_mode = 1;
6625 else if (strcmp (arg, "auto") == 0)
6627 md.default_explicit_mode = 0;
6629 else if (strcmp (arg, "debug") == 0)
6633 else if (strcmp (arg, "debugx") == 0)
6635 md.default_explicit_mode = 1;
6640 as_bad (_("Unrecognized option '-x%s'"), arg);
6645 /* nops Print nops statistics. */
6648 /* GNU specific switches for gcc. */
6649 case OPTION_MCONSTANT_GP:
6650 md.flags |= EF_IA_64_CONS_GP;
6653 case OPTION_MAUTO_PIC:
6654 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
6665 md_show_usage (stream)
6670 --mconstant-gp mark output file as using the constant-GP model\n\
6671 (sets ELF header flag EF_IA_64_CONS_GP)\n\
6672 --mauto-pic mark output file as using the constant-GP model\n\
6673 without function descriptors (sets ELF header flag\n\
6674 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
6675 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
6676 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
6677 -x | -xexplicit turn on dependency violation checking (default)\n\
6678 -xauto automagically remove dependency violations\n\
6679 -xdebug debug dependency violation checker\n"),
6684 ia64_after_parse_args ()
6686 if (debug_type == DEBUG_STABS)
6687 as_fatal (_("--gstabs is not supported for ia64"));
6690 /* Return true if TYPE fits in TEMPL at SLOT. */
6693 match (int templ, int type, int slot)
6695 enum ia64_unit unit;
6698 unit = ia64_templ_desc[templ].exec_unit[slot];
6701 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
6703 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
6705 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
6706 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
6707 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
6708 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
6709 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
6710 default: result = 0; break;
6715 /* Add a bit of extra goodness if a nop of type F or B would fit
6716 in TEMPL at SLOT. */
6719 extra_goodness (int templ, int slot)
6721 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
6723 if (slot == 2 && match (templ, IA64_TYPE_B, slot))
6728 /* This function is called once, at assembler startup time. It sets
6729 up all the tables, etc. that the MD part of the assembler will need
6730 that can be determined before arguments are parsed. */
6734 int i, j, k, t, total, ar_base, cr_base, goodness, best, regnum, ok;
6739 md.explicit_mode = md.default_explicit_mode;
6741 bfd_set_section_alignment (stdoutput, text_section, 4);
6743 /* Make sure function pointers get initialized. */
6744 target_big_endian = -1;
6745 dot_byteorder (default_big_endian);
6747 alias_hash = hash_new ();
6748 alias_name_hash = hash_new ();
6749 secalias_hash = hash_new ();
6750 secalias_name_hash = hash_new ();
6752 pseudo_func[FUNC_DTP_MODULE].u.sym =
6753 symbol_new (".<dtpmod>", undefined_section, FUNC_DTP_MODULE,
6754 &zero_address_frag);
6756 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
6757 symbol_new (".<dtprel>", undefined_section, FUNC_DTP_RELATIVE,
6758 &zero_address_frag);
6760 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
6761 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE,
6762 &zero_address_frag);
6764 pseudo_func[FUNC_GP_RELATIVE].u.sym =
6765 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE,
6766 &zero_address_frag);
6768 pseudo_func[FUNC_LT_RELATIVE].u.sym =
6769 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE,
6770 &zero_address_frag);
6772 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
6773 symbol_new (".<ltoffx>", undefined_section, FUNC_LT_RELATIVE_X,
6774 &zero_address_frag);
6776 pseudo_func[FUNC_PC_RELATIVE].u.sym =
6777 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE,
6778 &zero_address_frag);
6780 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
6781 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE,
6782 &zero_address_frag);
6784 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
6785 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE,
6786 &zero_address_frag);
6788 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
6789 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE,
6790 &zero_address_frag);
6792 pseudo_func[FUNC_TP_RELATIVE].u.sym =
6793 symbol_new (".<tprel>", undefined_section, FUNC_TP_RELATIVE,
6794 &zero_address_frag);
6796 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
6797 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE,
6798 &zero_address_frag);
6800 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
6801 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE,
6802 &zero_address_frag);
6804 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
6805 symbol_new (".<ltoff.dtpmod>", undefined_section, FUNC_LT_DTP_MODULE,
6806 &zero_address_frag);
6808 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
6809 symbol_new (".<ltoff.dptrel>", undefined_section, FUNC_LT_DTP_RELATIVE,
6810 &zero_address_frag);
6812 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
6813 symbol_new (".<ltoff.tprel>", undefined_section, FUNC_LT_TP_RELATIVE,
6814 &zero_address_frag);
6816 pseudo_func[FUNC_IPLT_RELOC].u.sym =
6817 symbol_new (".<iplt>", undefined_section, FUNC_IPLT_RELOC,
6818 &zero_address_frag);
6820 /* Compute the table of best templates. We compute goodness as a
6821 base 4 value, in which each match counts for 3, each F counts
6822 for 2, each B counts for 1. This should maximize the number of
6823 F and B nops in the chosen bundles, which is good because these
6824 pipelines are least likely to be overcommitted. */
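/* For example, a lone M-type instruction (types M, nil, nil) scores
   3 + 2 + 1 = 6 with an MFB template (match in slot 0, F nop fits slot 1,
   B nop fits slot 2) but only 3 with MII, so MFB ends up recorded as the
   best template for that combination.  */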
6825 for (i = 0; i < IA64_NUM_TYPES; ++i)
6826 for (j = 0; j < IA64_NUM_TYPES; ++j)
6827 for (k = 0; k < IA64_NUM_TYPES; ++k)
6830 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
6833 if (match (t, i, 0))
6835 if (match (t, j, 1))
6837 if (match (t, k, 2))
6838 goodness = 3 + 3 + 3;
6840 goodness = 3 + 3 + extra_goodness (t, 2);
6842 else if (match (t, j, 2))
6843 goodness = 3 + 3 + extra_goodness (t, 1);
6847 goodness += extra_goodness (t, 1);
6848 goodness += extra_goodness (t, 2);
6851 else if (match (t, i, 1))
6853 if (match (t, j, 2))
6856 goodness = 3 + extra_goodness (t, 2);
6858 else if (match (t, i, 2))
6859 goodness = 3 + extra_goodness (t, 1);
6861 if (goodness > best)
6864 best_template[i][j][k] = t;
6869 for (i = 0; i < NUM_SLOTS; ++i)
6870 md.slot[i].user_template = -1;
6872 md.pseudo_hash = hash_new ();
6873 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
6875 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
6876 (void *) (pseudo_opcode + i));
6878 as_fatal ("ia64.md_begin: can't hash `%s': %s",
6879 pseudo_opcode[i].name, err);
6882 md.reg_hash = hash_new ();
6883 md.dynreg_hash = hash_new ();
6884 md.const_hash = hash_new ();
6885 md.entry_hash = hash_new ();
6887 /* general registers: */
6890 for (i = 0; i < total; ++i)
6892 sprintf (name, "r%d", i - REG_GR);
6893 md.regsym[i] = declare_register (name, i);
6896 /* floating point registers: */
6898 for (; i < total; ++i)
6900 sprintf (name, "f%d", i - REG_FR);
6901 md.regsym[i] = declare_register (name, i);
6904 /* application registers: */
6907 for (; i < total; ++i)
6909 sprintf (name, "ar%d", i - REG_AR);
6910 md.regsym[i] = declare_register (name, i);
6913 /* control registers: */
6916 for (; i < total; ++i)
6918 sprintf (name, "cr%d", i - REG_CR);
6919 md.regsym[i] = declare_register (name, i);
6922 /* predicate registers: */
6924 for (; i < total; ++i)
6926 sprintf (name, "p%d", i - REG_P);
6927 md.regsym[i] = declare_register (name, i);
6930 /* branch registers: */
6932 for (; i < total; ++i)
6934 sprintf (name, "b%d", i - REG_BR);
6935 md.regsym[i] = declare_register (name, i);
6938 md.regsym[REG_IP] = declare_register ("ip", REG_IP);
6939 md.regsym[REG_CFM] = declare_register ("cfm", REG_CFM);
6940 md.regsym[REG_PR] = declare_register ("pr", REG_PR);
6941 md.regsym[REG_PR_ROT] = declare_register ("pr.rot", REG_PR_ROT);
6942 md.regsym[REG_PSR] = declare_register ("psr", REG_PSR);
6943 md.regsym[REG_PSR_L] = declare_register ("psr.l", REG_PSR_L);
6944 md.regsym[REG_PSR_UM] = declare_register ("psr.um", REG_PSR_UM);
6946 for (i = 0; i < NELEMS (indirect_reg); ++i)
6948 regnum = indirect_reg[i].regnum;
6949 md.regsym[regnum] = declare_register (indirect_reg[i].name, regnum);
6952 /* define synonyms for application registers: */
6953 for (i = REG_AR; i < REG_AR + NELEMS (ar); ++i)
6954 md.regsym[i] = declare_register (ar[i - REG_AR].name,
6955 REG_AR + ar[i - REG_AR].regnum);
6957 /* define synonyms for control registers: */
6958 for (i = REG_CR; i < REG_CR + NELEMS (cr); ++i)
6959 md.regsym[i] = declare_register (cr[i - REG_CR].name,
6960 REG_CR + cr[i - REG_CR].regnum);
6962 declare_register ("gp", REG_GR + 1);
6963 declare_register ("sp", REG_GR + 12);
6964 declare_register ("rp", REG_BR + 0);
6966 /* pseudo-registers used to specify unwind info: */
6967 declare_register ("psp", REG_PSP);
6969 declare_register_set ("ret", 4, REG_GR + 8);
6970 declare_register_set ("farg", 8, REG_FR + 8);
6971 declare_register_set ("fret", 8, REG_FR + 8);
6973 for (i = 0; i < NELEMS (const_bits); ++i)
6975 err = hash_insert (md.const_hash, const_bits[i].name,
6976 (PTR) (const_bits + i));
6978 as_fatal ("Inserting \"%s\" into constant hash table failed: %s",
6982 /* Set the architecture and machine depending on defaults and command line
6984 if (md.flags & EF_IA_64_ABI64)
6985 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
6987 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
6990 as_warn (_("Could not set architecture and machine"));
6992 /* Set the pointer size and pointer shift size depending on md.flags */
6994 if (md.flags & EF_IA_64_ABI64)
6996 md.pointer_size = 8; /* pointers are 8 bytes */
6997 md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
7001 md.pointer_size = 4; /* pointers are 4 bytes */
7002 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
7005 md.mem_offset.hint = 0;
7008 md.entry_labels = NULL;
7011 /* Set the elf type to 64 bit ABI by default. Cannot do this in md_begin
7012 because that is called after md_parse_option which is where we do the
7013 dynamic changing of md.flags based on -mlp64 or -milp32. Also, set the
7014 default endianness. */
7017 ia64_init (argc, argv)
7018 int argc ATTRIBUTE_UNUSED;
7019 char **argv ATTRIBUTE_UNUSED;
7021 md.flags = MD_FLAGS_DEFAULT;
7024 /* Return a string for the target object file format. */
7027 ia64_target_format ()
7029 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
7031 if (md.flags & EF_IA_64_BE)
7033 if (md.flags & EF_IA_64_ABI64)
7034 #if defined(TE_AIX50)
7035 return "elf64-ia64-aix-big";
7036 #elif defined(TE_HPUX)
7037 return "elf64-ia64-hpux-big";
7039 return "elf64-ia64-big";
7042 #if defined(TE_AIX50)
7043 return "elf32-ia64-aix-big";
7044 #elif defined(TE_HPUX)
7045 return "elf32-ia64-hpux-big";
7047 return "elf32-ia64-big";
7052 if (md.flags & EF_IA_64_ABI64)
7054 return "elf64-ia64-aix-little";
7056 return "elf64-ia64-little";
7060 return "elf32-ia64-aix-little";
7062 return "elf32-ia64-little";
7067 return "unknown-format";
7071 ia64_end_of_source ()
7073 /* terminate insn group upon reaching end of file: */
7074 insn_group_break (1, 0, 0);
7076 /* emits slots we haven't written yet: */
7077 ia64_flush_insns ();
7079 bfd_set_private_flags (stdoutput, md.flags);
7081 md.mem_offset.hint = 0;
7087 if (md.qp.X_op == O_register)
7088 as_bad ("qualifying predicate not followed by instruction");
7089 md.qp.X_op = O_absent;
7091 if (ignore_input ())
7094 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
7096 if (md.detect_dv && !md.explicit_mode)
7097 as_warn (_("Explicit stops are ignored in auto mode"));
7099 insn_group_break (1, 0, 0);
7103 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
7105 static int defining_tag = 0;
7108 ia64_unrecognized_line (ch)
7114 expression (&md.qp);
7115 if (*input_line_pointer++ != ')')
7117 as_bad ("Expected ')'");
7120 if (md.qp.X_op != O_register)
7122 as_bad ("Qualifying predicate expected");
7125 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
7127 as_bad ("Predicate register expected");
7133 if (md.manual_bundling)
7134 as_warn ("Found '{' when manual bundling is already turned on");
7136 CURR_SLOT.manual_bundling_on = 1;
7137 md.manual_bundling = 1;
7139 /* Bundling is only acceptable in explicit mode
7140 or when in default automatic mode. */
7141 if (md.detect_dv && !md.explicit_mode)
7143 if (!md.mode_explicitly_set
7144 && !md.default_explicit_mode)
7147 as_warn (_("Found '{' after explicit switch to automatic mode"));
7152 if (!md.manual_bundling)
7153 as_warn ("Found '}' when manual bundling is off");
7155 PREV_SLOT.manual_bundling_off = 1;
7156 md.manual_bundling = 0;
7158 /* switch back to automatic mode, if applicable */
7161 && !md.mode_explicitly_set
7162 && !md.default_explicit_mode)
7165 /* Allow '{' to follow on the same line. We also allow ";;", but that
7166 happens automatically because ';' is an end of line marker. */
7168 if (input_line_pointer[0] == '{')
7170 input_line_pointer++;
7171 return ia64_unrecognized_line ('{');
7174 demand_empty_rest_of_line ();
7184 if (md.qp.X_op == O_register)
7186 as_bad ("Tag must come before qualifying predicate.");
7190 /* This implements just enough of read_a_source_file in read.c to
7191 recognize labels. */
7192 if (is_name_beginner (*input_line_pointer))
7194 s = input_line_pointer;
7195 c = get_symbol_end ();
7197 else if (LOCAL_LABELS_FB
7198 && ISDIGIT (*input_line_pointer))
7201 while (ISDIGIT (*input_line_pointer))
7202 temp = (temp * 10) + *input_line_pointer++ - '0';
7203 fb_label_instance_inc (temp);
7204 s = fb_label_name (temp, 0);
7205 c = *input_line_pointer;
7214 /* Put ':' back for error messages' sake. */
7215 *input_line_pointer++ = ':';
7216 as_bad ("Expected ':'");
7223 /* Put ':' back for error messages' sake. */
7224 *input_line_pointer++ = ':';
7225 if (*input_line_pointer++ != ']')
7227 as_bad ("Expected ']'");
7232 as_bad ("Tag name expected");
7242 /* Not a valid line. */
7247 ia64_frob_label (sym)
7250 struct label_fix *fix;
7252 /* Tags need special handling since they are not bundle breaks like
7256 fix = obstack_alloc (&notes, sizeof (*fix));
7258 fix->next = CURR_SLOT.tag_fixups;
7259 CURR_SLOT.tag_fixups = fix;
7264 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7266 md.last_text_seg = now_seg;
7267 fix = obstack_alloc (&notes, sizeof (*fix));
7269 fix->next = CURR_SLOT.label_fixups;
7270 CURR_SLOT.label_fixups = fix;
7272 /* Keep track of how many code entry points we've seen. */
7273 if (md.path == md.maxpaths)
7276 md.entry_labels = (const char **)
7277 xrealloc ((void *) md.entry_labels,
7278 md.maxpaths * sizeof (char *));
7280 md.entry_labels[md.path++] = S_GET_NAME (sym);
7285 /* The HP-UX linker will give unresolved symbol errors for symbols
7286 that are declared but unused. This routine removes declared,
7287 unused symbols from an object. */
7289 ia64_frob_symbol (sym)
7292 if ((S_GET_SEGMENT (sym) == &bfd_und_section && ! symbol_used_p (sym) &&
7293 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT)
7294 || (S_GET_SEGMENT (sym) == &bfd_abs_section
7295 && ! S_IS_EXTERNAL (sym)))
7302 ia64_flush_pending_output ()
7304 if (!md.keep_pending_output
7305 && bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
7307 /* ??? This causes many unnecessary stop bits to be emitted.
7308 Unfortunately, it isn't clear if it is safe to remove this. */
7309 insn_group_break (1, 0, 0);
7310 ia64_flush_insns ();
7314 /* Do ia64-specific expression optimization. All that's done here is
7315 to transform index expressions that are either due to the indexing
7316 of rotating registers or due to the indexing of indirect register sets.  */
7319 ia64_optimize_expr (l, op, r)
7328 if (l->X_op == O_register && r->X_op == O_constant)
7330 num_regs = (l->X_add_number >> 16);
7331 if ((unsigned) r->X_add_number >= num_regs)
7334 as_bad ("No current frame");
7336 as_bad ("Index out of range 0..%u", num_regs - 1);
7337 r->X_add_number = 0;
7339 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
7342 else if (l->X_op == O_register && r->X_op == O_register)
7344 if (l->X_add_number < IND_CPUID || l->X_add_number > IND_RR
7345 || l->X_add_number == IND_MEM)
7347 as_bad ("Indirect register set name expected");
7348 l->X_add_number = IND_CPUID;
7351 l->X_op_symbol = md.regsym[l->X_add_number];
7352 l->X_add_number = r->X_add_number;
7360 ia64_parse_name (name, e)
7364 struct const_desc *cdesc;
7365 struct dynreg *dr = 0;
7366 unsigned int regnum;
7370 /* first see if NAME is a known register name: */
7371 sym = hash_find (md.reg_hash, name);
7374 e->X_op = O_register;
7375 e->X_add_number = S_GET_VALUE (sym);
7379 cdesc = hash_find (md.const_hash, name);
7382 e->X_op = O_constant;
7383 e->X_add_number = cdesc->value;
7387 /* check for inN, locN, or outN: */
7391 if (name[1] == 'n' && ISDIGIT (name[2]))
7399 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
7407 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
7420 /* The name is inN, locN, or outN; parse the register number. */
7421 regnum = strtoul (name, &end, 10);
7422 if (end > name && *end == '\0')
7424 if ((unsigned) regnum >= dr->num_regs)
7427 as_bad ("No current frame");
7429 as_bad ("Register number out of range 0..%u",
7433 e->X_op = O_register;
7434 e->X_add_number = dr->base + regnum;
7439 if ((dr = hash_find (md.dynreg_hash, name)))
7441 /* We've got ourselves the name of a rotating register set.
7442 Store the base register number in the low 16 bits of
7443 X_add_number and the size of the register set in the top 16
7445 e->X_op = O_register;
7446 e->X_add_number = dr->base | (dr->num_regs << 16);
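/* E.g. after ".rotr count[4]" a plain reference to "count" reaches this
   point and yields base | (4 << 16); an indexed reference such as
   "count[2]" is then resolved to base + 2 by ia64_optimize_expr above.  */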
7452 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
7455 ia64_canonicalize_symbol_name (name)
7458 size_t len = strlen (name);
7459 if (len > 1 && name[len - 1] == '#')
7460 name[len - 1] = '\0';
7464 /* Return true if idesc is a conditional branch instruction. This excludes
7465 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
7466 because they always read/write resources regardless of the value of the
7467 qualifying predicate. br.ia must always use p0, and hence is always
7468 taken. Thus this function returns true for branches which can fall
7469 through, and which use no resources if they do fall through. */
7472 is_conditional_branch (idesc)
7473 struct ia64_opcode *idesc;
7475 /* br is a conditional branch. Everything that starts with br. except
7476 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
7477 Everything that starts with brl is a conditional branch. */
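/* So, for example, "br", "br.cond", "br.call", and "brl.cond" are
   treated as conditional here, while "br.ia", "br.cloop", "br.ctop",
   "br.cexit", "br.wtop", and "br.wexit" are not.  */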
7478 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
7479 && (idesc->name[2] == '\0'
7480 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
7481 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
7482 || idesc->name[2] == 'l'
7483 /* br.cond, br.call, br.clr */
7484 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
7485 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
7486 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
7489 /* Return whether the given opcode is a taken branch. If there's any doubt,
7493 is_taken_branch (idesc)
7494 struct ia64_opcode *idesc;
7496 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
7497 || strncmp (idesc->name, "br.ia", 5) == 0);
7500 /* Return whether the given opcode is an interruption or rfi. If there's any
7501 doubt, returns zero. */
7504 is_interruption_or_rfi (idesc)
7505 struct ia64_opcode *idesc;
7507 if (strcmp (idesc->name, "rfi") == 0)
7512 /* Returns the index of the given dependency in the opcode's list of chks, or
7513 -1 if there is no dependency. */
7516 depends_on (depind, idesc)
7518 struct ia64_opcode *idesc;
7521 const struct ia64_opcode_dependency *dep = idesc->dependencies;
7522 for (i = 0; i < dep->nchks; i++)
7524 if (depind == DEP (dep->chks[i]))
7530 /* Determine a set of specific resources used for a particular resource
7531 class.  Returns the number of specific resources identified.  For those
7532 cases which are not determinable statically, the resource returned is
7535 Meanings of value in 'NOTE':
7536 1) only read/write when the register number is explicitly encoded in the
7538 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
7539 accesses CFM when qualifying predicate is in the rotating region.
7540 3) general register value is used to specify an indirect register; not
7541 determinable statically.
7542 4) only read the given resource when bits 7:0 of the indirect index
7543 register value does not match the register number of the resource; not
7544 determinable statically.
7545 5) all rules are implementation specific.
7546 6) only when both the index specified by the reader and the index specified
7547 by the writer have the same value in bits 63:61; not determinable
7549 7) only access the specified resource when the corresponding mask bit is
7551 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
7552 only read when these insns reference FR2-31
7553 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
7554 written when these insns write FR32-127
7555 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
7557 11) The target predicates are written independently of PR[qp], but source
7558 registers are only read if PR[qp] is true. Since the state of PR[qp]
7559 cannot statically be determined, all source registers are marked used.
7560 12) This insn only reads the specified predicate register when that
7561 register is the PR[qp].
7562 13) This reference to ld-c only applies to the GR whose value is loaded
7563 with data returned from memory, not the post-incremented address register.
7564 14) The RSE resource includes the implementation-specific RSE internal
7565 state resources. At least one (and possibly more) of these resources are
7566 read by each instruction listed in IC:rse-readers. At least one (and
7567 possibly more) of these resources are written by each insn listed in
7569 15+16) Represents reserved instructions, which the assembler does not
7572 Memory resources (i.e. locations in memory) are *not* marked or tracked by
7573 this code; there are no dependency violations based on memory access.
7576 #define MAX_SPECS 256
7581 specify_resource (dep, idesc, type, specs, note, path)
7582 const struct ia64_dependency *dep;
7583 struct ia64_opcode *idesc;
7584 int type; /* is this a DV chk or a DV reg? */
7585 struct rsrc specs[MAX_SPECS]; /* returned specific resources */
7586 int note; /* resource note for this insn's usage */
7587 int path; /* which execution path to examine */
7594 if (dep->mode == IA64_DV_WAW
7595 || (dep->mode == IA64_DV_RAW && type == DV_REG)
7596 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
7599 /* template for any resources we identify */
7600 tmpl.dependency = dep;
7602 tmpl.insn_srlz = tmpl.data_srlz = 0;
7603 tmpl.qp_regno = CURR_SLOT.qp_regno;
7604 tmpl.link_to_qp_branch = 1;
7605 tmpl.mem_offset.hint = 0;
7608 tmpl.cmp_type = CMP_NONE;
7611 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
7612 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
7613 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
7615 /* we don't need to track these */
7616 if (dep->semantics == IA64_DVS_NONE)
7619 switch (dep->specifier)
7624 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7626 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7627 if (regno >= 0 && regno <= 7)
7629 specs[count] = tmpl;
7630 specs[count++].index = regno;
7636 for (i = 0; i < 8; i++)
7638 specs[count] = tmpl;
7639 specs[count++].index = i;
7648 case IA64_RS_AR_UNAT:
7649 /* This is a mov =AR or mov AR= instruction. */
7650 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7652 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7653 if (regno == AR_UNAT)
7655 specs[count++] = tmpl;
7660 /* This is a spill/fill, or other instruction that modifies the
7663 /* Unless we can determine the specific bits used, mark the whole
7664 thing; bits 8:3 of the memory address indicate the bit used in
7665 UNAT. The .mem.offset hint may be used to eliminate a small
7666 subset of conflicts. */
7667 specs[count] = tmpl;
7668 if (md.mem_offset.hint)
7671 fprintf (stderr, " Using hint for spill/fill\n");
7672 /* The index isn't actually used, just set it to something
7673 approximating the bit index. */
7674 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
7675 specs[count].mem_offset.hint = 1;
7676 specs[count].mem_offset.offset = md.mem_offset.offset;
7677 specs[count++].mem_offset.base = md.mem_offset.base;
7681 specs[count++].specific = 0;
7689 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7691 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7692 if ((regno >= 8 && regno <= 15)
7693 || (regno >= 20 && regno <= 23)
7694 || (regno >= 31 && regno <= 39)
7695 || (regno >= 41 && regno <= 47)
7696 || (regno >= 67 && regno <= 111))
7698 specs[count] = tmpl;
7699 specs[count++].index = regno;
7712 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7714 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7715 if ((regno >= 48 && regno <= 63)
7716 || (regno >= 112 && regno <= 127))
7718 specs[count] = tmpl;
7719 specs[count++].index = regno;
7725 for (i = 48; i < 64; i++)
7727 specs[count] = tmpl;
7728 specs[count++].index = i;
7730 for (i = 112; i < 128; i++)
7732 specs[count] = tmpl;
7733 specs[count++].index = i;
7751 for (i = 0; i < idesc->num_outputs; i++)
7752 if (idesc->operands[i] == IA64_OPND_B1
7753 || idesc->operands[i] == IA64_OPND_B2)
7755 specs[count] = tmpl;
7756 specs[count++].index =
7757 CURR_SLOT.opnd[i].X_add_number - REG_BR;
7762 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
7763 if (idesc->operands[i] == IA64_OPND_B1
7764 || idesc->operands[i] == IA64_OPND_B2)
7766 specs[count] = tmpl;
7767 specs[count++].index =
7768 CURR_SLOT.opnd[i].X_add_number - REG_BR;
7774 case IA64_RS_CPUID: /* four or more registers */
7777 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
7779 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7780 if (regno >= 0 && regno < NELEMS (gr_values)
7783 specs[count] = tmpl;
7784 specs[count++].index = gr_values[regno].value & 0xFF;
7788 specs[count] = tmpl;
7789 specs[count++].specific = 0;
7799 case IA64_RS_DBR: /* four or more registers */
7802 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
7804 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7805 if (regno >= 0 && regno < NELEMS (gr_values)
7808 specs[count] = tmpl;
7809 specs[count++].index = gr_values[regno].value & 0xFF;
7813 specs[count] = tmpl;
7814 specs[count++].specific = 0;
7818 else if (note == 0 && !rsrc_write)
7820 specs[count] = tmpl;
7821 specs[count++].specific = 0;
7829 case IA64_RS_IBR: /* four or more registers */
7832 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
7834 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7835 if (regno >= 0 && regno < NELEMS (gr_values)
7838 specs[count] = tmpl;
7839 specs[count++].index = gr_values[regno].value & 0xFF;
7843 specs[count] = tmpl;
7844 specs[count++].specific = 0;
7857 /* These are implementation specific. Force all references to
7858 conflict with all other references. */
7859 specs[count] = tmpl;
7860 specs[count++].specific = 0;
7868 case IA64_RS_PKR: /* 16 or more registers */
7869 if (note == 3 || note == 4)
7871 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
7873 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7874 if (regno >= 0 && regno < NELEMS (gr_values)
7879 specs[count] = tmpl;
7880 specs[count++].index = gr_values[regno].value & 0xFF;
7883 for (i = 0; i < NELEMS (gr_values); i++)
7885 /* Uses all registers *except* the one in R3. */
7886 if ((unsigned)i != (gr_values[regno].value & 0xFF))
7888 specs[count] = tmpl;
7889 specs[count++].index = i;
7895 specs[count] = tmpl;
7896 specs[count++].specific = 0;
7903 specs[count] = tmpl;
7904 specs[count++].specific = 0;
7908 case IA64_RS_PMC: /* four or more registers */
7911 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
7912 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
7915 int index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
7917 int regno = CURR_SLOT.opnd[index].X_add_number - REG_GR;
7918 if (regno >= 0 && regno < NELEMS (gr_values)
7921 specs[count] = tmpl;
7922 specs[count++].index = gr_values[regno].value & 0xFF;
7926 specs[count] = tmpl;
7927 specs[count++].specific = 0;
7937 case IA64_RS_PMD: /* four or more registers */
7940 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
7942 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7943 if (regno >= 0 && regno < NELEMS (gr_values)
7946 specs[count] = tmpl;
7947 specs[count++].index = gr_values[regno].value & 0xFF;
7951 specs[count] = tmpl;
7952 specs[count++].specific = 0;
7962 case IA64_RS_RR: /* eight registers */
7965 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
7967 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7968 if (regno >= 0 && regno < NELEMS (gr_values)
7971 specs[count] = tmpl;
7972 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
7976 specs[count] = tmpl;
7977 specs[count++].specific = 0;
7981 else if (note == 0 && !rsrc_write)
7983 specs[count] = tmpl;
7984 specs[count++].specific = 0;
7992 case IA64_RS_CR_IRR:
7995 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
7996 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
7998 && idesc->operands[1] == IA64_OPND_CR3
8001 for (i = 0; i < 4; i++)
8003 specs[count] = tmpl;
8004 specs[count++].index = CR_IRR0 + i;
8010 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8011 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8013 && regno <= CR_IRR3)
8015 specs[count] = tmpl;
8016 specs[count++].index = regno;
8025 case IA64_RS_CR_LRR:
8032 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8033 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8034 && (regno == CR_LRR0 || regno == CR_LRR1))
8036 specs[count] = tmpl;
8037 specs[count++].index = regno;
8045 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8047 specs[count] = tmpl;
8048 specs[count++].index =
8049 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8064 else if (rsrc_write)
8066 if (dep->specifier == IA64_RS_FRb
8067 && idesc->operands[0] == IA64_OPND_F1)
8069 specs[count] = tmpl;
8070 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
8075 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8077 if (idesc->operands[i] == IA64_OPND_F2
8078 || idesc->operands[i] == IA64_OPND_F3
8079 || idesc->operands[i] == IA64_OPND_F4)
8081 specs[count] = tmpl;
8082 specs[count++].index =
8083 CURR_SLOT.opnd[i].X_add_number - REG_FR;
8092 /* This reference applies only to the GR whose value is loaded with
8093 data returned from memory. */
8094 specs[count] = tmpl;
8095 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8101 for (i = 0; i < idesc->num_outputs; i++)
8102 if (idesc->operands[i] == IA64_OPND_R1
8103 || idesc->operands[i] == IA64_OPND_R2
8104 || idesc->operands[i] == IA64_OPND_R3)
8106 specs[count] = tmpl;
8107 specs[count++].index =
8108 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8110 if (idesc->flags & IA64_OPCODE_POSTINC)
8111 for (i = 0; i < NELEMS (idesc->operands); i++)
8112 if (idesc->operands[i] == IA64_OPND_MR3)
8114 specs[count] = tmpl;
8115 specs[count++].index =
8116 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8121 /* Look for anything that reads a GR. */
8122 for (i = 0; i < NELEMS (idesc->operands); i++)
8124 if (idesc->operands[i] == IA64_OPND_MR3
8125 || idesc->operands[i] == IA64_OPND_CPUID_R3
8126 || idesc->operands[i] == IA64_OPND_DBR_R3
8127 || idesc->operands[i] == IA64_OPND_IBR_R3
8128 || idesc->operands[i] == IA64_OPND_MSR_R3
8129 || idesc->operands[i] == IA64_OPND_PKR_R3
8130 || idesc->operands[i] == IA64_OPND_PMC_R3
8131 || idesc->operands[i] == IA64_OPND_PMD_R3
8132 || idesc->operands[i] == IA64_OPND_RR_R3
8133 || ((i >= idesc->num_outputs)
8134 && (idesc->operands[i] == IA64_OPND_R1
8135 || idesc->operands[i] == IA64_OPND_R2
8136 || idesc->operands[i] == IA64_OPND_R3
8137 /* addl source register. */
8138 || idesc->operands[i] == IA64_OPND_R3_2)))
8140 specs[count] = tmpl;
8141 specs[count++].index =
8142 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8153 /* This is the same as IA64_RS_PRr, except that the register range is
8154 from 1 - 15, and there are no rotating register reads/writes here. */
8158 for (i = 1; i < 16; i++)
8160 specs[count] = tmpl;
8161 specs[count++].index = i;
8167 /* Mark only those registers indicated by the mask. */
8170 mask = CURR_SLOT.opnd[2].X_add_number;
8171 for (i = 1; i < 16; i++)
8172 if (mask & ((valueT) 1 << i))
8174 specs[count] = tmpl;
8175 specs[count++].index = i;
8183 else if (note == 11) /* note 11 implies note 1 as well */
8187 for (i = 0; i < idesc->num_outputs; i++)
8189 if (idesc->operands[i] == IA64_OPND_P1
8190 || idesc->operands[i] == IA64_OPND_P2)
8192 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8193 if (regno >= 1 && regno < 16)
8195 specs[count] = tmpl;
8196 specs[count++].index = regno;
8206 else if (note == 12)
8208 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8210 specs[count] = tmpl;
8211 specs[count++].index = CURR_SLOT.qp_regno;
8218 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8219 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8220 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8221 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8223 if ((idesc->operands[0] == IA64_OPND_P1
8224 || idesc->operands[0] == IA64_OPND_P2)
8225 && p1 >= 1 && p1 < 16)
8227 specs[count] = tmpl;
8228 specs[count].cmp_type =
8229 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8230 specs[count++].index = p1;
8232 if ((idesc->operands[1] == IA64_OPND_P1
8233 || idesc->operands[1] == IA64_OPND_P2)
8234 && p2 >= 1 && p2 < 16)
8236 specs[count] = tmpl;
8237 specs[count].cmp_type =
8238 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8239 specs[count++].index = p2;
8244 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8246 specs[count] = tmpl;
8247 specs[count++].index = CURR_SLOT.qp_regno;
8249 if (idesc->operands[1] == IA64_OPND_PR)
8251 for (i = 1; i < 16; i++)
8253 specs[count] = tmpl;
8254 specs[count++].index = i;
8265 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8266 simplified cases of this. */
8270 for (i = 16; i < 63; i++)
8272 specs[count] = tmpl;
8273 specs[count++].index = i;
8279 /* Mark only those registers indicated by the mask. */
8281 && idesc->operands[0] == IA64_OPND_PR)
8283 mask = CURR_SLOT.opnd[2].X_add_number;
8284 if (mask & ((valueT) 1 << 16))
8285 for (i = 16; i < 63; i++)
8287 specs[count] = tmpl;
8288 specs[count++].index = i;
8292 && idesc->operands[0] == IA64_OPND_PR_ROT)
8294 for (i = 16; i < 63; i++)
8296 specs[count] = tmpl;
8297 specs[count++].index = i;
8305 else if (note == 11) /* note 11 implies note 1 as well */
8309 for (i = 0; i < idesc->num_outputs; i++)
8311 if (idesc->operands[i] == IA64_OPND_P1
8312 || idesc->operands[i] == IA64_OPND_P2)
8314 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8315 if (regno >= 16 && regno < 63)
8317 specs[count] = tmpl;
8318 specs[count++].index = regno;
8328 else if (note == 12)
8330 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8332 specs[count] = tmpl;
8333 specs[count++].index = CURR_SLOT.qp_regno;
8340 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8341 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8342 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8343 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8345 if ((idesc->operands[0] == IA64_OPND_P1
8346 || idesc->operands[0] == IA64_OPND_P2)
8347 && p1 >= 16 && p1 < 63)
8349 specs[count] = tmpl;
8350 specs[count].cmp_type =
8351 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8352 specs[count++].index = p1;
8354 if ((idesc->operands[1] == IA64_OPND_P1
8355 || idesc->operands[1] == IA64_OPND_P2)
8356 && p2 >= 16 && p2 < 63)
8358 specs[count] = tmpl;
8359 specs[count].cmp_type =
8360 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8361 specs[count++].index = p2;
8366 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8368 specs[count] = tmpl;
8369 specs[count++].index = CURR_SLOT.qp_regno;
8371 if (idesc->operands[1] == IA64_OPND_PR)
8373 for (i = 16; i < 63; i++)
8375 specs[count] = tmpl;
8376 specs[count++].index = i;
8388 /* Verify that the instruction is using the PSR bit indicated in
8392 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
8394 if (dep->regindex < 6)
8396 specs[count++] = tmpl;
8399 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
8401 if (dep->regindex < 32
8402 || dep->regindex == 35
8403 || dep->regindex == 36
8404 || (!rsrc_write && dep->regindex == PSR_CPL))
8406 specs[count++] = tmpl;
8409 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
8411 if (dep->regindex < 32
8412 || dep->regindex == 35
8413 || dep->regindex == 36
8414 || (rsrc_write && dep->regindex == PSR_CPL))
8416 specs[count++] = tmpl;
8421 /* Several PSR bits have very specific dependencies. */
8422 switch (dep->regindex)
8425 specs[count++] = tmpl;
8430 specs[count++] = tmpl;
8434 /* Only certain CR accesses use PSR.ic */
8435 if (idesc->operands[0] == IA64_OPND_CR3
8436 || idesc->operands[1] == IA64_OPND_CR3)
8439 ((idesc->operands[0] == IA64_OPND_CR3)
8442 CURR_SLOT.opnd[index].X_add_number - REG_CR;
8457 specs[count++] = tmpl;
8466 specs[count++] = tmpl;
8470 /* Only some AR accesses use cpl */
8471 if (idesc->operands[0] == IA64_OPND_AR3
8472 || idesc->operands[1] == IA64_OPND_AR3)
8475 ((idesc->operands[0] == IA64_OPND_AR3)
8478 CURR_SLOT.opnd[index].X_add_number - REG_AR;
8485 && regno <= AR_K7))))
8487 specs[count++] = tmpl;
8492 specs[count++] = tmpl;
8502 if (idesc->operands[0] == IA64_OPND_IMMU24)
8504 mask = CURR_SLOT.opnd[0].X_add_number;
8510 if (mask & ((valueT) 1 << dep->regindex))
8512 specs[count++] = tmpl;
8517 int min = dep->regindex == PSR_DFL ? 2 : 32;
8518 int max = dep->regindex == PSR_DFL ? 31 : 127;
8519 /* dfh is read on FR32-127; dfl is read on FR2-31 */
8520 for (i = 0; i < NELEMS (idesc->operands); i++)
8522 if (idesc->operands[i] == IA64_OPND_F1
8523 || idesc->operands[i] == IA64_OPND_F2
8524 || idesc->operands[i] == IA64_OPND_F3
8525 || idesc->operands[i] == IA64_OPND_F4)
8527 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8528 if (reg >= min && reg <= max)
8530 specs[count++] = tmpl;
8537 int min = dep->regindex == PSR_MFL ? 2 : 32;
8538 int max = dep->regindex == PSR_MFL ? 31 : 127;
8539 /* mfh is read on writes to FR32-127; mfl is read on writes to
8541 for (i = 0; i < idesc->num_outputs; i++)
8543 if (idesc->operands[i] == IA64_OPND_F1)
8545 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8546 if (reg >= min && reg <= max)
8548 specs[count++] = tmpl;
8553 else if (note == 10)
8555 for (i = 0; i < NELEMS (idesc->operands); i++)
8557 if (idesc->operands[i] == IA64_OPND_R1
8558 || idesc->operands[i] == IA64_OPND_R2
8559 || idesc->operands[i] == IA64_OPND_R3)
8561 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8562 if (regno >= 16 && regno <= 31)
8564 specs[count++] = tmpl;
8575 case IA64_RS_AR_FPSR:
8576 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8578 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8579 if (regno == AR_FPSR)
8581 specs[count++] = tmpl;
8586 specs[count++] = tmpl;
8591 /* Handle all AR[REG] resources */
8592 if (note == 0 || note == 1)
8594 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8595 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
8596 && regno == dep->regindex)
8598 specs[count++] = tmpl;
8600 /* other AR[REG] resources may be affected by AR accesses */
8601 else if (idesc->operands[0] == IA64_OPND_AR3)
8604 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
8605 switch (dep->regindex)
8611 if (regno == AR_BSPSTORE)
8613 specs[count++] = tmpl;
8617 (regno == AR_BSPSTORE
8618 || regno == AR_RNAT))
8620 specs[count++] = tmpl;
8625 else if (idesc->operands[1] == IA64_OPND_AR3)
8628 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
8629 switch (dep->regindex)
8634 if (regno == AR_BSPSTORE || regno == AR_RNAT)
8636 specs[count++] = tmpl;
8643 specs[count++] = tmpl;
8653 /* Handle all CR[REG] resources */
8654 if (note == 0 || note == 1)
8656 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8658 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8659 if (regno == dep->regindex)
8661 specs[count++] = tmpl;
8663 else if (!rsrc_write)
8665 /* Reads from CR[IVR] affect other resources. */
8666 if (regno == CR_IVR)
8668 if ((dep->regindex >= CR_IRR0
8669 && dep->regindex <= CR_IRR3)
8670 || dep->regindex == CR_TPR)
8672 specs[count++] = tmpl;
8679 specs[count++] = tmpl;
8688 case IA64_RS_INSERVICE:
8689 /* look for write of EOI (67) or read of IVR (65) */
8690 if ((idesc->operands[0] == IA64_OPND_CR3
8691 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
8692 || (idesc->operands[1] == IA64_OPND_CR3
8693 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
8695 specs[count++] = tmpl;
8702 specs[count++] = tmpl;
8713 specs[count++] = tmpl;
8717 /* Check if any of the registers accessed are in the rotating region.
8718 mov to/from pr accesses CFM only when qp_regno is in the rotating
8720 for (i = 0; i < NELEMS (idesc->operands); i++)
8722 if (idesc->operands[i] == IA64_OPND_R1
8723 || idesc->operands[i] == IA64_OPND_R2
8724 || idesc->operands[i] == IA64_OPND_R3)
8726 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8727 /* Assumes that md.rot.num_regs is always valid */
8728 if (md.rot.num_regs > 0
8730 && num < 31 + md.rot.num_regs)
8732 specs[count] = tmpl;
8733 specs[count++].specific = 0;
8736 else if (idesc->operands[i] == IA64_OPND_F1
8737 || idesc->operands[i] == IA64_OPND_F2
8738 || idesc->operands[i] == IA64_OPND_F3
8739 || idesc->operands[i] == IA64_OPND_F4)
8741 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8744 specs[count] = tmpl;
8745 specs[count++].specific = 0;
8748 else if (idesc->operands[i] == IA64_OPND_P1
8749 || idesc->operands[i] == IA64_OPND_P2)
8751 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
8754 specs[count] = tmpl;
8755 specs[count++].specific = 0;
8759 if (CURR_SLOT.qp_regno > 15)
8761 specs[count] = tmpl;
8762 specs[count++].specific = 0;
8767 /* This is the same as IA64_RS_PRr, except simplified to account for
8768 the fact that there is only one register. */
8772 specs[count++] = tmpl;
8777 if (idesc->operands[2] == IA64_OPND_IMM17)
8778 mask = CURR_SLOT.opnd[2].X_add_number;
8779 if (mask & ((valueT) 1 << 63))
8780 specs[count++] = tmpl;
8782 else if (note == 11)
8784 if ((idesc->operands[0] == IA64_OPND_P1
8785 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
8786 || (idesc->operands[1] == IA64_OPND_P2
8787 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
8789 specs[count++] = tmpl;
8792 else if (note == 12)
8794 if (CURR_SLOT.qp_regno == 63)
8796 specs[count++] = tmpl;
8803 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8804 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8805 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8806 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8809 && (idesc->operands[0] == IA64_OPND_P1
8810 || idesc->operands[0] == IA64_OPND_P2))
8812 specs[count] = tmpl;
8813 specs[count++].cmp_type =
8814 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8817 && (idesc->operands[1] == IA64_OPND_P1
8818 || idesc->operands[1] == IA64_OPND_P2))
8820 specs[count] = tmpl;
8821 specs[count++].cmp_type =
8822 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8827 if (CURR_SLOT.qp_regno == 63)
8829 specs[count++] = tmpl;
8840 /* FIXME we can identify some individual RSE written resources, but RSE
8841 read resources have not yet been completely identified, so for now
8842 treat RSE as a single resource */
8843 if (strncmp (idesc->name, "mov", 3) == 0)
8847 if (idesc->operands[0] == IA64_OPND_AR3
8848 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
8850 specs[count] = tmpl;
8851 specs[count++].index = 0; /* IA64_RSE_BSPLOAD/RNATBITINDEX */
8856 if (idesc->operands[0] == IA64_OPND_AR3)
8858 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
8859 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
8861 specs[count++] = tmpl;
8864 else if (idesc->operands[1] == IA64_OPND_AR3)
8866 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
8867 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
8868 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
8870 specs[count++] = tmpl;
8877 specs[count++] = tmpl;
8882 /* FIXME -- do any of these need to be non-specific? */
8883 specs[count++] = tmpl;
8887 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
8894 /* Clear branch flags on marked resources. This breaks the link between the
8895 QP of the marking instruction and a subsequent branch on the same QP. */
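/* For example (illustrative): if "(p6) mov ar.ccv = r2" marks a resource and
   a later "(p6) br.cond.sptk L1" branches on the same predicate, the two are
   linked so the branch can clear the mark.  A write to p6 in between makes
   that association meaningless, so the flag is cleared here.  */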
8898 clear_qp_branch_flag (mask)
8902 for (i = 0; i < regdepslen; i++)
8904 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
8905 if ((bit & mask) != 0)
8907 regdeps[i].link_to_qp_branch = 0;
8912 /* MASK contains 2 and only 2 PRs which are mutually exclusive. Remove
8913 any mutexes which contain one of the PRs and create new ones when
8917 update_qp_mutex (valueT mask)
8923 while (i < qp_mutexeslen)
8925 if ((qp_mutexes[i].prmask & mask) != 0)
8927 /* If it destroys and creates the same mutex, do nothing. */
8928 if (qp_mutexes[i].prmask == mask
8929 && qp_mutexes[i].path == md.path)
8940 fprintf (stderr, " Clearing mutex relation");
8941 print_prmask (qp_mutexes[i].prmask);
8942 fprintf (stderr, "\n");
8945 /* Deal with an old mutex containing three or more PRs only if
8946 the new mutex is on the same execution path as it.
8948 FIXME: The 3+ mutex support is incomplete.
8949 dot_pred_rel () may be a better place to fix it. */
8950 if (qp_mutexes[i].path == md.path)
8952 /* If it is a proper subset of the mutex, create a
8955 && (qp_mutexes[i].prmask & mask) == mask)
8958 qp_mutexes[i].prmask &= ~mask;
8959 if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
8961 /* Modify the mutex if there are more than one
8969 /* Remove the mutex. */
8970 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
8978 add_qp_mutex (mask);
8983 /* Remove any mutexes which contain any of the PRs indicated in the mask.
8985 Any change to a PR clears the mutex relations which include that PR. */
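/* For example (illustrative): if "cmp.eq p4, p5 = r2, r3" recorded p4 and p5
   as mutex and a later compare writes p4 again, the old mutex can no longer
   be trusted and is removed here.  */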
8988 clear_qp_mutex (mask)
8994 while (i < qp_mutexeslen)
8996 if ((qp_mutexes[i].prmask & mask) != 0)
9000 fprintf (stderr, " Clearing mutex relation");
9001 print_prmask (qp_mutexes[i].prmask);
9002 fprintf (stderr, "\n");
9004 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9011 /* Clear implies relations which contain PRs in the given masks.
9012 P1_MASK indicates the source of the implies relation, while P2_MASK
9013 indicates the implied PR. */
9016 clear_qp_implies (p1_mask, p2_mask)
9023 while (i < qp_implieslen)
9025 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
9026 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
9029 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
9030 qp_implies[i].p1, qp_implies[i].p2);
9031 qp_implies[i] = qp_implies[--qp_implieslen];
9038 /* Add the PRs specified to the list of implied relations. */
9041 add_qp_imply (p1, p2)
9048 /* p0 is not meaningful here. */
9049 if (p1 == 0 || p2 == 0)
9055 /* If it exists already, ignore it. */
9056 for (i = 0; i < qp_implieslen; i++)
9058 if (qp_implies[i].p1 == p1
9059 && qp_implies[i].p2 == p2
9060 && qp_implies[i].path == md.path
9061 && !qp_implies[i].p2_branched)
9065 if (qp_implieslen == qp_impliestotlen)
9067 qp_impliestotlen += 20;
9068 qp_implies = (struct qp_imply *)
9069 xrealloc ((void *) qp_implies,
9070 qp_impliestotlen * sizeof (struct qp_imply));
9073 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
9074 qp_implies[qp_implieslen].p1 = p1;
9075 qp_implies[qp_implieslen].p2 = p2;
9076 qp_implies[qp_implieslen].path = md.path;
9077 qp_implies[qp_implieslen++].p2_branched = 0;
9079 /* Add in the implied transitive relations; for everything that p2 implies,
9080 make p1 imply that, too; for everything that implies p1, make it imply p2
9082 for (i = 0; i < qp_implieslen; i++)
9084 if (qp_implies[i].p1 == p2)
9085 add_qp_imply (p1, qp_implies[i].p2);
9086 if (qp_implies[i].p2 == p1)
9087 add_qp_imply (qp_implies[i].p1, p2);
9089 /* Add in mutex relations implied by this implies relation; for each mutex
9090 relation containing p2, duplicate it and replace p2 with p1. */
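/* For example: if p1 implies p2 and {p2, p5} are mutex, then {p1, p5} are
   mutex as well -- whenever p1 is 1, p2 is 1, so p5 must be 0.  That is the
   duplication performed by the loop below.  */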
9091 bit = (valueT) 1 << p1;
9092 mask = (valueT) 1 << p2;
9093 for (i = 0; i < qp_mutexeslen; i++)
9095 if (qp_mutexes[i].prmask & mask)
9096 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
9100 /* Add the PRs specified in the mask to the mutex list; this means that only
9101 one of the PRs can be true at any time. PR0 should never be included in
9111 if (qp_mutexeslen == qp_mutexestotlen)
9113 qp_mutexestotlen += 20;
9114 qp_mutexes = (struct qpmutex *)
9115 xrealloc ((void *) qp_mutexes,
9116 qp_mutexestotlen * sizeof (struct qpmutex));
9120 fprintf (stderr, " Registering mutex on");
9121 print_prmask (mask);
9122 fprintf (stderr, "\n");
9124 qp_mutexes[qp_mutexeslen].path = md.path;
9125 qp_mutexes[qp_mutexeslen++].prmask = mask;
9129 has_suffix_p (name, suffix)
9133 size_t namelen = strlen (name);
9134 size_t sufflen = strlen (suffix);
9136 if (namelen <= sufflen)
9138 return strcmp (name + namelen - sufflen, suffix) == 0;
9142 clear_register_values ()
9146 fprintf (stderr, " Clearing register values\n");
9147 for (i = 1; i < NELEMS (gr_values); i++)
9148 gr_values[i].known = 0;
9151 /* Keep track of register values/changes which affect DV tracking.
9153 optimization note: should add a flag to classes of insns where otherwise we
9154 have to examine a group of strings to identify them. */
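/* For example (illustrative):

       movl r3 = 0x6000000000000000   // virtual region 3
       ;;
       mov rr[r3] = r4

   Because the value of r3 is tracked, the DV logic knows that only RR[3] is
   written instead of assuming a conflict with every region register.  */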
9157 note_register_values (idesc)
9158 struct ia64_opcode *idesc;
9160 valueT qp_changemask = 0;
9163 /* Invalidate values for registers being written to. */
9164 for (i = 0; i < idesc->num_outputs; i++)
9166 if (idesc->operands[i] == IA64_OPND_R1
9167 || idesc->operands[i] == IA64_OPND_R2
9168 || idesc->operands[i] == IA64_OPND_R3)
9170 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9171 if (regno > 0 && regno < NELEMS (gr_values))
9172 gr_values[regno].known = 0;
9174 else if (idesc->operands[i] == IA64_OPND_R3_2)
9176 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9177 if (regno > 0 && regno < 4)
9178 gr_values[regno].known = 0;
9180 else if (idesc->operands[i] == IA64_OPND_P1
9181 || idesc->operands[i] == IA64_OPND_P2)
9183 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9184 qp_changemask |= (valueT) 1 << regno;
9186 else if (idesc->operands[i] == IA64_OPND_PR)
9188 if (idesc->operands[2] & (valueT) 0x10000)
9189 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
9191 qp_changemask = idesc->operands[2];
9194 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
9196 if (idesc->operands[1] & ((valueT) 1 << 43))
9197 qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
9199 qp_changemask = idesc->operands[1];
9200 qp_changemask &= ~(valueT) 0xFFFF;
9205 /* Always clear qp branch flags on any PR change. */
9206 /* FIXME there may be exceptions for certain compares. */
9207 clear_qp_branch_flag (qp_changemask);
9209 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
9210 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
9212 qp_changemask |= ~(valueT) 0xFFFF;
9213 if (strcmp (idesc->name, "clrrrb.pr") != 0)
9215 for (i = 32; i < 32 + md.rot.num_regs; i++)
9216 gr_values[i].known = 0;
9218 clear_qp_mutex (qp_changemask);
9219 clear_qp_implies (qp_changemask, qp_changemask);
9221 /* After a call, all register values are undefined, except those marked
9223 else if (strncmp (idesc->name, "br.call", 6) == 0
9224 || strncmp (idesc->name, "brl.call", 7) == 0)
9226 /* FIXME keep GR values which are marked as "safe_across_calls" */
9227 clear_register_values ();
9228 clear_qp_mutex (~qp_safe_across_calls);
9229 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
9230 clear_qp_branch_flag (~qp_safe_across_calls);
9232 else if (is_interruption_or_rfi (idesc)
9233 || is_taken_branch (idesc))
9235 clear_register_values ();
9236 clear_qp_mutex (~(valueT) 0);
9237 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
9239 /* Look for mutex and implies relations. */
9240 else if ((idesc->operands[0] == IA64_OPND_P1
9241 || idesc->operands[0] == IA64_OPND_P2)
9242 && (idesc->operands[1] == IA64_OPND_P1
9243 || idesc->operands[1] == IA64_OPND_P2))
9245 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9246 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9247 valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
9248 valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;
9250 /* If both PRs are PR0, we can't really do anything. */
9251 if (p1 == 0 && p2 == 0)
9254 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
9256 /* In general, clear mutexes and implies which include P1 or P2,
9257 with the following exceptions. */
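/* Rationale: the plain .and/.andcm forms can only clear their targets, which
   cannot break an existing mutex, so only implies relations whose implied PR
   is one of the targets are dropped.  The plain .or/.orcm forms can set
   their targets, which can break a mutex and any implies relation sourced at
   a target, so those are cleared as well.  */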
9258 else if (has_suffix_p (idesc->name, ".or.andcm")
9259 || has_suffix_p (idesc->name, ".and.orcm"))
9261 clear_qp_implies (p2mask, p1mask);
9263 else if (has_suffix_p (idesc->name, ".andcm")
9264 || has_suffix_p (idesc->name, ".and"))
9266 clear_qp_implies (0, p1mask | p2mask);
9268 else if (has_suffix_p (idesc->name, ".orcm")
9269 || has_suffix_p (idesc->name, ".or"))
9271 clear_qp_mutex (p1mask | p2mask);
9272 clear_qp_implies (p1mask | p2mask, 0);
9278 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9280 /* If one of the PRs is PR0, we call clear_qp_mutex. */
9281 if (p1 == 0 || p2 == 0)
9282 clear_qp_mutex (p1mask | p2mask);
9284 added = update_qp_mutex (p1mask | p2mask);
9286 if (CURR_SLOT.qp_regno == 0
9287 || has_suffix_p (idesc->name, ".unc"))
9289 if (added == 0 && p1 && p2)
9290 add_qp_mutex (p1mask | p2mask);
9291 if (CURR_SLOT.qp_regno != 0)
9294 add_qp_imply (p1, CURR_SLOT.qp_regno);
9296 add_qp_imply (p2, CURR_SLOT.qp_regno);
9301 /* Look for mov imm insns into GRs. */
9302 else if (idesc->operands[0] == IA64_OPND_R1
9303 && (idesc->operands[1] == IA64_OPND_IMM22
9304 || idesc->operands[1] == IA64_OPND_IMMU64)
9305 && (strcmp (idesc->name, "mov") == 0
9306 || strcmp (idesc->name, "movl") == 0))
9308 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9309 if (regno > 0 && regno < NELEMS (gr_values))
9311 gr_values[regno].known = 1;
9312 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
9313 gr_values[regno].path = md.path;
9316 fprintf (stderr, " Know gr%d = ", regno);
9317 fprintf_vma (stderr, gr_values[regno].value);
9318 fputs ("\n", stderr);
9324 clear_qp_mutex (qp_changemask);
9325 clear_qp_implies (qp_changemask, qp_changemask);
9329 /* Return whether the given predicate registers are currently mutex. */
9332 qp_mutex (p1, p2, path)
9342 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
9343 for (i = 0; i < qp_mutexeslen; i++)
9345 if (qp_mutexes[i].path >= path
9346 && (qp_mutexes[i].prmask & mask) == mask)
9353 /* Return whether the given resource is in the given insn's list of chks.
9354 Return 1 if the conflict is absolutely determined, 2 if it's a potential
9358 resources_match (rs, idesc, note, qp_regno, path)
9360 struct ia64_opcode *idesc;
9365 struct rsrc specs[MAX_SPECS];
9368 /* If the marked resource's qp_regno and the given qp_regno are mutex,
9369 we don't need to check. One exception is note 11, which indicates that
9370 target predicates are written regardless of PR[qp]. */
9371 if (qp_mutex (rs->qp_regno, qp_regno, path)
9375 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
9378 /* UNAT checking is a bit more specific than other resources */
9379 if (rs->dependency->specifier == IA64_RS_AR_UNAT
9380 && specs[count].mem_offset.hint
9381 && rs->mem_offset.hint)
9383 if (rs->mem_offset.base == specs[count].mem_offset.base)
9385 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
9386 ((specs[count].mem_offset.offset >> 3) & 0x3F))
9393 /* Skip apparent PR write conflicts where both writes are an AND or both
9394 writes are an OR. */
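/* The architecture allows this; for example (illustrative),

       cmp.eq.or p5, p6 = r2, r3
       cmp.ne.or p5, p6 = r4, r5

   may sit in the same instruction group even though both write p5 and p6,
   because every writer uses the same OR semantics.  */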
9395 if (rs->dependency->specifier == IA64_RS_PR
9396 || rs->dependency->specifier == IA64_RS_PRr
9397 || rs->dependency->specifier == IA64_RS_PR63)
9399 if (specs[count].cmp_type != CMP_NONE
9400 && specs[count].cmp_type == rs->cmp_type)
9403 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
9404 dv_mode[rs->dependency->mode],
9405 rs->dependency->specifier != IA64_RS_PR63 ?
9406 specs[count].index : 63);
9411 " %s on parallel compare conflict %s vs %s on PR%d\n",
9412 dv_mode[rs->dependency->mode],
9413 dv_cmp_type[rs->cmp_type],
9414 dv_cmp_type[specs[count].cmp_type],
9415 rs->dependency->specifier != IA64_RS_PR63 ?
9416 specs[count].index : 63);
9420 /* If either resource is not specific, conservatively assume a conflict
9422 if (!specs[count].specific || !rs->specific)
9424 else if (specs[count].index == rs->index)
9429 fprintf (stderr, " No %s conflicts\n", rs->dependency->name);
9435 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
9436 insert a stop to create the break. Update all resource dependencies
9437 appropriately. If QP_REGNO is non-zero, only apply the break to resources
9438 which use the same QP_REGNO and have the link_to_qp_branch flag set.
9439 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
9443 insn_group_break (insert_stop, qp_regno, save_current)
9450 if (insert_stop && md.num_slots_in_use > 0)
9451 PREV_SLOT.end_of_insn_group = 1;
9455 fprintf (stderr, " Insn group break%s",
9456 (insert_stop ? " (w/stop)" : ""));
9458 fprintf (stderr, " effective for QP=%d", qp_regno);
9459 fprintf (stderr, "\n");
9463 while (i < regdepslen)
9465 const struct ia64_dependency *dep = regdeps[i].dependency;
9468 && regdeps[i].qp_regno != qp_regno)
9475 && CURR_SLOT.src_file == regdeps[i].file
9476 && CURR_SLOT.src_line == regdeps[i].line)
9482 /* clear dependencies which are automatically cleared by a stop, or
9483 those that have reached the appropriate state of insn serialization */
9484 if (dep->semantics == IA64_DVS_IMPLIED
9485 || dep->semantics == IA64_DVS_IMPLIEDF
9486 || regdeps[i].insn_srlz == STATE_SRLZ)
9488 print_dependency ("Removing", i);
9489 regdeps[i] = regdeps[--regdepslen];
9493 if (dep->semantics == IA64_DVS_DATA
9494 || dep->semantics == IA64_DVS_INSTR
9495 || dep->semantics == IA64_DVS_SPECIFIC)
9497 if (regdeps[i].insn_srlz == STATE_NONE)
9498 regdeps[i].insn_srlz = STATE_STOP;
9499 if (regdeps[i].data_srlz == STATE_NONE)
9500 regdeps[i].data_srlz = STATE_STOP;
9507 /* Add the given resource usage spec to the list of active dependencies. */
9510 mark_resource (idesc, dep, spec, depind, path)
9511 struct ia64_opcode *idesc ATTRIBUTE_UNUSED;
9512 const struct ia64_dependency *dep ATTRIBUTE_UNUSED;
9517 if (regdepslen == regdepstotlen)
9519 regdepstotlen += 20;
9520 regdeps = (struct rsrc *)
9521 xrealloc ((void *) regdeps,
9522 regdepstotlen * sizeof (struct rsrc));
9525 regdeps[regdepslen] = *spec;
9526 regdeps[regdepslen].depind = depind;
9527 regdeps[regdepslen].path = path;
9528 regdeps[regdepslen].file = CURR_SLOT.src_file;
9529 regdeps[regdepslen].line = CURR_SLOT.src_line;
9531 print_dependency ("Adding", regdepslen);
9537 print_dependency (action, depind)
9543 fprintf (stderr, " %s %s '%s'",
9544 action, dv_mode[(regdeps[depind].dependency)->mode],
9545 (regdeps[depind].dependency)->name);
9546 if (regdeps[depind].specific && regdeps[depind].index != 0)
9547 fprintf (stderr, " (%d)", regdeps[depind].index);
9548 if (regdeps[depind].mem_offset.hint)
9550 fputs (" ", stderr);
9551 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
9552 fputs ("+", stderr);
9553 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
9555 fprintf (stderr, "\n");
9560 instruction_serialization ()
9564 fprintf (stderr, " Instruction serialization\n");
9565 for (i = 0; i < regdepslen; i++)
9566 if (regdeps[i].insn_srlz == STATE_STOP)
9567 regdeps[i].insn_srlz = STATE_SRLZ;
9571 data_serialization ()
9575 fprintf (stderr, " Data serialization\n");
9576 while (i < regdepslen)
9578 if (regdeps[i].data_srlz == STATE_STOP
9579 /* Note: as of 991210, all "other" dependencies are cleared by a
9580 data serialization. This might change with new tables */
9581 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
9583 print_dependency ("Removing", i);
9584 regdeps[i] = regdeps[--regdepslen];
9591 /* Insert stops and serializations as needed to avoid DVs. */
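/* Depending on the dependency's semantics this means a stop alone for
   IMPLIED/IMPLIEDF, a stop plus a jammed srlz.d for DATA (and OTHER), or a
   stop plus a jammed srlz.i for INSTR (and, conservatively, SPECIFIC).  */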
9594 remove_marked_resource (rs)
9597 switch (rs->dependency->semantics)
9599 case IA64_DVS_SPECIFIC:
9601 fprintf (stderr, "Implementation-specific, assume worst case...\n");
9602 /* ...fall through... */
9603 case IA64_DVS_INSTR:
9605 fprintf (stderr, "Inserting instr serialization\n");
9606 if (rs->insn_srlz < STATE_STOP)
9607 insn_group_break (1, 0, 0);
9608 if (rs->insn_srlz < STATE_SRLZ)
9610 struct slot oldslot = CURR_SLOT;
9611 /* Manually jam a srlz.i insn into the stream */
9612 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
9613 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
9614 instruction_serialization ();
9615 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9616 if (++md.num_slots_in_use >= NUM_SLOTS)
9618 CURR_SLOT = oldslot;
9620 insn_group_break (1, 0, 0);
9622 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
9623 "other" types of DV are eliminated
9624 by a data serialization */
9627 fprintf (stderr, "Inserting data serialization\n");
9628 if (rs->data_srlz < STATE_STOP)
9629 insn_group_break (1, 0, 0);
9631 struct slot oldslot = CURR_SLOT;
9632 /* Manually jam a srlz.d insn into the stream */
9633 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
9634 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
9635 data_serialization ();
9636 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9637 if (++md.num_slots_in_use >= NUM_SLOTS)
9639 CURR_SLOT = oldslot;
9642 case IA64_DVS_IMPLIED:
9643 case IA64_DVS_IMPLIEDF:
9645 fprintf (stderr, "Inserting stop\n");
9646 insn_group_break (1, 0, 0);
9653 /* Check the resources used by the given opcode against the current dependency
9656 The check is run once for each execution path encountered. In this case,
9657 a unique execution path is the sequence of instructions following a code
9658 entry point, e.g. the following has three execution paths, one starting
9659 at L0, one at L1, and one at L2.
9668 check_dependencies (idesc)
9669 struct ia64_opcode *idesc;
9671 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
9675 /* Note that the number of marked resources may change within the
9676 loop if in auto mode. */
9678 while (i < regdepslen)
9680 struct rsrc *rs = &regdeps[i];
9681 const struct ia64_dependency *dep = rs->dependency;
9686 if (dep->semantics == IA64_DVS_NONE
9687 || (chkind = depends_on (rs->depind, idesc)) == -1)
9693 note = NOTE (opdeps->chks[chkind]);
9695 /* Check this resource against each execution path seen thus far. */
9696 for (path = 0; path <= md.path; path++)
9700 /* If the dependency wasn't on the path being checked, ignore it. */
9701 if (rs->path < path)
9704 /* If the QP for this insn implies a QP which has branched, don't
9705 bother checking. Ed. NOTE: I don't think this check is terribly
9706 useful; what's the point of generating code which will only be
9707 reached if its QP is zero?
9708 This code was specifically inserted to handle the following code,
9709 based on notes from Intel's DV checking code, where p1 implies p2.
9715 if (CURR_SLOT.qp_regno != 0)
9719 for (implies = 0; implies < qp_implieslen; implies++)
9721 if (qp_implies[implies].path >= path
9722 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
9723 && qp_implies[implies].p2_branched)
9733 if ((matchtype = resources_match (rs, idesc, note,
9734 CURR_SLOT.qp_regno, path)) != 0)
9737 char pathmsg[256] = "";
9738 char indexmsg[256] = "";
9739 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
9742 sprintf (pathmsg, " when entry is at label '%s'",
9743 md.entry_labels[path - 1]);
9744 if (rs->specific && rs->index != 0)
9745 sprintf (indexmsg, ", specific resource number is %d",
9747 sprintf (msg, "Use of '%s' %s %s dependency '%s' (%s)%s%s",
9749 (certain ? "violates" : "may violate"),
9750 dv_mode[dep->mode], dep->name,
9751 dv_sem[dep->semantics],
9754 if (md.explicit_mode)
9756 as_warn ("%s", msg);
9758 as_warn (_("Only the first path encountering the conflict "
9760 as_warn_where (rs->file, rs->line,
9761 _("This is the location of the "
9762 "conflicting usage"));
9763 /* Don't bother checking other paths, to avoid duplicating
9770 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
9772 remove_marked_resource (rs);
9774 /* since the set of dependencies has changed, start over */
9775 /* FIXME -- since we're removing dvs as we go, we
9776 probably don't really need to start over... */
9789 /* Register new dependencies based on the given opcode. */
9792 mark_resources (idesc)
9793 struct ia64_opcode *idesc;
9796 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
9797 int add_only_qp_reads = 0;
9799 /* A conditional branch only uses its resources if it is taken; if it is
9800 taken, we stop following that path. The other branch types effectively
9801 *always* write their resources. If it's not taken, register only QP
9803 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
9805 add_only_qp_reads = 1;
9809 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
9811 for (i = 0; i < opdeps->nregs; i++)
9813 const struct ia64_dependency *dep;
9814 struct rsrc specs[MAX_SPECS];
9819 dep = ia64_find_dependency (opdeps->regs[i]);
9820 note = NOTE (opdeps->regs[i]);
9822 if (add_only_qp_reads
9823 && !(dep->mode == IA64_DV_WAR
9824 && (dep->specifier == IA64_RS_PR
9825 || dep->specifier == IA64_RS_PRr
9826 || dep->specifier == IA64_RS_PR63)))
9829 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
9832 if (md.debug_dv && !count)
9833 fprintf (stderr, " No %s %s usage found (path %d)\n",
9834 dv_mode[dep->mode], dep->name, md.path);
9839 mark_resource (idesc, dep, &specs[count],
9840 DEP (opdeps->regs[i]), md.path);
9843 /* The execution path may affect register values, which may in turn
9844 affect which indirect-access resources are accessed. */
9845 switch (dep->specifier)
9857 for (path = 0; path < md.path; path++)
9859 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
9861 mark_resource (idesc, dep, &specs[count],
9862 DEP (opdeps->regs[i]), path);
9869 /* Remove dependencies when they no longer apply. */
9872 update_dependencies (idesc)
9873 struct ia64_opcode *idesc;
9877 if (strcmp (idesc->name, "srlz.i") == 0)
9879 instruction_serialization ();
9881 else if (strcmp (idesc->name, "srlz.d") == 0)
9883 data_serialization ();
9885 else if (is_interruption_or_rfi (idesc)
9886 || is_taken_branch (idesc))
9888 /* Although technically the taken branch doesn't clear dependencies
9889 which require a srlz.[id], we don't follow the branch; the next
9890 instruction is assumed to start with a clean slate. */
9894 else if (is_conditional_branch (idesc)
9895 && CURR_SLOT.qp_regno != 0)
9897 int is_call = strstr (idesc->name, ".call") != NULL;
9899 for (i = 0; i < qp_implieslen; i++)
9901 /* If the conditional branch's predicate is implied by the predicate
9902 in an existing dependency, remove that dependency. */
9903 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
9906 /* Note that this implied predicate takes a branch so that if
9907 a later insn generates a DV but its predicate implies this
9908 one, we can avoid the false DV warning. */
9909 qp_implies[i].p2_branched = 1;
9910 while (depind < regdepslen)
9912 if (regdeps[depind].qp_regno == qp_implies[i].p1)
9914 print_dependency ("Removing", depind);
9915 regdeps[depind] = regdeps[--regdepslen];
9922 /* Any marked resources which have this same predicate should be
9923 cleared, provided that the QP hasn't been modified between the
9924 marking instruction and the branch. */
9927 insn_group_break (0, CURR_SLOT.qp_regno, 1);
9932 while (i < regdepslen)
9934 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
9935 && regdeps[i].link_to_qp_branch
9936 && (regdeps[i].file != CURR_SLOT.src_file
9937 || regdeps[i].line != CURR_SLOT.src_line))
9939 /* Treat like a taken branch */
9940 print_dependency ("Removing", i);
9941 regdeps[i] = regdeps[--regdepslen];
9950 /* Examine the current instruction for dependency violations. */
9954 struct ia64_opcode *idesc;
9958 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
9959 idesc->name, CURR_SLOT.src_line,
9960 idesc->dependencies->nchks,
9961 idesc->dependencies->nregs);
9964 /* Look through the list of currently marked resources; if the current
9965 instruction has the dependency in its chks list which uses that resource,
9966 check against the specific resources used. */
9967 check_dependencies (idesc);
9969 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
9970 then add them to the list of marked resources. */
9971 mark_resources (idesc);
9973 /* There are several types of dependency semantics, and each has its own
9974 requirements for being cleared
9976 Instruction serialization (insns separated by interruption, rfi, or
9977 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
9979 Data serialization (instruction serialization, or writer + srlz.d +
9980 reader, where writer and srlz.d are in separate groups) clears
9981 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
9982 always be the case).
9984 Instruction group break (groups separated by stop, taken branch,
9985 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
9987 update_dependencies (idesc);
9989 /* Sometimes, knowing a register value allows us to avoid giving a false DV
9990 warning. Keep track of as many as possible that are useful. */
9991 note_register_values (idesc);
9993 /* We don't need or want this anymore. */
9994 md.mem_offset.hint = 0;
9999 /* Translate one line of assembly. Pseudo ops and labels do not show
10005 char *saved_input_line_pointer, *mnemonic;
10006 const struct pseudo_opcode *pdesc;
10007 struct ia64_opcode *idesc;
10008 unsigned char qp_regno;
10009 unsigned int flags;
10012 saved_input_line_pointer = input_line_pointer;
10013 input_line_pointer = str;
10015 /* extract the opcode (mnemonic): */
10017 mnemonic = input_line_pointer;
10018 ch = get_symbol_end ();
10019 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic);
10022 *input_line_pointer = ch;
10023 (*pdesc->handler) (pdesc->arg);
10027 /* Find the instruction descriptor matching the arguments. */
10029 idesc = ia64_find_opcode (mnemonic);
10030 *input_line_pointer = ch;
10033 as_bad ("Unknown opcode `%s'", mnemonic);
10037 idesc = parse_operands (idesc);
10041 /* Handle the dynamic ops we can handle now: */
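/* For example (descriptive note): a plain "mov" to or from an application
   register is ambiguous between the I-unit and M-unit encodings; when the AR
   operand is a literal register it is resolved to "mov.i" or "mov.m" below
   (illustratively, ar.lc is reachable only from the I-unit while ar.itc is
   reachable only from the M-unit).  */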
10042 if (idesc->type == IA64_TYPE_DYN)
10044 if (strcmp (idesc->name, "add") == 0)
10046 if (CURR_SLOT.opnd[2].X_op == O_register
10047 && CURR_SLOT.opnd[2].X_add_number < 4)
10051 ia64_free_opcode (idesc);
10052 idesc = ia64_find_opcode (mnemonic);
10054 know (!idesc->next);
10057 else if (strcmp (idesc->name, "mov") == 0)
10059 enum ia64_opnd opnd1, opnd2;
10062 opnd1 = idesc->operands[0];
10063 opnd2 = idesc->operands[1];
10064 if (opnd1 == IA64_OPND_AR3)
10066 else if (opnd2 == IA64_OPND_AR3)
10070 if (CURR_SLOT.opnd[rop].X_op == O_register)
10072 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10073 mnemonic = "mov.i";
10075 mnemonic = "mov.m";
10079 ia64_free_opcode (idesc);
10080 idesc = ia64_find_opcode (mnemonic);
10081 while (idesc != NULL
10082 && (idesc->operands[0] != opnd1
10083 || idesc->operands[1] != opnd2))
10084 idesc = get_next_opcode (idesc);
10087 else if (strcmp (idesc->name, "mov.i") == 0
10088 || strcmp (idesc->name, "mov.m") == 0)
10090 enum ia64_opnd opnd1, opnd2;
10093 opnd1 = idesc->operands[0];
10094 opnd2 = idesc->operands[1];
10095 if (opnd1 == IA64_OPND_AR3)
10097 else if (opnd2 == IA64_OPND_AR3)
10101 if (CURR_SLOT.opnd[rop].X_op == O_register)
10104 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10106 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10108 if (unit != 'a' && unit != idesc->name [4])
10109 as_bad ("AR %d cannot be accessed by %c-unit",
10110 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR),
10116 if (md.qp.X_op == O_register)
10118 qp_regno = md.qp.X_add_number - REG_P;
10119 md.qp.X_op = O_absent;
10122 flags = idesc->flags;
10124 if ((flags & IA64_OPCODE_FIRST) != 0)
10126 /* The alignment frag has to end with a stop bit only if the
10127 next instruction after the alignment directive has to be
10128 the first instruction in an instruction group. */
10131 while (align_frag->fr_type != rs_align_code)
10133 align_frag = align_frag->fr_next;
10137 /* align_frag can be NULL if there are directives in
10139 if (align_frag && align_frag->fr_next == frag_now)
10140 align_frag->tc_frag_data = 1;
10143 insn_group_break (1, 0, 0);
10147 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
10149 as_bad ("`%s' cannot be predicated", idesc->name);
10153 /* Build the instruction. */
10154 CURR_SLOT.qp_regno = qp_regno;
10155 CURR_SLOT.idesc = idesc;
10156 as_where (&CURR_SLOT.src_file, &CURR_SLOT.src_line);
10157 dwarf2_where (&CURR_SLOT.debug_line);
10159 /* Add unwind entry, if there is one. */
10160 if (unwind.current_entry)
10162 CURR_SLOT.unwind_record = unwind.current_entry;
10163 unwind.current_entry = NULL;
10166 /* Check for dependency violations. */
10170 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10171 if (++md.num_slots_in_use >= NUM_SLOTS)
10172 emit_one_bundle ();
10174 if ((flags & IA64_OPCODE_LAST) != 0)
10175 insn_group_break (1, 0, 0);
10177 md.last_text_seg = now_seg;
10180 input_line_pointer = saved_input_line_pointer;
10183 /* Called when symbol NAME cannot be found in the symbol table.
10184 Should be used for dynamic valued symbols only. */
10187 md_undefined_symbol (name)
10188 char *name ATTRIBUTE_UNUSED;
10193 /* Called for any expression that cannot be recognized. When the
10194 function is called, `input_line_pointer' will point to the start of
10201 enum pseudo_type pseudo_type;
10206 switch (*input_line_pointer)
10209 /* Find what relocation pseudo-function we're dealing with. */
10211 ch = *++input_line_pointer;
10212 for (i = 0; i < NELEMS (pseudo_func); ++i)
10213 if (pseudo_func[i].name && pseudo_func[i].name[0] == ch)
10215 len = strlen (pseudo_func[i].name);
10216 if (strncmp (pseudo_func[i].name + 1,
10217 input_line_pointer + 1, len - 1) == 0
10218 && !is_part_of_name (input_line_pointer[len]))
10220 input_line_pointer += len;
10221 pseudo_type = pseudo_func[i].type;
10225 switch (pseudo_type)
10227 case PSEUDO_FUNC_RELOC:
10228 SKIP_WHITESPACE ();
10229 if (*input_line_pointer != '(')
10231 as_bad ("Expected '('");
10235 ++input_line_pointer;
10237 if (*input_line_pointer++ != ')')
10239 as_bad ("Missing ')'");
10242 if (e->X_op != O_symbol)
10244 if (e->X_op != O_pseudo_fixup)
10246 as_bad ("Not a symbolic expression");
10249 if (i != FUNC_LT_RELATIVE)
10251 as_bad ("Illegal combination of relocation functions");
10254 switch (S_GET_VALUE (e->X_op_symbol))
10256 case FUNC_FPTR_RELATIVE:
10257 i = FUNC_LT_FPTR_RELATIVE; break;
10258 case FUNC_DTP_MODULE:
10259 i = FUNC_LT_DTP_MODULE; break;
10260 case FUNC_DTP_RELATIVE:
10261 i = FUNC_LT_DTP_RELATIVE; break;
10262 case FUNC_TP_RELATIVE:
10263 i = FUNC_LT_TP_RELATIVE; break;
10265 as_bad ("Illegal combination of relocation functions");
10269 /* Make sure gas doesn't get rid of local symbols that are used
10271 e->X_op = O_pseudo_fixup;
10272 e->X_op_symbol = pseudo_func[i].u.sym;
10275 case PSEUDO_FUNC_CONST:
10276 e->X_op = O_constant;
10277 e->X_add_number = pseudo_func[i].u.ival;
10280 case PSEUDO_FUNC_REG:
10281 e->X_op = O_register;
10282 e->X_add_number = pseudo_func[i].u.ival;
10286 name = input_line_pointer - 1;
10288 as_bad ("Unknown pseudo function `%s'", name);
10294 ++input_line_pointer;
10296 if (*input_line_pointer != ']')
10298 as_bad ("Closing bracket missing");
10303 if (e->X_op != O_register)
10304 as_bad ("Register expected as index");
10306 ++input_line_pointer;
10317 ignore_rest_of_line ();
10320 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10321 a section symbol plus some offset. For relocs involving @fptr()
10322 directives, we don't want such adjustments since we need to have the
10323 original symbol's name in the reloc. */
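/* For example (illustrative): "data8 @fptr(foo)" must keep its reloc against
   the symbol foo itself so the linker can build and share the official
   function descriptor for foo; rewriting the reloc as section-symbol plus
   offset would lose that identity.  */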
10325 ia64_fix_adjustable (fix)
10328 /* Prevent all adjustments to global symbols */
10329 if (S_IS_EXTERN (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10332 switch (fix->fx_r_type)
10334 case BFD_RELOC_IA64_FPTR64I:
10335 case BFD_RELOC_IA64_FPTR32MSB:
10336 case BFD_RELOC_IA64_FPTR32LSB:
10337 case BFD_RELOC_IA64_FPTR64MSB:
10338 case BFD_RELOC_IA64_FPTR64LSB:
10339 case BFD_RELOC_IA64_LTOFF_FPTR22:
10340 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10350 ia64_force_relocation (fix)
10353 switch (fix->fx_r_type)
10355 case BFD_RELOC_IA64_FPTR64I:
10356 case BFD_RELOC_IA64_FPTR32MSB:
10357 case BFD_RELOC_IA64_FPTR32LSB:
10358 case BFD_RELOC_IA64_FPTR64MSB:
10359 case BFD_RELOC_IA64_FPTR64LSB:
10361 case BFD_RELOC_IA64_LTOFF22:
10362 case BFD_RELOC_IA64_LTOFF64I:
10363 case BFD_RELOC_IA64_LTOFF_FPTR22:
10364 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10365 case BFD_RELOC_IA64_PLTOFF22:
10366 case BFD_RELOC_IA64_PLTOFF64I:
10367 case BFD_RELOC_IA64_PLTOFF64MSB:
10368 case BFD_RELOC_IA64_PLTOFF64LSB:
10370 case BFD_RELOC_IA64_LTOFF22X:
10371 case BFD_RELOC_IA64_LDXMOV:
10378 return generic_force_reloc (fix);
10381 /* Decide from what point a pc-relative relocation is relative to,
10382 relative to the pc-relative fixup. Er, relatively speaking. */
10384 ia64_pcrel_from_section (fix, sec)
10388 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
10390 if (bfd_get_section_flags (stdoutput, sec) & SEC_CODE)
10397 /* Used to emit section-relative relocs for the dwarf2 debug data. */
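/* In effect this emits the same bytes as "data4 @secrel(sym)" or
   "data8 @secrel(sym)" would (illustrative equivalence); the pseudo fixup is
   later mapped to the corresponding SECREL reloc.  */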
10399 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10403 expr.X_op = O_pseudo_fixup;
10404 expr.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
10405 expr.X_add_number = 0;
10406 expr.X_add_symbol = symbol;
10407 emit_expr (&expr, size);
10410 /* This is called whenever some data item (not an instruction) needs a
10411 fixup. We pick the right reloc code depending on the byteorder
10412 currently in effect. */
10414 ia64_cons_fix_new (f, where, nbytes, exp)
10420 bfd_reloc_code_real_type code;
10425 /* There are no relocs for 8 and 16 bit quantities, but we allow
10426 them here since they will work fine as long as the expression
10427 is fully defined at the end of the pass over the source file. */
10428 case 1: code = BFD_RELOC_8; break;
10429 case 2: code = BFD_RELOC_16; break;
10431 if (target_big_endian)
10432 code = BFD_RELOC_IA64_DIR32MSB;
10434 code = BFD_RELOC_IA64_DIR32LSB;
10438 /* In 32-bit mode, data8 could mean function descriptors too. */
10439 if (exp->X_op == O_pseudo_fixup
10440 && exp->X_op_symbol
10441 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
10442 && !(md.flags & EF_IA_64_ABI64))
10444 if (target_big_endian)
10445 code = BFD_RELOC_IA64_IPLTMSB;
10447 code = BFD_RELOC_IA64_IPLTLSB;
10448 exp->X_op = O_symbol;
10453 if (target_big_endian)
10454 code = BFD_RELOC_IA64_DIR64MSB;
10456 code = BFD_RELOC_IA64_DIR64LSB;
10461 if (exp->X_op == O_pseudo_fixup
10462 && exp->X_op_symbol
10463 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
10465 if (target_big_endian)
10466 code = BFD_RELOC_IA64_IPLTMSB;
10468 code = BFD_RELOC_IA64_IPLTLSB;
10469 exp->X_op = O_symbol;
10475 as_bad ("Unsupported fixup size %d", nbytes);
10476 ignore_rest_of_line ();
10480 if (exp->X_op == O_pseudo_fixup)
10482 exp->X_op = O_symbol;
10483 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
10484 /* ??? If code unchanged, unsupported. */
10487 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
10488 /* We need to store the byte order in effect in case we're going
10489 to fix an 8 or 16 bit relocation (for which there are no real
10490 relocs available). See md_apply_fix3(). */
10491 fix->tc_fix_data.bigendian = target_big_endian;
10494 /* Return the actual relocation we wish to associate with the pseudo
10495 reloc described by SYM and R_TYPE. SYM should be one of the
10496 symbols in the pseudo_func array, or NULL. */
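/* For example (illustrative): "addl r2 = @gprel(sym), gp" reaches this
   routine with the base reloc BFD_RELOC_IA64_IMM22 and the @gprel pseudo
   symbol, and is mapped to BFD_RELOC_IA64_GPREL22 below.  */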
10498 static bfd_reloc_code_real_type
10499 ia64_gen_real_reloc_type (sym, r_type)
10500 struct symbol *sym;
10501 bfd_reloc_code_real_type r_type;
10503 bfd_reloc_code_real_type new = 0;
10510 switch (S_GET_VALUE (sym))
10512 case FUNC_FPTR_RELATIVE:
10515 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_FPTR64I; break;
10516 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_FPTR32MSB; break;
10517 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_FPTR32LSB; break;
10518 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_FPTR64MSB; break;
10519 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_FPTR64LSB; break;
10524 case FUNC_GP_RELATIVE:
10527 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_GPREL22; break;
10528 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_GPREL64I; break;
10529 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_GPREL32MSB; break;
10530 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_GPREL32LSB; break;
10531 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_GPREL64MSB; break;
10532 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_GPREL64LSB; break;
10537 case FUNC_LT_RELATIVE:
10540 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22; break;
10541 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_LTOFF64I; break;
10546 case FUNC_LT_RELATIVE_X:
10549 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22X; break;
10554 case FUNC_PC_RELATIVE:
10557 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PCREL22; break;
10558 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PCREL64I; break;
10559 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_PCREL32MSB; break;
10560 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_PCREL32LSB; break;
10561 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PCREL64MSB; break;
10562 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PCREL64LSB; break;
10567 case FUNC_PLT_RELATIVE:
10570 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PLTOFF22; break;
10571 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PLTOFF64I; break;
10572 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PLTOFF64MSB;break;
10573 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PLTOFF64LSB;break;
10578 case FUNC_SEC_RELATIVE:
10581 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SECREL32MSB;break;
10582 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SECREL32LSB;break;
10583 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SECREL64MSB;break;
10584 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SECREL64LSB;break;
10589 case FUNC_SEG_RELATIVE:
10592 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SEGREL32MSB;break;
10593 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SEGREL32LSB;break;
10594 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SEGREL64MSB;break;
10595 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SEGREL64LSB;break;
10600 case FUNC_LTV_RELATIVE:
10603 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_LTV32MSB; break;
10604 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_LTV32LSB; break;
10605 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_LTV64MSB; break;
10606 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_LTV64LSB; break;
10611 case FUNC_LT_FPTR_RELATIVE:
10614 case BFD_RELOC_IA64_IMM22:
10615 new = BFD_RELOC_IA64_LTOFF_FPTR22; break;
10616 case BFD_RELOC_IA64_IMM64:
10617 new = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
10623 case FUNC_TP_RELATIVE:
10626 case BFD_RELOC_IA64_IMM14:
10627 new = BFD_RELOC_IA64_TPREL14; break;
10628 case BFD_RELOC_IA64_IMM22:
10629 new = BFD_RELOC_IA64_TPREL22; break;
10630 case BFD_RELOC_IA64_IMM64:
10631 new = BFD_RELOC_IA64_TPREL64I; break;
10637 case FUNC_LT_TP_RELATIVE:
10640 case BFD_RELOC_IA64_IMM22:
10641 new = BFD_RELOC_IA64_LTOFF_TPREL22; break;
10647 case FUNC_LT_DTP_MODULE:
10650 case BFD_RELOC_IA64_IMM22:
10651 new = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
10657 case FUNC_DTP_RELATIVE:
10660 case BFD_RELOC_IA64_DIR64MSB:
10661 new = BFD_RELOC_IA64_DTPREL64MSB; break;
10662 case BFD_RELOC_IA64_DIR64LSB:
10663 new = BFD_RELOC_IA64_DTPREL64LSB; break;
10664 case BFD_RELOC_IA64_IMM14:
10665 new = BFD_RELOC_IA64_DTPREL14; break;
10666 case BFD_RELOC_IA64_IMM22:
10667 new = BFD_RELOC_IA64_DTPREL22; break;
10668 case BFD_RELOC_IA64_IMM64:
10669 new = BFD_RELOC_IA64_DTPREL64I; break;
10675 case FUNC_LT_DTP_RELATIVE:
10678 case BFD_RELOC_IA64_IMM22:
10679 new = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
10685 case FUNC_IPLT_RELOC:
10692 /* Hmmmm. Should this ever occur? */
10699 /* Here is where we generate the appropriate reloc for pseudo relocation
10702 ia64_validate_fix (fix)
10705 switch (fix->fx_r_type)
10707 case BFD_RELOC_IA64_FPTR64I:
10708 case BFD_RELOC_IA64_FPTR32MSB:
10709 case BFD_RELOC_IA64_FPTR64LSB:
10710 case BFD_RELOC_IA64_LTOFF_FPTR22:
10711 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10712 if (fix->fx_offset != 0)
10713 as_bad_where (fix->fx_file, fix->fx_line,
10714 "No addend allowed in @fptr() relocation");
10722 fix_insn (fix, odesc, value)
10724 const struct ia64_operand *odesc;
10727 bfd_vma insn[3], t0, t1, control_bits;
10732 slot = fix->fx_where & 0x3;
10733 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
10735 /* Bundles are always in little-endian byte order */
10736 t0 = bfd_getl64 (fixpos);
10737 t1 = bfd_getl64 (fixpos + 8);
10738 control_bits = t0 & 0x1f;
10739 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
10740 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
10741 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
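/* An IA-64 bundle is 128 bits: a 5-bit template followed by three 41-bit
   instruction slots.  The shifts above unpack the three slots from the two
   little-endian 64-bit halves so the affected slot can be patched and the
   bundle repacked below.  */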
10744 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
10746 insn[1] = (value >> 22) & 0x1ffffffffffLL;
10747 insn[2] |= (((value & 0x7f) << 13)
10748 | (((value >> 7) & 0x1ff) << 27)
10749 | (((value >> 16) & 0x1f) << 22)
10750 | (((value >> 21) & 0x1) << 21)
10751 | (((value >> 63) & 0x1) << 36));
10753 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
10755 if (value & ~0x3fffffffffffffffULL)
10756 err = "integer operand out of range";
10757 insn[1] = (value >> 21) & 0x1ffffffffffLL;
10758 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
10760 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
10763 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
10764 insn[2] |= ((((value >> 59) & 0x1) << 36)
10765 | (((value >> 0) & 0xfffff) << 13));
10768 err = (*odesc->insert) (odesc, value, insn + slot);
10771 as_bad_where (fix->fx_file, fix->fx_line, err);
  t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
  t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
  number_to_chars_littleendian (fixpos + 0, t0, 8);
  number_to_chars_littleendian (fixpos + 8, t1, 8);

/* Attempt to simplify or even eliminate a fixup.  The return value is
   ignored; perhaps it was once meaningful, but now it is historical.
   To indicate that a fixup has been eliminated, set FIXP->FX_DONE.

   If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
   for it.  */

md_apply_fix3 (fix, valP, seg)
     segT seg ATTRIBUTE_UNUSED;

  valueT value = *valP;

  fixpos = fix->fx_frag->fr_literal + fix->fx_where;
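  /* For a PC-relative fixup, the plain DIR32/DIR64 relocations are
     rewritten below into their PCREL counterparts.  */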
  switch (fix->fx_r_type)

    case BFD_RELOC_IA64_DIR32MSB:
      fix->fx_r_type = BFD_RELOC_IA64_PCREL32MSB;

    case BFD_RELOC_IA64_DIR32LSB:
      fix->fx_r_type = BFD_RELOC_IA64_PCREL32LSB;

    case BFD_RELOC_IA64_DIR64MSB:
      fix->fx_r_type = BFD_RELOC_IA64_PCREL64MSB;

    case BFD_RELOC_IA64_DIR64LSB:
      fix->fx_r_type = BFD_RELOC_IA64_PCREL64LSB;

  switch (fix->fx_r_type)

    case BFD_RELOC_UNUSED:
      /* This must be a TAG13 or TAG13b operand.  There are no external
         relocs defined for them, so we must give an error.  */
      as_bad_where (fix->fx_file, fix->fx_line,
                    "%s must have a constant value",
                    elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
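    /* The following relocations reference thread-local storage, so the
       symbol itself must be marked thread-local (STT_TLS).  */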
    case BFD_RELOC_IA64_TPREL14:
    case BFD_RELOC_IA64_TPREL22:
    case BFD_RELOC_IA64_TPREL64I:
    case BFD_RELOC_IA64_LTOFF_TPREL22:
    case BFD_RELOC_IA64_LTOFF_DTPMOD22:
    case BFD_RELOC_IA64_DTPREL14:
    case BFD_RELOC_IA64_DTPREL22:
    case BFD_RELOC_IA64_DTPREL64I:
    case BFD_RELOC_IA64_LTOFF_DTPREL22:
      S_SET_THREAD_LOCAL (fix->fx_addsy);

  else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)

      if (fix->tc_fix_data.bigendian)
        number_to_chars_bigendian (fixpos, value, fix->fx_size);
      else
        number_to_chars_littleendian (fixpos, value, fix->fx_size);

    fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);

/* Generate the BFD reloc to be stuck in the object file from the
   fixup used internally in the assembler.  */

tc_gen_reloc (sec, fixp)
     asection *sec ATTRIBUTE_UNUSED;

  reloc = xmalloc (sizeof (*reloc));
  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
  reloc->addend = fixp->fx_offset;
  reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);

      as_bad_where (fixp->fx_file, fixp->fx_line,
                    "Cannot represent %s relocation in object file",
                    bfd_get_reloc_code_name (fixp->fx_r_type));
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LIT.  The number
   of LITTLENUMS emitted is stored in *SIZE.  An error message is
   returned, or NULL on OK.  */

#define MAX_LITTLENUMS 5

md_atof (type, lit, size)

  LITTLENUM_TYPE words[MAX_LITTLENUMS];

      return "Bad call to MD_ATOF()";

  t = atof_ieee (input_line_pointer, type, words);

    input_line_pointer = t;

  (*ia64_float_to_chars) (lit, words, prec);

      /* It is a 10-byte floating point value with 6 bytes of padding.  */
      memset (&lit[10], 0, 6);
      *size = 8 * sizeof (LITTLENUM_TYPE);

    *size = prec * sizeof (LITTLENUM_TYPE);
/* Handle ia64 specific semantics of the align directive.  */

ia64_md_do_align (n, fill, len, max)
     int n ATTRIBUTE_UNUSED;
     const char *fill ATTRIBUTE_UNUSED;
     int len ATTRIBUTE_UNUSED;
     int max ATTRIBUTE_UNUSED;

  if (subseg_text_p (now_seg))
    ia64_flush_insns ();

/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.  */

ia64_handle_align (fragp)

  /* Use an MFI bundle of nops with no stop bits.  */
  static const unsigned char le_nop[]
    = { 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
        0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00};
  static const unsigned char le_nop_stop[]
    = { 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
        0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00};
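  /* The low five bits of the first byte hold the bundle template:
     0x0c is MFI with no stop bit, 0x0d is MFI with a stop bit at the
     end of the bundle.  */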
  const unsigned char *nop;

  if (fragp->fr_type != rs_align_code)

  /* Check if this frag has to end with a stop bit.  */
  nop = fragp->tc_frag_data ? le_nop_stop : le_nop;

  bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
  p = fragp->fr_literal + fragp->fr_fix;

  /* If no padding is needed, check whether we need a stop bit.  */
  if (!bytes && fragp->tc_frag_data)

      if (fragp->fr_fix < 16)
        /* FIXME: It won't work with
           alloc r32=ar.pfs,1,2,4,0  */
        as_bad_where (fragp->fr_file, fragp->fr_line,
                      _("Can't add stop bit to mark end of instruction group"));

      /* Bundles are always in little-endian byte order.  Make sure
         the previous bundle has the stop bit.  */

  /* Make sure we are on a 16-byte boundary, in case someone has been
     putting data into a text section.  */

      int fix = bytes & 15;
      memset (p, 0, fix);

      fragp->fr_fix += fix;

  /* Instruction bundles are always little-endian.  */
  memcpy (p, nop, 16);
  fragp->fr_var = 16;
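  /* HANDLE_ALIGN replicates this 16-byte variable part (fr_var) as many
     times as needed to fill the rest of the alignment gap.  */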
ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,

      number_to_chars_bigendian (lit, (long) (*words++),
                                 sizeof (LITTLENUM_TYPE));
      lit += sizeof (LITTLENUM_TYPE);

ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,

      number_to_chars_littleendian (lit, (long) (words[prec]),
                                    sizeof (LITTLENUM_TYPE));
      lit += sizeof (LITTLENUM_TYPE);

ia64_elf_section_change_hook (void)
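  /* An SHT_IA_64_UNWIND section carries a link to the text section it
     describes; if none has been recorded, default it to .text.  Also
     re-select the byte order for the new section.  */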
  if (elf_section_type (now_seg) == SHT_IA_64_UNWIND
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;
  dot_byteorder (-1);

/* Check if a label should be made global.  */

ia64_check_label (symbolS *label)

  if (*input_line_pointer == ':')
    {
      S_SET_EXTERNAL (label);
      input_line_pointer++;
    }

/* Used to remember where .alias and .secalias directives are seen.  We
   will rename symbol and section names when we are about to output
   the relocatable file.  */

  char *file;		/* The file where the directive is seen.  */
  unsigned int line;	/* The line number the directive is at.  */
  const char *name;	/* The original name of the symbol.  */

/* Called for .alias and .secalias directives.  If SECTION is 1, it is
   .secalias.  Otherwise, it is .alias.  */
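/* For example (illustrative only):

	.alias		foo, "bar"
	.secalias	.mysect, ".renamed"

   makes the symbol written as `foo' (or the section `.mysect') in the
   assembly source come out as `bar' (or `.renamed') in the object file;
   the quoted string is the name that is actually emitted.  */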
dot_alias (int section)

  char *name, *alias;
  const char *error_string;
  struct hash_control *ahash, *nhash;

  name = input_line_pointer;
  delim = get_symbol_end ();
  end_name = input_line_pointer;

  if (name == end_name)
    {
      as_bad (_("expected symbol name"));
      discard_rest_of_line ();
    }

  SKIP_WHITESPACE ();

  if (*input_line_pointer != ',')
    {
      as_bad (_("expected comma after \"%s\""), name);

      ignore_rest_of_line ();
    }

  input_line_pointer++;

  /* We call demand_copy_C_string to check whether the alias string is
     valid.  There should be a closing `"' and no `\0' in the string.  */
  alias = demand_copy_C_string (&len);

    ignore_rest_of_line ();

  /* Make a copy of the name string.  */
  len = strlen (name) + 1;
  obstack_grow (&notes, name, len);
  name = obstack_finish (&notes);

      ahash = secalias_hash;
      nhash = secalias_name_hash;

      ahash = alias_hash;
      nhash = alias_name_hash;
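  /* AHASH maps an alias string to its struct alias record; NHASH maps an
     original name to its alias string, so both directions can be checked
     for conflicts below.  */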
  /* Check if the alias has been used before.  */
  h = (struct alias *) hash_find (ahash, alias);

      if (strcmp (h->name, name))
        as_bad (_("`%s' is already the alias of %s `%s'"),
                alias, kind, h->name);

  /* Check if the name already has an alias.  */
  a = (const char *) hash_find (nhash, name);

      if (strcmp (a, alias))
        as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a);

  h = (struct alias *) xmalloc (sizeof (struct alias));
  as_where (&h->file, &h->line);

  error_string = hash_jam (ahash, alias, (PTR) h);

      as_fatal (_("inserting \"%s\" into %s alias hash table failed: %s"),
                alias, kind, error_string);

  error_string = hash_jam (nhash, name, (PTR) alias);

      as_fatal (_("inserting \"%s\" into %s name hash table failed: %s"),
                alias, kind, error_string);

  obstack_free (&notes, name);
  obstack_free (&notes, alias);

  demand_empty_rest_of_line ();

/* Rename the original symbol name to its alias.  */

do_alias (const char *alias, PTR value)

  struct alias *h = (struct alias *) value;
  symbolS *sym = symbol_find (h->name);

    as_warn_where (h->file, h->line,
                   _("symbol `%s' aliased to `%s' is not used"),

    S_SET_NAME (sym, (char *) alias);

/* Called from write_object_file.  */

ia64_adjust_symtab (void)

  hash_traverse (alias_hash, do_alias);

/* Rename the original section name to its alias.  */

do_secalias (const char *alias, PTR value)

  struct alias *h = (struct alias *) value;
  segT sec = bfd_get_section_by_name (stdoutput, h->name);

    as_warn_where (h->file, h->line,
                   _("section `%s' aliased to `%s' is not used"),

/* Called from write_object_file.  */

ia64_frob_file (void)

  hash_traverse (secalias_hash, do_secalias);