1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
5 This file is part of GAS, the GNU Assembler.
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option) any later version.
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
35 - labels are wrong if automatic alignment is introduced
36 (e.g., check out the second real10 definition in test-data.s)
38 <reg>.safe_across_calls and any other DV-related directives I don't
39 have documentation for.
40 verify mod-sched-brs reads/writes are checked/marked (and other
46 #include "dwarf2dbg.h"
49 #include "opcode/ia64.h"
53 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
54 #define MIN(a,b) ((a) < (b) ? (a) : (b))
57 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
58 #define CURR_SLOT md.slot[md.curr_slot]
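/* Editorial note (not in the original source): the slot array is used
   as a small ring buffer.  Assuming NUM_SLOTS is 4 (the four most
   recent instructions are tracked), then with md.curr_slot == 0:

     CURR_SLOT == md.slot[0]
     PREV_SLOT == md.slot[(0 + 4 - 1) % 4] == md.slot[3]

   The "+ NUM_SLOTS" term merely keeps the modulus argument
   non-negative.  */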
60 #define O_pseudo_fixup (O_max + 1)
64 SPECIAL_SECTION_BSS = 0,
66 SPECIAL_SECTION_SDATA,
67 SPECIAL_SECTION_RODATA,
68 SPECIAL_SECTION_COMMENT,
69 SPECIAL_SECTION_UNWIND,
70 SPECIAL_SECTION_UNWIND_INFO
83 FUNC_LT_FPTR_RELATIVE,
89 REG_FR = (REG_GR + 128),
90 REG_AR = (REG_FR + 128),
91 REG_CR = (REG_AR + 128),
92 REG_P = (REG_CR + 128),
93 REG_BR = (REG_P + 64),
94 REG_IP = (REG_BR + 8),
101 /* The following are pseudo-registers for use by gas only. */
113 /* The following pseudo-registers are used for unwind directives only: */
121 DYNREG_GR = 0, /* dynamic general purpose register */
122 DYNREG_FR, /* dynamic floating point register */
123 DYNREG_PR, /* dynamic predicate register */
127 enum operand_match_result
130 OPERAND_OUT_OF_RANGE,
134 /* On the ia64, we can't know the address of a text label until the
135 instructions are packed into a bundle. To handle this, we keep
136 track of the list of labels that appear in front of each instruction.  */
140 struct label_fix *next;
144 extern int target_big_endian;
146 /* Characters which always start a comment. */
147 const char comment_chars[] = "";
149 /* Characters which start a comment at the beginning of a line. */
150 const char line_comment_chars[] = "#";
152 /* Characters which may be used to separate multiple commands on a single line.  */
154 const char line_separator_chars[] = ";";
156 /* Characters which are used to indicate an exponent in a floating point number.  */
158 const char EXP_CHARS[] = "eE";
160 /* Characters which mean that a number is a floating point constant.  */
162 const char FLT_CHARS[] = "rRsSfFdDxXpP";
164 /* ia64-specific option processing: */
166 const char *md_shortopts = "m:N:x::";
168 struct option md_longopts[] =
170 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
171 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
172 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
173 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
176 size_t md_longopts_size = sizeof (md_longopts);
180 struct hash_control *pseudo_hash; /* pseudo opcode hash table */
181 struct hash_control *reg_hash; /* register name hash table */
182 struct hash_control *dynreg_hash; /* dynamic register hash table */
183 struct hash_control *const_hash; /* constant hash table */
184 struct hash_control *entry_hash; /* code entry hint hash table */
186 symbolS *regsym[REG_NUM];
188 /* If X_op is not O_absent, the register name for the instruction's
189 qualifying predicate. If NULL, p0 is assumed for instructions
190 that are predicable. */
197 explicit_mode : 1, /* which mode we're in */
198 default_explicit_mode : 1, /* which mode is the default */
199 mode_explicitly_set : 1, /* was the current mode explicitly set? */
201 keep_pending_output : 1;
203 /* Each bundle consists of up to three instructions. We keep
204 track of the four most recent instructions so we can correctly set
205 the end_of_insn_group for the last instruction in a bundle. */
207 int num_slots_in_use;
211 end_of_insn_group : 1,
212 manual_bundling_on : 1,
213 manual_bundling_off : 1;
214 signed char user_template; /* user-selected template, if any */
215 unsigned char qp_regno; /* qualifying predicate */
216 /* This duplicates a good fraction of "struct fix" but we
217 can't use a "struct fix" instead since we can't call
218 fix_new_exp() until we know the address of the instruction. */
222 bfd_reloc_code_real_type code;
223 enum ia64_opnd opnd; /* type of operand in need of fix */
224 unsigned int is_pcrel : 1; /* is operand pc-relative? */
225 expressionS expr; /* the value to be inserted */
227 fixup[2]; /* at most two fixups per insn */
228 struct ia64_opcode *idesc;
229 struct label_fix *label_fixups;
230 struct label_fix *tag_fixups;
231 struct unw_rec_list *unwind_record; /* Unwind directive. */
234 unsigned int src_line;
235 struct dwarf2_line_info debug_line;
243 struct dynreg *next; /* next dynamic register */
245 unsigned short base; /* the base register number */
246 unsigned short num_regs; /* # of registers in this set */
248 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
250 flagword flags; /* ELF-header flags */
253 unsigned hint:1; /* is this hint currently valid? */
254 bfd_vma offset; /* mem.offset offset */
255 bfd_vma base; /* mem.offset base */
258 int path; /* number of alt. entry points seen */
259 const char **entry_labels; /* labels of all alternate paths in
260 the current DV-checking block. */
261 int maxpaths; /* size currently allocated for
263 /* Support for hardware errata workarounds. */
265 /* Record data about the last three insn groups. */
268 /* B-step workaround.
269 For each predicate register, this is set if the corresponding insn
270 group conditionally sets this register with one of the affected instructions.  */
273 /* B-step workaround.
274 For each general register, this is set if the corresponding insn
275 a) is conditional on one of the predicate registers for which
276 P_REG_SET is 1 in the corresponding entry of the previous group,
277 b) sets this general register with one of the affected instructions.  */
279 int g_reg_set_conditionally[128];
285 /* application registers: */
291 #define AR_BSPSTORE 18
306 {"ar.k0", 0}, {"ar.k1", 1}, {"ar.k2", 2}, {"ar.k3", 3},
307 {"ar.k4", 4}, {"ar.k5", 5}, {"ar.k6", 6}, {"ar.k7", 7},
308 {"ar.rsc", 16}, {"ar.bsp", 17},
309 {"ar.bspstore", 18}, {"ar.rnat", 19},
310 {"ar.fcr", 21}, {"ar.eflag", 24},
311 {"ar.csd", 25}, {"ar.ssd", 26},
312 {"ar.cflg", 27}, {"ar.fsr", 28},
313 {"ar.fir", 29}, {"ar.fdr", 30},
314 {"ar.ccv", 32}, {"ar.unat", 36},
315 {"ar.fpsr", 40}, {"ar.itc", 44},
316 {"ar.pfs", 64}, {"ar.lc", 65},
337 /* control registers: */
379 static const struct const_desc
386 /* PSR constant masks: */
389 {"psr.be", ((valueT) 1) << 1},
390 {"psr.up", ((valueT) 1) << 2},
391 {"psr.ac", ((valueT) 1) << 3},
392 {"psr.mfl", ((valueT) 1) << 4},
393 {"psr.mfh", ((valueT) 1) << 5},
395 {"psr.ic", ((valueT) 1) << 13},
396 {"psr.i", ((valueT) 1) << 14},
397 {"psr.pk", ((valueT) 1) << 15},
399 {"psr.dt", ((valueT) 1) << 17},
400 {"psr.dfl", ((valueT) 1) << 18},
401 {"psr.dfh", ((valueT) 1) << 19},
402 {"psr.sp", ((valueT) 1) << 20},
403 {"psr.pp", ((valueT) 1) << 21},
404 {"psr.di", ((valueT) 1) << 22},
405 {"psr.si", ((valueT) 1) << 23},
406 {"psr.db", ((valueT) 1) << 24},
407 {"psr.lp", ((valueT) 1) << 25},
408 {"psr.tb", ((valueT) 1) << 26},
409 {"psr.rt", ((valueT) 1) << 27},
410 /* 28-31: reserved */
411 /* 32-33: cpl (current privilege level) */
412 {"psr.is", ((valueT) 1) << 34},
413 {"psr.mc", ((valueT) 1) << 35},
414 {"psr.it", ((valueT) 1) << 36},
415 {"psr.id", ((valueT) 1) << 37},
416 {"psr.da", ((valueT) 1) << 38},
417 {"psr.dd", ((valueT) 1) << 39},
418 {"psr.ss", ((valueT) 1) << 40},
419 /* 41-42: ri (restart instruction) */
420 {"psr.ed", ((valueT) 1) << 43},
421 {"psr.bn", ((valueT) 1) << 44},
424 /* indirect register-sets/memory: */
433 { "CPUID", IND_CPUID },
434 { "cpuid", IND_CPUID },
446 /* Pseudo functions used to indicate relocation types (these functions
447 start with an at sign (@)).  */
469 /* reloc pseudo functions (these must come first!): */
470 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
471 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
472 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
473 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
474 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
475 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
476 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
477 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
478 { "", 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
480 /* mbtype4 constants: */
481 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
482 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
483 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
484 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
485 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
487 /* fclass constants: */
488 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
489 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
490 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
491 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
492 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
493 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
494 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
495 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
496 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
498 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
500 /* unwind-related constants: */
501 { "svr4", PSEUDO_FUNC_CONST, { 0 } },
502 { "hpux", PSEUDO_FUNC_CONST, { 1 } },
503 { "nt", PSEUDO_FUNC_CONST, { 2 } },
505 /* unwind-related registers: */
506 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
509 /* 41-bit nop opcodes (one per unit): */
510 static const bfd_vma nop[IA64_NUM_UNITS] =
512 0x0000000000LL, /* NIL => break 0 */
513 0x0008000000LL, /* I-unit nop */
514 0x0008000000LL, /* M-unit nop */
515 0x4000000000LL, /* B-unit nop */
516 0x0008000000LL, /* F-unit nop */
517 0x0008000000LL, /* L-"unit" nop */
518 0x0008000000LL, /* X-unit nop */
521 /* Can't be `const' as it's passed to input routines (which have the
522 habit of setting temporary sentinels).  */
523 static char special_section_name[][20] =
525 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
526 {".IA_64.unwind"}, {".IA_64.unwind_info"}
529 static char *special_linkonce_name[] =
531 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
534 /* The best template for a particular sequence of up to three instructions:  */
536 #define N IA64_NUM_TYPES
537 static unsigned char best_template[N][N][N];
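/* Editorial sketch (not in the original source): best_template is a
   lookup cube indexed by up to three instruction types.  Assuming the
   IA64_TYPE_* constants from include/opcode/ia64.h, a lookup such as

     unsigned char templ = best_template[IA64_TYPE_M][IA64_TYPE_I][IA64_TYPE_I];

   would yield the preferred bundle template for an M/I/I sequence
   (an MII-style bundle); trailing unused slots are presumably indexed
   with IA64_TYPE_NIL.  */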
540 /* Resource dependencies currently in effect */
542 int depind; /* dependency index */
543 const struct ia64_dependency *dependency; /* actual dependency */
544 unsigned specific:1, /* is this a specific bit/regno? */
545 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
546 int index; /* specific regno/bit within dependency */
547 int note; /* optional qualifying note (0 if none) */
551 int insn_srlz; /* current insn serialization state */
552 int data_srlz; /* current data serialization state */
553 int qp_regno; /* qualifying predicate for this usage */
554 char *file; /* what file marked this dependency */
555 unsigned int line; /* what line marked this dependency */
556 struct mem_offset mem_offset; /* optional memory offset hint */
557 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
558 int path; /* corresponding code entry index */
560 static int regdepslen = 0;
561 static int regdepstotlen = 0;
562 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
563 static const char *dv_sem[] = { "none", "implied", "impliedf",
564 "data", "instr", "specific", "stop", "other" };
565 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
567 /* Current state of PR mutexation */
568 static struct qpmutex {
571 } *qp_mutexes = NULL; /* QP mutex bitmasks */
572 static int qp_mutexeslen = 0;
573 static int qp_mutexestotlen = 0;
574 static valueT qp_safe_across_calls = 0;
576 /* Current state of PR implications */
577 static struct qp_imply {
580 unsigned p2_branched:1;
582 } *qp_implies = NULL;
583 static int qp_implieslen = 0;
584 static int qp_impliestotlen = 0;
586 /* Keep track of static GR values so that indirect register usage can
587 sometimes be tracked. */
592 } gr_values[128] = {{ 1, 0, 0 }};
594 /* These are the routines required to output the various types of unwind records.  */
597 /* A slot_number is a frag address plus the slot index (0-2). We use the
598 frag address here so that if there is a section switch in the middle of
599 a function, then instructions emitted to a different section are not
600 counted. Since there may be more than one frag for a function, this
601 means we also need to keep track of which frag this address belongs to
602 so we can compute inter-frag distances. This also nicely solves the
603 problem with nops emitted for align directives, which can't easily be
604 counted, but can easily be derived from frag sizes. */
606 typedef struct unw_rec_list {
608 unsigned long slot_number;
610 struct unw_rec_list *next;
613 #define SLOT_NUM_NOT_SET (unsigned)-1
617 unsigned long next_slot_number;
618 fragS *next_slot_frag;
620 /* Maintain a list of unwind entries for the current function. */
624 /* Any unwind entries that should be attached to the current slot
625 that an insn is being constructed for. */
626 unw_rec_list *current_entry;
628 /* These are used to create the unwind table entry for this function. */
631 symbolS *info; /* pointer to unwind info */
632 symbolS *personality_routine;
634 subsegT saved_text_subseg;
635 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
637 /* TRUE if processing unwind directives in a prologue region. */
640 unsigned int prologue_count; /* number of .prologues seen so far */
643 typedef void (*vbyte_func) PARAMS ((int, char *, char *));
645 /* Forward declarations:  */
646 static int ar_is_in_integer_unit PARAMS ((int regnum));
647 static void set_section PARAMS ((char *name));
648 static unsigned int set_regstack PARAMS ((unsigned int, unsigned int,
649 unsigned int, unsigned int));
650 static void dot_radix PARAMS ((int));
651 static void dot_special_section PARAMS ((int));
652 static void dot_proc PARAMS ((int));
653 static void dot_fframe PARAMS ((int));
654 static void dot_vframe PARAMS ((int));
655 static void dot_vframesp PARAMS ((int));
656 static void dot_vframepsp PARAMS ((int));
657 static void dot_save PARAMS ((int));
658 static void dot_restore PARAMS ((int));
659 static void dot_restorereg PARAMS ((int));
660 static void dot_restorereg_p PARAMS ((int));
661 static void dot_handlerdata PARAMS ((int));
662 static void dot_unwentry PARAMS ((int));
663 static void dot_altrp PARAMS ((int));
664 static void dot_savemem PARAMS ((int));
665 static void dot_saveg PARAMS ((int));
666 static void dot_savef PARAMS ((int));
667 static void dot_saveb PARAMS ((int));
668 static void dot_savegf PARAMS ((int));
669 static void dot_spill PARAMS ((int));
670 static void dot_spillreg PARAMS ((int));
671 static void dot_spillmem PARAMS ((int));
672 static void dot_spillreg_p PARAMS ((int));
673 static void dot_spillmem_p PARAMS ((int));
674 static void dot_label_state PARAMS ((int));
675 static void dot_copy_state PARAMS ((int));
676 static void dot_unwabi PARAMS ((int));
677 static void dot_personality PARAMS ((int));
678 static void dot_body PARAMS ((int));
679 static void dot_prologue PARAMS ((int));
680 static void dot_endp PARAMS ((int));
681 static void dot_template PARAMS ((int));
682 static void dot_regstk PARAMS ((int));
683 static void dot_rot PARAMS ((int));
684 static void dot_byteorder PARAMS ((int));
685 static void dot_psr PARAMS ((int));
686 static void dot_alias PARAMS ((int));
687 static void dot_ln PARAMS ((int));
688 static char *parse_section_name PARAMS ((void));
689 static void dot_xdata PARAMS ((int));
690 static void stmt_float_cons PARAMS ((int));
691 static void stmt_cons_ua PARAMS ((int));
692 static void dot_xfloat_cons PARAMS ((int));
693 static void dot_xstringer PARAMS ((int));
694 static void dot_xdata_ua PARAMS ((int));
695 static void dot_xfloat_cons_ua PARAMS ((int));
696 static void print_prmask PARAMS ((valueT mask));
697 static void dot_pred_rel PARAMS ((int));
698 static void dot_reg_val PARAMS ((int));
699 static void dot_dv_mode PARAMS ((int));
700 static void dot_entry PARAMS ((int));
701 static void dot_mem_offset PARAMS ((int));
702 static void add_unwind_entry PARAMS((unw_rec_list *ptr));
703 static symbolS *declare_register PARAMS ((const char *name, int regnum));
704 static void declare_register_set PARAMS ((const char *, int, int));
705 static unsigned int operand_width PARAMS ((enum ia64_opnd));
706 static enum operand_match_result operand_match PARAMS ((const struct ia64_opcode *idesc,
709 static int parse_operand PARAMS ((expressionS *e));
710 static struct ia64_opcode * parse_operands PARAMS ((struct ia64_opcode *));
711 static void build_insn PARAMS ((struct slot *, bfd_vma *));
712 static void emit_one_bundle PARAMS ((void));
713 static void fix_insn PARAMS ((fixS *, const struct ia64_operand *, valueT));
714 static bfd_reloc_code_real_type ia64_gen_real_reloc_type PARAMS ((struct symbol *sym,
715 bfd_reloc_code_real_type r_type));
716 static void insn_group_break PARAMS ((int, int, int));
717 static void mark_resource PARAMS ((struct ia64_opcode *, const struct ia64_dependency *,
718 struct rsrc *, int depind, int path));
719 static void add_qp_mutex PARAMS((valueT mask));
720 static void add_qp_imply PARAMS((int p1, int p2));
721 static void clear_qp_branch_flag PARAMS((valueT mask));
722 static void clear_qp_mutex PARAMS((valueT mask));
723 static void clear_qp_implies PARAMS((valueT p1_mask, valueT p2_mask));
724 static void clear_register_values PARAMS ((void));
725 static void print_dependency PARAMS ((const char *action, int depind));
726 static void instruction_serialization PARAMS ((void));
727 static void data_serialization PARAMS ((void));
728 static void remove_marked_resource PARAMS ((struct rsrc *));
729 static int is_conditional_branch PARAMS ((struct ia64_opcode *));
730 static int is_taken_branch PARAMS ((struct ia64_opcode *));
731 static int is_interruption_or_rfi PARAMS ((struct ia64_opcode *));
732 static int depends_on PARAMS ((int, struct ia64_opcode *));
733 static int specify_resource PARAMS ((const struct ia64_dependency *,
734 struct ia64_opcode *, int, struct rsrc [], int, int));
735 static int check_dv PARAMS((struct ia64_opcode *idesc));
736 static void check_dependencies PARAMS((struct ia64_opcode *));
737 static void mark_resources PARAMS((struct ia64_opcode *));
738 static void update_dependencies PARAMS((struct ia64_opcode *));
739 static void note_register_values PARAMS((struct ia64_opcode *));
740 static int qp_mutex PARAMS ((int, int, int));
741 static int resources_match PARAMS ((struct rsrc *, struct ia64_opcode *, int, int, int));
742 static void output_vbyte_mem PARAMS ((int, char *, char *));
743 static void count_output PARAMS ((int, char *, char *));
744 static void output_R1_format PARAMS ((vbyte_func, unw_record_type, int));
745 static void output_R2_format PARAMS ((vbyte_func, int, int, unsigned long));
746 static void output_R3_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
747 static void output_P1_format PARAMS ((vbyte_func, int));
748 static void output_P2_format PARAMS ((vbyte_func, int, int));
749 static void output_P3_format PARAMS ((vbyte_func, unw_record_type, int));
750 static void output_P4_format PARAMS ((vbyte_func, unsigned char *, unsigned long));
751 static void output_P5_format PARAMS ((vbyte_func, int, unsigned long));
752 static void output_P6_format PARAMS ((vbyte_func, unw_record_type, int));
753 static void output_P7_format PARAMS ((vbyte_func, unw_record_type, unsigned long, unsigned long));
754 static void output_P8_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
755 static void output_P9_format PARAMS ((vbyte_func, int, int));
756 static void output_P10_format PARAMS ((vbyte_func, int, int));
757 static void output_B1_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
758 static void output_B2_format PARAMS ((vbyte_func, unsigned long, unsigned long));
759 static void output_B3_format PARAMS ((vbyte_func, unsigned long, unsigned long));
760 static void output_B4_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
761 static char format_ab_reg PARAMS ((int, int));
762 static void output_X1_format PARAMS ((vbyte_func, unw_record_type, int, int, unsigned long,
764 static void output_X2_format PARAMS ((vbyte_func, int, int, int, int, int, unsigned long));
765 static void output_X3_format PARAMS ((vbyte_func, unw_record_type, int, int, int, unsigned long,
767 static void output_X4_format PARAMS ((vbyte_func, int, int, int, int, int, int, unsigned long));
768 static void free_list_records PARAMS ((unw_rec_list *));
769 static unw_rec_list *output_prologue PARAMS ((void));
770 static unw_rec_list *output_prologue_gr PARAMS ((unsigned int, unsigned int));
771 static unw_rec_list *output_body PARAMS ((void));
772 static unw_rec_list *output_mem_stack_f PARAMS ((unsigned int));
773 static unw_rec_list *output_mem_stack_v PARAMS ((void));
774 static unw_rec_list *output_psp_gr PARAMS ((unsigned int));
775 static unw_rec_list *output_psp_sprel PARAMS ((unsigned int));
776 static unw_rec_list *output_rp_when PARAMS ((void));
777 static unw_rec_list *output_rp_gr PARAMS ((unsigned int));
778 static unw_rec_list *output_rp_br PARAMS ((unsigned int));
779 static unw_rec_list *output_rp_psprel PARAMS ((unsigned int));
780 static unw_rec_list *output_rp_sprel PARAMS ((unsigned int));
781 static unw_rec_list *output_pfs_when PARAMS ((void));
782 static unw_rec_list *output_pfs_gr PARAMS ((unsigned int));
783 static unw_rec_list *output_pfs_psprel PARAMS ((unsigned int));
784 static unw_rec_list *output_pfs_sprel PARAMS ((unsigned int));
785 static unw_rec_list *output_preds_when PARAMS ((void));
786 static unw_rec_list *output_preds_gr PARAMS ((unsigned int));
787 static unw_rec_list *output_preds_psprel PARAMS ((unsigned int));
788 static unw_rec_list *output_preds_sprel PARAMS ((unsigned int));
789 static unw_rec_list *output_fr_mem PARAMS ((unsigned int));
790 static unw_rec_list *output_frgr_mem PARAMS ((unsigned int, unsigned int));
791 static unw_rec_list *output_gr_gr PARAMS ((unsigned int, unsigned int));
792 static unw_rec_list *output_gr_mem PARAMS ((unsigned int));
793 static unw_rec_list *output_br_mem PARAMS ((unsigned int));
794 static unw_rec_list *output_br_gr PARAMS ((unsigned int, unsigned int));
795 static unw_rec_list *output_spill_base PARAMS ((unsigned int));
796 static unw_rec_list *output_unat_when PARAMS ((void));
797 static unw_rec_list *output_unat_gr PARAMS ((unsigned int));
798 static unw_rec_list *output_unat_psprel PARAMS ((unsigned int));
799 static unw_rec_list *output_unat_sprel PARAMS ((unsigned int));
800 static unw_rec_list *output_lc_when PARAMS ((void));
801 static unw_rec_list *output_lc_gr PARAMS ((unsigned int));
802 static unw_rec_list *output_lc_psprel PARAMS ((unsigned int));
803 static unw_rec_list *output_lc_sprel PARAMS ((unsigned int));
804 static unw_rec_list *output_fpsr_when PARAMS ((void));
805 static unw_rec_list *output_fpsr_gr PARAMS ((unsigned int));
806 static unw_rec_list *output_fpsr_psprel PARAMS ((unsigned int));
807 static unw_rec_list *output_fpsr_sprel PARAMS ((unsigned int));
808 static unw_rec_list *output_priunat_when_gr PARAMS ((void));
809 static unw_rec_list *output_priunat_when_mem PARAMS ((void));
810 static unw_rec_list *output_priunat_gr PARAMS ((unsigned int));
811 static unw_rec_list *output_priunat_psprel PARAMS ((unsigned int));
812 static unw_rec_list *output_priunat_sprel PARAMS ((unsigned int));
813 static unw_rec_list *output_bsp_when PARAMS ((void));
814 static unw_rec_list *output_bsp_gr PARAMS ((unsigned int));
815 static unw_rec_list *output_bsp_psprel PARAMS ((unsigned int));
816 static unw_rec_list *output_bsp_sprel PARAMS ((unsigned int));
817 static unw_rec_list *output_bspstore_when PARAMS ((void));
818 static unw_rec_list *output_bspstore_gr PARAMS ((unsigned int));
819 static unw_rec_list *output_bspstore_psprel PARAMS ((unsigned int));
820 static unw_rec_list *output_bspstore_sprel PARAMS ((unsigned int));
821 static unw_rec_list *output_rnat_when PARAMS ((void));
822 static unw_rec_list *output_rnat_gr PARAMS ((unsigned int));
823 static unw_rec_list *output_rnat_psprel PARAMS ((unsigned int));
824 static unw_rec_list *output_rnat_sprel PARAMS ((unsigned int));
825 static unw_rec_list *output_unwabi PARAMS ((unsigned long, unsigned long));
826 static unw_rec_list *output_epilogue PARAMS ((unsigned long));
827 static unw_rec_list *output_label_state PARAMS ((unsigned long));
828 static unw_rec_list *output_copy_state PARAMS ((unsigned long));
829 static unw_rec_list *output_spill_psprel PARAMS ((unsigned int, unsigned int, unsigned int));
830 static unw_rec_list *output_spill_sprel PARAMS ((unsigned int, unsigned int, unsigned int));
831 static unw_rec_list *output_spill_psprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
833 static unw_rec_list *output_spill_sprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
835 static unw_rec_list *output_spill_reg PARAMS ((unsigned int, unsigned int, unsigned int,
837 static unw_rec_list *output_spill_reg_p PARAMS ((unsigned int, unsigned int, unsigned int,
838 unsigned int, unsigned int));
839 static void process_one_record PARAMS ((unw_rec_list *, vbyte_func));
840 static void process_unw_records PARAMS ((unw_rec_list *, vbyte_func));
841 static int calc_record_size PARAMS ((unw_rec_list *));
842 static void set_imask PARAMS ((unw_rec_list *, unsigned long, unsigned long, unsigned int));
843 static int count_bits PARAMS ((unsigned long));
844 static unsigned long slot_index PARAMS ((unsigned long, fragS *,
845 unsigned long, fragS *));
846 static unw_rec_list *optimize_unw_records PARAMS ((unw_rec_list *));
847 static void fixup_unw_records PARAMS ((unw_rec_list *));
848 static int output_unw_records PARAMS ((unw_rec_list *, void **));
849 static int convert_expr_to_ab_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
850 static int convert_expr_to_xy_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
851 static int generate_unwind_image PARAMS ((const char *));
853 /* Build the unwind section name by appending the (possibly stripped)
854 text section NAME to the unwind PREFIX. The resulting string
855 pointer is assigned to RESULT. The string is allocated on the
856 stack, so this must be a macro... */
857 #define make_unw_section_name(special, text_name, result) \
859 char *_prefix = special_section_name[special]; \
860 char *_suffix = text_name; \
861 size_t _prefix_len, _suffix_len; \
863 if (strncmp (text_name, ".gnu.linkonce.t.", \
864 sizeof (".gnu.linkonce.t.") - 1) == 0) \
866 _prefix = special_linkonce_name[special - SPECIAL_SECTION_UNWIND]; \
867 _suffix += sizeof (".gnu.linkonce.t.") - 1; \
869 _prefix_len = strlen (_prefix), _suffix_len = strlen (_suffix); \
870 _result = alloca (_prefix_len + _suffix_len + 1); \
871 memcpy(_result, _prefix, _prefix_len); \
872 memcpy(_result + _prefix_len, _suffix, _suffix_len); \
873 _result[_prefix_len + _suffix_len] = '\0'; \
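/* Usage sketch (editorial illustration, not in the original source).
   For a link-once text section the ".gnu.linkonce.t." part of the name
   is replaced by the matching unwind prefix; for any other text section
   the special-section prefix is simply prepended to the full section
   name (e.g. ".IA_64.unwind_info" + ".mytext", where ".mytext" is a
   hypothetical section name).  */
#if 0	/* illustration only; not compiled */
  {
    char *sec_name;

    make_unw_section_name (SPECIAL_SECTION_UNWIND_INFO,
			   ".gnu.linkonce.t.foo", sec_name);
    /* sec_name now points to ".gnu.linkonce.ia64unwi.foo",
       allocated on the stack via alloca.  */
  }
#endif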
878 /* Determine if application register REGNUM resides in the integer
879 unit (as opposed to the memory unit). */
881 ar_is_in_integer_unit (reg)
886 return (reg == 64 /* pfs */
887 || reg == 65 /* lc */
888 || reg == 66 /* ec */
889 /* ??? ias accepts and puts these in the integer unit. */
890 || (reg >= 112 && reg <= 127));
893 /* Switch to section NAME and create section if necessary. It's
894 rather ugly that we have to manipulate input_line_pointer but I
895 don't see any other way to accomplish the same thing without
896 changing obj-elf.c (which may be the Right Thing, in the end). */
901 char *saved_input_line_pointer;
903 saved_input_line_pointer = input_line_pointer;
904 input_line_pointer = name;
906 input_line_pointer = saved_input_line_pointer;
909 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
912 ia64_elf_section_flags (flags, attr, type)
914 int attr, type ATTRIBUTE_UNUSED;
916 if (attr & SHF_IA_64_SHORT)
917 flags |= SEC_SMALL_DATA;
922 ia64_elf_section_type (str, len)
926 len = sizeof (ELF_STRING_ia64_unwind_info) - 1;
927 if (strncmp (str, ELF_STRING_ia64_unwind_info, len) == 0)
930 len = sizeof (ELF_STRING_ia64_unwind_info_once) - 1;
931 if (strncmp (str, ELF_STRING_ia64_unwind_info_once, len) == 0)
934 len = sizeof (ELF_STRING_ia64_unwind) - 1;
935 if (strncmp (str, ELF_STRING_ia64_unwind, len) == 0)
936 return SHT_IA_64_UNWIND;
938 len = sizeof (ELF_STRING_ia64_unwind_once) - 1;
939 if (strncmp (str, ELF_STRING_ia64_unwind_once, len) == 0)
940 return SHT_IA_64_UNWIND;
946 set_regstack (ins, locs, outs, rots)
947 unsigned int ins, locs, outs, rots;
952 sof = ins + locs + outs;
955 as_bad ("Size of frame exceeds maximum of 96 registers");
960 as_warn ("Size of rotating registers exceeds frame size");
963 md.in.base = REG_GR + 32;
964 md.loc.base = md.in.base + ins;
965 md.out.base = md.loc.base + locs;
967 md.in.num_regs = ins;
968 md.loc.num_regs = locs;
969 md.out.num_regs = outs;
970 md.rot.num_regs = rots;
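/* Worked example (editorial, not in the original source): for a
   register frame with ins = 2, locs = 3, outs = 2, rots = 0 (e.g. as
   requested by a .regstk directive), sof is 7 and the bases work out
   to

     md.in.base  = r32  (REG_GR + 32)
     md.loc.base = r34  (in.base  + 2)
     md.out.base = r37  (loc.base + 3)

   so in0-in1 map to r32-r33, loc0-loc2 to r34-r36, and out0-out1 to
   r37-r38.  */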
977 struct label_fix *lfix;
979 subsegT saved_subseg;
982 if (!md.last_text_seg)
986 saved_subseg = now_subseg;
988 subseg_set (md.last_text_seg, 0);
990 while (md.num_slots_in_use > 0)
991 emit_one_bundle (); /* force out queued instructions */
993 /* In case there are labels following the last instruction, resolve those now.  */
995 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
997 S_SET_VALUE (lfix->sym, frag_now_fix ());
998 symbol_set_frag (lfix->sym, frag_now);
1000 CURR_SLOT.label_fixups = 0;
1001 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
1003 S_SET_VALUE (lfix->sym, frag_now_fix ());
1004 symbol_set_frag (lfix->sym, frag_now);
1006 CURR_SLOT.tag_fixups = 0;
1008 /* In case there are unwind directives following the last instruction,
1009 resolve those now. We only handle body and prologue directives here.
1010 Give an error for others. */
1011 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
1013 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
1014 || ptr->r.type == body)
1016 ptr->slot_number = (unsigned long) frag_more (0);
1017 ptr->slot_frag = frag_now;
1020 as_bad (_("Unwind directive not followed by an instruction."));
1022 unwind.current_entry = NULL;
1024 subseg_set (saved_seg, saved_subseg);
1026 if (md.qp.X_op == O_register)
1027 as_bad ("qualifying predicate not followed by instruction");
1031 ia64_do_align (nbytes)
1034 char *saved_input_line_pointer = input_line_pointer;
1036 input_line_pointer = "";
1037 s_align_bytes (nbytes);
1038 input_line_pointer = saved_input_line_pointer;
1042 ia64_cons_align (nbytes)
1047 char *saved_input_line_pointer = input_line_pointer;
1048 input_line_pointer = "";
1049 s_align_bytes (nbytes);
1050 input_line_pointer = saved_input_line_pointer;
1054 /* Output COUNT bytes to a memory location. */
1055 static unsigned char *vbyte_mem_ptr = NULL;
1058 output_vbyte_mem (count, ptr, comment)
1061 char *comment ATTRIBUTE_UNUSED;
1064 if (vbyte_mem_ptr == NULL)
1069 for (x = 0; x < count; x++)
1070 *(vbyte_mem_ptr++) = ptr[x];
1073 /* Count the number of bytes required for records. */
1074 static int vbyte_count = 0;
1076 count_output (count, ptr, comment)
1078 char *ptr ATTRIBUTE_UNUSED;
1079 char *comment ATTRIBUTE_UNUSED;
1081 vbyte_count += count;
1085 output_R1_format (f, rtype, rlen)
1087 unw_record_type rtype;
1094 output_R3_format (f, rtype, rlen);
1100 else if (rtype != prologue)
1101 as_bad ("record type is not valid");
1103 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1104 (*f) (1, &byte, NULL);
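/* Example (editorial sketch): assuming r is 0 for a prologue region
   and 1 for a body region, a body region of length 5 is emitted as the
   single byte

     UNW_R1 | (1 << 5) | 5

   Region lengths that do not fit into the 5-bit rlen field are instead
   handed off to output_R3_format (the long form).  */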
1108 output_R2_format (f, mask, grsave, rlen)
1115 mask = (mask & 0x0f);
1116 grsave = (grsave & 0x7f);
1118 bytes[0] = (UNW_R2 | (mask >> 1));
1119 bytes[1] = (((mask & 0x01) << 7) | grsave);
1120 count += output_leb128 (bytes + 2, rlen, 0);
1121 (*f) (count, bytes, NULL);
1125 output_R3_format (f, rtype, rlen)
1127 unw_record_type rtype;
1134 output_R1_format (f, rtype, rlen);
1140 else if (rtype != prologue)
1141 as_bad ("record type is not valid");
1142 bytes[0] = (UNW_R3 | r);
1143 count = output_leb128 (bytes + 1, rlen, 0);
1144 (*f) (count + 1, bytes, NULL);
1148 output_P1_format (f, brmask)
1153 byte = UNW_P1 | (brmask & 0x1f);
1154 (*f) (1, &byte, NULL);
1158 output_P2_format (f, brmask, gr)
1164 brmask = (brmask & 0x1f);
1165 bytes[0] = UNW_P2 | (brmask >> 1);
1166 bytes[1] = (((brmask & 1) << 7) | gr);
1167 (*f) (2, bytes, NULL);
1171 output_P3_format (f, rtype, reg)
1173 unw_record_type rtype;
1218 as_bad ("Invalid record type for P3 format.");
1220 bytes[0] = (UNW_P3 | (r >> 1));
1221 bytes[1] = (((r & 1) << 7) | reg);
1222 (*f) (2, bytes, NULL);
1226 output_P4_format (f, imask, imask_size)
1228 unsigned char *imask;
1229 unsigned long imask_size;
1232 (*f) (imask_size, imask, NULL);
1236 output_P5_format (f, grmask, frmask)
1239 unsigned long frmask;
1242 grmask = (grmask & 0x0f);
1245 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1246 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1247 bytes[3] = (frmask & 0x000000ff);
1248 (*f) (4, bytes, NULL);
1252 output_P6_format (f, rtype, rmask)
1254 unw_record_type rtype;
1260 if (rtype == gr_mem)
1262 else if (rtype != fr_mem)
1263 as_bad ("Invalid record type for format P6");
1264 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1265 (*f) (1, &byte, NULL);
1269 output_P7_format (f, rtype, w1, w2)
1271 unw_record_type rtype;
1278 count += output_leb128 (bytes + 1, w1, 0);
1283 count += output_leb128 (bytes + count, w2 >> 4, 0);
1333 bytes[0] = (UNW_P7 | r);
1334 (*f) (count, bytes, NULL);
1338 output_P8_format (f, rtype, t)
1340 unw_record_type rtype;
1379 case bspstore_psprel:
1382 case bspstore_sprel:
1394 case priunat_when_gr:
1397 case priunat_psprel:
1403 case priunat_when_mem:
1410 count += output_leb128 (bytes + 2, t, 0);
1411 (*f) (count, bytes, NULL);
1415 output_P9_format (f, grmask, gr)
1422 bytes[1] = (grmask & 0x0f);
1423 bytes[2] = (gr & 0x7f);
1424 (*f) (3, bytes, NULL);
1428 output_P10_format (f, abi, context)
1435 bytes[1] = (abi & 0xff);
1436 bytes[2] = (context & 0xff);
1437 (*f) (3, bytes, NULL);
1441 output_B1_format (f, rtype, label)
1443 unw_record_type rtype;
1444 unsigned long label;
1450 output_B4_format (f, rtype, label);
1453 if (rtype == copy_state)
1455 else if (rtype != label_state)
1456 as_bad ("Invalid record type for format B1");
1458 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1459 (*f) (1, &byte, NULL);
1463 output_B2_format (f, ecount, t)
1465 unsigned long ecount;
1472 output_B3_format (f, ecount, t);
1475 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1476 count += output_leb128 (bytes + 1, t, 0);
1477 (*f) (count, bytes, NULL);
1481 output_B3_format (f, ecount, t)
1483 unsigned long ecount;
1490 output_B2_format (f, ecount, t);
1494 count += output_leb128 (bytes + 1, t, 0);
1495 count += output_leb128 (bytes + count, ecount, 0);
1496 (*f) (count, bytes, NULL);
1500 output_B4_format (f, rtype, label)
1502 unw_record_type rtype;
1503 unsigned long label;
1510 output_B1_format (f, rtype, label);
1514 if (rtype == copy_state)
1516 else if (rtype != label_state)
1517 as_bad ("Invalid record type for format B4");
1519 bytes[0] = (UNW_B4 | (r << 3));
1520 count += output_leb128 (bytes + 1, label, 0);
1521 (*f) (count, bytes, NULL);
1525 format_ab_reg (ab, reg)
1532 ret = (ab << 5) | reg;
1537 output_X1_format (f, rtype, ab, reg, t, w1)
1539 unw_record_type rtype;
1549 if (rtype == spill_sprel)
1551 else if (rtype != spill_psprel)
1552 as_bad ("Invalid record type for format X1");
1553 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1554 count += output_leb128 (bytes + 2, t, 0);
1555 count += output_leb128 (bytes + count, w1, 0);
1556 (*f) (count, bytes, NULL);
1560 output_X2_format (f, ab, reg, x, y, treg, t)
1569 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1570 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1571 count += output_leb128 (bytes + 3, t, 0);
1572 (*f) (count, bytes, NULL);
1576 output_X3_format (f, rtype, qp, ab, reg, t, w1)
1578 unw_record_type rtype;
1589 if (rtype == spill_sprel_p)
1591 else if (rtype != spill_psprel_p)
1592 as_bad ("Invalid record type for format X3");
1593 bytes[1] = ((r << 7) | (qp & 0x3f));
1594 bytes[2] = format_ab_reg (ab, reg);
1595 count += output_leb128 (bytes + 3, t, 0);
1596 count += output_leb128 (bytes + count, w1, 0);
1597 (*f) (count, bytes, NULL);
1601 output_X4_format (f, qp, ab, reg, x, y, treg, t)
1611 bytes[1] = (qp & 0x3f);
1612 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1613 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1614 count += output_leb128 (bytes + 4, t, 0);
1615 (*f) (count, bytes, NULL);
1618 /* This function allocates a record list structure, and initializes fields. */
1620 static unw_rec_list *
1621 alloc_record (unw_record_type t)
1624 ptr = xmalloc (sizeof (*ptr));
1626 ptr->slot_number = SLOT_NUM_NOT_SET;
1631 /* This function frees an entire list of record structures. */
1634 free_list_records (unw_rec_list *first)
1637 for (ptr = first; ptr != NULL;)
1639 unw_rec_list *tmp = ptr;
1641 if ((tmp->r.type == prologue || tmp->r.type == prologue_gr)
1642 && tmp->r.record.r.mask.i)
1643 free (tmp->r.record.r.mask.i);
1650 static unw_rec_list *
1653 unw_rec_list *ptr = alloc_record (prologue);
1654 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1658 static unw_rec_list *
1659 output_prologue_gr (saved_mask, reg)
1660 unsigned int saved_mask;
1663 unw_rec_list *ptr = alloc_record (prologue_gr);
1664 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1665 ptr->r.record.r.grmask = saved_mask;
1666 ptr->r.record.r.grsave = reg;
1670 static unw_rec_list *
1673 unw_rec_list *ptr = alloc_record (body);
1677 static unw_rec_list *
1678 output_mem_stack_f (size)
1681 unw_rec_list *ptr = alloc_record (mem_stack_f);
1682 ptr->r.record.p.size = size;
1686 static unw_rec_list *
1687 output_mem_stack_v ()
1689 unw_rec_list *ptr = alloc_record (mem_stack_v);
1693 static unw_rec_list *
1697 unw_rec_list *ptr = alloc_record (psp_gr);
1698 ptr->r.record.p.gr = gr;
1702 static unw_rec_list *
1703 output_psp_sprel (offset)
1704 unsigned int offset;
1706 unw_rec_list *ptr = alloc_record (psp_sprel);
1707 ptr->r.record.p.spoff = offset / 4;
1711 static unw_rec_list *
1714 unw_rec_list *ptr = alloc_record (rp_when);
1718 static unw_rec_list *
1722 unw_rec_list *ptr = alloc_record (rp_gr);
1723 ptr->r.record.p.gr = gr;
1727 static unw_rec_list *
1731 unw_rec_list *ptr = alloc_record (rp_br);
1732 ptr->r.record.p.br = br;
1736 static unw_rec_list *
1737 output_rp_psprel (offset)
1738 unsigned int offset;
1740 unw_rec_list *ptr = alloc_record (rp_psprel);
1741 ptr->r.record.p.pspoff = offset / 4;
1745 static unw_rec_list *
1746 output_rp_sprel (offset)
1747 unsigned int offset;
1749 unw_rec_list *ptr = alloc_record (rp_sprel);
1750 ptr->r.record.p.spoff = offset / 4;
1754 static unw_rec_list *
1757 unw_rec_list *ptr = alloc_record (pfs_when);
1761 static unw_rec_list *
1765 unw_rec_list *ptr = alloc_record (pfs_gr);
1766 ptr->r.record.p.gr = gr;
1770 static unw_rec_list *
1771 output_pfs_psprel (offset)
1772 unsigned int offset;
1774 unw_rec_list *ptr = alloc_record (pfs_psprel);
1775 ptr->r.record.p.pspoff = offset / 4;
1779 static unw_rec_list *
1780 output_pfs_sprel (offset)
1781 unsigned int offset;
1783 unw_rec_list *ptr = alloc_record (pfs_sprel);
1784 ptr->r.record.p.spoff = offset / 4;
1788 static unw_rec_list *
1789 output_preds_when ()
1791 unw_rec_list *ptr = alloc_record (preds_when);
1795 static unw_rec_list *
1796 output_preds_gr (gr)
1799 unw_rec_list *ptr = alloc_record (preds_gr);
1800 ptr->r.record.p.gr = gr;
1804 static unw_rec_list *
1805 output_preds_psprel (offset)
1806 unsigned int offset;
1808 unw_rec_list *ptr = alloc_record (preds_psprel);
1809 ptr->r.record.p.pspoff = offset / 4;
1813 static unw_rec_list *
1814 output_preds_sprel (offset)
1815 unsigned int offset;
1817 unw_rec_list *ptr = alloc_record (preds_sprel);
1818 ptr->r.record.p.spoff = offset / 4;
1822 static unw_rec_list *
1823 output_fr_mem (mask)
1826 unw_rec_list *ptr = alloc_record (fr_mem);
1827 ptr->r.record.p.rmask = mask;
1831 static unw_rec_list *
1832 output_frgr_mem (gr_mask, fr_mask)
1833 unsigned int gr_mask;
1834 unsigned int fr_mask;
1836 unw_rec_list *ptr = alloc_record (frgr_mem);
1837 ptr->r.record.p.grmask = gr_mask;
1838 ptr->r.record.p.frmask = fr_mask;
1842 static unw_rec_list *
1843 output_gr_gr (mask, reg)
1847 unw_rec_list *ptr = alloc_record (gr_gr);
1848 ptr->r.record.p.grmask = mask;
1849 ptr->r.record.p.gr = reg;
1853 static unw_rec_list *
1854 output_gr_mem (mask)
1857 unw_rec_list *ptr = alloc_record (gr_mem);
1858 ptr->r.record.p.rmask = mask;
1862 static unw_rec_list *
1863 output_br_mem (unsigned int mask)
1865 unw_rec_list *ptr = alloc_record (br_mem);
1866 ptr->r.record.p.brmask = mask;
1870 static unw_rec_list *
1871 output_br_gr (save_mask, reg)
1872 unsigned int save_mask;
1875 unw_rec_list *ptr = alloc_record (br_gr);
1876 ptr->r.record.p.brmask = save_mask;
1877 ptr->r.record.p.gr = reg;
1881 static unw_rec_list *
1882 output_spill_base (offset)
1883 unsigned int offset;
1885 unw_rec_list *ptr = alloc_record (spill_base);
1886 ptr->r.record.p.pspoff = offset / 4;
1890 static unw_rec_list *
1893 unw_rec_list *ptr = alloc_record (unat_when);
1897 static unw_rec_list *
1901 unw_rec_list *ptr = alloc_record (unat_gr);
1902 ptr->r.record.p.gr = gr;
1906 static unw_rec_list *
1907 output_unat_psprel (offset)
1908 unsigned int offset;
1910 unw_rec_list *ptr = alloc_record (unat_psprel);
1911 ptr->r.record.p.pspoff = offset / 4;
1915 static unw_rec_list *
1916 output_unat_sprel (offset)
1917 unsigned int offset;
1919 unw_rec_list *ptr = alloc_record (unat_sprel);
1920 ptr->r.record.p.spoff = offset / 4;
1924 static unw_rec_list *
1927 unw_rec_list *ptr = alloc_record (lc_when);
1931 static unw_rec_list *
1935 unw_rec_list *ptr = alloc_record (lc_gr);
1936 ptr->r.record.p.gr = gr;
1940 static unw_rec_list *
1941 output_lc_psprel (offset)
1942 unsigned int offset;
1944 unw_rec_list *ptr = alloc_record (lc_psprel);
1945 ptr->r.record.p.pspoff = offset / 4;
1949 static unw_rec_list *
1950 output_lc_sprel (offset)
1951 unsigned int offset;
1953 unw_rec_list *ptr = alloc_record (lc_sprel);
1954 ptr->r.record.p.spoff = offset / 4;
1958 static unw_rec_list *
1961 unw_rec_list *ptr = alloc_record (fpsr_when);
1965 static unw_rec_list *
1969 unw_rec_list *ptr = alloc_record (fpsr_gr);
1970 ptr->r.record.p.gr = gr;
1974 static unw_rec_list *
1975 output_fpsr_psprel (offset)
1976 unsigned int offset;
1978 unw_rec_list *ptr = alloc_record (fpsr_psprel);
1979 ptr->r.record.p.pspoff = offset / 4;
1983 static unw_rec_list *
1984 output_fpsr_sprel (offset)
1985 unsigned int offset;
1987 unw_rec_list *ptr = alloc_record (fpsr_sprel);
1988 ptr->r.record.p.spoff = offset / 4;
1992 static unw_rec_list *
1993 output_priunat_when_gr ()
1995 unw_rec_list *ptr = alloc_record (priunat_when_gr);
1999 static unw_rec_list *
2000 output_priunat_when_mem ()
2002 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2006 static unw_rec_list *
2007 output_priunat_gr (gr)
2010 unw_rec_list *ptr = alloc_record (priunat_gr);
2011 ptr->r.record.p.gr = gr;
2015 static unw_rec_list *
2016 output_priunat_psprel (offset)
2017 unsigned int offset;
2019 unw_rec_list *ptr = alloc_record (priunat_psprel);
2020 ptr->r.record.p.pspoff = offset / 4;
2024 static unw_rec_list *
2025 output_priunat_sprel (offset)
2026 unsigned int offset;
2028 unw_rec_list *ptr = alloc_record (priunat_sprel);
2029 ptr->r.record.p.spoff = offset / 4;
2033 static unw_rec_list *
2036 unw_rec_list *ptr = alloc_record (bsp_when);
2040 static unw_rec_list *
2044 unw_rec_list *ptr = alloc_record (bsp_gr);
2045 ptr->r.record.p.gr = gr;
2049 static unw_rec_list *
2050 output_bsp_psprel (offset)
2051 unsigned int offset;
2053 unw_rec_list *ptr = alloc_record (bsp_psprel);
2054 ptr->r.record.p.pspoff = offset / 4;
2058 static unw_rec_list *
2059 output_bsp_sprel (offset)
2060 unsigned int offset;
2062 unw_rec_list *ptr = alloc_record (bsp_sprel);
2063 ptr->r.record.p.spoff = offset / 4;
2067 static unw_rec_list *
2068 output_bspstore_when ()
2070 unw_rec_list *ptr = alloc_record (bspstore_when);
2074 static unw_rec_list *
2075 output_bspstore_gr (gr)
2078 unw_rec_list *ptr = alloc_record (bspstore_gr);
2079 ptr->r.record.p.gr = gr;
2083 static unw_rec_list *
2084 output_bspstore_psprel (offset)
2085 unsigned int offset;
2087 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2088 ptr->r.record.p.pspoff = offset / 4;
2092 static unw_rec_list *
2093 output_bspstore_sprel (offset)
2094 unsigned int offset;
2096 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2097 ptr->r.record.p.spoff = offset / 4;
2101 static unw_rec_list *
2104 unw_rec_list *ptr = alloc_record (rnat_when);
2108 static unw_rec_list *
2112 unw_rec_list *ptr = alloc_record (rnat_gr);
2113 ptr->r.record.p.gr = gr;
2117 static unw_rec_list *
2118 output_rnat_psprel (offset)
2119 unsigned int offset;
2121 unw_rec_list *ptr = alloc_record (rnat_psprel);
2122 ptr->r.record.p.pspoff = offset / 4;
2126 static unw_rec_list *
2127 output_rnat_sprel (offset)
2128 unsigned int offset;
2130 unw_rec_list *ptr = alloc_record (rnat_sprel);
2131 ptr->r.record.p.spoff = offset / 4;
2135 static unw_rec_list *
2136 output_unwabi (abi, context)
2138 unsigned long context;
2140 unw_rec_list *ptr = alloc_record (unwabi);
2141 ptr->r.record.p.abi = abi;
2142 ptr->r.record.p.context = context;
2146 static unw_rec_list *
2147 output_epilogue (unsigned long ecount)
2149 unw_rec_list *ptr = alloc_record (epilogue);
2150 ptr->r.record.b.ecount = ecount;
2154 static unw_rec_list *
2155 output_label_state (unsigned long label)
2157 unw_rec_list *ptr = alloc_record (label_state);
2158 ptr->r.record.b.label = label;
2162 static unw_rec_list *
2163 output_copy_state (unsigned long label)
2165 unw_rec_list *ptr = alloc_record (copy_state);
2166 ptr->r.record.b.label = label;
2170 static unw_rec_list *
2171 output_spill_psprel (ab, reg, offset)
2174 unsigned int offset;
2176 unw_rec_list *ptr = alloc_record (spill_psprel);
2177 ptr->r.record.x.ab = ab;
2178 ptr->r.record.x.reg = reg;
2179 ptr->r.record.x.pspoff = offset / 4;
2183 static unw_rec_list *
2184 output_spill_sprel (ab, reg, offset)
2187 unsigned int offset;
2189 unw_rec_list *ptr = alloc_record (spill_sprel);
2190 ptr->r.record.x.ab = ab;
2191 ptr->r.record.x.reg = reg;
2192 ptr->r.record.x.spoff = offset / 4;
2196 static unw_rec_list *
2197 output_spill_psprel_p (ab, reg, offset, predicate)
2200 unsigned int offset;
2201 unsigned int predicate;
2203 unw_rec_list *ptr = alloc_record (spill_psprel_p);
2204 ptr->r.record.x.ab = ab;
2205 ptr->r.record.x.reg = reg;
2206 ptr->r.record.x.pspoff = offset / 4;
2207 ptr->r.record.x.qp = predicate;
2211 static unw_rec_list *
2212 output_spill_sprel_p (ab, reg, offset, predicate)
2215 unsigned int offset;
2216 unsigned int predicate;
2218 unw_rec_list *ptr = alloc_record (spill_sprel_p);
2219 ptr->r.record.x.ab = ab;
2220 ptr->r.record.x.reg = reg;
2221 ptr->r.record.x.spoff = offset / 4;
2222 ptr->r.record.x.qp = predicate;
2226 static unw_rec_list *
2227 output_spill_reg (ab, reg, targ_reg, xy)
2230 unsigned int targ_reg;
2233 unw_rec_list *ptr = alloc_record (spill_reg);
2234 ptr->r.record.x.ab = ab;
2235 ptr->r.record.x.reg = reg;
2236 ptr->r.record.x.treg = targ_reg;
2237 ptr->r.record.x.xy = xy;
2241 static unw_rec_list *
2242 output_spill_reg_p (ab, reg, targ_reg, xy, predicate)
2245 unsigned int targ_reg;
2247 unsigned int predicate;
2249 unw_rec_list *ptr = alloc_record (spill_reg_p);
2250 ptr->r.record.x.ab = ab;
2251 ptr->r.record.x.reg = reg;
2252 ptr->r.record.x.treg = targ_reg;
2253 ptr->r.record.x.xy = xy;
2254 ptr->r.record.x.qp = predicate;
2258 /* Given a unw_rec_list, output the record in the correct format
2259 with the specified function. */
2262 process_one_record (ptr, f)
2266 unsigned long fr_mask, gr_mask;
2268 switch (ptr->r.type)
2274 /* These are taken care of by prologue/prologue_gr. */
2279 if (ptr->r.type == prologue_gr)
2280 output_R2_format (f, ptr->r.record.r.grmask,
2281 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2283 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2285 /* Output descriptor(s) for union of register spills (if any). */
2286 gr_mask = ptr->r.record.r.mask.gr_mem;
2287 fr_mask = ptr->r.record.r.mask.fr_mem;
2290 if ((fr_mask & ~0xfUL) == 0)
2291 output_P6_format (f, fr_mem, fr_mask);
2294 output_P5_format (f, gr_mask, fr_mask);
2299 output_P6_format (f, gr_mem, gr_mask);
2300 if (ptr->r.record.r.mask.br_mem)
2301 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2303 /* output imask descriptor if necessary: */
2304 if (ptr->r.record.r.mask.i)
2305 output_P4_format (f, ptr->r.record.r.mask.i,
2306 ptr->r.record.r.imask_size);
2310 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2314 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2315 ptr->r.record.p.size);
2328 output_P3_format (f, ptr->r.type, ptr->r.record.p.gr);
2331 output_P3_format (f, rp_br, ptr->r.record.p.br);
2334 output_P7_format (f, psp_sprel, ptr->r.record.p.spoff, 0);
2342 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2351 output_P7_format (f, ptr->r.type, ptr->r.record.p.pspoff, 0);
2361 case bspstore_sprel:
2363 output_P8_format (f, ptr->r.type, ptr->r.record.p.spoff);
2366 output_P9_format (f, ptr->r.record.p.grmask, ptr->r.record.p.gr);
2369 output_P2_format (f, ptr->r.record.p.brmask, ptr->r.record.p.gr);
2372 as_bad ("spill_mask record unimplemented.");
2374 case priunat_when_gr:
2375 case priunat_when_mem:
2379 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2381 case priunat_psprel:
2383 case bspstore_psprel:
2385 output_P8_format (f, ptr->r.type, ptr->r.record.p.pspoff);
2388 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2391 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2395 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2398 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2399 ptr->r.record.x.reg, ptr->r.record.x.t,
2400 ptr->r.record.x.pspoff);
2403 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2404 ptr->r.record.x.reg, ptr->r.record.x.t,
2405 ptr->r.record.x.spoff);
2408 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2409 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2410 ptr->r.record.x.treg, ptr->r.record.x.t);
2412 case spill_psprel_p:
2413 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2414 ptr->r.record.x.ab, ptr->r.record.x.reg,
2415 ptr->r.record.x.t, ptr->r.record.x.pspoff);
2418 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2419 ptr->r.record.x.ab, ptr->r.record.x.reg,
2420 ptr->r.record.x.t, ptr->r.record.x.spoff);
2423 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2424 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2425 ptr->r.record.x.xy, ptr->r.record.x.treg,
2429 as_bad ("record type not valid");
2434 /* Given a unw_rec_list list, process all the records with
2435 the specified function. */
2437 process_unw_records (list, f)
2442 for (ptr = list; ptr; ptr = ptr->next)
2443 process_one_record (ptr, f);
2446 /* Determine the size of a record list in bytes. */
2448 calc_record_size (list)
2452 process_unw_records (list, count_output);
2456 /* Update IMASK bitmask to reflect the fact that one or more registers
2457 of type TYPE are saved starting at instruction with index T. If N
2458 bits are set in REGMASK, it is assumed that instructions T through
2459 T+N-1 save these registers.
2463 1: instruction saves next fp reg
2464 2: instruction saves next general reg
2465 3: instruction saves next branch reg */
2467 set_imask (region, regmask, t, type)
2468 unw_rec_list *region;
2469 unsigned long regmask;
2473 unsigned char *imask;
2474 unsigned long imask_size;
2478 imask = region->r.record.r.mask.i;
2479 imask_size = region->r.record.r.imask_size;
2482 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2483 imask = xmalloc (imask_size);
2484 memset (imask, 0, imask_size);
2486 region->r.record.r.imask_size = imask_size;
2487 region->r.record.r.mask.i = imask;
2491 pos = 2 * (3 - t % 4);
2494 if (i >= imask_size)
2496 as_bad ("Ignoring attempt to spill beyond end of region");
2500 imask[i] |= (type & 0x3) << pos;
2502 regmask &= (regmask - 1);
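/* Worked example (editorial, not in the original source): recording
   that a general register (TYPE == 2) is saved by the instruction in
   region slot t == 6 gives pos = 2 * (3 - 6 % 4) = 2, so the two-bit
   code 0b10 is OR'd into bits 3:2 of the imask byte covering slots
   4-7.  Each imask byte thus describes four consecutive instruction
   slots, two bits per slot.  */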
2513 count_bits (unsigned long mask)
2525 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2526 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2527 containing FIRST_ADDR. */
2530 slot_index (slot_addr, slot_frag, first_addr, first_frag)
2531 unsigned long slot_addr;
2533 unsigned long first_addr;
2536 unsigned long index = 0;
2538 /* First time we are called, the initial address and frag are invalid. */
2539 if (first_addr == 0)
2542 /* If the two addresses are in different frags, then we need to add in
2543 the remaining size of this frag, and then the entire size of intermediate frags.  */
2545 while (slot_frag != first_frag)
2547 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2549 /* Add in the full size of the frag converted to instruction slots. */
2550 index += 3 * (first_frag->fr_fix >> 4);
2551 /* Subtract away the initial part before first_addr. */
2552 index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2553 + ((first_addr & 0x3) - (start_addr & 0x3)));
2555 /* Move to the beginning of the next frag. */
2556 first_frag = first_frag->fr_next;
2557 first_addr = (unsigned long) &first_frag->fr_literal;
2560 /* Add in the used part of the last frag. */
2561 index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2562 + ((slot_addr & 0x3) - (first_addr & 0x3)));
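/* Worked example (editorial, not in the original source): within a
   single frag, bundles are 16 bytes apart and the slot number (0-2)
   lives in the low bits of the address.  If FIRST_ADDR refers to slot
   0 of some bundle and SLOT_ADDR to slot 1 of the bundle 32 bytes
   (two bundles) later, then

     index = 3 * ((slot_addr >> 4) - (first_addr >> 4))
	     + ((slot_addr & 0x3) - (first_addr & 0x3))
	   = 3 * 2 + (1 - 0) = 7

   i.e. the two points are seven instruction slots apart.  */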
2566 /* Optimize unwind record directives. */
2568 static unw_rec_list *
2569 optimize_unw_records (list)
2575 /* If the only unwind record is ".prologue" or ".prologue" followed
2576 by ".body", then we can optimize the unwind directives away. */
2577 if (list->r.type == prologue
2578 && (list->next == NULL
2579 || (list->next->r.type == body && list->next->next == NULL)))
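/* Editorial note: in practice this means that a function annotated
   only with, say,

	.proc	f
     f:
	.prologue
	.body
	...
	.endp	f

   carries no unwind information beyond the region boundaries, so the
   record list is dropped here and no unwind image has to be generated
   for this trivial case.  */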
2585 /* Given a complete record list, process any records which have
2586 unresolved fields (i.e., length counts for a prologue). After
2587 this has been run, all necessary information should be available
2588 within each record to generate an image. */
2591 fixup_unw_records (list)
2594 unw_rec_list *ptr, *region = 0;
2595 unsigned long first_addr = 0, rlen = 0, t;
2596 fragS *first_frag = 0;
2598 for (ptr = list; ptr; ptr = ptr->next)
2600 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2601 as_bad ("Insn slot not set in unwind record.");
2602 t = slot_index (ptr->slot_number, ptr->slot_frag,
2603 first_addr, first_frag);
2604 switch (ptr->r.type)
2611 int size, dir_len = 0;
2612 unsigned long last_addr;
2615 first_addr = ptr->slot_number;
2616 first_frag = ptr->slot_frag;
2617 ptr->slot_number = 0;
2618 /* Find either the next body/prologue start, or the end of
2619 the list, and determine the size of the region. */
2620 last_addr = unwind.next_slot_number;
2621 last_frag = unwind.next_slot_frag;
2622 for (last = ptr->next; last != NULL; last = last->next)
2623 if (last->r.type == prologue || last->r.type == prologue_gr
2624 || last->r.type == body)
2626 last_addr = last->slot_number;
2627 last_frag = last->slot_frag;
2630 else if (!last->next)
2632 /* In the absence of an explicit .body directive,
2633 the prologue ends after the last instruction
2634 covered by an unwind directive. */
2635 if (ptr->r.type != body)
2637 last_addr = last->slot_number;
2638 last_frag = last->slot_frag;
2639 switch (last->r.type)
2642 dir_len = (count_bits (last->r.record.p.frmask)
2643 + count_bits (last->r.record.p.grmask));
2647 dir_len += count_bits (last->r.record.p.rmask);
2651 dir_len += count_bits (last->r.record.p.brmask);
2654 dir_len += count_bits (last->r.record.p.grmask);
2663 size = (slot_index (last_addr, last_frag, first_addr, first_frag)
2665 rlen = ptr->r.record.r.rlen = size;
2670 ptr->r.record.b.t = rlen - 1 - t;
2681 case priunat_when_gr:
2682 case priunat_when_mem:
2686 ptr->r.record.p.t = t;
2694 case spill_psprel_p:
2695 ptr->r.record.x.t = t;
2701 as_bad ("frgr_mem record before region record!\n");
2704 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2705 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2706 set_imask (region, ptr->r.record.p.frmask, t, 1);
2707 set_imask (region, ptr->r.record.p.grmask, t, 2);
2712 as_bad ("fr_mem record before region record!\n");
2715 region->r.record.r.mask.fr_mem |= ptr->r.record.p.rmask;
2716 set_imask (region, ptr->r.record.p.rmask, t, 1);
2721 as_bad ("gr_mem record before region record!\n");
2724 region->r.record.r.mask.gr_mem |= ptr->r.record.p.rmask;
2725 set_imask (region, ptr->r.record.p.rmask, t, 2);
2730 as_bad ("br_mem record before region record!\n");
2733 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2734 set_imask (region, ptr->r.record.p.brmask, t, 3);
2740 as_bad ("gr_gr record before region record!\n");
2743 set_imask (region, ptr->r.record.p.grmask, t, 2);
2748 as_bad ("br_gr record before region record!\n");
2751 set_imask (region, ptr->r.record.p.brmask, t, 3);
2760 /* Generate an unwind image from a record list. Returns the number of
2761 bytes in the resulting image.  The memory image itself is returned
2762 in the 'ptr' parameter. */
2764 output_unw_records (list, ptr)
2768 int size, x, extra = 0;
2773 list = optimize_unw_records (list);
2774 fixup_unw_records (list);
2775 size = calc_record_size (list);
2777 /* Pad to an 8-byte boundary.  */
2782 if (size > 0 || unwind.force_unwind_entry)
2784 unwind.force_unwind_entry = 0;
2786 /* Add 8 for the header + 8 more bytes for the personality offset. */
2787 mem = xmalloc (size + extra + 16);
2789 vbyte_mem_ptr = mem + 8;
2790 /* Clear the padding area and personality. */
2791 memset (mem + 8 + size, 0 , extra + 8);
2792 /* Initialize the header area. */
2793 md_number_to_chars (mem,
2794 (((bfd_vma) 1 << 48) /* version */
2795 | (unwind.personality_routine
2796 ? ((bfd_vma) 3 << 32) /* U & E handler flags */
2798 | ((size + extra) / 8)), /* length (dwords) */
2801 process_unw_records (list, output_vbyte_mem);
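/* Convert a register expression into the (ab, reg) pair used by spill_reg
   unwind records.  As the tests below show, the preserved registers
   accepted here are r4-r7, f2-f5 and f16-f31, b1-b5, and the special
   registers listed in the switch (pr, psp, priunat, rp, and several ar
   registers).  Judging from the callers, the function returns nonzero on
   success and zero otherwise.  (Summary comment added for clarity.)  */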
2811 convert_expr_to_ab_reg (e, ab, regp)
2818 if (e->X_op != O_register)
2821 reg = e->X_add_number;
2822 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
2825 *regp = reg - REG_GR;
2827 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
2828 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
2831 *regp = reg - REG_FR;
2833 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
2836 *regp = reg - REG_BR;
2843 case REG_PR: *regp = 0; break;
2844 case REG_PSP: *regp = 1; break;
2845 case REG_PRIUNAT: *regp = 2; break;
2846 case REG_BR + 0: *regp = 3; break;
2847 case REG_AR + AR_BSP: *regp = 4; break;
2848 case REG_AR + AR_BSPSTORE: *regp = 5; break;
2849 case REG_AR + AR_RNAT: *regp = 6; break;
2850 case REG_AR + AR_UNAT: *regp = 7; break;
2851 case REG_AR + AR_FPSR: *regp = 8; break;
2852 case REG_AR + AR_PFS: *regp = 9; break;
2853 case REG_AR + AR_LC: *regp = 10; break;
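/* Likewise, convert a target-register expression into the (xy, reg) pair
   used by spill_reg records.  The range checks below accept any general,
   floating-point, or branch register as the target.  (Summary comment
   added for clarity.)  */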
2863 convert_expr_to_xy_reg (e, xy, regp)
2870 if (e->X_op != O_register)
2873 reg = e->X_add_number;
2875 if (/* reg >= REG_GR && */ reg <= (REG_GR + 127))
2878 *regp = reg - REG_GR;
2880 else if (reg >= REG_FR && reg <= (REG_FR + 127))
2883 *regp = reg - REG_FR;
2885 else if (reg >= REG_BR && reg <= (REG_BR + 7))
2888 *regp = reg - REG_BR;
2897 int dummy ATTRIBUTE_UNUSED;
2902 radix = *input_line_pointer++;
2904 if (radix != 'C' && !is_end_of_line[(unsigned char) radix])
2906 as_bad ("Radix `%c' unsupported", *input_line_pointer);
2907 ignore_rest_of_line ();
2912 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
2914 dot_special_section (which)
2917 set_section ((char *) special_section_name[which]);
2921 add_unwind_entry (ptr)
2925 unwind.tail->next = ptr;
2930 /* The current entry can in fact be a chain of unwind entries. */
2931 if (unwind.current_entry == NULL)
2932 unwind.current_entry = ptr;
2937 int dummy ATTRIBUTE_UNUSED;
2943 if (e.X_op != O_constant)
2944 as_bad ("Operand to .fframe must be a constant");
2946 add_unwind_entry (output_mem_stack_f (e.X_add_number));
2951 int dummy ATTRIBUTE_UNUSED;
2957 reg = e.X_add_number - REG_GR;
2958 if (e.X_op == O_register && reg < 128)
2960 add_unwind_entry (output_mem_stack_v ());
2961 if (! (unwind.prologue_mask & 2))
2962 add_unwind_entry (output_psp_gr (reg));
2965 as_bad ("First operand to .vframe must be a general register");
2969 dot_vframesp (dummy)
2970 int dummy ATTRIBUTE_UNUSED;
2975 if (e.X_op == O_constant)
2977 add_unwind_entry (output_mem_stack_v ());
2978 add_unwind_entry (output_psp_sprel (e.X_add_number));
2981 as_bad ("First operand to .vframesp must be a general register");
2985 dot_vframepsp (dummy)
2986 int dummy ATTRIBUTE_UNUSED;
2991 if (e.X_op == O_constant)
2993 add_unwind_entry (output_mem_stack_v ());
2994 add_unwind_entry (output_psp_sprel (e.X_add_number));
2997 as_bad ("First operand to .vframepsp must be a general register");
3002 int dummy ATTRIBUTE_UNUSED;
3008 sep = parse_operand (&e1);
3010 as_bad ("No second operand to .save");
3011 sep = parse_operand (&e2);
3013 reg1 = e1.X_add_number;
3014 reg2 = e2.X_add_number - REG_GR;
3016 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'.  */
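/* Typical uses (illustrative): `.save ar.pfs, r33' or `.save rp, r32',
   recording both when the resource was saved and which GR holds it.  */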
3017 if (e1.X_op == O_register)
3019 if (e2.X_op == O_register && reg2 >= 0 && reg2 < 128)
3023 case REG_AR + AR_BSP:
3024 add_unwind_entry (output_bsp_when ());
3025 add_unwind_entry (output_bsp_gr (reg2));
3027 case REG_AR + AR_BSPSTORE:
3028 add_unwind_entry (output_bspstore_when ());
3029 add_unwind_entry (output_bspstore_gr (reg2));
3031 case REG_AR + AR_RNAT:
3032 add_unwind_entry (output_rnat_when ());
3033 add_unwind_entry (output_rnat_gr (reg2));
3035 case REG_AR + AR_UNAT:
3036 add_unwind_entry (output_unat_when ());
3037 add_unwind_entry (output_unat_gr (reg2));
3039 case REG_AR + AR_FPSR:
3040 add_unwind_entry (output_fpsr_when ());
3041 add_unwind_entry (output_fpsr_gr (reg2));
3043 case REG_AR + AR_PFS:
3044 add_unwind_entry (output_pfs_when ());
3045 if (! (unwind.prologue_mask & 4))
3046 add_unwind_entry (output_pfs_gr (reg2));
3048 case REG_AR + AR_LC:
3049 add_unwind_entry (output_lc_when ());
3050 add_unwind_entry (output_lc_gr (reg2));
3053 add_unwind_entry (output_rp_when ());
3054 if (! (unwind.prologue_mask & 8))
3055 add_unwind_entry (output_rp_gr (reg2));
3058 add_unwind_entry (output_preds_when ());
3059 if (! (unwind.prologue_mask & 1))
3060 add_unwind_entry (output_preds_gr (reg2));
3063 add_unwind_entry (output_priunat_when_gr ());
3064 add_unwind_entry (output_priunat_gr (reg2));
3067 as_bad ("First operand not a valid register");
3071 as_bad (" Second operand not a valid register");
3074 as_bad ("First operand not a register");
3079 int dummy ATTRIBUTE_UNUSED;
3082 unsigned long ecount; /* # of _additional_ regions to pop */
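/* Illustrative usage: `.restore sp' ends the current prologue region;
   `.restore sp, 2' additionally pops two enclosing regions.  */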
3085 sep = parse_operand (&e1);
3086 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3088 as_bad ("First operand to .restore must be stack pointer (sp)");
3094 parse_operand (&e2);
3095 if (e2.X_op != O_constant || e2.X_add_number < 0)
3097 as_bad ("Second operand to .restore must be a constant >= 0");
3100 ecount = e2.X_add_number;
3103 ecount = unwind.prologue_count - 1;
3104 add_unwind_entry (output_epilogue (ecount));
3106 if (ecount < unwind.prologue_count)
3107 unwind.prologue_count -= ecount + 1;
3109 unwind.prologue_count = 0;
3113 dot_restorereg (dummy)
3114 int dummy ATTRIBUTE_UNUSED;
3116 unsigned int ab, reg;
3121 if (!convert_expr_to_ab_reg (&e, &ab, &reg))
3123 as_bad ("First operand to .restorereg must be a preserved register");
3126 add_unwind_entry (output_spill_reg (ab, reg, 0, 0));
3130 dot_restorereg_p (dummy)
3131 int dummy ATTRIBUTE_UNUSED;
3133 unsigned int qp, ab, reg;
3137 sep = parse_operand (&e1);
3140 as_bad ("No second operand to .restorereg.p");
3144 parse_operand (&e2);
3146 qp = e1.X_add_number - REG_P;
3147 if (e1.X_op != O_register || qp > 63)
3149 as_bad ("First operand to .restorereg.p must be a predicate");
3153 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3155 as_bad ("Second operand to .restorereg.p must be a preserved register");
3158 add_unwind_entry (output_spill_reg_p (ab, reg, 0, 0, qp));
3162 generate_unwind_image (text_name)
3163 const char *text_name;
3166 unsigned char *unw_rec;
3168 /* Force out pending instructions, to make sure all unwind records have
3169 a valid slot_number field. */
3170 ia64_flush_insns ();
3172 /* Generate the unwind record. */
3173 size = output_unw_records (unwind.list, (void **) &unw_rec);
3175 as_bad ("Unwind record is not a multiple of 8 bytes.");
3177 /* If there are unwind records, switch sections, and output the info. */
3180 unsigned char *where;
3184 make_unw_section_name (SPECIAL_SECTION_UNWIND_INFO, text_name, sec_name);
3185 set_section (sec_name);
3186 bfd_set_section_flags (stdoutput, now_seg,
3187 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3189 /* Make sure the section has 8 byte alignment. */
3190 frag_align (3, 0, 0);
3191 record_alignment (now_seg, 3);
3193 /* Set expression which points to start of unwind descriptor area. */
3194 unwind.info = expr_build_dot ();
3196 where = (unsigned char *) frag_more (size);
3198 /* Issue a label for this address, and keep track of it to put it
3199 in the unwind section. */
3201 /* Copy the information from the unwind record into this section. The
3202 data is already in the correct byte order. */
3203 memcpy (where, unw_rec, size);
3205 /* Add the personality address to the image. */
3206 if (unwind.personality_routine != 0)
3208 exp.X_op = O_symbol;
3209 exp.X_add_symbol = unwind.personality_routine;
3210 exp.X_add_number = 0;
3211 fix_new_exp (frag_now, frag_now_fix () - 8, 8,
3212 &exp, 0, BFD_RELOC_IA64_LTOFF_FPTR64LSB);
3213 unwind.personality_routine = 0;
3217 free_list_records (unwind.list);
3218 unwind.list = unwind.tail = unwind.current_entry = NULL;
3224 dot_handlerdata (dummy)
3225 int dummy ATTRIBUTE_UNUSED;
3227 const char *text_name = segment_name (now_seg);
3229 /* If the text section is exactly ".text" (which it usually is), use an
3230 empty name here so that the default unwind section names result.  */
3231 if (strcmp (text_name, ".text") == 0)
3234 unwind.force_unwind_entry = 1;
3236 /* Remember which segment we're in so we can switch back after .endp */
3237 unwind.saved_text_seg = now_seg;
3238 unwind.saved_text_subseg = now_subseg;
3240 /* Generate unwind info into unwind-info section and then leave that
3241 section as the currently active one so dataXX directives go into
3242 the language specific data area of the unwind info block. */
3243 generate_unwind_image (text_name);
3244 demand_empty_rest_of_line ();
3248 dot_unwentry (dummy)
3249 int dummy ATTRIBUTE_UNUSED;
3251 unwind.force_unwind_entry = 1;
3252 demand_empty_rest_of_line ();
3257 int dummy ATTRIBUTE_UNUSED;
3263 reg = e.X_add_number - REG_BR;
3264 if (e.X_op == O_register && reg < 8)
3265 add_unwind_entry (output_rp_br (reg));
3267 as_bad ("First operand not a valid branch register");
3271 dot_savemem (psprel)
3278 sep = parse_operand (&e1);
3280 as_bad ("No second operand to .save%ssp", psprel ? "p" : "");
3281 sep = parse_operand (&e2);
3283 reg1 = e1.X_add_number;
3284 val = e2.X_add_number;
3286 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'.  */
3287 if (e1.X_op == O_register)
3289 if (e2.X_op == O_constant)
3293 case REG_AR + AR_BSP:
3294 add_unwind_entry (output_bsp_when ());
3295 add_unwind_entry ((psprel
3297 : output_bsp_sprel) (val));
3299 case REG_AR + AR_BSPSTORE:
3300 add_unwind_entry (output_bspstore_when ());
3301 add_unwind_entry ((psprel
3302 ? output_bspstore_psprel
3303 : output_bspstore_sprel) (val));
3305 case REG_AR + AR_RNAT:
3306 add_unwind_entry (output_rnat_when ());
3307 add_unwind_entry ((psprel
3308 ? output_rnat_psprel
3309 : output_rnat_sprel) (val));
3311 case REG_AR + AR_UNAT:
3312 add_unwind_entry (output_unat_when ());
3313 add_unwind_entry ((psprel
3314 ? output_unat_psprel
3315 : output_unat_sprel) (val));
3317 case REG_AR + AR_FPSR:
3318 add_unwind_entry (output_fpsr_when ());
3319 add_unwind_entry ((psprel
3320 ? output_fpsr_psprel
3321 : output_fpsr_sprel) (val));
3323 case REG_AR + AR_PFS:
3324 add_unwind_entry (output_pfs_when ());
3325 add_unwind_entry ((psprel
3327 : output_pfs_sprel) (val));
3329 case REG_AR + AR_LC:
3330 add_unwind_entry (output_lc_when ());
3331 add_unwind_entry ((psprel
3333 : output_lc_sprel) (val));
3336 add_unwind_entry (output_rp_when ());
3337 add_unwind_entry ((psprel
3339 : output_rp_sprel) (val));
3342 add_unwind_entry (output_preds_when ());
3343 add_unwind_entry ((psprel
3344 ? output_preds_psprel
3345 : output_preds_sprel) (val));
3348 add_unwind_entry (output_priunat_when_mem ());
3349 add_unwind_entry ((psprel
3350 ? output_priunat_psprel
3351 : output_priunat_sprel) (val));
3354 as_bad ("First operand not a valid register");
3358 as_bad (" Second operand not a valid constant");
3361 as_bad ("First operand not a register");
3366 int dummy ATTRIBUTE_UNUSED;
3370 sep = parse_operand (&e1);
3372 parse_operand (&e2);
3374 if (e1.X_op != O_constant)
3375 as_bad ("First operand to .save.g must be a constant.");
3378 int grmask = e1.X_add_number;
3380 add_unwind_entry (output_gr_mem (grmask));
3383 int reg = e2.X_add_number - REG_GR;
3384 if (e2.X_op == O_register && reg >= 0 && reg < 128)
3385 add_unwind_entry (output_gr_gr (grmask, reg));
3387 as_bad ("Second operand is an invalid register.");
3394 int dummy ATTRIBUTE_UNUSED;
3398 sep = parse_operand (&e1);
3400 if (e1.X_op != O_constant)
3401 as_bad ("Operand to .save.f must be a constant.");
3403 add_unwind_entry (output_fr_mem (e1.X_add_number));
3408 int dummy ATTRIBUTE_UNUSED;
3415 sep = parse_operand (&e1);
3416 if (e1.X_op != O_constant)
3418 as_bad ("First operand to .save.b must be a constant.");
3421 brmask = e1.X_add_number;
3425 sep = parse_operand (&e2);
3426 reg = e2.X_add_number - REG_GR;
3427 if (e2.X_op != O_register || reg > 127)
3429 as_bad ("Second operand to .save.b must be a general register.");
3432 add_unwind_entry (output_br_gr (brmask, e2.X_add_number));
3435 add_unwind_entry (output_br_mem (brmask));
3437 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3438 ignore_rest_of_line ();
3443 int dummy ATTRIBUTE_UNUSED;
3447 sep = parse_operand (&e1);
3449 parse_operand (&e2);
3451 if (e1.X_op != O_constant || sep != ',' || e2.X_op != O_constant)
3452 as_bad ("Both operands of .save.gf must be constants.");
3455 int grmask = e1.X_add_number;
3456 int frmask = e2.X_add_number;
3457 add_unwind_entry (output_frgr_mem (grmask, frmask));
3463 int dummy ATTRIBUTE_UNUSED;
3468 sep = parse_operand (&e);
3469 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3470 ignore_rest_of_line ();
3472 if (e.X_op != O_constant)
3473 as_bad ("Operand to .spill must be a constant");
3475 add_unwind_entry (output_spill_base (e.X_add_number));
3479 dot_spillreg (dummy)
3480 int dummy ATTRIBUTE_UNUSED;
3482 int sep, ab, xy, reg, treg;
3485 sep = parse_operand (&e1);
3488 as_bad ("No second operand to .spillreg");
3492 parse_operand (&e2);
3494 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3496 as_bad ("First operand to .spillreg must be a preserved register");
3500 if (!convert_expr_to_xy_reg (&e2, &xy, &treg))
3502 as_bad ("Second operand to .spillreg must be a register");
3506 add_unwind_entry (output_spill_reg (ab, reg, treg, xy));
3510 dot_spillmem (psprel)
3516 sep = parse_operand (&e1);
3519 as_bad ("Second operand missing");
3523 parse_operand (&e2);
3525 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3527 as_bad ("First operand to .spill%s must be a preserved register",
3528 psprel ? "psp" : "sp");
3532 if (e2.X_op != O_constant)
3534 as_bad ("Second operand to .spill%s must be a constant",
3535 psprel ? "psp" : "sp");
3540 add_unwind_entry (output_spill_psprel (ab, reg, e2.X_add_number));
3542 add_unwind_entry (output_spill_sprel (ab, reg, e2.X_add_number));
3546 dot_spillreg_p (dummy)
3547 int dummy ATTRIBUTE_UNUSED;
3549 int sep, ab, xy, reg, treg;
3550 expressionS e1, e2, e3;
3553 sep = parse_operand (&e1);
3556 as_bad ("No second and third operand to .spillreg.p");
3560 sep = parse_operand (&e2);
3563 as_bad ("No third operand to .spillreg.p");
3567 parse_operand (&e3);
3569 qp = e1.X_add_number - REG_P;
3571 if (e1.X_op != O_register || qp > 63)
3573 as_bad ("First operand to .spillreg.p must be a predicate");
3577 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3579 as_bad ("Second operand to .spillreg.p must be a preserved register");
3583 if (!convert_expr_to_xy_reg (&e3, &xy, &treg))
3585 as_bad ("Third operand to .spillreg.p must be a register");
3589 add_unwind_entry (output_spill_reg_p (ab, reg, treg, xy, qp));
3593 dot_spillmem_p (psprel)
3596 expressionS e1, e2, e3;
3600 sep = parse_operand (&e1);
3603 as_bad ("Second operand missing");
3607 parse_operand (&e2);
3610 as_bad ("Second operand missing");
3614 parse_operand (&e3);
3616 qp = e1.X_add_number - REG_P;
3617 if (e1.X_op != O_register || qp > 63)
3619 as_bad ("First operand to .spill%s_p must be a predicate",
3620 psprel ? "psp" : "sp");
3624 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3626 as_bad ("Second operand to .spill%s_p must be a preserved register",
3627 psprel ? "psp" : "sp");
3631 if (e3.X_op != O_constant)
3633 as_bad ("Third operand to .spill%s_p must be a constant",
3634 psprel ? "psp" : "sp");
3639 add_unwind_entry (output_spill_psprel_p (ab, reg, e3.X_add_number, qp));
3641 add_unwind_entry (output_spill_sprel_p (ab, reg, e3.X_add_number, qp));
3645 dot_label_state (dummy)
3646 int dummy ATTRIBUTE_UNUSED;
3651 if (e.X_op != O_constant)
3653 as_bad ("Operand to .label_state must be a constant");
3656 add_unwind_entry (output_label_state (e.X_add_number));
3660 dot_copy_state (dummy)
3661 int dummy ATTRIBUTE_UNUSED;
3666 if (e.X_op != O_constant)
3668 as_bad ("Operand to .copy_state must be a constant");
3671 add_unwind_entry (output_copy_state (e.X_add_number));
3676 int dummy ATTRIBUTE_UNUSED;
3681 sep = parse_operand (&e1);
3684 as_bad ("Second operand to .unwabi missing");
3687 sep = parse_operand (&e2);
3688 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3689 ignore_rest_of_line ();
3691 if (e1.X_op != O_constant)
3693 as_bad ("First operand to .unwabi must be a constant");
3697 if (e2.X_op != O_constant)
3699 as_bad ("Second operand to .unwabi must be a constant");
3703 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number));
3707 dot_personality (dummy)
3708 int dummy ATTRIBUTE_UNUSED;
3712 name = input_line_pointer;
3713 c = get_symbol_end ();
3714 p = input_line_pointer;
3715 unwind.personality_routine = symbol_find_or_make (name);
3716 unwind.force_unwind_entry = 1;
3719 demand_empty_rest_of_line ();
3724 int dummy ATTRIBUTE_UNUSED;
3729 unwind.proc_start = expr_build_dot ();
3730 /* Parse names of main and alternate entry points and mark them as
3731 function symbols: */
3735 name = input_line_pointer;
3736 c = get_symbol_end ();
3737 p = input_line_pointer;
3738 sym = symbol_find_or_make (name);
3739 if (unwind.proc_start == 0)
3741 unwind.proc_start = sym;
3743 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
3746 if (*input_line_pointer != ',')
3748 ++input_line_pointer;
3750 demand_empty_rest_of_line ();
3753 unwind.prologue_count = 0;
3754 unwind.list = unwind.tail = unwind.current_entry = NULL;
3755 unwind.personality_routine = 0;
3760 int dummy ATTRIBUTE_UNUSED;
3762 unwind.prologue = 0;
3763 unwind.prologue_mask = 0;
3765 add_unwind_entry (output_body ());
3766 demand_empty_rest_of_line ();
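/* Handle `.prologue' and `.prologue mask, grsave'.  In the two-operand
   form, the mask records which of the standard resources are saved in
   consecutive GRs starting at grsave; the prologue_mask bits tested
   elsewhere in this file are 1 (preds), 2 (psp), 4 (ar.pfs), and 8 (rp).
   Example (illustrative): `.prologue 12, 32' marks rp and ar.pfs as saved
   starting at r32.  */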
3770 dot_prologue (dummy)
3771 int dummy ATTRIBUTE_UNUSED;
3774 int mask = 0, grsave = 0;
3776 if (!is_it_end_of_statement ())
3779 sep = parse_operand (&e1);
3781 as_bad ("No second operand to .prologue");
3782 sep = parse_operand (&e2);
3783 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3784 ignore_rest_of_line ();
3786 if (e1.X_op == O_constant)
3788 mask = e1.X_add_number;
3790 if (e2.X_op == O_constant)
3791 grsave = e2.X_add_number;
3792 else if (e2.X_op == O_register
3793 && (grsave = e2.X_add_number - REG_GR) < 128)
3796 as_bad ("Second operand not a constant or general register");
3798 add_unwind_entry (output_prologue_gr (mask, grsave));
3801 as_bad ("First operand not a constant");
3804 add_unwind_entry (output_prologue ());
3806 unwind.prologue = 1;
3807 unwind.prologue_mask = mask;
3808 ++unwind.prologue_count;
3813 int dummy ATTRIBUTE_UNUSED;
3817 int bytes_per_address;
3820 subsegT saved_subseg;
3821 const char *sec_name, *text_name;
3823 if (unwind.saved_text_seg)
3825 saved_seg = unwind.saved_text_seg;
3826 saved_subseg = unwind.saved_text_subseg;
3827 unwind.saved_text_seg = NULL;
3831 saved_seg = now_seg;
3832 saved_subseg = now_subseg;
3836 Use a slightly ugly scheme to derive the unwind section names from
3837 the text section name:
3839 text sect. unwind table sect.
3840 name: name: comments:
3841 ---------- ----------------- --------------------------------
3843 .text.foo .IA_64.unwind.text.foo
3844 .foo .IA_64.unwind.foo
3846 .gnu.linkonce.ia64unw.foo
3847 _info .IA_64.unwind_info gas issues error message (ditto)
3848 _infoFOO .IA_64.unwind_infoFOO gas issues error message (ditto)
3850 This mapping is done so that:
3852 (a) An object file with unwind info only in .text will use
3853 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3854 This follows the letter of the ABI and also ensures backwards
3855 compatibility with older toolchains.
3857 (b) An object file with unwind info in multiple text sections
3858 will use separate unwind sections for each text section.
3859 This allows us to properly set the "sh_info" and "sh_link"
3860 fields in SHT_IA_64_UNWIND as required by the ABI and also
3861 lets GNU ld support programs with multiple segments
3862 containing unwind info (as might be the case for certain
3863 embedded applications).
3865 (c) An error is issued if there would be a name clash.
3867 text_name = segment_name (saved_seg);
3868 if (strncmp (text_name, "_info", 5) == 0)
3870 as_bad ("Illegal section name `%s' (causes unwind section name clash)",
3872 ignore_rest_of_line ();
3875 if (strcmp (text_name, ".text") == 0)
3879 demand_empty_rest_of_line ();
3881 insn_group_break (1, 0, 0);
3883 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
3885 generate_unwind_image (text_name);
3887 if (unwind.info || unwind.force_unwind_entry)
3889 subseg_set (md.last_text_seg, 0);
3890 unwind.proc_end = expr_build_dot ();
3892 make_unw_section_name (SPECIAL_SECTION_UNWIND, text_name, sec_name);
3893 set_section ((char *) sec_name);
3894 bfd_set_section_flags (stdoutput, now_seg,
3895 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3897 /* Make sure the section has 8 byte alignment. */
3898 record_alignment (now_seg, 3);
3900 ptr = frag_more (24);
3901 where = frag_now_fix () - 24;
3902 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
3904 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
3905 e.X_op = O_pseudo_fixup;
3906 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
3908 e.X_add_symbol = unwind.proc_start;
3909 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e);
3911 e.X_op = O_pseudo_fixup;
3912 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
3914 e.X_add_symbol = unwind.proc_end;
3915 ia64_cons_fix_new (frag_now, where + bytes_per_address,
3916 bytes_per_address, &e);
3920 e.X_op = O_pseudo_fixup;
3921 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
3923 e.X_add_symbol = unwind.info;
3924 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
3925 bytes_per_address, &e);
3928 md_number_to_chars (ptr + (bytes_per_address * 2), 0,
3932 subseg_set (saved_seg, saved_subseg);
3933 unwind.proc_start = unwind.proc_end = unwind.info = 0;
3937 dot_template (template)
3940 CURR_SLOT.user_template = template;
3945 int dummy ATTRIBUTE_UNUSED;
3947 int ins, locs, outs, rots;
3949 if (is_it_end_of_statement ())
3950 ins = locs = outs = rots = 0;
3953 ins = get_absolute_expression ();
3954 if (*input_line_pointer++ != ',')
3956 locs = get_absolute_expression ();
3957 if (*input_line_pointer++ != ',')
3959 outs = get_absolute_expression ();
3960 if (*input_line_pointer++ != ',')
3962 rots = get_absolute_expression ();
3964 set_regstack (ins, locs, outs, rots);
3968 as_bad ("Comma expected");
3969 ignore_rest_of_line ();
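/* Handle the .rotr, .rotf, and .rotp directives, which give symbolic names
   to ranges of rotating registers.  Example (illustrative):
       .rotr in[4], out[4]
   names eight rotating GRs starting at r32, subject to the limit checks
   below.  */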
3976 unsigned num_regs, num_alloced = 0;
3977 struct dynreg **drpp, *dr;
3978 int ch, base_reg = 0;
3984 case DYNREG_GR: base_reg = REG_GR + 32; break;
3985 case DYNREG_FR: base_reg = REG_FR + 32; break;
3986 case DYNREG_PR: base_reg = REG_P + 16; break;
3990 /* First, remove existing names from hash table. */
3991 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
3993 hash_delete (md.dynreg_hash, dr->name);
3997 drpp = &md.dynreg[type];
4000 start = input_line_pointer;
4001 ch = get_symbol_end ();
4002 *input_line_pointer = ch;
4003 len = (input_line_pointer - start);
4006 if (*input_line_pointer != '[')
4008 as_bad ("Expected '['");
4011 ++input_line_pointer; /* skip '[' */
4013 num_regs = get_absolute_expression ();
4015 if (*input_line_pointer++ != ']')
4017 as_bad ("Expected ']'");
4022 num_alloced += num_regs;
4026 if (num_alloced > md.rot.num_regs)
4028 as_bad ("Used more than the declared %d rotating registers",
4034 if (num_alloced > 96)
4036 as_bad ("Used more than the available 96 rotating registers");
4041 if (num_alloced > 48)
4043 as_bad ("Used more than the available 48 rotating registers");
4052 name = obstack_alloc (&notes, len + 1);
4053 memcpy (name, start, len);
4058 *drpp = obstack_alloc (&notes, sizeof (*dr));
4059 memset (*drpp, 0, sizeof (*dr));
4064 dr->num_regs = num_regs;
4065 dr->base = base_reg;
4067 base_reg += num_regs;
4069 if (hash_insert (md.dynreg_hash, name, dr))
4071 as_bad ("Attempt to redefine register set `%s'", name);
4075 if (*input_line_pointer != ',')
4077 ++input_line_pointer; /* skip comma */
4080 demand_empty_rest_of_line ();
4084 ignore_rest_of_line ();
4088 dot_byteorder (byteorder)
4091 target_big_endian = byteorder;
4096 int dummy ATTRIBUTE_UNUSED;
4103 option = input_line_pointer;
4104 ch = get_symbol_end ();
4105 if (strcmp (option, "lsb") == 0)
4106 md.flags &= ~EF_IA_64_BE;
4107 else if (strcmp (option, "msb") == 0)
4108 md.flags |= EF_IA_64_BE;
4109 else if (strcmp (option, "abi32") == 0)
4110 md.flags &= ~EF_IA_64_ABI64;
4111 else if (strcmp (option, "abi64") == 0)
4112 md.flags |= EF_IA_64_ABI64;
4114 as_bad ("Unknown psr option `%s'", option);
4115 *input_line_pointer = ch;
4118 if (*input_line_pointer != ',')
4121 ++input_line_pointer;
4124 demand_empty_rest_of_line ();
4129 int dummy ATTRIBUTE_UNUSED;
4131 as_bad (".alias not implemented yet");
4136 int dummy ATTRIBUTE_UNUSED;
4138 new_logical_line (0, get_absolute_expression ());
4139 demand_empty_rest_of_line ();
4143 parse_section_name ()
4149 if (*input_line_pointer != '"')
4151 as_bad ("Missing section name");
4152 ignore_rest_of_line ();
4155 name = demand_copy_C_string (&len);
4158 ignore_rest_of_line ();
4162 if (*input_line_pointer != ',')
4164 as_bad ("Comma expected after section name");
4165 ignore_rest_of_line ();
4168 ++input_line_pointer; /* skip comma */
4176 char *name = parse_section_name ();
4180 md.keep_pending_output = 1;
4183 obj_elf_previous (0);
4184 md.keep_pending_output = 0;
4187 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4190 stmt_float_cons (kind)
4197 case 'd': size = 8; break;
4198 case 'x': size = 10; break;
4205 ia64_do_align (size);
4213 int saved_auto_align = md.auto_align;
4217 md.auto_align = saved_auto_align;
4221 dot_xfloat_cons (kind)
4224 char *name = parse_section_name ();
4228 md.keep_pending_output = 1;
4230 stmt_float_cons (kind);
4231 obj_elf_previous (0);
4232 md.keep_pending_output = 0;
4236 dot_xstringer (zero)
4239 char *name = parse_section_name ();
4243 md.keep_pending_output = 1;
4246 obj_elf_previous (0);
4247 md.keep_pending_output = 0;
4254 int saved_auto_align = md.auto_align;
4255 char *name = parse_section_name ();
4259 md.keep_pending_output = 1;
4263 md.auto_align = saved_auto_align;
4264 obj_elf_previous (0);
4265 md.keep_pending_output = 0;
4269 dot_xfloat_cons_ua (kind)
4272 int saved_auto_align = md.auto_align;
4273 char *name = parse_section_name ();
4277 md.keep_pending_output = 1;
4280 stmt_float_cons (kind);
4281 md.auto_align = saved_auto_align;
4282 obj_elf_previous (0);
4283 md.keep_pending_output = 0;
4286 /* .reg.val <regname>,value */
4290 int dummy ATTRIBUTE_UNUSED;
4295 if (reg.X_op != O_register)
4297 as_bad (_("Register name expected"));
4298 ignore_rest_of_line ();
4300 else if (*input_line_pointer++ != ',')
4302 as_bad (_("Comma expected"));
4303 ignore_rest_of_line ();
4307 valueT value = get_absolute_expression ();
4308 int regno = reg.X_add_number;
4309 if (regno < REG_GR || regno >= REG_GR + 128)
4310 as_warn (_("Register value annotation ignored"));
4313 gr_values[regno - REG_GR].known = 1;
4314 gr_values[regno - REG_GR].value = value;
4315 gr_values[regno - REG_GR].path = md.path;
4318 demand_empty_rest_of_line ();
4321 /* select dv checking mode
4326 A stop is inserted when changing modes
4333 if (md.manual_bundling)
4334 as_warn (_("Directive invalid within a bundle"));
4336 if (type == 'E' || type == 'A')
4337 md.mode_explicitly_set = 0;
4339 md.mode_explicitly_set = 1;
4346 if (md.explicit_mode)
4347 insn_group_break (1, 0, 0);
4348 md.explicit_mode = 0;
4352 if (!md.explicit_mode)
4353 insn_group_break (1, 0, 0);
4354 md.explicit_mode = 1;
4358 if (md.explicit_mode != md.default_explicit_mode)
4359 insn_group_break (1, 0, 0);
4360 md.explicit_mode = md.default_explicit_mode;
4361 md.mode_explicitly_set = 0;
4372 for (regno = 0; regno < 64; regno++)
4374 if (mask & ((valueT) 1 << regno))
4376 fprintf (stderr, "%s p%d", comma, regno);
4383 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear")
4384 .pred.rel.imply p1, p2 (also .pred.rel "imply")
4385 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex")
4386 .pred.safe_across_calls p1 [, p2 [,...]]
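/* Examples (illustrative): `.pred.rel "mutex", p6, p7' tells the DV
   checker that p6 and p7 are never both true, while `.pred.rel "clear",
   p6, p7' discards any relations previously recorded for them.  */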
4395 int p1 = -1, p2 = -1;
4399 if (*input_line_pointer != '"')
4401 as_bad (_("Missing predicate relation type"));
4402 ignore_rest_of_line ();
4408 char *form = demand_copy_C_string (&len);
4409 if (strcmp (form, "mutex") == 0)
4411 else if (strcmp (form, "clear") == 0)
4413 else if (strcmp (form, "imply") == 0)
4417 as_bad (_("Unrecognized predicate relation type"));
4418 ignore_rest_of_line ();
4422 if (*input_line_pointer == ',')
4423 ++input_line_pointer;
4433 if (toupper (*input_line_pointer) != 'P'
4434 || (regno = atoi (++input_line_pointer)) < 0
4437 as_bad (_("Predicate register expected"));
4438 ignore_rest_of_line ();
4441 while (isdigit (*input_line_pointer))
4442 ++input_line_pointer;
4449 as_warn (_("Duplicate predicate register ignored"));
4452 /* See if it's a range. */
4453 if (*input_line_pointer == '-')
4456 ++input_line_pointer;
4458 if (toupper (*input_line_pointer) != 'P'
4459 || (regno = atoi (++input_line_pointer)) < 0
4462 as_bad (_("Predicate register expected"));
4463 ignore_rest_of_line ();
4466 while (isdigit (*input_line_pointer))
4467 ++input_line_pointer;
4471 as_bad (_("Bad register range"));
4472 ignore_rest_of_line ();
4483 if (*input_line_pointer != ',')
4485 ++input_line_pointer;
4494 clear_qp_mutex (mask);
4495 clear_qp_implies (mask, (valueT) 0);
4498 if (count != 2 || p1 == -1 || p2 == -1)
4499 as_bad (_("Predicate source and target required"));
4500 else if (p1 == 0 || p2 == 0)
4501 as_bad (_("Use of p0 is not valid in this context"));
4503 add_qp_imply (p1, p2);
4508 as_bad (_("At least two PR arguments expected"));
4513 as_bad (_("Use of p0 is not valid in this context"));
4516 add_qp_mutex (mask);
4519 /* Note that we don't override any existing relations.  */
4522 as_bad (_("At least one PR argument expected"));
4527 fprintf (stderr, "Safe across calls: ");
4528 print_prmask (mask);
4529 fprintf (stderr, "\n");
4531 qp_safe_across_calls = mask;
4534 demand_empty_rest_of_line ();
4537 /* .entry label [, label [, ...]]
4538 Hint to DV code that the given labels are to be considered entry points.
4539 Otherwise, only global labels are considered entry points. */
4543 int dummy ATTRIBUTE_UNUSED;
4552 name = input_line_pointer;
4553 c = get_symbol_end ();
4554 symbolP = symbol_find_or_make (name);
4556 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (PTR) symbolP);
4558 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
4561 *input_line_pointer = c;
4563 c = *input_line_pointer;
4566 input_line_pointer++;
4568 if (*input_line_pointer == '\n')
4574 demand_empty_rest_of_line ();
4577 /* .mem.offset offset, base
4578 "base" is used to distinguish between offsets from a different base. */
4581 dot_mem_offset (dummy)
4582 int dummy ATTRIBUTE_UNUSED;
4584 md.mem_offset.hint = 1;
4585 md.mem_offset.offset = get_absolute_expression ();
4586 if (*input_line_pointer != ',')
4588 as_bad (_("Comma expected"));
4589 ignore_rest_of_line ();
4592 ++input_line_pointer;
4593 md.mem_offset.base = get_absolute_expression ();
4594 demand_empty_rest_of_line ();
4597 /* ia64-specific pseudo-ops: */
4598 const pseudo_typeS md_pseudo_table[] =
4600 { "radix", dot_radix, 0 },
4601 { "lcomm", s_lcomm_bytes, 1 },
4602 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
4603 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
4604 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
4605 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
4606 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
4607 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
4608 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
4609 { "proc", dot_proc, 0 },
4610 { "body", dot_body, 0 },
4611 { "prologue", dot_prologue, 0 },
4612 { "endp", dot_endp, 0 },
4613 { "file", dwarf2_directive_file, 0 },
4614 { "loc", dwarf2_directive_loc, 0 },
4616 { "fframe", dot_fframe, 0 },
4617 { "vframe", dot_vframe, 0 },
4618 { "vframesp", dot_vframesp, 0 },
4619 { "vframepsp", dot_vframepsp, 0 },
4620 { "save", dot_save, 0 },
4621 { "restore", dot_restore, 0 },
4622 { "restorereg", dot_restorereg, 0 },
4623 { "restorereg.p", dot_restorereg_p, 0 },
4624 { "handlerdata", dot_handlerdata, 0 },
4625 { "unwentry", dot_unwentry, 0 },
4626 { "altrp", dot_altrp, 0 },
4627 { "savesp", dot_savemem, 0 },
4628 { "savepsp", dot_savemem, 1 },
4629 { "save.g", dot_saveg, 0 },
4630 { "save.f", dot_savef, 0 },
4631 { "save.b", dot_saveb, 0 },
4632 { "save.gf", dot_savegf, 0 },
4633 { "spill", dot_spill, 0 },
4634 { "spillreg", dot_spillreg, 0 },
4635 { "spillsp", dot_spillmem, 0 },
4636 { "spillpsp", dot_spillmem, 1 },
4637 { "spillreg.p", dot_spillreg_p, 0 },
4638 { "spillsp.p", dot_spillmem_p, 0 },
4639 { "spillpsp.p", dot_spillmem_p, 1 },
4640 { "label_state", dot_label_state, 0 },
4641 { "copy_state", dot_copy_state, 0 },
4642 { "unwabi", dot_unwabi, 0 },
4643 { "personality", dot_personality, 0 },
4645 { "estate", dot_estate, 0 },
4647 { "mii", dot_template, 0x0 },
4648 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
4649 { "mlx", dot_template, 0x2 },
4650 { "mmi", dot_template, 0x4 },
4651 { "mfi", dot_template, 0x6 },
4652 { "mmf", dot_template, 0x7 },
4653 { "mib", dot_template, 0x8 },
4654 { "mbb", dot_template, 0x9 },
4655 { "bbb", dot_template, 0xb },
4656 { "mmb", dot_template, 0xc },
4657 { "mfb", dot_template, 0xe },
4659 { "lb", dot_scope, 0 },
4660 { "le", dot_scope, 1 },
4662 { "align", s_align_bytes, 0 },
4663 { "regstk", dot_regstk, 0 },
4664 { "rotr", dot_rot, DYNREG_GR },
4665 { "rotf", dot_rot, DYNREG_FR },
4666 { "rotp", dot_rot, DYNREG_PR },
4667 { "lsb", dot_byteorder, 0 },
4668 { "msb", dot_byteorder, 1 },
4669 { "psr", dot_psr, 0 },
4670 { "alias", dot_alias, 0 },
4671 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
4673 { "xdata1", dot_xdata, 1 },
4674 { "xdata2", dot_xdata, 2 },
4675 { "xdata4", dot_xdata, 4 },
4676 { "xdata8", dot_xdata, 8 },
4677 { "xreal4", dot_xfloat_cons, 'f' },
4678 { "xreal8", dot_xfloat_cons, 'd' },
4679 { "xreal10", dot_xfloat_cons, 'x' },
4680 { "xstring", dot_xstringer, 0 },
4681 { "xstringz", dot_xstringer, 1 },
4683 /* unaligned versions: */
4684 { "xdata2.ua", dot_xdata_ua, 2 },
4685 { "xdata4.ua", dot_xdata_ua, 4 },
4686 { "xdata8.ua", dot_xdata_ua, 8 },
4687 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
4688 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
4689 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
4691 /* annotations/DV checking support */
4692 { "entry", dot_entry, 0 },
4693 { "mem.offset", dot_mem_offset, 0 },
4694 { "pred.rel", dot_pred_rel, 0 },
4695 { "pred.rel.clear", dot_pred_rel, 'c' },
4696 { "pred.rel.imply", dot_pred_rel, 'i' },
4697 { "pred.rel.mutex", dot_pred_rel, 'm' },
4698 { "pred.safe_across_calls", dot_pred_rel, 's' },
4699 { "reg.val", dot_reg_val, 0 },
4700 { "auto", dot_dv_mode, 'a' },
4701 { "explicit", dot_dv_mode, 'e' },
4702 { "default", dot_dv_mode, 'd' },
4704 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
4705 IA-64 aligns data allocation pseudo-ops by default, so we have to
4706 tell it that these ones are supposed to be unaligned.  In the long term,
4707 this should be rewritten so that only the IA-64 specific data allocation
4708 pseudo-ops are aligned by default.  */
4709 {"2byte", stmt_cons_ua, 2},
4710 {"4byte", stmt_cons_ua, 4},
4711 {"8byte", stmt_cons_ua, 8},
4716 static const struct pseudo_opcode
4719 void (*handler) (int);
4724 /* These are more like pseudo-ops, but they don't start with a dot.  */
4725 { "data1", cons, 1 },
4726 { "data2", cons, 2 },
4727 { "data4", cons, 4 },
4728 { "data8", cons, 8 },
4729 { "real4", stmt_float_cons, 'f' },
4730 { "real8", stmt_float_cons, 'd' },
4731 { "real10", stmt_float_cons, 'x' },
4732 { "string", stringer, 0 },
4733 { "stringz", stringer, 1 },
4735 /* unaligned versions: */
4736 { "data2.ua", stmt_cons_ua, 2 },
4737 { "data4.ua", stmt_cons_ua, 4 },
4738 { "data8.ua", stmt_cons_ua, 8 },
4739 { "real4.ua", float_cons, 'f' },
4740 { "real8.ua", float_cons, 'd' },
4741 { "real10.ua", float_cons, 'x' },
4744 /* Declare a register by creating a symbol for it and entering it in
4745 the symbol table. */
4748 declare_register (name, regnum)
4755 sym = symbol_new (name, reg_section, regnum, &zero_address_frag);
4757 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (PTR) sym);
4759 as_fatal ("Inserting \"%s\" into register table failed: %s",
4766 declare_register_set (prefix, num_regs, base_regnum)
4774 for (i = 0; i < num_regs; ++i)
4776 sprintf (name, "%s%u", prefix, i);
4777 declare_register (name, base_regnum + i);
4782 operand_width (opnd)
4783 enum ia64_opnd opnd;
4785 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
4786 unsigned int bits = 0;
4790 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
4791 bits += odesc->field[i].bits;
4796 static enum operand_match_result
4797 operand_match (idesc, index, e)
4798 const struct ia64_opcode *idesc;
4802 enum ia64_opnd opnd = idesc->operands[index];
4803 int bits, relocatable = 0;
4804 struct insn_fix *fix;
4811 case IA64_OPND_AR_CCV:
4812 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
4813 return OPERAND_MATCH;
4816 case IA64_OPND_AR_PFS:
4817 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
4818 return OPERAND_MATCH;
4822 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
4823 return OPERAND_MATCH;
4827 if (e->X_op == O_register && e->X_add_number == REG_IP)
4828 return OPERAND_MATCH;
4832 if (e->X_op == O_register && e->X_add_number == REG_PR)
4833 return OPERAND_MATCH;
4836 case IA64_OPND_PR_ROT:
4837 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
4838 return OPERAND_MATCH;
4842 if (e->X_op == O_register && e->X_add_number == REG_PSR)
4843 return OPERAND_MATCH;
4846 case IA64_OPND_PSR_L:
4847 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
4848 return OPERAND_MATCH;
4851 case IA64_OPND_PSR_UM:
4852 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
4853 return OPERAND_MATCH;
4857 if (e->X_op == O_constant)
4859 if (e->X_add_number == 1)
4860 return OPERAND_MATCH;
4862 return OPERAND_OUT_OF_RANGE;
4867 if (e->X_op == O_constant)
4869 if (e->X_add_number == 8)
4870 return OPERAND_MATCH;
4872 return OPERAND_OUT_OF_RANGE;
4877 if (e->X_op == O_constant)
4879 if (e->X_add_number == 16)
4880 return OPERAND_MATCH;
4882 return OPERAND_OUT_OF_RANGE;
4886 /* register operands: */
4889 if (e->X_op == O_register && e->X_add_number >= REG_AR
4890 && e->X_add_number < REG_AR + 128)
4891 return OPERAND_MATCH;
4896 if (e->X_op == O_register && e->X_add_number >= REG_BR
4897 && e->X_add_number < REG_BR + 8)
4898 return OPERAND_MATCH;
4902 if (e->X_op == O_register && e->X_add_number >= REG_CR
4903 && e->X_add_number < REG_CR + 128)
4904 return OPERAND_MATCH;
4911 if (e->X_op == O_register && e->X_add_number >= REG_FR
4912 && e->X_add_number < REG_FR + 128)
4913 return OPERAND_MATCH;
4918 if (e->X_op == O_register && e->X_add_number >= REG_P
4919 && e->X_add_number < REG_P + 64)
4920 return OPERAND_MATCH;
4926 if (e->X_op == O_register && e->X_add_number >= REG_GR
4927 && e->X_add_number < REG_GR + 128)
4928 return OPERAND_MATCH;
4931 case IA64_OPND_R3_2:
4932 if (e->X_op == O_register && e->X_add_number >= REG_GR)
4934 if (e->X_add_number < REG_GR + 4)
4935 return OPERAND_MATCH;
4936 else if (e->X_add_number < REG_GR + 128)
4937 return OPERAND_OUT_OF_RANGE;
4941 /* indirect operands: */
4942 case IA64_OPND_CPUID_R3:
4943 case IA64_OPND_DBR_R3:
4944 case IA64_OPND_DTR_R3:
4945 case IA64_OPND_ITR_R3:
4946 case IA64_OPND_IBR_R3:
4947 case IA64_OPND_MSR_R3:
4948 case IA64_OPND_PKR_R3:
4949 case IA64_OPND_PMC_R3:
4950 case IA64_OPND_PMD_R3:
4951 case IA64_OPND_RR_R3:
4952 if (e->X_op == O_index && e->X_op_symbol
4953 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
4954 == opnd - IA64_OPND_CPUID_R3))
4955 return OPERAND_MATCH;
4959 if (e->X_op == O_index && !e->X_op_symbol)
4960 return OPERAND_MATCH;
4963 /* immediate operands: */
4964 case IA64_OPND_CNT2a:
4965 case IA64_OPND_LEN4:
4966 case IA64_OPND_LEN6:
4967 bits = operand_width (idesc->operands[index]);
4968 if (e->X_op == O_constant)
4970 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
4971 return OPERAND_MATCH;
4973 return OPERAND_OUT_OF_RANGE;
4977 case IA64_OPND_CNT2b:
4978 if (e->X_op == O_constant)
4980 if ((bfd_vma) (e->X_add_number - 1) < 3)
4981 return OPERAND_MATCH;
4983 return OPERAND_OUT_OF_RANGE;
4987 case IA64_OPND_CNT2c:
4988 val = e->X_add_number;
4989 if (e->X_op == O_constant)
4991 if ((val == 0 || val == 7 || val == 15 || val == 16))
4992 return OPERAND_MATCH;
4994 return OPERAND_OUT_OF_RANGE;
4999 /* SOR must be an integer multiple of 8 */
5000 if (e->X_op == O_constant && e->X_add_number & 0x7)
5001 return OPERAND_OUT_OF_RANGE;
5004 if (e->X_op == O_constant)
5006 if ((bfd_vma) e->X_add_number <= 96)
5007 return OPERAND_MATCH;
5009 return OPERAND_OUT_OF_RANGE;
5013 case IA64_OPND_IMMU62:
5014 if (e->X_op == O_constant)
5016 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5017 return OPERAND_MATCH;
5019 return OPERAND_OUT_OF_RANGE;
5023 /* FIXME -- need 62-bit relocation type */
5024 as_bad (_("62-bit relocation not yet implemented"));
5028 case IA64_OPND_IMMU64:
5029 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5030 || e->X_op == O_subtract)
5032 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5033 fix->code = BFD_RELOC_IA64_IMM64;
5034 if (e->X_op != O_subtract)
5036 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5037 if (e->X_op == O_pseudo_fixup)
5041 fix->opnd = idesc->operands[index];
5044 ++CURR_SLOT.num_fixups;
5045 return OPERAND_MATCH;
5047 else if (e->X_op == O_constant)
5048 return OPERAND_MATCH;
5051 case IA64_OPND_CCNT5:
5052 case IA64_OPND_CNT5:
5053 case IA64_OPND_CNT6:
5054 case IA64_OPND_CPOS6a:
5055 case IA64_OPND_CPOS6b:
5056 case IA64_OPND_CPOS6c:
5057 case IA64_OPND_IMMU2:
5058 case IA64_OPND_IMMU7a:
5059 case IA64_OPND_IMMU7b:
5060 case IA64_OPND_IMMU21:
5061 case IA64_OPND_IMMU24:
5062 case IA64_OPND_MBTYPE4:
5063 case IA64_OPND_MHTYPE8:
5064 case IA64_OPND_POS6:
5065 bits = operand_width (idesc->operands[index]);
5066 if (e->X_op == O_constant)
5068 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5069 return OPERAND_MATCH;
5071 return OPERAND_OUT_OF_RANGE;
5075 case IA64_OPND_IMMU9:
5076 bits = operand_width (idesc->operands[index]);
5077 if (e->X_op == O_constant)
5079 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5081 int lobits = e->X_add_number & 0x3;
5082 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5083 e->X_add_number |= (bfd_vma) 0x3;
5084 return OPERAND_MATCH;
5087 return OPERAND_OUT_OF_RANGE;
5091 case IA64_OPND_IMM44:
5092 /* least 16 bits must be zero */
5093 if ((e->X_add_number & 0xffff) != 0)
5094 /* XXX technically, this is wrong: we should not be issuing warning
5095 messages until we're sure this instruction pattern is going to
5097 as_warn (_("lower 16 bits of mask ignored"));
5099 if (e->X_op == O_constant)
5101 if (((e->X_add_number >= 0
5102 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5103 || (e->X_add_number < 0
5104 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5107 if (e->X_add_number >= 0
5108 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5110 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5112 return OPERAND_MATCH;
5115 return OPERAND_OUT_OF_RANGE;
5119 case IA64_OPND_IMM17:
5120 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5121 if (e->X_op == O_constant)
5123 if (((e->X_add_number >= 0
5124 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5125 || (e->X_add_number < 0
5126 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5129 if (e->X_add_number >= 0
5130 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5132 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5134 return OPERAND_MATCH;
5137 return OPERAND_OUT_OF_RANGE;
5141 case IA64_OPND_IMM14:
5142 case IA64_OPND_IMM22:
5144 case IA64_OPND_IMM1:
5145 case IA64_OPND_IMM8:
5146 case IA64_OPND_IMM8U4:
5147 case IA64_OPND_IMM8M1:
5148 case IA64_OPND_IMM8M1U4:
5149 case IA64_OPND_IMM8M1U8:
5150 case IA64_OPND_IMM9a:
5151 case IA64_OPND_IMM9b:
5152 bits = operand_width (idesc->operands[index]);
5153 if (relocatable && (e->X_op == O_symbol
5154 || e->X_op == O_subtract
5155 || e->X_op == O_pseudo_fixup))
5157 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5159 if (idesc->operands[index] == IA64_OPND_IMM14)
5160 fix->code = BFD_RELOC_IA64_IMM14;
5162 fix->code = BFD_RELOC_IA64_IMM22;
5164 if (e->X_op != O_subtract)
5166 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5167 if (e->X_op == O_pseudo_fixup)
5171 fix->opnd = idesc->operands[index];
5174 ++CURR_SLOT.num_fixups;
5175 return OPERAND_MATCH;
5177 else if (e->X_op != O_constant
5178 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5179 return OPERAND_MISMATCH;
5181 if (opnd == IA64_OPND_IMM8M1U4)
5183 /* Zero is not valid for unsigned compares that take an adjusted
5184 constant immediate range. */
5185 if (e->X_add_number == 0)
5186 return OPERAND_OUT_OF_RANGE;
5188 /* Sign-extend 32-bit unsigned numbers, so that the following range
5189 checks will work. */
5190 val = e->X_add_number;
5191 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5192 && ((val & ((bfd_vma) 1 << 31)) != 0))
5193 val = ((val << 32) >> 32);
5195 /* Check for 0x100000000. This is valid because
5196 0x100000000-1 is the same as ((uint32_t) -1). */
5197 if (val == ((bfd_signed_vma) 1 << 32))
5198 return OPERAND_MATCH;
5202 else if (opnd == IA64_OPND_IMM8M1U8)
5204 /* Zero is not valid for unsigned compares that take an adjusted
5205 constant immediate range. */
5206 if (e->X_add_number == 0)
5207 return OPERAND_OUT_OF_RANGE;
5209 /* Check for 0x10000000000000000. */
5210 if (e->X_op == O_big)
5212 if (generic_bignum[0] == 0
5213 && generic_bignum[1] == 0
5214 && generic_bignum[2] == 0
5215 && generic_bignum[3] == 0
5216 && generic_bignum[4] == 1)
5217 return OPERAND_MATCH;
5219 return OPERAND_OUT_OF_RANGE;
5222 val = e->X_add_number - 1;
5224 else if (opnd == IA64_OPND_IMM8M1)
5225 val = e->X_add_number - 1;
5226 else if (opnd == IA64_OPND_IMM8U4)
5228 /* Sign-extend 32-bit unsigned numbers, so that the following range
5229 checks will work. */
5230 val = e->X_add_number;
5231 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5232 && ((val & ((bfd_vma) 1 << 31)) != 0))
5233 val = ((val << 32) >> 32);
5236 val = e->X_add_number;
5238 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5239 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5240 return OPERAND_MATCH;
5242 return OPERAND_OUT_OF_RANGE;
5244 case IA64_OPND_INC3:
5245 /* +/- 1, 4, 8, 16 */
5246 val = e->X_add_number;
5249 if (e->X_op == O_constant)
5251 if ((val == 1 || val == 4 || val == 8 || val == 16))
5252 return OPERAND_MATCH;
5254 return OPERAND_OUT_OF_RANGE;
5258 case IA64_OPND_TGT25:
5259 case IA64_OPND_TGT25b:
5260 case IA64_OPND_TGT25c:
5261 case IA64_OPND_TGT64:
5262 if (e->X_op == O_symbol)
5264 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5265 if (opnd == IA64_OPND_TGT25)
5266 fix->code = BFD_RELOC_IA64_PCREL21F;
5267 else if (opnd == IA64_OPND_TGT25b)
5268 fix->code = BFD_RELOC_IA64_PCREL21M;
5269 else if (opnd == IA64_OPND_TGT25c)
5270 fix->code = BFD_RELOC_IA64_PCREL21B;
5271 else if (opnd == IA64_OPND_TGT64)
5272 fix->code = BFD_RELOC_IA64_PCREL60B;
5276 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5277 fix->opnd = idesc->operands[index];
5280 ++CURR_SLOT.num_fixups;
5281 return OPERAND_MATCH;
5283 case IA64_OPND_TAG13:
5284 case IA64_OPND_TAG13b:
5288 return OPERAND_MATCH;
5291 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5292 /* There are no external relocs for TAG13/TAG13b fields, so we
5293 create a dummy reloc. This will not live past md_apply_fix3. */
5294 fix->code = BFD_RELOC_UNUSED;
5295 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5296 fix->opnd = idesc->operands[index];
5299 ++CURR_SLOT.num_fixups;
5300 return OPERAND_MATCH;
5310 return OPERAND_MISMATCH;
5319 memset (e, 0, sizeof (*e));
5322 if (*input_line_pointer != '}')
5324 sep = *input_line_pointer++;
5328 if (!md.manual_bundling)
5329 as_warn ("Found '}' when manual bundling is off");
5331 CURR_SLOT.manual_bundling_off = 1;
5332 md.manual_bundling = 0;
5338 /* Returns the next entry in the opcode table that matches the one in
5339 IDESC, and frees the entry in IDESC. If no matching entry is
5340 found, NULL is returned instead. */
5342 static struct ia64_opcode *
5343 get_next_opcode (struct ia64_opcode *idesc)
5345 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
5346 ia64_free_opcode (idesc);
5350 /* Parse the operands for the opcode and find the opcode variant that
5351 matches the specified operands, or NULL if no match is possible. */
5353 static struct ia64_opcode *
5354 parse_operands (idesc)
5355 struct ia64_opcode *idesc;
5357 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
5358 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
5359 enum ia64_opnd expected_operand = IA64_OPND_NIL;
5360 enum operand_match_result result;
5362 char *first_arg = 0, *end, *saved_input_pointer;
5365 assert (strlen (idesc->name) <= 128);
5367 strcpy (mnemonic, idesc->name);
5368 if (idesc->operands[2] == IA64_OPND_SOF)
5370 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
5371 can't parse the first operand until we have parsed the
5372 remaining operands of the "alloc" instruction. */
5374 first_arg = input_line_pointer;
5375 end = strchr (input_line_pointer, '=');
5378 as_bad ("Expected separator `='");
5381 input_line_pointer = end + 1;
5386 for (; i < NELEMS (CURR_SLOT.opnd); ++i)
5388 sep = parse_operand (CURR_SLOT.opnd + i);
5389 if (CURR_SLOT.opnd[i].X_op == O_absent)
5394 if (sep != '=' && sep != ',')
5399 if (num_outputs > 0)
5400 as_bad ("Duplicate equal sign (=) in instruction");
5402 num_outputs = i + 1;
5407 as_bad ("Illegal operand separator `%c'", sep);
5411 if (idesc->operands[2] == IA64_OPND_SOF)
5413 /* map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r */
5414 know (strcmp (idesc->name, "alloc") == 0);
5415 if (num_operands == 5 /* first_arg not included in this count! */
5416 && CURR_SLOT.opnd[2].X_op == O_constant
5417 && CURR_SLOT.opnd[3].X_op == O_constant
5418 && CURR_SLOT.opnd[4].X_op == O_constant
5419 && CURR_SLOT.opnd[5].X_op == O_constant)
5421 sof = set_regstack (CURR_SLOT.opnd[2].X_add_number,
5422 CURR_SLOT.opnd[3].X_add_number,
5423 CURR_SLOT.opnd[4].X_add_number,
5424 CURR_SLOT.opnd[5].X_add_number);
5426 /* now we can parse the first arg: */
5427 saved_input_pointer = input_line_pointer;
5428 input_line_pointer = first_arg;
5429 sep = parse_operand (CURR_SLOT.opnd + 0);
5431 --num_outputs; /* force error */
5432 input_line_pointer = saved_input_pointer;
5434 CURR_SLOT.opnd[2].X_add_number = sof;
5435 CURR_SLOT.opnd[3].X_add_number
5436 = sof - CURR_SLOT.opnd[4].X_add_number;
5437 CURR_SLOT.opnd[4] = CURR_SLOT.opnd[5];
5441 highest_unmatched_operand = 0;
5442 curr_out_of_range_pos = -1;
5444 expected_operand = idesc->operands[0];
5445 for (; idesc; idesc = get_next_opcode (idesc))
5447 if (num_outputs != idesc->num_outputs)
5448 continue; /* mismatch in # of outputs */
5450 CURR_SLOT.num_fixups = 0;
5452 /* Try to match all operands. If we see an out-of-range operand,
5453 then continue trying to match the rest of the operands, since if
5454 the rest match, then this idesc will give the best error message. */
5456 out_of_range_pos = -1;
5457 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
5459 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
5460 if (result != OPERAND_MATCH)
5462 if (result != OPERAND_OUT_OF_RANGE)
5464 if (out_of_range_pos < 0)
5465 /* remember position of the first out-of-range operand: */
5466 out_of_range_pos = i;
5470 /* If we did not match all operands, or if at least one operand was
5471 out-of-range, then this idesc does not match. Keep track of which
5472 idesc matched the most operands before failing. If we have two
5473 idescs that failed at the same position, and one had an out-of-range
5474 operand, then prefer the out-of-range operand. Thus if we have
5475 "add r0=0x1000000,r1" we get an error saying the constant is out
5476 of range instead of an error saying that the constant should have been
5479 if (i != num_operands || out_of_range_pos >= 0)
5481 if (i > highest_unmatched_operand
5482 || (i == highest_unmatched_operand
5483 && out_of_range_pos > curr_out_of_range_pos))
5485 highest_unmatched_operand = i;
5486 if (out_of_range_pos >= 0)
5488 expected_operand = idesc->operands[out_of_range_pos];
5489 error_pos = out_of_range_pos;
5493 expected_operand = idesc->operands[i];
5496 curr_out_of_range_pos = out_of_range_pos;
5501 if (num_operands < NELEMS (idesc->operands)
5502 && idesc->operands[num_operands])
5503 continue; /* mismatch in number of arguments */
5509 if (expected_operand)
5510 as_bad ("Operand %u of `%s' should be %s",
5511 error_pos + 1, mnemonic,
5512 elf64_ia64_operands[expected_operand].desc);
5514 as_bad ("Operand mismatch");
5520 /* Keep track of state necessary to determine whether a NOP is necessary
5521 to avoid an erratum in A and B step Itanium chips, and return 1 if we
5522 detect a case where additional NOPs may be necessary. */
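/* Sketch of the sequence detected below (derived from the tests that
   follow): (1) an F-unit insn writes a predicate; (2) a later M-unit insn
   qualified by that predicate writes a GR, other than via add/sub/shladd
   or a post-increment; (3) a still later insn uses that GR as an address.
   (Comment added for clarity.)  */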
5524 errata_nop_necessary_p (slot, insn_unit)
5526 enum ia64_unit insn_unit;
5529 struct group *this_group = md.last_groups + md.group_idx;
5530 struct group *prev_group = md.last_groups + (md.group_idx + 2) % 3;
5531 struct ia64_opcode *idesc = slot->idesc;
5533 /* Test whether this could be the first insn in a problematic sequence. */
5534 if (insn_unit == IA64_UNIT_F)
5536 for (i = 0; i < idesc->num_outputs; i++)
5537 if (idesc->operands[i] == IA64_OPND_P1
5538 || idesc->operands[i] == IA64_OPND_P2)
5540 int regno = slot->opnd[i].X_add_number - REG_P;
5541 /* Ignore invalid operands; they generate errors elsewhere. */
5544 this_group->p_reg_set[regno] = 1;
5548 /* Test whether this could be the second insn in a problematic sequence. */
5549 if (insn_unit == IA64_UNIT_M && slot->qp_regno > 0
5550 && prev_group->p_reg_set[slot->qp_regno])
5552 for (i = 0; i < idesc->num_outputs; i++)
5553 if (idesc->operands[i] == IA64_OPND_R1
5554 || idesc->operands[i] == IA64_OPND_R2
5555 || idesc->operands[i] == IA64_OPND_R3)
5557 int regno = slot->opnd[i].X_add_number - REG_GR;
5558 /* Ignore invalid operands; they generate errors elsewhere. */
5561 if (strncmp (idesc->name, "add", 3) != 0
5562 && strncmp (idesc->name, "sub", 3) != 0
5563 && strncmp (idesc->name, "shladd", 6) != 0
5564 && (idesc->flags & IA64_OPCODE_POSTINC) == 0)
5565 this_group->g_reg_set_conditionally[regno] = 1;
5569 /* Test whether this could be the third insn in a problematic sequence. */
5570 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; i++)
5572 if (/* For fc, ptc, ptr, tak, thash, tpa, ttag, probe, ptr, ptc. */
5573 idesc->operands[i] == IA64_OPND_R3
5574 /* For mov indirect. */
5575 || idesc->operands[i] == IA64_OPND_RR_R3
5576 || idesc->operands[i] == IA64_OPND_DBR_R3
5577 || idesc->operands[i] == IA64_OPND_IBR_R3
5578 || idesc->operands[i] == IA64_OPND_PKR_R3
5579 || idesc->operands[i] == IA64_OPND_PMC_R3
5580 || idesc->operands[i] == IA64_OPND_PMD_R3
5581 || idesc->operands[i] == IA64_OPND_MSR_R3
5582 || idesc->operands[i] == IA64_OPND_CPUID_R3
5584 || idesc->operands[i] == IA64_OPND_ITR_R3
5585 || idesc->operands[i] == IA64_OPND_DTR_R3
5586 /* Normal memory addresses (load, store, xchg, cmpxchg, etc.). */
5587 || idesc->operands[i] == IA64_OPND_MR3)
5589 int regno = slot->opnd[i].X_add_number - REG_GR;
5590 /* Ignore invalid operands; they generate errors elsewhere. */
5593 if (idesc->operands[i] == IA64_OPND_R3)
5595 if (strcmp (idesc->name, "fc") != 0
5596 && strcmp (idesc->name, "tak") != 0
5597 && strcmp (idesc->name, "thash") != 0
5598 && strcmp (idesc->name, "tpa") != 0
5599 && strcmp (idesc->name, "ttag") != 0
5600 && strncmp (idesc->name, "ptr", 3) != 0
5601 && strncmp (idesc->name, "ptc", 3) != 0
5602 && strncmp (idesc->name, "probe", 5) != 0)
5605 if (prev_group->g_reg_set_conditionally[regno])
5613 build_insn (slot, insnp)
5617 const struct ia64_operand *odesc, *o2desc;
5618 struct ia64_opcode *idesc = slot->idesc;
5619 bfd_signed_vma insn, val;
5623 insn = idesc->opcode | slot->qp_regno;
5625 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
5627 if (slot->opnd[i].X_op == O_register
5628 || slot->opnd[i].X_op == O_constant
5629 || slot->opnd[i].X_op == O_index)
5630 val = slot->opnd[i].X_add_number;
5631 else if (slot->opnd[i].X_op == O_big)
5633 /* This must be the value 0x10000000000000000. */
5634 assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
5640 switch (idesc->operands[i])
5642 case IA64_OPND_IMMU64:
5643 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
5644 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
5645 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
5646 | (((val >> 63) & 0x1) << 36));
5649 case IA64_OPND_IMMU62:
5650 val &= 0x3fffffffffffffffULL;
5651 if (val != slot->opnd[i].X_add_number)
5652 as_warn (_("Value truncated to 62 bits"));
5653 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
5654 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
5657 case IA64_OPND_TGT64:
5659 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
5660 insn |= ((((val >> 59) & 0x1) << 36)
5661 | (((val >> 0) & 0xfffff) << 13));
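/* Note (for reference; inferred from the operand kinds handled above):
   IMMU64, IMMU62 and TGT64 are the long X-unit forms (e.g. movl and brl).
   For these, the "*insnp++" store deposits the 41-bit immediate extension
   into the L slot of the MLX bundle, while the remaining bit-fields are
   packed into the X instruction word itself. */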
5692 case IA64_OPND_R3_2:
5693 case IA64_OPND_CPUID_R3:
5694 case IA64_OPND_DBR_R3:
5695 case IA64_OPND_DTR_R3:
5696 case IA64_OPND_ITR_R3:
5697 case IA64_OPND_IBR_R3:
5699 case IA64_OPND_MSR_R3:
5700 case IA64_OPND_PKR_R3:
5701 case IA64_OPND_PMC_R3:
5702 case IA64_OPND_PMD_R3:
5703 case IA64_OPND_RR_R3:
5711 odesc = elf64_ia64_operands + idesc->operands[i];
5712 err = (*odesc->insert) (odesc, val, &insn);
5714 as_bad_where (slot->src_file, slot->src_line,
5715 "Bad operand value: %s", err);
5716 if (idesc->flags & IA64_OPCODE_PSEUDO)
5718 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
5719 && odesc == elf64_ia64_operands + IA64_OPND_F3)
5721 o2desc = elf64_ia64_operands + IA64_OPND_F2;
5722 (*o2desc->insert) (o2desc, val, &insn);
5724 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
5725 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
5726 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
5728 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
5729 (*o2desc->insert) (o2desc, 64 - val, &insn);
5739 unsigned int manual_bundling_on = 0, manual_bundling_off = 0;
5740 unsigned int manual_bundling = 0;
5741 enum ia64_unit required_unit, insn_unit = 0;
5742 enum ia64_insn_type type[3], insn_type;
5743 unsigned int template, orig_template;
5744 bfd_vma insn[3] = { -1, -1, -1 };
5745 struct ia64_opcode *idesc;
5746 int end_of_insn_group = 0, user_template = -1;
5747 int n, i, j, first, curr;
5749 bfd_vma t0 = 0, t1 = 0;
5750 struct label_fix *lfix;
5751 struct insn_fix *ifix;
5756 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
5757 know (first >= 0 && first < NUM_SLOTS);
5758 n = MIN (3, md.num_slots_in_use);
5760 /* Determine template: use user_template if specified, best match otherwise: */
5763 if (md.slot[first].user_template >= 0)
5764 user_template = template = md.slot[first].user_template;
5767 /* Auto select appropriate template. */
5768 memset (type, 0, sizeof (type));
5770 for (i = 0; i < n; ++i)
5772 if (md.slot[curr].label_fixups && i != 0)
5774 type[i] = md.slot[curr].idesc->type;
5775 curr = (curr + 1) % NUM_SLOTS;
5777 template = best_template[type[0]][type[1]][type[2]];
5780 /* initialize instructions with appropriate nops: */
5781 for (i = 0; i < 3; ++i)
5782 insn[i] = nop[ia64_templ_desc[template].exec_unit[i]];
5786 /* now fill in slots with as many insns as possible: */
5788 idesc = md.slot[curr].idesc;
5789 end_of_insn_group = 0;
5790 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
5792 /* Set the slot number for prologue/body records now as those
5793 refer to the current point, not the point after the
5794 instruction has been issued: */
5795 /* Don't try to delete prologue/body records here, as that will cause
5796 them to also be deleted from the master list of unwind records. */
5797 for (ptr = md.slot[curr].unwind_record; ptr; ptr = ptr->next)
5798 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
5799 || ptr->r.type == body)
5801 ptr->slot_number = (unsigned long) f + i;
5802 ptr->slot_frag = frag_now;
5805 if (idesc->flags & IA64_OPCODE_SLOT2)
5807 if (manual_bundling && i != 2)
5808 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5809 "`%s' must be last in bundle", idesc->name);
5813 if (idesc->flags & IA64_OPCODE_LAST)
5816 unsigned int required_template;
5818 /* If we need a stop bit after an M slot, our only choice is
5819 template 5 (M;;MI). If we need a stop bit after a B
5820 slot, our only choice is to place it at the end of the
5821 bundle, because the only available templates are MIB,
5822 MBB, BBB, MMB, and MFB. We don't handle anything other
5823 than M and B slots because these are the only kinds of
5824 instructions that can have the IA64_OPCODE_LAST bit set. */
5825 required_template = template;
5826 switch (idesc->type)
5830 required_template = 5;
5838 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5839 "Internal error: don't know how to force %s to end "
5840 "of instruction group", idesc->name);
5844 if (manual_bundling && i != required_slot)
5845 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5846 "`%s' must be last in instruction group",
5848 if (required_slot < i)
5849 /* Can't fit this instruction. */
5853 if (required_template != template)
5855 /* If we switch the template, we need to reset the NOPs
5856 after slot i. The slot-types of the instructions ahead
5857 of i never change, so we don't need to worry about
5858 changing NOPs in front of this slot. */
5859 for (j = i; j < 3; ++j)
5860 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
5862 template = required_template;
5864 if (curr != first && md.slot[curr].label_fixups)
5866 if (manual_bundling_on)
5867 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5868 "Label must be first in a bundle");
5869 /* This insn must go into the first slot of a bundle. */
5873 manual_bundling_on = md.slot[curr].manual_bundling_on;
5874 manual_bundling_off = md.slot[curr].manual_bundling_off;
5876 if (manual_bundling_on)
5879 manual_bundling = 1;
5881 break; /* need to start a new bundle */
5884 if (end_of_insn_group && md.num_slots_in_use >= 1)
5886 /* We need an instruction group boundary in the middle of a
5887 bundle. See if we can switch to another template with
5888 an appropriate boundary. */
5890 orig_template = template;
5891 if (i == 1 && (user_template == 4
5892 || (user_template < 0
5893 && (ia64_templ_desc[template].exec_unit[0]
5897 end_of_insn_group = 0;
5899 else if (i == 2 && (user_template == 0
5900 || (user_template < 0
5901 && (ia64_templ_desc[template].exec_unit[1]
5903 /* This test makes sure we don't switch the template if
5904 the next instruction is one that needs to be first in
5905 an instruction group. Since all those instructions are
5906 in the M group, there is no way such an instruction can
5907 fit in this bundle even if we switch the template. The
5908 reason we have to check for this is that otherwise we
5909 may end up generating "MI;;I M.." which has the deadly
5910 effect that the second M instruction is no longer the
5911 first in the bundle! --davidm 99/12/16 */
5912 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
5915 end_of_insn_group = 0;
5917 else if (curr != first)
5918 /* can't fit this insn */
5921 if (template != orig_template)
5922 /* if we switch the template, we need to reset the NOPs
5923 after slot i. The slot-types of the instructions ahead
5924 of i never change, so we don't need to worry about
5925 changing NOPs in front of this slot. */
5926 for (j = i; j < 3; ++j)
5927 insn[j] = nop[ia64_templ_desc[template].exec_unit[j]];
5929 required_unit = ia64_templ_desc[template].exec_unit[i];
5931 /* resolve dynamic opcodes such as "break" and "nop": */
5932 if (idesc->type == IA64_TYPE_DYN)
5934 if ((strcmp (idesc->name, "nop") == 0)
5935 || (strcmp (idesc->name, "break") == 0))
5936 insn_unit = required_unit;
5937 else if (strcmp (idesc->name, "chk.s") == 0)
5939 insn_unit = IA64_UNIT_M;
5940 if (required_unit == IA64_UNIT_I)
5941 insn_unit = IA64_UNIT_I;
5944 as_fatal ("emit_one_bundle: unexpected dynamic op");
5946 sprintf (mnemonic, "%s.%c", idesc->name, "?imbf??"[insn_unit]);
5947 ia64_free_opcode (idesc);
5948 md.slot[curr].idesc = idesc = ia64_find_opcode (mnemonic);
5950 know (!idesc->next); /* no resolved dynamic ops have collisions */
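/* For example, a bare "nop" that lands in an M slot is rewritten here to
   "nop.m", and one that lands in a B slot to "nop.b"; likewise for
   "break". */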
5955 insn_type = idesc->type;
5956 insn_unit = IA64_UNIT_NIL;
5960 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
5961 insn_unit = required_unit;
5963 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
5964 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
5965 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
5966 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
5967 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
5972 if (insn_unit != required_unit)
5974 if (required_unit == IA64_UNIT_L
5975 && insn_unit == IA64_UNIT_I
5976 && !(idesc->flags & IA64_OPCODE_X_IN_MLX))
5978 /* we got ourselves an MLX template but the current
5979 instruction isn't an X-unit instruction, nor an I-unit instruction
5980 that can go into the X slot of an MLX template. Duh. */
5981 if (md.num_slots_in_use >= NUM_SLOTS)
5983 as_bad_where (md.slot[curr].src_file,
5984 md.slot[curr].src_line,
5985 "`%s' can't go in X slot of "
5986 "MLX template", idesc->name);
5987 /* drop this insn so we don't livelock: */
5988 --md.num_slots_in_use;
5992 continue; /* try next slot */
5998 addr = frag_now->fr_address + frag_now_fix () - 16 + i;
5999 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6002 if (errata_nop_necessary_p (md.slot + curr, insn_unit))
6003 as_warn (_("Additional NOP may be necessary to work around Itanium processor A/B step errata"));
6005 build_insn (md.slot + curr, insn + i);
6007 /* Set slot counts for non prologue/body unwind records. */
6008 for (ptr = md.slot[curr].unwind_record; ptr; ptr = ptr->next)
6009 if (ptr->r.type != prologue && ptr->r.type != prologue_gr
6010 && ptr->r.type != body)
6012 ptr->slot_number = (unsigned long) f + i;
6013 ptr->slot_frag = frag_now;
6015 md.slot[curr].unwind_record = NULL;
6017 if (required_unit == IA64_UNIT_L)
6020 /* skip one slot for long/X-unit instructions */
6023 --md.num_slots_in_use;
6025 /* now is a good time to fix up the labels for this insn: */
6026 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6028 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6029 symbol_set_frag (lfix->sym, frag_now);
6031 /* and fix up the tags also. */
6032 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6034 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6035 symbol_set_frag (lfix->sym, frag_now);
6038 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6040 ifix = md.slot[curr].fixup + j;
6041 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6042 &ifix->expr, ifix->is_pcrel, ifix->code);
6043 fix->tc_fix_data.opnd = ifix->opnd;
6044 fix->fx_plt = (fix->fx_r_type == BFD_RELOC_IA64_PLTOFF22);
6045 fix->fx_file = md.slot[curr].src_file;
6046 fix->fx_line = md.slot[curr].src_line;
6049 end_of_insn_group = md.slot[curr].end_of_insn_group;
6051 if (end_of_insn_group)
6053 md.group_idx = (md.group_idx + 1) % 3;
6054 memset (md.last_groups + md.group_idx, 0, sizeof md.last_groups[0]);
6058 ia64_free_opcode (md.slot[curr].idesc);
6059 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6060 md.slot[curr].user_template = -1;
6062 if (manual_bundling_off)
6064 manual_bundling = 0;
6067 curr = (curr + 1) % NUM_SLOTS;
6068 idesc = md.slot[curr].idesc;
6070 if (manual_bundling)
6072 if (md.num_slots_in_use > 0)
6073 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6074 "`%s' does not fit into %s template",
6075 idesc->name, ia64_templ_desc[template].name);
6077 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6078 "Missing '}' at end of file");
6080 know (md.num_slots_in_use < NUM_SLOTS);
6082 t0 = end_of_insn_group | (template << 1) | (insn[0] << 5) | (insn[1] << 46);
6083 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6085 number_to_chars_littleendian (f + 0, t0, 8);
6086 number_to_chars_littleendian (f + 8, t1, 8);
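/* For reference: an IA-64 bundle is 128 bits -- a 5-bit template field in
   bits 4:0 and three 41-bit instruction slots in bits 45:5, 86:46 and
   127:87; for the defined templates, the low bit of the template field
   selects the stop-at-end-of-bundle variant. The packing into t0/t1 above
   corresponds to the stand-alone sketch below (illustrative only;
   pack_bundle is a hypothetical helper, not part of this file). */
#if 0
static void
pack_bundle (bfd_vma templ5, const bfd_vma slot[3], bfd_vma *t0, bfd_vma *t1)
{
  *t0 = ((templ5 & 0x1f)                /* template, bits 0..4 */
         | (slot[0] << 5)               /* slot 0, bits 5..45 */
         | (slot[1] << 46));            /* low 18 bits of slot 1 */
  *t1 = (((slot[1] >> 18) & 0x7fffff)   /* high 23 bits of slot 1 */
         | (slot[2] << 23));            /* slot 2, bits 87..127 */
}
#endif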
6088 unwind.next_slot_number = (unsigned long) f + 16;
6089 unwind.next_slot_frag = frag_now;
6093 md_parse_option (c, arg)
6100 /* Switches from the Intel assembler. */
6102 if (strcmp (arg, "ilp64") == 0
6103 || strcmp (arg, "lp64") == 0
6104 || strcmp (arg, "p64") == 0)
6106 md.flags |= EF_IA_64_ABI64;
6108 else if (strcmp (arg, "ilp32") == 0)
6110 md.flags &= ~EF_IA_64_ABI64;
6112 else if (strcmp (arg, "le") == 0)
6114 md.flags &= ~EF_IA_64_BE;
6116 else if (strcmp (arg, "be") == 0)
6118 md.flags |= EF_IA_64_BE;
6125 if (strcmp (arg, "so") == 0)
6127 /* Suppress signon message. */
6129 else if (strcmp (arg, "pi") == 0)
6131 /* Reject privileged instructions. FIXME */
6133 else if (strcmp (arg, "us") == 0)
6135 /* Allow union of signed and unsigned range. FIXME */
6137 else if (strcmp (arg, "close_fcalls") == 0)
6139 /* Do not resolve global function calls. */
6146 /* temp[="prefix"] Insert temporary labels into the object file
6147 symbol table prefixed by "prefix".
6148 Default prefix is ":temp:".
6153 /* indirect=<tgt> Assume unannotated indirect branches behave
6154 according to <tgt> --
6155 exit: branch out from the current context (default)
6156 labels: all labels in context may be branch targets
6158 if (strncmp (arg, "indirect=", 9) != 0)
6163 /* -X conflicts with an ignored option, use -x instead */
6165 if (!arg || strcmp (arg, "explicit") == 0)
6167 /* set default mode to explicit */
6168 md.default_explicit_mode = 1;
6171 else if (strcmp (arg, "auto") == 0)
6173 md.default_explicit_mode = 0;
6175 else if (strcmp (arg, "debug") == 0)
6179 else if (strcmp (arg, "debugx") == 0)
6181 md.default_explicit_mode = 1;
6186 as_bad (_("Unrecognized option '-x%s'"), arg);
6191 /* nops Print nops statistics. */
6194 /* GNU specific switches for gcc. */
6195 case OPTION_MCONSTANT_GP:
6196 md.flags |= EF_IA_64_CONS_GP;
6199 case OPTION_MAUTO_PIC:
6200 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
6211 md_show_usage (stream)
6216 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
6217 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
6218 -x | -xexplicit turn on dependency violation checking (default)\n\
6219 -xauto automagically remove dependency violations\n\
6220 -xdebug debug dependency violation checker\n"),
6224 /* Return true if TYPE fits in TEMPL at SLOT. */
6227 match (int templ, int type, int slot)
6229 enum ia64_unit unit;
6232 unit = ia64_templ_desc[templ].exec_unit[slot];
6235 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
6237 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
6239 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
6240 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
6241 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
6242 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
6243 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
6244 default: result = 0; break;
6249 /* Add a bit of extra goodness if a nop of type F or B would fit
6250 in TEMPL at SLOT. */
6253 extra_goodness (int templ, int slot)
6255 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
6257 if (slot == 2 && match (templ, IA64_TYPE_B, slot))
6262 /* This function is called once, at assembler startup time. It sets
6263 up all the tables, etc. that the MD part of the assembler will need
6264 that can be determined before arguments are parsed. */
6268 int i, j, k, t, total, ar_base, cr_base, goodness, best, regnum, ok;
6273 md.explicit_mode = md.default_explicit_mode;
6275 bfd_set_section_alignment (stdoutput, text_section, 4);
6277 target_big_endian = TARGET_BYTES_BIG_ENDIAN;
6278 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
6279 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE,
6280 &zero_address_frag);
6282 pseudo_func[FUNC_GP_RELATIVE].u.sym =
6283 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE,
6284 &zero_address_frag);
6286 pseudo_func[FUNC_LT_RELATIVE].u.sym =
6287 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE,
6288 &zero_address_frag);
6290 pseudo_func[FUNC_PC_RELATIVE].u.sym =
6291 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE,
6292 &zero_address_frag);
6294 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
6295 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE,
6296 &zero_address_frag);
6298 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
6299 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE,
6300 &zero_address_frag);
6302 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
6303 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE,
6304 &zero_address_frag);
6306 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
6307 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE,
6308 &zero_address_frag);
6310 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
6311 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE,
6312 &zero_address_frag);
6314 /* Compute the table of best templates. We compute goodness as a
6315 base 4 value, in which each match counts for 3, each F counts
6316 for 2, each B counts for 1. This should maximize the number of
6317 F and B nops in the chosen bundles, which is good because these
6318 pipelines are least likely to be overcommitted. */
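/* Worked example (read off the loop below): a lone M-type insn scored
   against the MFB template gets 3 (slot 0 match) + 2 (an F nop fits
   slot 1) + 1 (a B nop fits slot 2) = 6, but only 3 against MII; an
   (M, I, I) triple against MII scores the maximum 3 + 3 + 3 = 9. */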
6319 for (i = 0; i < IA64_NUM_TYPES; ++i)
6320 for (j = 0; j < IA64_NUM_TYPES; ++j)
6321 for (k = 0; k < IA64_NUM_TYPES; ++k)
6324 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
6327 if (match (t, i, 0))
6329 if (match (t, j, 1))
6331 if (match (t, k, 2))
6332 goodness = 3 + 3 + 3;
6334 goodness = 3 + 3 + extra_goodness (t, 2);
6336 else if (match (t, j, 2))
6337 goodness = 3 + 3 + extra_goodness (t, 1);
6341 goodness += extra_goodness (t, 1);
6342 goodness += extra_goodness (t, 2);
6345 else if (match (t, i, 1))
6347 if (match (t, j, 2))
6350 goodness = 3 + extra_goodness (t, 2);
6352 else if (match (t, i, 2))
6353 goodness = 3 + extra_goodness (t, 1);
6355 if (goodness > best)
6358 best_template[i][j][k] = t;
6363 for (i = 0; i < NUM_SLOTS; ++i)
6364 md.slot[i].user_template = -1;
6366 md.pseudo_hash = hash_new ();
6367 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
6369 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
6370 (void *) (pseudo_opcode + i));
6372 as_fatal ("ia64.md_begin: can't hash `%s': %s",
6373 pseudo_opcode[i].name, err);
6376 md.reg_hash = hash_new ();
6377 md.dynreg_hash = hash_new ();
6378 md.const_hash = hash_new ();
6379 md.entry_hash = hash_new ();
6381 /* general registers: */
6384 for (i = 0; i < total; ++i)
6386 sprintf (name, "r%d", i - REG_GR);
6387 md.regsym[i] = declare_register (name, i);
6390 /* floating point registers: */
6392 for (; i < total; ++i)
6394 sprintf (name, "f%d", i - REG_FR);
6395 md.regsym[i] = declare_register (name, i);
6398 /* application registers: */
6401 for (; i < total; ++i)
6403 sprintf (name, "ar%d", i - REG_AR);
6404 md.regsym[i] = declare_register (name, i);
6407 /* control registers: */
6410 for (; i < total; ++i)
6412 sprintf (name, "cr%d", i - REG_CR);
6413 md.regsym[i] = declare_register (name, i);
6416 /* predicate registers: */
6418 for (; i < total; ++i)
6420 sprintf (name, "p%d", i - REG_P);
6421 md.regsym[i] = declare_register (name, i);
6424 /* branch registers: */
6426 for (; i < total; ++i)
6428 sprintf (name, "b%d", i - REG_BR);
6429 md.regsym[i] = declare_register (name, i);
6432 md.regsym[REG_IP] = declare_register ("ip", REG_IP);
6433 md.regsym[REG_CFM] = declare_register ("cfm", REG_CFM);
6434 md.regsym[REG_PR] = declare_register ("pr", REG_PR);
6435 md.regsym[REG_PR_ROT] = declare_register ("pr.rot", REG_PR_ROT);
6436 md.regsym[REG_PSR] = declare_register ("psr", REG_PSR);
6437 md.regsym[REG_PSR_L] = declare_register ("psr.l", REG_PSR_L);
6438 md.regsym[REG_PSR_UM] = declare_register ("psr.um", REG_PSR_UM);
6440 for (i = 0; i < NELEMS (indirect_reg); ++i)
6442 regnum = indirect_reg[i].regnum;
6443 md.regsym[regnum] = declare_register (indirect_reg[i].name, regnum);
6446 /* define synonyms for application registers: */
6447 for (i = REG_AR; i < REG_AR + NELEMS (ar); ++i)
6448 md.regsym[i] = declare_register (ar[i - REG_AR].name,
6449 REG_AR + ar[i - REG_AR].regnum);
6451 /* define synonyms for control registers: */
6452 for (i = REG_CR; i < REG_CR + NELEMS (cr); ++i)
6453 md.regsym[i] = declare_register (cr[i - REG_CR].name,
6454 REG_CR + cr[i - REG_CR].regnum);
6456 declare_register ("gp", REG_GR + 1);
6457 declare_register ("sp", REG_GR + 12);
6458 declare_register ("rp", REG_BR + 0);
6460 /* pseudo-registers used to specify unwind info: */
6461 declare_register ("psp", REG_PSP);
6463 declare_register_set ("ret", 4, REG_GR + 8);
6464 declare_register_set ("farg", 8, REG_FR + 8);
6465 declare_register_set ("fret", 8, REG_FR + 8);
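/* For reference: assuming declare_register_set() defines "<prefix>0" ..
   "<prefix>N-1", this makes ret0-ret3 aliases for r8-r11 and farg0-farg7 /
   fret0-fret7 aliases for f8-f15, matching the software conventions for
   integer return values and FP arguments/returns. */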
6467 for (i = 0; i < NELEMS (const_bits); ++i)
6469 err = hash_insert (md.const_hash, const_bits[i].name,
6470 (PTR) (const_bits + i));
6472 as_fatal ("Inserting \"%s\" into constant hash table failed: %s",
6476 /* Set the architecture and machine depending on defaults and command line options. */
6478 if (md.flags & EF_IA_64_ABI64)
6479 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
6481 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
6484 as_warn (_("Could not set architecture and machine"));
6486 md.mem_offset.hint = 0;
6489 md.entry_labels = NULL;
6492 /* Set the elf type to 64 bit ABI by default. Cannot do this in md_begin
6493 because that is called after md_parse_option which is where we do the
6494 dynamic changing of md.flags based on -mlp64 or -milp32. Also, set the
6495 default endianness. */
6498 ia64_init (argc, argv)
6499 int argc ATTRIBUTE_UNUSED;
6500 char **argv ATTRIBUTE_UNUSED;
6502 md.flags = EF_IA_64_ABI64;
6503 if (TARGET_BYTES_BIG_ENDIAN)
6504 md.flags |= EF_IA_64_BE;
6507 /* Return a string for the target object file format. */
6510 ia64_target_format ()
6512 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
6514 if (md.flags & EF_IA_64_BE)
6516 if (md.flags & EF_IA_64_ABI64)
6518 return "elf64-ia64-aix-big";
6520 return "elf64-ia64-big";
6524 return "elf32-ia64-aix-big";
6526 return "elf32-ia64-big";
6531 if (md.flags & EF_IA_64_ABI64)
6533 return "elf64-ia64-aix-little";
6535 return "elf64-ia64-little";
6539 return "elf32-ia64-aix-little";
6541 return "elf32-ia64-little";
6546 return "unknown-format";
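/* For example, with the defaults set up by ia64_init (64-bit ABI,
   little-endian unless TARGET_BYTES_BIG_ENDIAN), this function returns
   "elf64-ia64-little"; adding -mbe yields "elf64-ia64-big", and -milp32
   selects the elf32 variants. (The -aix names are chosen by an additional
   target check not visible here.) */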
6550 ia64_end_of_source ()
6552 /* terminate insn group upon reaching end of file: */
6553 insn_group_break (1, 0, 0);
6555 /* emit any slots we haven't written yet: */
6556 ia64_flush_insns ();
6558 bfd_set_private_flags (stdoutput, md.flags);
6560 md.mem_offset.hint = 0;
6566 if (md.qp.X_op == O_register)
6567 as_bad ("qualifying predicate not followed by instruction");
6568 md.qp.X_op = O_absent;
6570 if (ignore_input ())
6573 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
6575 if (md.detect_dv && !md.explicit_mode)
6576 as_warn (_("Explicit stops are ignored in auto mode"));
6578 insn_group_break (1, 0, 0);
6582 /* This is a hook for ia64_frob_label, so that it can distinguish tags from labels. */
6584 static int defining_tag = 0;
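/* A sketch of what the hook below covers (based on the visible cases): a
   leading "(pN)" qualifying predicate, the '{' and '}' manual-bundling
   braces, and "[tag:]" tag definitions; anything else falls through as not
   handled here. */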
6587 ia64_unrecognized_line (ch)
6593 expression (&md.qp);
6594 if (*input_line_pointer++ != ')')
6596 as_bad ("Expected ')'");
6599 if (md.qp.X_op != O_register)
6601 as_bad ("Qualifying predicate expected");
6604 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
6606 as_bad ("Predicate register expected");
6612 if (md.manual_bundling)
6613 as_warn ("Found '{' when manual bundling is already turned on");
6615 CURR_SLOT.manual_bundling_on = 1;
6616 md.manual_bundling = 1;
6618 /* Bundling is only acceptable in explicit mode
6619 or when in default automatic mode. */
6620 if (md.detect_dv && !md.explicit_mode)
6622 if (!md.mode_explicitly_set
6623 && !md.default_explicit_mode)
6626 as_warn (_("Found '{' after explicit switch to automatic mode"));
6631 if (!md.manual_bundling)
6632 as_warn ("Found '}' when manual bundling is off");
6634 PREV_SLOT.manual_bundling_off = 1;
6635 md.manual_bundling = 0;
6637 /* switch back to automatic mode, if applicable */
6640 && !md.mode_explicitly_set
6641 && !md.default_explicit_mode)
6644 /* Allow '{' to follow on the same line. We also allow ";;", but that
6645 happens automatically because ';' is an end of line marker. */
6647 if (input_line_pointer[0] == '{')
6649 input_line_pointer++;
6650 return ia64_unrecognized_line ('{');
6653 demand_empty_rest_of_line ();
6663 if (md.qp.X_op == O_register)
6665 as_bad ("Tag must come before qualifying predicate.");
6669 /* This implements just enough of read_a_source_file in read.c to
6670 recognize labels. */
6671 if (is_name_beginner (*input_line_pointer))
6673 s = input_line_pointer;
6674 c = get_symbol_end ();
6676 else if (LOCAL_LABELS_FB
6677 && isdigit ((unsigned char) *input_line_pointer))
6680 while (isdigit ((unsigned char) *input_line_pointer))
6681 temp = (temp * 10) + *input_line_pointer++ - '0';
6682 fb_label_instance_inc (temp);
6683 s = fb_label_name (temp, 0);
6684 c = *input_line_pointer;
6693 /* Put ':' back for error messages' sake. */
6694 *input_line_pointer++ = ':';
6695 as_bad ("Expected ':'");
6702 /* Put ':' back for error messages' sake. */
6703 *input_line_pointer++ = ':';
6704 if (*input_line_pointer++ != ']')
6706 as_bad ("Expected ']'");
6711 as_bad ("Tag name expected");
6721 /* Not a valid line. */
6726 ia64_frob_label (sym)
6729 struct label_fix *fix;
6731 /* Tags need special handling since they are not bundle breaks like labels. */
6735 fix = obstack_alloc (&notes, sizeof (*fix));
6737 fix->next = CURR_SLOT.tag_fixups;
6738 CURR_SLOT.tag_fixups = fix;
6743 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
6745 md.last_text_seg = now_seg;
6746 fix = obstack_alloc (&notes, sizeof (*fix));
6748 fix->next = CURR_SLOT.label_fixups;
6749 CURR_SLOT.label_fixups = fix;
6751 /* Keep track of how many code entry points we've seen. */
6752 if (md.path == md.maxpaths)
6755 md.entry_labels = (const char **)
6756 xrealloc ((void *) md.entry_labels,
6757 md.maxpaths * sizeof (char *));
6759 md.entry_labels[md.path++] = S_GET_NAME (sym);
6764 ia64_flush_pending_output ()
6766 if (!md.keep_pending_output
6767 && bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
6769 /* ??? This causes many unnecessary stop bits to be emitted.
6770 Unfortunately, it isn't clear if it is safe to remove this. */
6771 insn_group_break (1, 0, 0);
6772 ia64_flush_insns ();
6776 /* Do ia64-specific expression optimization. All that's done here is
6777 to transform index expressions that are either due to the indexing
6778 of rotating registers or due to the indexing of indirect register sets. */
6781 ia64_optimize_expr (l, op, r)
6790 if (l->X_op == O_register && r->X_op == O_constant)
6792 num_regs = (l->X_add_number >> 16);
6793 if ((unsigned) r->X_add_number >= num_regs)
6796 as_bad ("No current frame");
6798 as_bad ("Index out of range 0..%u", num_regs - 1);
6799 r->X_add_number = 0;
6801 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
6804 else if (l->X_op == O_register && r->X_op == O_register)
6806 if (l->X_add_number < IND_CPUID || l->X_add_number > IND_RR
6807 || l->X_add_number == IND_MEM)
6809 as_bad ("Indirect register set name expected");
6810 l->X_add_number = IND_CPUID;
6813 l->X_op_symbol = md.regsym[l->X_add_number];
6814 l->X_add_number = r->X_add_number;
6822 ia64_parse_name (name, e)
6826 struct const_desc *cdesc;
6827 struct dynreg *dr = 0;
6828 unsigned int regnum;
6832 /* first see if NAME is a known register name: */
6833 sym = hash_find (md.reg_hash, name);
6836 e->X_op = O_register;
6837 e->X_add_number = S_GET_VALUE (sym);
6841 cdesc = hash_find (md.const_hash, name);
6844 e->X_op = O_constant;
6845 e->X_add_number = cdesc->value;
6849 /* check for inN, locN, or outN: */
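/* (For example, with a register frame of 3 inputs, 5 locals and 4 outputs,
   in0-in2, loc0-loc4 and out0-out3 name consecutive stacked registers
   starting at r32 -- illustrative numbers only; the actual bases come from
   the current register frame tracked elsewhere in this file.) */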
6853 if (name[1] == 'n' && isdigit (name[2]))
6861 if (name[1] == 'o' && name[2] == 'c' && isdigit (name[3]))
6869 if (name[1] == 'u' && name[2] == 't' && isdigit (name[3]))
6882 /* The name is inN, locN, or outN; parse the register number. */
6883 regnum = strtoul (name, &end, 10);
6884 if (end > name && *end == '\0')
6886 if ((unsigned) regnum >= dr->num_regs)
6889 as_bad ("No current frame");
6891 as_bad ("Register number out of range 0..%u",
6895 e->X_op = O_register;
6896 e->X_add_number = dr->base + regnum;
6901 if ((dr = hash_find (md.dynreg_hash, name)))
6903 /* We've got ourselves the name of a rotating register set.
6904 Store the base register number in the low 16 bits of
6905 X_add_number and the size of the register set in the top 16
6907 e->X_op = O_register;
6908 e->X_add_number = dr->base | (dr->num_regs << 16);
6914 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
6917 ia64_canonicalize_symbol_name (name)
6920 size_t len = strlen (name);
6921 if (len > 1 && name[len - 1] == '#')
6922 name[len - 1] = '\0';
6926 /* Return true if idesc is a conditional branch instruction. This excludes
6927 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
6928 because they always read/write resources regardless of the value of the
6929 qualifying predicate. br.ia must always use p0, and hence is always
6930 taken. Thus this function returns true for branches which can fall
6931 through, and which use no resources if they do fall through. */
6934 is_conditional_branch (idesc)
6935 struct ia64_opcode *idesc;
6937 /* br is a conditional branch. Everything that starts with br. except
6938 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
6939 Everything that starts with brl is a conditional branch. */
6940 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
6941 && (idesc->name[2] == '\0'
6942 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
6943 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
6944 || idesc->name[2] == 'l'
6945 /* br.cond, br.call, br.clr */
6946 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
6947 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
6948 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
6951 /* Return whether the given opcode is a taken branch. If there's any doubt, returns zero. */
6955 is_taken_branch (idesc)
6956 struct ia64_opcode *idesc;
6958 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
6959 || strncmp (idesc->name, "br.ia", 5) == 0);
6962 /* Return whether the given opcode is an interruption or rfi. If there's any
6963 doubt, returns zero. */
6966 is_interruption_or_rfi (idesc)
6967 struct ia64_opcode *idesc;
6969 if (strcmp (idesc->name, "rfi") == 0)
6974 /* Returns the index of the given dependency in the opcode's list of chks, or
6975 -1 if there is no dependency. */
6978 depends_on (depind, idesc)
6980 struct ia64_opcode *idesc;
6983 const struct ia64_opcode_dependency *dep = idesc->dependencies;
6984 for (i = 0; i < dep->nchks; i++)
6986 if (depind == DEP (dep->chks[i]))
6992 /* Determine a set of specific resources used for a particular resource
6993 class. Returns the number of specific resources identified. For those
6994 cases which are not determinable statically, the resource returned is marked non-specific.
6997 Meanings of value in 'NOTE':
6998 1) only read/write when the register number is explicitly encoded in the
7000 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
7001 accesses CFM when qualifying predicate is in the rotating region.
7002 3) general register value is used to specify an indirect register; not
7003 determinable statically.
7004 4) only read the given resource when bits 7:0 of the indirect index
7005 register value do not match the register number of the resource; not
7006 determinable statically.
7007 5) all rules are implementation specific.
7008 6) only when both the index specified by the reader and the index specified
7009 by the writer have the same value in bits 63:61; not determinable
7011 7) only access the specified resource when the corresponding mask bit is
7013 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
7014 only read when these insns reference FR2-31
7015 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
7016 written when these insns write FR32-127
7017 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
7019 11) The target predicates are written independently of PR[qp], but source
7020 registers are only read if PR[qp] is true. Since the state of PR[qp]
7021 cannot statically be determined, all source registers are marked used.
7022 12) This insn only reads the specified predicate register when that
7023 register is the PR[qp].
7024 13) This reference to ld-c only applies to the GR whose value is loaded
7025 with data returned from memory, not the post-incremented address register.
7026 14) The RSE resource includes the implementation-specific RSE internal
7027 state resources. At least one (and possibly more) of these resources are
7028 read by each instruction listed in IC:rse-readers. At least one (and
7029 possibly more) of these resources are written by each insn listed in
7031 15+16) Represents reserved instructions, which the assembler does not
7034 Memory resources (i.e. locations in memory) are *not* marked or tracked by
7035 this code; there are no dependency violations based on memory access.
7038 #define MAX_SPECS 256
7043 specify_resource (dep, idesc, type, specs, note, path)
7044 const struct ia64_dependency *dep;
7045 struct ia64_opcode *idesc;
7046 int type; /* is this a DV chk or a DV reg? */
7047 struct rsrc specs[MAX_SPECS]; /* returned specific resources */
7048 int note; /* resource note for this insn's usage */
7049 int path; /* which execution path to examine */
7056 if (dep->mode == IA64_DV_WAW
7057 || (dep->mode == IA64_DV_RAW && type == DV_REG)
7058 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
7061 /* template for any resources we identify */
7062 tmpl.dependency = dep;
7064 tmpl.insn_srlz = tmpl.data_srlz = 0;
7065 tmpl.qp_regno = CURR_SLOT.qp_regno;
7066 tmpl.link_to_qp_branch = 1;
7067 tmpl.mem_offset.hint = 0;
7070 tmpl.cmp_type = CMP_NONE;
7073 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
7074 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
7075 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
7077 /* we don't need to track these */
7078 if (dep->semantics == IA64_DVS_NONE)
7081 switch (dep->specifier)
7086 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7088 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7089 if (regno >= 0 && regno <= 7)
7091 specs[count] = tmpl;
7092 specs[count++].index = regno;
7098 for (i = 0; i < 8; i++)
7100 specs[count] = tmpl;
7101 specs[count++].index = i;
7110 case IA64_RS_AR_UNAT:
7111 /* This is a mov =AR or mov AR= instruction. */
7112 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7114 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7115 if (regno == AR_UNAT)
7117 specs[count++] = tmpl;
7122 /* This is a spill/fill, or other instruction that modifies the UNAT register. */
7125 /* Unless we can determine the specific bits used, mark the whole
7126 thing; bits 8:3 of the memory address indicate the bit used in
7127 UNAT. The .mem.offset hint may be used to eliminate a small
7128 subset of conflicts. */
7129 specs[count] = tmpl;
7130 if (md.mem_offset.hint)
7133 fprintf (stderr, " Using hint for spill/fill\n");
7134 /* The index isn't actually used, just set it to something
7135 approximating the bit index. */
7136 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
7137 specs[count].mem_offset.hint = 1;
7138 specs[count].mem_offset.offset = md.mem_offset.offset;
7139 specs[count++].mem_offset.base = md.mem_offset.base;
7143 specs[count++].specific = 0;
7151 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7153 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7154 if ((regno >= 8 && regno <= 15)
7155 || (regno >= 20 && regno <= 23)
7156 || (regno >= 31 && regno <= 39)
7157 || (regno >= 41 && regno <= 47)
7158 || (regno >= 67 && regno <= 111))
7160 specs[count] = tmpl;
7161 specs[count++].index = regno;
7174 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7176 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7177 if ((regno >= 48 && regno <= 63)
7178 || (regno >= 112 && regno <= 127))
7180 specs[count] = tmpl;
7181 specs[count++].index = regno;
7187 for (i = 48; i < 64; i++)
7189 specs[count] = tmpl;
7190 specs[count++].index = i;
7192 for (i = 112; i < 128; i++)
7194 specs[count] = tmpl;
7195 specs[count++].index = i;
7213 for (i = 0; i < idesc->num_outputs; i++)
7214 if (idesc->operands[i] == IA64_OPND_B1
7215 || idesc->operands[i] == IA64_OPND_B2)
7217 specs[count] = tmpl;
7218 specs[count++].index =
7219 CURR_SLOT.opnd[i].X_add_number - REG_BR;
7224 for (i = idesc->num_outputs;i < NELEMS (idesc->operands); i++)
7225 if (idesc->operands[i] == IA64_OPND_B1
7226 || idesc->operands[i] == IA64_OPND_B2)
7228 specs[count] = tmpl;
7229 specs[count++].index =
7230 CURR_SLOT.opnd[i].X_add_number - REG_BR;
7236 case IA64_RS_CPUID: /* four or more registers */
7239 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
7241 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7242 if (regno >= 0 && regno < NELEMS (gr_values)
7245 specs[count] = tmpl;
7246 specs[count++].index = gr_values[regno].value & 0xFF;
7250 specs[count] = tmpl;
7251 specs[count++].specific = 0;
7261 case IA64_RS_DBR: /* four or more registers */
7264 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
7266 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7267 if (regno >= 0 && regno < NELEMS (gr_values)
7270 specs[count] = tmpl;
7271 specs[count++].index = gr_values[regno].value & 0xFF;
7275 specs[count] = tmpl;
7276 specs[count++].specific = 0;
7280 else if (note == 0 && !rsrc_write)
7282 specs[count] = tmpl;
7283 specs[count++].specific = 0;
7291 case IA64_RS_IBR: /* four or more registers */
7294 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
7296 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7297 if (regno >= 0 && regno < NELEMS (gr_values)
7300 specs[count] = tmpl;
7301 specs[count++].index = gr_values[regno].value & 0xFF;
7305 specs[count] = tmpl;
7306 specs[count++].specific = 0;
7319 /* These are implementation specific. Force all references to
7320 conflict with all other references. */
7321 specs[count] = tmpl;
7322 specs[count++].specific = 0;
7330 case IA64_RS_PKR: /* 16 or more registers */
7331 if (note == 3 || note == 4)
7333 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
7335 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7336 if (regno >= 0 && regno < NELEMS (gr_values)
7341 specs[count] = tmpl;
7342 specs[count++].index = gr_values[regno].value & 0xFF;
7345 for (i = 0; i < NELEMS (gr_values); i++)
7347 /* Uses all registers *except* the one in R3. */
7348 if ((unsigned)i != (gr_values[regno].value & 0xFF))
7350 specs[count] = tmpl;
7351 specs[count++].index = i;
7357 specs[count] = tmpl;
7358 specs[count++].specific = 0;
7365 specs[count] = tmpl;
7366 specs[count++].specific = 0;
7370 case IA64_RS_PMC: /* four or more registers */
7373 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
7374 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
7377 int index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
7379 int regno = CURR_SLOT.opnd[index].X_add_number - REG_GR;
7380 if (regno >= 0 && regno < NELEMS (gr_values)
7383 specs[count] = tmpl;
7384 specs[count++].index = gr_values[regno].value & 0xFF;
7388 specs[count] = tmpl;
7389 specs[count++].specific = 0;
7399 case IA64_RS_PMD: /* four or more registers */
7402 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
7404 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7405 if (regno >= 0 && regno < NELEMS (gr_values)
7408 specs[count] = tmpl;
7409 specs[count++].index = gr_values[regno].value & 0xFF;
7413 specs[count] = tmpl;
7414 specs[count++].specific = 0;
7424 case IA64_RS_RR: /* eight registers */
7427 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
7429 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7430 if (regno >= 0 && regno < NELEMS (gr_values)
7433 specs[count] = tmpl;
7434 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
7438 specs[count] = tmpl;
7439 specs[count++].specific = 0;
7443 else if (note == 0 && !rsrc_write)
7445 specs[count] = tmpl;
7446 specs[count++].specific = 0;
7454 case IA64_RS_CR_IRR:
7457 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
7458 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
7460 && idesc->operands[1] == IA64_OPND_CR3
7463 for (i = 0; i < 4; i++)
7465 specs[count] = tmpl;
7466 specs[count++].index = CR_IRR0 + i;
7472 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7473 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
7475 && regno <= CR_IRR3)
7477 specs[count] = tmpl;
7478 specs[count++].index = regno;
7487 case IA64_RS_CR_LRR:
7494 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7495 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
7496 && (regno == CR_LRR0 || regno == CR_LRR1))
7498 specs[count] = tmpl;
7499 specs[count++].index = regno;
7507 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
7509 specs[count] = tmpl;
7510 specs[count++].index =
7511 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7526 else if (rsrc_write)
7528 if (dep->specifier == IA64_RS_FRb
7529 && idesc->operands[0] == IA64_OPND_F1)
7531 specs[count] = tmpl;
7532 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
7537 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
7539 if (idesc->operands[i] == IA64_OPND_F2
7540 || idesc->operands[i] == IA64_OPND_F3
7541 || idesc->operands[i] == IA64_OPND_F4)
7543 specs[count] = tmpl;
7544 specs[count++].index =
7545 CURR_SLOT.opnd[i].X_add_number - REG_FR;
7554 /* This reference applies only to the GR whose value is loaded with
7555 data returned from memory. */
7556 specs[count] = tmpl;
7557 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
7563 for (i = 0; i < idesc->num_outputs; i++)
7564 if (idesc->operands[i] == IA64_OPND_R1
7565 || idesc->operands[i] == IA64_OPND_R2
7566 || idesc->operands[i] == IA64_OPND_R3)
7568 specs[count] = tmpl;
7569 specs[count++].index =
7570 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7572 if (idesc->flags & IA64_OPCODE_POSTINC)
7573 for (i = 0; i < NELEMS (idesc->operands); i++)
7574 if (idesc->operands[i] == IA64_OPND_MR3)
7576 specs[count] = tmpl;
7577 specs[count++].index =
7578 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7583 /* Look for anything that reads a GR. */
7584 for (i = 0; i < NELEMS (idesc->operands); i++)
7586 if (idesc->operands[i] == IA64_OPND_MR3
7587 || idesc->operands[i] == IA64_OPND_CPUID_R3
7588 || idesc->operands[i] == IA64_OPND_DBR_R3
7589 || idesc->operands[i] == IA64_OPND_IBR_R3
7590 || idesc->operands[i] == IA64_OPND_MSR_R3
7591 || idesc->operands[i] == IA64_OPND_PKR_R3
7592 || idesc->operands[i] == IA64_OPND_PMC_R3
7593 || idesc->operands[i] == IA64_OPND_PMD_R3
7594 || idesc->operands[i] == IA64_OPND_RR_R3
7595 || ((i >= idesc->num_outputs)
7596 && (idesc->operands[i] == IA64_OPND_R1
7597 || idesc->operands[i] == IA64_OPND_R2
7598 || idesc->operands[i] == IA64_OPND_R3
7599 /* addl source register. */
7600 || idesc->operands[i] == IA64_OPND_R3_2)))
7602 specs[count] = tmpl;
7603 specs[count++].index =
7604 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7615 /* This is the same as IA64_RS_PRr, except that the register range is
7616 from 1 - 15, and there are no rotating register reads/writes here. */
7620 for (i = 1; i < 16; i++)
7622 specs[count] = tmpl;
7623 specs[count++].index = i;
7629 /* Mark only those registers indicated by the mask. */
7632 mask = CURR_SLOT.opnd[2].X_add_number;
7633 for (i = 1; i < 16; i++)
7634 if (mask & ((valueT) 1 << i))
7636 specs[count] = tmpl;
7637 specs[count++].index = i;
7645 else if (note == 11) /* note 11 implies note 1 as well */
7649 for (i = 0; i < idesc->num_outputs; i++)
7651 if (idesc->operands[i] == IA64_OPND_P1
7652 || idesc->operands[i] == IA64_OPND_P2)
7654 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
7655 if (regno >= 1 && regno < 16)
7657 specs[count] = tmpl;
7658 specs[count++].index = regno;
7668 else if (note == 12)
7670 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
7672 specs[count] = tmpl;
7673 specs[count++].index = CURR_SLOT.qp_regno;
7680 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
7681 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
7682 int or_andcm = strstr(idesc->name, "or.andcm") != NULL;
7683 int and_orcm = strstr(idesc->name, "and.orcm") != NULL;
7685 if ((idesc->operands[0] == IA64_OPND_P1
7686 || idesc->operands[0] == IA64_OPND_P2)
7687 && p1 >= 1 && p1 < 16)
7689 specs[count] = tmpl;
7690 specs[count].cmp_type =
7691 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
7692 specs[count++].index = p1;
7694 if ((idesc->operands[1] == IA64_OPND_P1
7695 || idesc->operands[1] == IA64_OPND_P2)
7696 && p2 >= 1 && p2 < 16)
7698 specs[count] = tmpl;
7699 specs[count].cmp_type =
7700 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
7701 specs[count++].index = p2;
7706 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
7708 specs[count] = tmpl;
7709 specs[count++].index = CURR_SLOT.qp_regno;
7711 if (idesc->operands[1] == IA64_OPND_PR)
7713 for (i = 1; i < 16; i++)
7715 specs[count] = tmpl;
7716 specs[count++].index = i;
7727 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
7728 simplified cases of this. */
7732 for (i = 16; i < 63; i++)
7734 specs[count] = tmpl;
7735 specs[count++].index = i;
7741 /* Mark only those registers indicated by the mask. */
7743 && idesc->operands[0] == IA64_OPND_PR)
7745 mask = CURR_SLOT.opnd[2].X_add_number;
7746 if (mask & ((valueT) 1<<16))
7747 for (i = 16; i < 63; i++)
7749 specs[count] = tmpl;
7750 specs[count++].index = i;
7754 && idesc->operands[0] == IA64_OPND_PR_ROT)
7756 for (i = 16; i < 63; i++)
7758 specs[count] = tmpl;
7759 specs[count++].index = i;
7767 else if (note == 11) /* note 11 implies note 1 as well */
7771 for (i = 0; i < idesc->num_outputs; i++)
7773 if (idesc->operands[i] == IA64_OPND_P1
7774 || idesc->operands[i] == IA64_OPND_P2)
7776 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
7777 if (regno >= 16 && regno < 63)
7779 specs[count] = tmpl;
7780 specs[count++].index = regno;
7790 else if (note == 12)
7792 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
7794 specs[count] = tmpl;
7795 specs[count++].index = CURR_SLOT.qp_regno;
7802 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
7803 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
7804 int or_andcm = strstr(idesc->name, "or.andcm") != NULL;
7805 int and_orcm = strstr(idesc->name, "and.orcm") != NULL;
7807 if ((idesc->operands[0] == IA64_OPND_P1
7808 || idesc->operands[0] == IA64_OPND_P2)
7809 && p1 >= 16 && p1 < 63)
7811 specs[count] = tmpl;
7812 specs[count].cmp_type =
7813 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
7814 specs[count++].index = p1;
7816 if ((idesc->operands[1] == IA64_OPND_P1
7817 || idesc->operands[1] == IA64_OPND_P2)
7818 && p2 >= 16 && p2 < 63)
7820 specs[count] = tmpl;
7821 specs[count].cmp_type =
7822 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
7823 specs[count++].index = p2;
7828 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
7830 specs[count] = tmpl;
7831 specs[count++].index = CURR_SLOT.qp_regno;
7833 if (idesc->operands[1] == IA64_OPND_PR)
7835 for (i = 16; i < 63; i++)
7837 specs[count] = tmpl;
7838 specs[count++].index = i;
7850 /* Verify that the instruction is using the PSR bit indicated in dep->regindex. */
7854 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
7856 if (dep->regindex < 6)
7858 specs[count++] = tmpl;
7861 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
7863 if (dep->regindex < 32
7864 || dep->regindex == 35
7865 || dep->regindex == 36
7866 || (!rsrc_write && dep->regindex == PSR_CPL))
7868 specs[count++] = tmpl;
7871 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
7873 if (dep->regindex < 32
7874 || dep->regindex == 35
7875 || dep->regindex == 36
7876 || (rsrc_write && dep->regindex == PSR_CPL))
7878 specs[count++] = tmpl;
7883 /* Several PSR bits have very specific dependencies. */
7884 switch (dep->regindex)
7887 specs[count++] = tmpl;
7892 specs[count++] = tmpl;
7896 /* Only certain CR accesses use PSR.ic */
7897 if (idesc->operands[0] == IA64_OPND_CR3
7898 || idesc->operands[1] == IA64_OPND_CR3)
7901 ((idesc->operands[0] == IA64_OPND_CR3)
7904 CURR_SLOT.opnd[index].X_add_number - REG_CR;
7919 specs[count++] = tmpl;
7928 specs[count++] = tmpl;
7932 /* Only some AR accesses use cpl */
7933 if (idesc->operands[0] == IA64_OPND_AR3
7934 || idesc->operands[1] == IA64_OPND_AR3)
7937 ((idesc->operands[0] == IA64_OPND_AR3)
7940 CURR_SLOT.opnd[index].X_add_number - REG_AR;
7947 && regno <= AR_K7))))
7949 specs[count++] = tmpl;
7954 specs[count++] = tmpl;
7964 if (idesc->operands[0] == IA64_OPND_IMMU24)
7966 mask = CURR_SLOT.opnd[0].X_add_number;
7972 if (mask & ((valueT) 1 << dep->regindex))
7974 specs[count++] = tmpl;
7979 int min = dep->regindex == PSR_DFL ? 2 : 32;
7980 int max = dep->regindex == PSR_DFL ? 31 : 127;
7981 /* dfh is read on FR32-127; dfl is read on FR2-31 */
7982 for (i = 0; i < NELEMS (idesc->operands); i++)
7984 if (idesc->operands[i] == IA64_OPND_F1
7985 || idesc->operands[i] == IA64_OPND_F2
7986 || idesc->operands[i] == IA64_OPND_F3
7987 || idesc->operands[i] == IA64_OPND_F4)
7989 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
7990 if (reg >= min && reg <= max)
7992 specs[count++] = tmpl;
7999 int min = dep->regindex == PSR_MFL ? 2 : 32;
8000 int max = dep->regindex == PSR_MFL ? 31 : 127;
8001 /* mfh is read on writes to FR32-127; mfl is read on writes to FR2-31. */
8003 for (i = 0; i < idesc->num_outputs; i++)
8005 if (idesc->operands[i] == IA64_OPND_F1)
8007 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8008 if (reg >= min && reg <= max)
8010 specs[count++] = tmpl;
8015 else if (note == 10)
8017 for (i = 0; i < NELEMS (idesc->operands); i++)
8019 if (idesc->operands[i] == IA64_OPND_R1
8020 || idesc->operands[i] == IA64_OPND_R2
8021 || idesc->operands[i] == IA64_OPND_R3)
8023 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8024 if (regno >= 16 && regno <= 31)
8026 specs[count++] = tmpl;
8037 case IA64_RS_AR_FPSR:
8038 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8040 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8041 if (regno == AR_FPSR)
8043 specs[count++] = tmpl;
8048 specs[count++] = tmpl;
8053 /* Handle all AR[REG] resources */
8054 if (note == 0 || note == 1)
8056 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8057 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
8058 && regno == dep->regindex)
8060 specs[count++] = tmpl;
8062 /* other AR[REG] resources may be affected by AR accesses */
8063 else if (idesc->operands[0] == IA64_OPND_AR3)
8066 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
8067 switch (dep->regindex)
8073 if (regno == AR_BSPSTORE)
8075 specs[count++] = tmpl;
8079 (regno == AR_BSPSTORE
8080 || regno == AR_RNAT))
8082 specs[count++] = tmpl;
8087 else if (idesc->operands[1] == IA64_OPND_AR3)
8090 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
8091 switch (dep->regindex)
8096 if (regno == AR_BSPSTORE || regno == AR_RNAT)
8098 specs[count++] = tmpl;
8105 specs[count++] = tmpl;
8115 /* Handle all CR[REG] resources */
8116 if (note == 0 || note == 1)
8118 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8120 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8121 if (regno == dep->regindex)
8123 specs[count++] = tmpl;
8125 else if (!rsrc_write)
8127 /* Reads from CR[IVR] affect other resources. */
8128 if (regno == CR_IVR)
8130 if ((dep->regindex >= CR_IRR0
8131 && dep->regindex <= CR_IRR3)
8132 || dep->regindex == CR_TPR)
8134 specs[count++] = tmpl;
8141 specs[count++] = tmpl;
8150 case IA64_RS_INSERVICE:
8151 /* look for write of EOI (67) or read of IVR (65) */
8152 if ((idesc->operands[0] == IA64_OPND_CR3
8153 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
8154 || (idesc->operands[1] == IA64_OPND_CR3
8155 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
8157 specs[count++] = tmpl;
8164 specs[count++] = tmpl;
8175 specs[count++] = tmpl;
8179 /* Check if any of the registers accessed are in the rotating region.
8180 mov to/from pr accesses CFM only when qp_regno is in the rotating
8182 for (i = 0; i < NELEMS (idesc->operands); i++)
8184 if (idesc->operands[i] == IA64_OPND_R1
8185 || idesc->operands[i] == IA64_OPND_R2
8186 || idesc->operands[i] == IA64_OPND_R3)
8188 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8189 /* Assumes that md.rot.num_regs is always valid */
8190 if (md.rot.num_regs > 0
8192 && num < 31 + md.rot.num_regs)
8194 specs[count] = tmpl;
8195 specs[count++].specific = 0;
8198 else if (idesc->operands[i] == IA64_OPND_F1
8199 || idesc->operands[i] == IA64_OPND_F2
8200 || idesc->operands[i] == IA64_OPND_F3
8201 || idesc->operands[i] == IA64_OPND_F4)
8203 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8206 specs[count] = tmpl;
8207 specs[count++].specific = 0;
8210 else if (idesc->operands[i] == IA64_OPND_P1
8211 || idesc->operands[i] == IA64_OPND_P2)
8213 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
8216 specs[count] = tmpl;
8217 specs[count++].specific = 0;
8221 if (CURR_SLOT.qp_regno > 15)
8223 specs[count] = tmpl;
8224 specs[count++].specific = 0;
8229 /* This is the same as IA64_RS_PRr, except simplified to account for
8230 the fact that there is only one register. */
8234 specs[count++] = tmpl;
8239 if (idesc->operands[2] == IA64_OPND_IMM17)
8240 mask = CURR_SLOT.opnd[2].X_add_number;
8241 if (mask & ((valueT) 1 << 63))
8242 specs[count++] = tmpl;
8244 else if (note == 11)
8246 if ((idesc->operands[0] == IA64_OPND_P1
8247 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
8248 || (idesc->operands[1] == IA64_OPND_P2
8249 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
8251 specs[count++] = tmpl;
8254 else if (note == 12)
8256 if (CURR_SLOT.qp_regno == 63)
8258 specs[count++] = tmpl;
8265 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8266 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8267 int or_andcm = strstr(idesc->name, "or.andcm") != NULL;
8268 int and_orcm = strstr(idesc->name, "and.orcm") != NULL;
8271 && (idesc->operands[0] == IA64_OPND_P1
8272 || idesc->operands[0] == IA64_OPND_P2))
8274 specs[count] = tmpl;
8275 specs[count++].cmp_type =
8276 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8279 && (idesc->operands[1] == IA64_OPND_P1
8280 || idesc->operands[1] == IA64_OPND_P2))
8282 specs[count] = tmpl;
8283 specs[count++].cmp_type =
8284 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8289 if (CURR_SLOT.qp_regno == 63)
8291 specs[count++] = tmpl;
8302 /* FIXME we can identify some individual RSE written resources, but RSE
8303 read resources have not yet been completely identified, so for now
8304 treat RSE as a single resource */
8305 if (strncmp (idesc->name, "mov", 3) == 0)
8309 if (idesc->operands[0] == IA64_OPND_AR3
8310 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
8312 specs[count] = tmpl;
8313 specs[count++].index = 0; /* IA64_RSE_BSPLOAD/RNATBITINDEX */
8318 if (idesc->operands[0] == IA64_OPND_AR3)
8320 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
8321 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
8323 specs[count++] = tmpl;
8326 else if (idesc->operands[1] == IA64_OPND_AR3)
8328 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
8329 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
8330 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
8332 specs[count++] = tmpl;
8339 specs[count++] = tmpl;
8344 /* FIXME -- do any of these need to be non-specific? */
8345 specs[count++] = tmpl;
8349 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
8356 /* Clear branch flags on marked resources. This breaks the link between the
8357 QP of the marking instruction and a subsequent branch on the same QP. */
8360 clear_qp_branch_flag (mask)
8364 for (i = 0; i < regdepslen; i++)
8366 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
8367 if ((bit & mask) != 0)
8369 regdeps[i].link_to_qp_branch = 0;
8374 /* Remove any mutexes which contain any of the PRs indicated in the mask.
8376 Any change to a PR clears the mutex relations which include that PR. */
8379 clear_qp_mutex (mask)
8385 while (i < qp_mutexeslen)
8387 if ((qp_mutexes[i].prmask & mask) != 0)
8391 fprintf (stderr, " Clearing mutex relation");
8392 print_prmask (qp_mutexes[i].prmask);
8393 fprintf (stderr, "\n");
8395 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
8402 /* Clear implies relations which contain PRs in the given masks.
8403 P1_MASK indicates the source of the implies relation, while P2_MASK
8404 indicates the implied PR. */
8407 clear_qp_implies (p1_mask, p2_mask)
8414 while (i < qp_implieslen)
8416 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
8417 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
8420 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
8421 qp_implies[i].p1, qp_implies[i].p2);
8422 qp_implies[i] = qp_implies[--qp_implieslen];
8429 /* Add the PRs specified to the list of implied relations. */
8432 add_qp_imply (p1, p2)
8439 /* p0 is not meaningful here. */
8440 if (p1 == 0 || p2 == 0)
8446 /* If it exists already, ignore it. */
8447 for (i = 0; i < qp_implieslen; i++)
8449 if (qp_implies[i].p1 == p1
8450 && qp_implies[i].p2 == p2
8451 && qp_implies[i].path == md.path
8452 && !qp_implies[i].p2_branched)
8456 if (qp_implieslen == qp_impliestotlen)
8458 qp_impliestotlen += 20;
8459 qp_implies = (struct qp_imply *)
8460 xrealloc ((void *) qp_implies,
8461 qp_impliestotlen * sizeof (struct qp_imply));
8464 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
8465 qp_implies[qp_implieslen].p1 = p1;
8466 qp_implies[qp_implieslen].p2 = p2;
8467 qp_implies[qp_implieslen].path = md.path;
8468 qp_implies[qp_implieslen++].p2_branched = 0;
8470 /* Add in the implied transitive relations; for everything that p2 implies,
8471 make p1 imply that, too; for everything that implies p1, make it imply p2 as well. */
8473 for (i = 0; i < qp_implieslen; i++)
8475 if (qp_implies[i].p1 == p2)
8476 add_qp_imply (p1, qp_implies[i].p2);
8477 if (qp_implies[i].p2 == p1)
8478 add_qp_imply (qp_implies[i].p1, p2);
8480 /* Add in mutex relations implied by this implies relation; for each mutex
8481 relation containing p2, duplicate it and replace p2 with p1. */
8482 bit = (valueT) 1 << p1;
8483 mask = (valueT) 1 << p2;
8484 for (i = 0; i < qp_mutexeslen; i++)
8486 if (qp_mutexes[i].prmask & mask)
8487 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
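/* For instance, if p1 implies p2 and a mutex {p2, p3} is already
   registered, the loop above also registers {p1, p3}: since p1 being
   true forces p2 to be true, p1 and p3 can never be true at the same
   time either.  */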
8491 /* Add the PRs specified in the mask to the mutex list; this means that only
8492 one of the PRs can be true at any time. PR0 should never be included in the mask. */
8502 if (qp_mutexeslen == qp_mutexestotlen)
8504 qp_mutexestotlen += 20;
8505 qp_mutexes = (struct qpmutex *)
8506 xrealloc ((void *) qp_mutexes,
8507 qp_mutexestotlen * sizeof (struct qpmutex));
8511 fprintf (stderr, " Registering mutex on");
8512 print_prmask (mask);
8513 fprintf (stderr, "\n");
8515 qp_mutexes[qp_mutexeslen].path = md.path;
8516 qp_mutexes[qp_mutexeslen++].prmask = mask;
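/* E.g., after "cmp.ne p6, p7 = r8, r9" under p0, note_register_values ()
   ends up calling this with mask == ((valueT) 1 << 6) | ((valueT) 1 << 7),
   recording that p6 and p7 cannot both be true on the current path.  */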
8520 clear_register_values ()
8524 fprintf (stderr, " Clearing register values\n");
8525 for (i = 1; i < NELEMS (gr_values); i++)
8526 gr_values[i].known = 0;
8529 /* Keep track of register values/changes which affect DV tracking.
8531 optimization note: should add a flag to classes of insns where otherwise we
8532 have to examine a group of strings to identify them. */
8535 note_register_values (idesc)
8536 struct ia64_opcode *idesc;
8538 valueT qp_changemask = 0;
8541 /* Invalidate values for registers being written to. */
8542 for (i = 0; i < idesc->num_outputs; i++)
8544 if (idesc->operands[i] == IA64_OPND_R1
8545 || idesc->operands[i] == IA64_OPND_R2
8546 || idesc->operands[i] == IA64_OPND_R3)
8548 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8549 if (regno > 0 && regno < NELEMS (gr_values))
8550 gr_values[regno].known = 0;
8552 else if (idesc->operands[i] == IA64_OPND_R3_2)
8554 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8555 if (regno > 0 && regno < 4)
8556 gr_values[regno].known = 0;
8558 else if (idesc->operands[i] == IA64_OPND_P1
8559 || idesc->operands[i] == IA64_OPND_P2)
8561 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8562 qp_changemask |= (valueT) 1 << regno;
8564 else if (idesc->operands[i] == IA64_OPND_PR)
8566 if (idesc->operands[2] & (valueT) 0x10000)
8567 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
8569 qp_changemask = idesc->operands[2];
8572 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
8574 if (idesc->operands[1] & ((valueT) 1 << 43))
8575 qp_changemask = ~(valueT) 0xFFFFFFFFFFF | idesc->operands[1];
8577 qp_changemask = idesc->operands[1];
8578 qp_changemask &= ~(valueT) 0xFFFF;
8583 /* Always clear qp branch flags on any PR change. */
8584 /* FIXME there may be exceptions for certain compares. */
8585 clear_qp_branch_flag (qp_changemask);
8587 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
8588 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
8590 qp_changemask |= ~(valueT) 0xFFFF;
8591 if (strcmp (idesc->name, "clrrrb.pr") != 0)
8593 for (i = 32; i < 32 + md.rot.num_regs; i++)
8594 gr_values[i].known = 0;
8596 clear_qp_mutex (qp_changemask);
8597 clear_qp_implies (qp_changemask, qp_changemask);
8599 /* After a call, all register values are undefined, except those marked safe across calls. */
8601 else if (strncmp (idesc->name, "br.call", 6) == 0
8602 || strncmp (idesc->name, "brl.call", 7) == 0)
8604 /* FIXME keep GR values which are marked as "safe_across_calls" */
8605 clear_register_values ();
8606 clear_qp_mutex (~qp_safe_across_calls);
8607 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
8608 clear_qp_branch_flag (~qp_safe_across_calls);
8610 else if (is_interruption_or_rfi (idesc)
8611 || is_taken_branch (idesc))
8613 clear_register_values ();
8614 clear_qp_mutex (~(valueT) 0);
8615 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
8617 /* Look for mutex and implies relations. */
8618 else if ((idesc->operands[0] == IA64_OPND_P1
8619 || idesc->operands[0] == IA64_OPND_P2)
8620 && (idesc->operands[1] == IA64_OPND_P1
8621 || idesc->operands[1] == IA64_OPND_P2))
8623 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8624 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8625 valueT p1mask = (valueT) 1 << p1;
8626 valueT p2mask = (valueT) 1 << p2;
8628 /* If one of the PRs is PR0, we can't really do anything. */
8629 if (p1 == 0 || p2 == 0)
8632 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
8634 /* In general, clear mutexes and implies which include P1 or P2,
8635 with the following exceptions. */
8636 else if (strstr (idesc->name, ".or.andcm") != NULL)
8638 add_qp_mutex (p1mask | p2mask);
8639 clear_qp_implies (p2mask, p1mask);
8641 else if (strstr (idesc->name, ".and.orcm") != NULL)
8643 add_qp_mutex (p1mask | p2mask);
8644 clear_qp_implies (p1mask, p2mask);
8646 else if (strstr (idesc->name, ".and") != NULL)
8648 clear_qp_implies (0, p1mask | p2mask);
8650 else if (strstr (idesc->name, ".or") != NULL)
8652 clear_qp_mutex (p1mask | p2mask);
8653 clear_qp_implies (p1mask | p2mask, 0);
8657 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
8658 if (strstr (idesc->name, ".unc") != NULL)
8660 add_qp_mutex (p1mask | p2mask);
8661 if (CURR_SLOT.qp_regno != 0)
8663 add_qp_imply (CURR_SLOT.opnd[0].X_add_number - REG_P,
8664 CURR_SLOT.qp_regno);
8665 add_qp_imply (CURR_SLOT.opnd[1].X_add_number - REG_P,
8666 CURR_SLOT.qp_regno);
8669 else if (CURR_SLOT.qp_regno == 0)
8671 add_qp_mutex (p1mask | p2mask);
8675 clear_qp_mutex (p1mask | p2mask);
8679 /* Look for mov imm insns into GRs. */
8680 else if (idesc->operands[0] == IA64_OPND_R1
8681 && (idesc->operands[1] == IA64_OPND_IMM22
8682 || idesc->operands[1] == IA64_OPND_IMMU64)
8683 && (strcmp (idesc->name, "mov") == 0
8684 || strcmp (idesc->name, "movl") == 0))
8686 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8687 if (regno > 0 && regno < NELEMS (gr_values))
8689 gr_values[regno].known = 1;
8690 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
8691 gr_values[regno].path = md.path;
8694 fprintf (stderr, " Know gr%d = ", regno);
8695 fprintf_vma (stderr, gr_values[regno].value);
8696 fputs ("\n", stderr);
8702 clear_qp_mutex (qp_changemask);
8703 clear_qp_implies (qp_changemask, qp_changemask);
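/* Example of the mutex/implies bookkeeping above: "cmp.eq p6, p7 = r8, r9"
   under p0 registers the mutex {p6, p7}; "(p8) cmp.eq.unc p6, p7 = r8, r9"
   additionally records that p6 implies p8 and that p7 implies p8, since an
   unconditional compare clears both targets when its qualifying predicate
   is false.  */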
8707 /* Return whether the given predicate registers are currently mutex. */
8710 qp_mutex (p1, p2, path)
8720 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
8721 for (i = 0; i < qp_mutexeslen; i++)
8723 if (qp_mutexes[i].path >= path
8724 && (qp_mutexes[i].prmask & mask) == mask)
8731 /* Return whether the given resource is in the given insn's list of chks.
8732 Return 1 if the conflict is absolutely determined, 2 if it's a potential conflict, or 0 if there is no conflict. */
8736 resources_match (rs, idesc, note, qp_regno, path)
8738 struct ia64_opcode *idesc;
8743 struct rsrc specs[MAX_SPECS];
8746 /* If the marked resource's qp_regno and the given qp_regno are mutex,
8747 we don't need to check. One exception is note 11, which indicates that
8748 target predicates are written regardless of PR[qp]. */
8749 if (qp_mutex (rs->qp_regno, qp_regno, path)
8753 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
8756 /* UNAT checking is a bit more specific than other resources */
8757 if (rs->dependency->specifier == IA64_RS_AR_UNAT
8758 && specs[count].mem_offset.hint
8759 && rs->mem_offset.hint)
8761 if (rs->mem_offset.base == specs[count].mem_offset.base)
8763 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
8764 ((specs[count].mem_offset.offset >> 3) & 0x3F))
8771 /* Skip apparent PR write conflicts where both writes are an AND or both
8772 writes are an OR. */
8773 if (rs->dependency->specifier == IA64_RS_PR
8774 || rs->dependency->specifier == IA64_RS_PRr
8775 || rs->dependency->specifier == IA64_RS_PR63)
8777 if (specs[count].cmp_type != CMP_NONE
8778 && specs[count].cmp_type == rs->cmp_type)
8781 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
8782 dv_mode[rs->dependency->mode],
8783 rs->dependency->specifier != IA64_RS_PR63 ?
8784 specs[count].index : 63);
8789 " %s on parallel compare conflict %s vs %s on PR%d\n",
8790 dv_mode[rs->dependency->mode],
8791 dv_cmp_type[rs->cmp_type],
8792 dv_cmp_type[specs[count].cmp_type],
8793 rs->dependency->specifier != IA64_RS_PR63 ?
8794 specs[count].index : 63);
8798 /* If either resource is not specific, conservatively assume a conflict
8800 if (!specs[count].specific || !rs->specific)
8802 else if (specs[count].index == rs->index)
8807 fprintf (stderr, " No %s conflicts\n", rs->dependency->name);
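/* Worked example of the AR[UNAT] special case above: the UNAT bit used by
   ld8.fill/st8.spill is (address >> 3) & 0x3f, so (assuming both addresses
   are known relative to the same base) accesses at 0x1000 and 0x1008 touch
   bits 0 and 1 and do not conflict, while 0x1000 and 0x1200 both touch
   bit 0 and are reported as a potential dependency violation.  */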
8813 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
8814 insert a stop to create the break. Update all resource dependencies
8815 appropriately. If QP_REGNO is non-zero, only apply the break to resources
8816 which use the same QP_REGNO and have the link_to_qp_branch flag set.
8817 If SAVE_CURRENT is non-zero, don't affect resources marked by the current instruction. */
8821 insn_group_break (insert_stop, qp_regno, save_current)
8828 if (insert_stop && md.num_slots_in_use > 0)
8829 PREV_SLOT.end_of_insn_group = 1;
8833 fprintf (stderr, " Insn group break%s",
8834 (insert_stop ? " (w/stop)" : ""));
8836 fprintf (stderr, " effective for QP=%d", qp_regno);
8837 fprintf (stderr, "\n");
8841 while (i < regdepslen)
8843 const struct ia64_dependency *dep = regdeps[i].dependency;
8846 && regdeps[i].qp_regno != qp_regno)
8853 && CURR_SLOT.src_file == regdeps[i].file
8854 && CURR_SLOT.src_line == regdeps[i].line)
8860 /* clear dependencies which are automatically cleared by a stop, or
8861 those that have reached the appropriate state of insn serialization */
8862 if (dep->semantics == IA64_DVS_IMPLIED
8863 || dep->semantics == IA64_DVS_IMPLIEDF
8864 || regdeps[i].insn_srlz == STATE_SRLZ)
8866 print_dependency ("Removing", i);
8867 regdeps[i] = regdeps[--regdepslen];
8871 if (dep->semantics == IA64_DVS_DATA
8872 || dep->semantics == IA64_DVS_INSTR
8873 || dep->semantics == IA64_DVS_SPECIFIC)
8875 if (regdeps[i].insn_srlz == STATE_NONE)
8876 regdeps[i].insn_srlz = STATE_STOP;
8877 if (regdeps[i].data_srlz == STATE_NONE)
8878 regdeps[i].data_srlz = STATE_STOP;
8885 /* Add the given resource usage spec to the list of active dependencies. */
8888 mark_resource (idesc, dep, spec, depind, path)
8889 struct ia64_opcode *idesc ATTRIBUTE_UNUSED;
8890 const struct ia64_dependency *dep ATTRIBUTE_UNUSED;
8895 if (regdepslen == regdepstotlen)
8897 regdepstotlen += 20;
8898 regdeps = (struct rsrc *)
8899 xrealloc ((void *) regdeps,
8900 regdepstotlen * sizeof (struct rsrc));
8903 regdeps[regdepslen] = *spec;
8904 regdeps[regdepslen].depind = depind;
8905 regdeps[regdepslen].path = path;
8906 regdeps[regdepslen].file = CURR_SLOT.src_file;
8907 regdeps[regdepslen].line = CURR_SLOT.src_line;
8909 print_dependency ("Adding", regdepslen);
8915 print_dependency (action, depind)
8921 fprintf (stderr, " %s %s '%s'",
8922 action, dv_mode[(regdeps[depind].dependency)->mode],
8923 (regdeps[depind].dependency)->name);
8924 if (regdeps[depind].specific && regdeps[depind].index != 0)
8925 fprintf (stderr, " (%d)", regdeps[depind].index);
8926 if (regdeps[depind].mem_offset.hint)
8928 fputs (" ", stderr);
8929 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
8930 fputs ("+", stderr);
8931 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
8933 fprintf (stderr, "\n");
8938 instruction_serialization ()
8942 fprintf (stderr, " Instruction serialization\n");
8943 for (i = 0; i < regdepslen; i++)
8944 if (regdeps[i].insn_srlz == STATE_STOP)
8945 regdeps[i].insn_srlz = STATE_SRLZ;
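/* A dependency thus becomes instruction-serialized in two steps: the stop
   emitted by insn_group_break () moves it from STATE_NONE to STATE_STOP,
   and a subsequent srlz.i (handled here) moves it to STATE_SRLZ, after
   which the next group break removes it entirely.  */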
8949 data_serialization ()
8953 fprintf (stderr, " Data serialization\n");
8954 while (i < regdepslen)
8956 if (regdeps[i].data_srlz == STATE_STOP
8957 /* Note: as of 991210, all "other" dependencies are cleared by a
8958 data serialization. This might change with new tables */
8959 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
8961 print_dependency ("Removing", i);
8962 regdeps[i] = regdeps[--regdepslen];
8969 /* Insert stops and serializations as needed to avoid DVs. */
8972 remove_marked_resource (rs)
8975 switch (rs->dependency->semantics)
8977 case IA64_DVS_SPECIFIC:
8979 fprintf (stderr, "Implementation-specific, assume worst case...\n");
8980 /* ...fall through... */
8981 case IA64_DVS_INSTR:
8983 fprintf (stderr, "Inserting instr serialization\n");
8984 if (rs->insn_srlz < STATE_STOP)
8985 insn_group_break (1, 0, 0);
8986 if (rs->insn_srlz < STATE_SRLZ)
8988 int oldqp = CURR_SLOT.qp_regno;
8989 struct ia64_opcode *oldidesc = CURR_SLOT.idesc;
8990 /* Manually jam a srlz.i insn into the stream */
8991 CURR_SLOT.qp_regno = 0;
8992 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
8993 instruction_serialization ();
8994 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
8995 if (++md.num_slots_in_use >= NUM_SLOTS)
8997 CURR_SLOT.qp_regno = oldqp;
8998 CURR_SLOT.idesc = oldidesc;
9000 insn_group_break (1, 0, 0);
9002 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
9003 "other" types of DV are eliminated
9004 by a data serialization */
9007 fprintf (stderr, "Inserting data serialization\n");
9008 if (rs->data_srlz < STATE_STOP)
9009 insn_group_break (1, 0, 0);
9011 int oldqp = CURR_SLOT.qp_regno;
9012 struct ia64_opcode *oldidesc = CURR_SLOT.idesc;
9013 /* Manually jam a srlz.d insn into the stream */
9014 CURR_SLOT.qp_regno = 0;
9015 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
9016 data_serialization ();
9017 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9018 if (++md.num_slots_in_use >= NUM_SLOTS)
9020 CURR_SLOT.qp_regno = oldqp;
9021 CURR_SLOT.idesc = oldidesc;
9024 case IA64_DVS_IMPLIED:
9025 case IA64_DVS_IMPLIEDF:
9027 fprintf (stderr, "Inserting stop\n");
9028 insn_group_break (1, 0, 0);
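/* For example, in automatic mode a violation on a resource with
   IA64_DVS_INSTR semantics is resolved by materializing roughly

	;;
	srlz.i
	;;

   between the writer and the reader, while IA64_DVS_IMPLIED and
   IA64_DVS_IMPLIEDF only need the stop.  */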
9035 /* Check the resources used by the given opcode against the current dependency
9038 The check is run once for each execution path encountered. In this case,
9039 a unique execution path is the sequence of instructions following a code
9040 entry point, e.g. the following has three execution paths, one starting
9041 at L0, one at L1, and one at L2.
9050 check_dependencies (idesc)
9051 struct ia64_opcode *idesc;
9053 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
9057 /* Note that the number of marked resources may change within the
9058 loop if in auto mode. */
9060 while (i < regdepslen)
9062 struct rsrc *rs = &regdeps[i];
9063 const struct ia64_dependency *dep = rs->dependency;
9068 if (dep->semantics == IA64_DVS_NONE
9069 || (chkind = depends_on (rs->depind, idesc)) == -1)
9075 note = NOTE (opdeps->chks[chkind]);
9077 /* Check this resource against each execution path seen thus far. */
9078 for (path = 0; path <= md.path; path++)
9082 /* If the dependency wasn't on the path being checked, ignore it. */
9083 if (rs->path < path)
9086 /* If the QP for this insn implies a QP which has branched, don't
9087 bother checking. Ed. NOTE: I don't think this check is terribly
9088 useful; what's the point of generating code which will only be
9089 reached if its QP is zero?
9090 This code was specifically inserted to handle the following code,
9091 based on notes from Intel's DV checking code, where p1 implies p2.
9097 if (CURR_SLOT.qp_regno != 0)
9101 for (implies = 0; implies < qp_implieslen; implies++)
9103 if (qp_implies[implies].path >= path
9104 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
9105 && qp_implies[implies].p2_branched)
9115 if ((matchtype = resources_match (rs, idesc, note,
9116 CURR_SLOT.qp_regno, path)) != 0)
9119 char pathmsg[256] = "";
9120 char indexmsg[256] = "";
9121 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
9124 sprintf (pathmsg, " when entry is at label '%s'",
9125 md.entry_labels[path - 1]);
9126 if (rs->specific && rs->index != 0)
9127 sprintf (indexmsg, ", specific resource number is %d",
9129 sprintf (msg, "Use of '%s' %s %s dependency '%s' (%s)%s%s",
9131 (certain ? "violates" : "may violate"),
9132 dv_mode[dep->mode], dep->name,
9133 dv_sem[dep->semantics],
9136 if (md.explicit_mode)
9138 as_warn ("%s", msg);
9140 as_warn (_("Only the first path encountering the conflict "
9142 as_warn_where (rs->file, rs->line,
9143 _("This is the location of the "
9144 "conflicting usage"));
9145 /* Don't bother checking other paths, to avoid duplicating messages. */
9152 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
9154 remove_marked_resource (rs);
9156 /* since the set of dependencies has changed, start over */
9157 /* FIXME -- since we're removing dvs as we go, we
9158 probably don't really need to start over... */
9171 /* Register new dependencies based on the given opcode. */
9174 mark_resources (idesc)
9175 struct ia64_opcode *idesc;
9178 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
9179 int add_only_qp_reads = 0;
9181 /* A conditional branch only uses its resources if it is taken; if it is
9182 taken, we stop following that path. The other branch types effectively
9183 *always* write their resources. If it's not taken, register only QP
9185 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
9187 add_only_qp_reads = 1;
9191 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
9193 for (i = 0; i < opdeps->nregs; i++)
9195 const struct ia64_dependency *dep;
9196 struct rsrc specs[MAX_SPECS];
9201 dep = ia64_find_dependency (opdeps->regs[i]);
9202 note = NOTE (opdeps->regs[i]);
9204 if (add_only_qp_reads
9205 && !(dep->mode == IA64_DV_WAR
9206 && (dep->specifier == IA64_RS_PR
9207 || dep->specifier == IA64_RS_PRr
9208 || dep->specifier == IA64_RS_PR63)))
9211 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
9214 if (md.debug_dv && !count)
9215 fprintf (stderr, " No %s %s usage found (path %d)\n",
9216 dv_mode[dep->mode], dep->name, md.path);
9221 mark_resource (idesc, dep, &specs[count],
9222 DEP (opdeps->regs[i]), md.path);
9225 /* The execution path may affect register values, which may in turn
9226 affect which indirect-access resources are accessed. */
9227 switch (dep->specifier)
9239 for (path = 0; path < md.path; path++)
9241 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
9243 mark_resource (idesc, dep, &specs[count],
9244 DEP (opdeps->regs[i]), path);
9251 /* Remove dependencies when they no longer apply. */
9254 update_dependencies (idesc)
9255 struct ia64_opcode *idesc;
9259 if (strcmp (idesc->name, "srlz.i") == 0)
9261 instruction_serialization ();
9263 else if (strcmp (idesc->name, "srlz.d") == 0)
9265 data_serialization ();
9267 else if (is_interruption_or_rfi (idesc)
9268 || is_taken_branch (idesc))
9270 /* Although technically the taken branch doesn't clear dependencies
9271 which require a srlz.[id], we don't follow the branch; the next
9272 instruction is assumed to start with a clean slate. */
9276 else if (is_conditional_branch (idesc)
9277 && CURR_SLOT.qp_regno != 0)
9279 int is_call = strstr (idesc->name, ".call") != NULL;
9281 for (i = 0; i < qp_implieslen; i++)
9283 /* If the conditional branch's predicate is implied by the predicate
9284 in an existing dependency, remove that dependency. */
9285 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
9288 /* Note that this implied predicate takes a branch so that if
9289 a later insn generates a DV but its predicate implies this
9290 one, we can avoid the false DV warning. */
9291 qp_implies[i].p2_branched = 1;
9292 while (depind < regdepslen)
9294 if (regdeps[depind].qp_regno == qp_implies[i].p1)
9296 print_dependency ("Removing", depind);
9297 regdeps[depind] = regdeps[--regdepslen];
9304 /* Any marked resources which have this same predicate should be
9305 cleared, provided that the QP hasn't been modified between the
9306 marking instruction and the branch. */
9309 insn_group_break (0, CURR_SLOT.qp_regno, 1);
9314 while (i < regdepslen)
9316 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
9317 && regdeps[i].link_to_qp_branch
9318 && (regdeps[i].file != CURR_SLOT.src_file
9319 || regdeps[i].line != CURR_SLOT.src_line))
9321 /* Treat like a taken branch */
9322 print_dependency ("Removing", i);
9323 regdeps[i] = regdeps[--regdepslen];
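/* Rationale for the predicated-branch case above: if "(p6) br.cond" is
   taken, the code after it is not reached on this path; if it falls
   through, p6 was false, so a resource marked under p6 (whose predicate
   has not changed since the marking insn) cannot actually have been
   written.  Either way the marked dependency can be dropped.  */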
9332 /* Examine the current instruction for dependency violations. */
9336 struct ia64_opcode *idesc;
9340 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
9341 idesc->name, CURR_SLOT.src_line,
9342 idesc->dependencies->nchks,
9343 idesc->dependencies->nregs);
9346 /* Look through the list of currently marked resources; if the current
9347 instruction has the dependency in its chks list which uses that resource,
9348 check against the specific resources used. */
9349 check_dependencies (idesc);
9351 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
9352 then add them to the list of marked resources. */
9353 mark_resources (idesc);
9355 /* There are several types of dependency semantics, and each has its own
9356 requirements for being cleared
9358 Instruction serialization (insns separated by interruption, rfi, or
9359 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
9361 Data serialization (instruction serialization, or writer + srlz.d +
9362 reader, where writer and srlz.d are in separate groups) clears
9363 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
9364 always be the case).
9366 Instruction group break (groups separated by stop, taken branch,
9367 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
9369 update_dependencies (idesc);
9371 /* Sometimes, knowing a register value allows us to avoid giving a false DV
9372 warning. Keep track of as many as possible that are useful. */
9373 note_register_values (idesc);
9375 /* We don't need or want this anymore. */
9376 md.mem_offset.hint = 0;
9381 /* Translate one line of assembly. Pseudo ops and labels do not show up here. */
9387 char *saved_input_line_pointer, *mnemonic;
9388 const struct pseudo_opcode *pdesc;
9389 struct ia64_opcode *idesc;
9390 unsigned char qp_regno;
9394 saved_input_line_pointer = input_line_pointer;
9395 input_line_pointer = str;
9397 /* extract the opcode (mnemonic): */
9399 mnemonic = input_line_pointer;
9400 ch = get_symbol_end ();
9401 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic);
9404 *input_line_pointer = ch;
9405 (*pdesc->handler) (pdesc->arg);
9409 /* Find the instruction descriptor matching the arguments. */
9411 idesc = ia64_find_opcode (mnemonic);
9412 *input_line_pointer = ch;
9415 as_bad ("Unknown opcode `%s'", mnemonic);
9419 idesc = parse_operands (idesc);
9423 /* Handle the dynamic ops we can handle now: */
9424 if (idesc->type == IA64_TYPE_DYN)
9426 if (strcmp (idesc->name, "add") == 0)
9428 if (CURR_SLOT.opnd[2].X_op == O_register
9429 && CURR_SLOT.opnd[2].X_add_number < 4)
9433 ia64_free_opcode (idesc);
9434 idesc = ia64_find_opcode (mnemonic);
9436 know (!idesc->next);
9439 else if (strcmp (idesc->name, "mov") == 0)
9441 enum ia64_opnd opnd1, opnd2;
9444 opnd1 = idesc->operands[0];
9445 opnd2 = idesc->operands[1];
9446 if (opnd1 == IA64_OPND_AR3)
9448 else if (opnd2 == IA64_OPND_AR3)
9452 if (CURR_SLOT.opnd[rop].X_op == O_register
9453 && ar_is_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
9457 ia64_free_opcode (idesc);
9458 idesc = ia64_find_opcode (mnemonic);
9459 while (idesc != NULL
9460 && (idesc->operands[0] != opnd1
9461 || idesc->operands[1] != opnd2))
9462 idesc = get_next_opcode (idesc);
9467 if (md.qp.X_op == O_register)
9469 qp_regno = md.qp.X_add_number - REG_P;
9470 md.qp.X_op = O_absent;
9473 flags = idesc->flags;
9475 if ((flags & IA64_OPCODE_FIRST) != 0)
9476 insn_group_break (1, 0, 0);
9478 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
9480 as_bad ("`%s' cannot be predicated", idesc->name);
9484 /* Build the instruction. */
9485 CURR_SLOT.qp_regno = qp_regno;
9486 CURR_SLOT.idesc = idesc;
9487 as_where (&CURR_SLOT.src_file, &CURR_SLOT.src_line);
9488 dwarf2_where (&CURR_SLOT.debug_line);
9490 /* Add unwind entry, if there is one. */
9491 if (unwind.current_entry)
9493 CURR_SLOT.unwind_record = unwind.current_entry;
9494 unwind.current_entry = NULL;
9497 /* Check for dependency violations. */
9501 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9502 if (++md.num_slots_in_use >= NUM_SLOTS)
9505 if ((flags & IA64_OPCODE_LAST) != 0)
9506 insn_group_break (1, 0, 0);
9508 md.last_text_seg = now_seg;
9511 input_line_pointer = saved_input_line_pointer;
9514 /* Called when symbol NAME cannot be found in the symbol table.
9515 Should be used for dynamic valued symbols only. */
9518 md_undefined_symbol (name)
9519 char *name ATTRIBUTE_UNUSED;
9524 /* Called for any expression that cannot be recognized. When the
9525 function is called, `input_line_pointer' will point to the start of the expression. */
9532 enum pseudo_type pseudo_type;
9537 switch (*input_line_pointer)
9540 /* Find what relocation pseudo-function we're dealing with. */
9542 ch = *++input_line_pointer;
9543 for (i = 0; i < NELEMS (pseudo_func); ++i)
9544 if (pseudo_func[i].name && pseudo_func[i].name[0] == ch)
9546 len = strlen (pseudo_func[i].name);
9547 if (strncmp (pseudo_func[i].name + 1,
9548 input_line_pointer + 1, len - 1) == 0
9549 && !is_part_of_name (input_line_pointer[len]))
9551 input_line_pointer += len;
9552 pseudo_type = pseudo_func[i].type;
9556 switch (pseudo_type)
9558 case PSEUDO_FUNC_RELOC:
9560 if (*input_line_pointer != '(')
9562 as_bad ("Expected '('");
9566 ++input_line_pointer;
9568 if (*input_line_pointer++ != ')')
9570 as_bad ("Missing ')'");
9573 if (e->X_op != O_symbol)
9575 if (e->X_op != O_pseudo_fixup)
9577 as_bad ("Not a symbolic expression");
9580 if (S_GET_VALUE (e->X_op_symbol) == FUNC_FPTR_RELATIVE
9581 && i == FUNC_LT_RELATIVE)
9582 i = FUNC_LT_FPTR_RELATIVE;
9585 as_bad ("Illegal combination of relocation functions");
9589 /* Make sure gas doesn't get rid of local symbols that are used in relocs. */
9591 e->X_op = O_pseudo_fixup;
9592 e->X_op_symbol = pseudo_func[i].u.sym;
9595 case PSEUDO_FUNC_CONST:
9596 e->X_op = O_constant;
9597 e->X_add_number = pseudo_func[i].u.ival;
9600 case PSEUDO_FUNC_REG:
9601 e->X_op = O_register;
9602 e->X_add_number = pseudo_func[i].u.ival;
9606 name = input_line_pointer - 1;
9608 as_bad ("Unknown pseudo function `%s'", name);
9614 ++input_line_pointer;
9616 if (*input_line_pointer != ']')
9618 as_bad ("Closing bracket missing");
9623 if (e->X_op != O_register)
9624 as_bad ("Register expected as index");
9626 ++input_line_pointer;
9637 ignore_rest_of_line ();
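/* The '@' case above is what makes operands such as
   "addl r2 = @ltoff(sym), gp" or "data8 @fptr(func)" parse: the
   pseudo-function name selects an entry in pseudo_func[], and a
   relocation function tags the expression with O_pseudo_fixup so that
   ia64_gen_real_reloc_type () can pick the real relocation later.  */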
9640 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
9641 a section symbol plus some offset. For relocs involving @fptr()
9642 directives, we don't want such adjustments since we need to have the
9643 original symbol's name in the reloc. */
9645 ia64_fix_adjustable (fix)
9648 /* Prevent all adjustments to global symbols */
9649 if (S_IS_EXTERN (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
9652 switch (fix->fx_r_type)
9654 case BFD_RELOC_IA64_FPTR64I:
9655 case BFD_RELOC_IA64_FPTR32MSB:
9656 case BFD_RELOC_IA64_FPTR32LSB:
9657 case BFD_RELOC_IA64_FPTR64MSB:
9658 case BFD_RELOC_IA64_FPTR64LSB:
9659 case BFD_RELOC_IA64_LTOFF_FPTR22:
9660 case BFD_RELOC_IA64_LTOFF_FPTR64I:
9670 ia64_force_relocation (fix)
9673 switch (fix->fx_r_type)
9675 case BFD_RELOC_IA64_FPTR64I:
9676 case BFD_RELOC_IA64_FPTR32MSB:
9677 case BFD_RELOC_IA64_FPTR32LSB:
9678 case BFD_RELOC_IA64_FPTR64MSB:
9679 case BFD_RELOC_IA64_FPTR64LSB:
9681 case BFD_RELOC_IA64_LTOFF22:
9682 case BFD_RELOC_IA64_LTOFF64I:
9683 case BFD_RELOC_IA64_LTOFF_FPTR22:
9684 case BFD_RELOC_IA64_LTOFF_FPTR64I:
9685 case BFD_RELOC_IA64_PLTOFF22:
9686 case BFD_RELOC_IA64_PLTOFF64I:
9687 case BFD_RELOC_IA64_PLTOFF64MSB:
9688 case BFD_RELOC_IA64_PLTOFF64LSB:
9697 /* Decide from what point a pc-relative relocation is relative to,
9698 relative to the pc-relative fixup. Er, relatively speaking. */
9700 ia64_pcrel_from_section (fix, sec)
9704 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
9706 if (bfd_get_section_flags (stdoutput, sec) & SEC_CODE)
9712 /* This is called whenever some data item (not an instruction) needs a
9713 fixup. We pick the right reloc code depending on the byteorder
9714 currently in effect. */
9716 ia64_cons_fix_new (f, where, nbytes, exp)
9722 bfd_reloc_code_real_type code;
9727 /* There are no relocs for 8 and 16 bit quantities, but we allow
9728 them here since they will work fine as long as the expression
9729 is fully defined at the end of the pass over the source file. */
9730 case 1: code = BFD_RELOC_8; break;
9731 case 2: code = BFD_RELOC_16; break;
9733 if (target_big_endian)
9734 code = BFD_RELOC_IA64_DIR32MSB;
9736 code = BFD_RELOC_IA64_DIR32LSB;
9740 if (target_big_endian)
9741 code = BFD_RELOC_IA64_DIR64MSB;
9743 code = BFD_RELOC_IA64_DIR64LSB;
9747 as_bad ("Unsupported fixup size %d", nbytes);
9748 ignore_rest_of_line ();
9751 if (exp->X_op == O_pseudo_fixup)
9754 exp->X_op = O_symbol;
9755 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
9757 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
9758 /* We need to store the byte order in effect in case we're going
9759 to fix an 8 or 16 bit relocation (for which there are no real
9760 relocs available). See md_apply_fix(). */
9761 fix->tc_fix_data.bigendian = target_big_endian;
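/* So, for instance, "data8 @fptr(foo)" on a little-endian target first
   gets BFD_RELOC_IA64_DIR64LSB here and is then rewritten by
   ia64_gen_real_reloc_type () into BFD_RELOC_IA64_FPTR64LSB.  */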
9764 /* Return the actual relocation we wish to associate with the pseudo
9765 reloc described by SYM and R_TYPE. SYM should be one of the
9766 symbols in the pseudo_func array, or NULL. */
9768 static bfd_reloc_code_real_type
9769 ia64_gen_real_reloc_type (sym, r_type)
9771 bfd_reloc_code_real_type r_type;
9773 bfd_reloc_code_real_type new = 0;
9780 switch (S_GET_VALUE (sym))
9782 case FUNC_FPTR_RELATIVE:
9785 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_FPTR64I; break;
9786 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_FPTR32MSB; break;
9787 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_FPTR32LSB; break;
9788 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_FPTR64MSB; break;
9789 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_FPTR64LSB; break;
9794 case FUNC_GP_RELATIVE:
9797 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_GPREL22; break;
9798 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_GPREL64I; break;
9799 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_GPREL32MSB; break;
9800 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_GPREL32LSB; break;
9801 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_GPREL64MSB; break;
9802 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_GPREL64LSB; break;
9807 case FUNC_LT_RELATIVE:
9810 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22; break;
9811 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_LTOFF64I; break;
9816 case FUNC_PC_RELATIVE:
9819 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PCREL22; break;
9820 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PCREL64I; break;
9821 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_PCREL32MSB; break;
9822 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_PCREL32LSB; break;
9823 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PCREL64MSB; break;
9824 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PCREL64LSB; break;
9829 case FUNC_PLT_RELATIVE:
9832 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PLTOFF22; break;
9833 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PLTOFF64I; break;
9834 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PLTOFF64MSB;break;
9835 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PLTOFF64LSB;break;
9840 case FUNC_SEC_RELATIVE:
9843 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SECREL32MSB;break;
9844 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SECREL32LSB;break;
9845 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SECREL64MSB;break;
9846 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SECREL64LSB;break;
9851 case FUNC_SEG_RELATIVE:
9854 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SEGREL32MSB;break;
9855 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SEGREL32LSB;break;
9856 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SEGREL64MSB;break;
9857 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SEGREL64LSB;break;
9862 case FUNC_LTV_RELATIVE:
9865 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_LTV32MSB; break;
9866 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_LTV32LSB; break;
9867 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_LTV64MSB; break;
9868 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_LTV64LSB; break;
9873 case FUNC_LT_FPTR_RELATIVE:
9876 case BFD_RELOC_IA64_IMM22:
9877 new = BFD_RELOC_IA64_LTOFF_FPTR22; break;
9878 case BFD_RELOC_IA64_IMM64:
9879 new = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
9887 /* Hmmmm. Should this ever occur? */
9894 /* Here is where we generate the appropriate reloc for pseudo relocation functions. */
9897 ia64_validate_fix (fix)
9900 switch (fix->fx_r_type)
9902 case BFD_RELOC_IA64_FPTR64I:
9903 case BFD_RELOC_IA64_FPTR32MSB:
9904 case BFD_RELOC_IA64_FPTR64LSB:
9905 case BFD_RELOC_IA64_LTOFF_FPTR22:
9906 case BFD_RELOC_IA64_LTOFF_FPTR64I:
9907 if (fix->fx_offset != 0)
9908 as_bad_where (fix->fx_file, fix->fx_line,
9909 "No addend allowed in @fptr() relocation");
9919 fix_insn (fix, odesc, value)
9921 const struct ia64_operand *odesc;
9924 bfd_vma insn[3], t0, t1, control_bits;
9929 slot = fix->fx_where & 0x3;
9930 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
9932 /* Bundles are always in little-endian byte order */
9933 t0 = bfd_getl64 (fixpos);
9934 t1 = bfd_getl64 (fixpos + 8);
9935 control_bits = t0 & 0x1f;
9936 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
9937 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
9938 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
9941 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
9943 insn[1] = (value >> 22) & 0x1ffffffffffLL;
9944 insn[2] |= (((value & 0x7f) << 13)
9945 | (((value >> 7) & 0x1ff) << 27)
9946 | (((value >> 16) & 0x1f) << 22)
9947 | (((value >> 21) & 0x1) << 21)
9948 | (((value >> 63) & 0x1) << 36));
9950 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
9952 if (value & ~0x3fffffffffffffffULL)
9953 err = "integer operand out of range";
9954 insn[1] = (value >> 21) & 0x1ffffffffffLL;
9955 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
9957 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
9960 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
9961 insn[2] |= ((((value >> 59) & 0x1) << 36)
9962 | (((value >> 0) & 0xfffff) << 13));
9965 err = (*odesc->insert) (odesc, value, insn + slot);
9968 as_bad_where (fix->fx_file, fix->fx_line, err);
9970 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
9971 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
9972 number_to_chars_littleendian (fixpos + 0, t0, 8);
9973 number_to_chars_littleendian (fixpos + 8, t1, 8);
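/* Layout assumed by fix_insn () above: a 128-bit bundle is stored as two
   little-endian 64-bit words t0/t1; bits 0-4 of t0 are the template,
   slot 0 occupies t0 bits 5-45, slot 1 straddles the words (t0 bits 46-63
   plus t1 bits 0-22), and slot 2 is t1 bits 23-63.  The shifts above
   extract the 41-bit slots, patch one of them, and repack the bundle.  */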
9976 /* Attempt to simplify or even eliminate a fixup. The return value is
9977 ignored; perhaps it was once meaningful, but now it is historical.
9978 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
9980 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry for it. */
9983 md_apply_fix3 (fix, valuep, seg)
9986 segT seg ATTRIBUTE_UNUSED;
9989 valueT value = *valuep;
9992 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
9996 switch (fix->fx_r_type)
9998 case BFD_RELOC_IA64_DIR32MSB:
9999 fix->fx_r_type = BFD_RELOC_IA64_PCREL32MSB;
10003 case BFD_RELOC_IA64_DIR32LSB:
10004 fix->fx_r_type = BFD_RELOC_IA64_PCREL32LSB;
10008 case BFD_RELOC_IA64_DIR64MSB:
10009 fix->fx_r_type = BFD_RELOC_IA64_PCREL64MSB;
10013 case BFD_RELOC_IA64_DIR64LSB:
10014 fix->fx_r_type = BFD_RELOC_IA64_PCREL64LSB;
10024 if (fix->fx_r_type == (int) BFD_RELOC_UNUSED)
10026 /* This must be a TAG13 or TAG13b operand. There are no external
10027 relocs defined for them, so we must give an error. */
10028 as_bad_where (fix->fx_file, fix->fx_line,
10029 "%s must have a constant value",
10030 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
10035 /* ??? This is a hack copied from tc-i386.c to make PCREL relocs
10036 work. There should be a better way to handle this. */
10038 fix->fx_offset += fix->fx_where + fix->fx_frag->fr_address;
10040 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
10042 if (fix->tc_fix_data.bigendian)
10043 number_to_chars_bigendian (fixpos, value, fix->fx_size);
10045 number_to_chars_littleendian (fixpos, value, fix->fx_size);
10051 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
10058 /* Generate the BFD reloc to be stuck in the object file from the
10059 fixup used internally in the assembler. */
10062 tc_gen_reloc (sec, fixp)
10063 asection *sec ATTRIBUTE_UNUSED;
10068 reloc = xmalloc (sizeof (*reloc));
10069 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
10070 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
10071 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
10072 reloc->addend = fixp->fx_offset;
10073 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
10077 as_bad_where (fixp->fx_file, fixp->fx_line,
10078 "Cannot represent %s relocation in object file",
10079 bfd_get_reloc_code_name (fixp->fx_r_type));
10084 /* Turn a string in input_line_pointer into a floating point constant
10085 of type TYPE, and store the appropriate bytes in *LIT. The number
10086 of LITTLENUMS emitted is stored in *SIZE. An error message is
10087 returned, or NULL on OK. */
10089 #define MAX_LITTLENUMS 5
10092 md_atof (type, lit, size)
10097 LITTLENUM_TYPE words[MAX_LITTLENUMS];
10098 LITTLENUM_TYPE *word;
10128 return "Bad call to MD_ATOF()";
10130 t = atof_ieee (input_line_pointer, type, words);
10132 input_line_pointer = t;
10133 *size = prec * sizeof (LITTLENUM_TYPE);
10135 for (word = words + prec - 1; prec--;)
10137 md_number_to_chars (lit, (long) (*word--), sizeof (LITTLENUM_TYPE));
10138 lit += sizeof (LITTLENUM_TYPE);
10143 /* Round up a section's size to the appropriate boundary. */
10145 md_section_align (seg, size)
10149 int align = bfd_get_section_alignment (stdoutput, seg);
10150 valueT mask = ((valueT) 1 << align) - 1;
10152 return (size + mask) & ~mask;
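/* E.g., for a section with 16-byte alignment (align == 4) the mask is
   0xf, so a size of 0x1001 is rounded up to 0x1010.  */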
10155 /* Handle ia64 specific semantics of the align directive. */
10158 ia64_md_do_align (n, fill, len, max)
10159 int n ATTRIBUTE_UNUSED;
10160 const char *fill ATTRIBUTE_UNUSED;
10161 int len ATTRIBUTE_UNUSED;
10162 int max ATTRIBUTE_UNUSED;
10164 if (subseg_text_p (now_seg))
10165 ia64_flush_insns ();
10168 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
10169 of an rs_align_code fragment. */
10172 ia64_handle_align (fragp)
10175 /* Use mfi bundle of nops with no stop bits. */
10176 static const unsigned char be_nop[]
10177 = { 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
10178 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0c};
10179 static const unsigned char le_nop[]
10180 = { 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
10181 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00};
10186 if (fragp->fr_type != rs_align_code)
10189 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
10190 p = fragp->fr_literal + fragp->fr_fix;
10192 /* Make sure we are on a 16-byte boundary, in case someone has been
10193 putting data into a text section. */
10196 int fix = bytes & 15;
10197 memset (p, 0, fix);
10200 fragp->fr_fix += fix;
10203 memcpy (p, (target_big_endian ? be_nop : le_nop), 16);
10204 fragp->fr_var = 16;
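/* The nop bundles above encode template 0x0c (MFI, no stops) with nop.m,
   nop.f and nop.i in the three slots; the two byte arrays are simply the
   big- and little-endian images of that one bundle.  */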