1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
/* NOTE(review): embedded numbering jumps 51->54 and 55->58 — the two
   `#endif` lines closing the `#if (GCC_VERSION >= 3000)` and
   `#ifndef INCOMING_RETURN_ADDR_RTX` blocks were lost in extraction;
   restore from upstream dwarf2cfi.c before compiling.  */
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
/* Fallback: targets lacking INCOMING_RETURN_ADDR_RTX must never reach
   code that evaluates it, hence the gcc_unreachable ().  */
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
58 /* Maximum size (in bytes) of an artificially generated label. */
59 #define MAX_ARTIFICIAL_LABEL_BYTES 30
/* NOTE(review): several member declarations of dw_cfi_row_struct (the
   CFA members around embedded lines 67-69, the reg_save vector at 71-72,
   the closing brace/typedef name, and the cie_cfi_vec declaration near 79)
   are missing — numbering jumps 66->70, 70->73, 74->78.  Restore from
   upstream before compiling.  */
61 /* A collected description of an entire row of the abstract CFI table. */
62 typedef struct GTY(()) dw_cfi_row_struct
64 /* The expression that computes the CFA, expressed in two different ways.
65 The CFA member for the simple cases, and the full CFI expression for
66 the complex cases. The later will be a DW_CFA_cfa_expression. */
70 /* The expressions for any register column that is saved. */
73 /* The value of any DW_CFA_GNU_args_size. */
74 HOST_WIDE_INT args_size;
78 /* A vector of call frame insns for the CIE. */
81 /* The state of the first row of the FDE table, which includes the
82 state provided by the CIE. */
83 static GTY(()) dw_cfi_row *cie_cfi_row;
/* Counter used by dwarf2out_cfi_label to generate unique LCFI labels.  */
85 static GTY(()) unsigned long dwarf2out_cfi_label_num;
87 /* The insn after which a new CFI note should be emitted. */
88 static rtx add_cfi_insn;
90 /* When non-null, add_cfi will add the CFI to this vector. */
91 static cfi_vec *add_cfi_vec;
93 /* True if remember_state should be emitted before following CFI directive. */
94 static bool emit_cfa_remember;
96 /* True if any CFI directives were emitted at the current insn. */
97 static bool any_cfis_emitted;
99 /* Short-hand for commonly used register numbers. */
100 static unsigned dw_stack_pointer_regnum;
101 static unsigned dw_frame_pointer_regnum;
/* Forward declarations for the epilogue remember/restore machinery.  */
104 static void dwarf2out_cfi_begin_epilogue (rtx insn);
105 static void dwarf2out_frame_debug_restore_state (void);
/* NOTE(review): return-type lines, braces, and the blank separators of both
   functions below are missing (numbering jumps 111->113, 121->123).
   Restore from upstream before compiling.  */
108 /* Hook used by __throw. */
/* Returns the DWARF column number of the stack pointer, as an rtx
   constant, for use by the unwinder runtime.  */
111 expand_builtin_dwarf_sp_column (void)
113 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
114 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
117 /* MEM is a memory reference for the register size table, each element of
118 which has mode MODE. Initialize column C as a return address column. */
121 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
/* The return column holds an address, so its size is that of Pmode.  */
123 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
124 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
125 emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
/* NOTE(review): this function body has lost its return type, braces, the
   declaration of loop index I and SIZE, and several control-flow lines
   (numbering jumps 131->134, 152->154, 158->162, 171->174, including a
   `#endif` for DWARF_ALT_FRAME_RETURN_COLUMN).  Restore from upstream
   before compiling.  */
128 /* Generate code to initialize the register size table. */
/* ADDRESS is a tree expression for the base of the size table; one byte-mode
   element per DWARF column is written with the save size of that register.  */
131 expand_builtin_init_dwarf_reg_sizes (tree address)
134 enum machine_mode mode = TYPE_MODE (char_type_node);
135 rtx addr = expand_normal (address);
136 rtx mem = gen_rtx_MEM (BLKmode, addr);
137 bool wrote_return_column = false;
139 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
141 unsigned int dnum = DWARF_FRAME_REGNUM (i);
142 unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
144 if (rnum < DWARF_FRAME_REGISTERS)
146 HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
147 enum machine_mode save_mode = reg_raw_mode[i];
/* A partially call-clobbered register is saved in the mode that covers
   only the call-saved part.  */
150 if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
151 save_mode = choose_hard_reg_mode (i, 1, true);
152 if (dnum == DWARF_FRAME_RETURN_COLUMN)
154 if (save_mode == VOIDmode)
156 wrote_return_column = true;
158 size = GET_MODE_SIZE (save_mode);
162 emit_move_insn (adjust_address (mem, mode, offset),
163 gen_int_mode (size, mode));
/* If no hard register maps to the return column, give it a default size.  */
167 if (!wrote_return_column)
168 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
170 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
171 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
/* Let the target record sizes for any additional columns it defines.  */
174 targetm.init_dwarf_reg_sizes_extra (address);
/* NOTE(review): braces, the `return r;` of div_data_align, and the
   `static bool` return-type line of need_data_align_sf_opcode are missing
   (numbering jumps 183->187, 188->191).  Restore from upstream.  */
177 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
179 static inline HOST_WIDE_INT
180 div_data_align (HOST_WIDE_INT off)
182 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
183 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
187 /* Return true if we need a signed version of a given opcode
188 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
/* A factored offset whose sign opposes the data alignment needs the
   _sf (signed factored) opcode form.  */
191 need_data_align_sf_opcode (HOST_WIDE_INT off)
193 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
/* NOTE(review): throughout the five small allocators/helpers below, braces,
   `return` statements, and some declarations are missing (numbering gaps
   198->201, 204->209, 209->214, 216->221, 226->229, 237->241, 251->254).
   Restore from upstream before compiling.  */
196 /* Return a pointer to a newly allocated Call Frame Instruction. */
198 static inline dw_cfi_ref
/* Allocator: both operands start zeroed as register number 0.  */
201 dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();
203 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
204 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
209 /* Return a newly allocated CFI row, with no defined data. */
/* INVALID_REGNUM marks the CFA register as not-yet-defined.  */
214 dw_cfi_row *row = ggc_alloc_cleared_dw_cfi_row ();
216 row->cfa.reg = INVALID_REGNUM;
221 /* Return a copy of an existing CFI row. */
/* Deep-copies the reg_save vector so the rows can diverge independently.  */
224 copy_cfi_row (dw_cfi_row *src)
226 dw_cfi_row *dst = ggc_alloc_dw_cfi_row ();
229 dst->reg_save = VEC_copy (dw_cfi_ref, gc, src->reg_save);
234 /* Free an allocated CFI row. */
237 free_cfi_row (dw_cfi_row *row)
241 VEC_free (dw_cfi_ref, gc, row->reg_save);
246 /* Generate a new label for the CFI info to refer to. */
/* Returns a freshly xstrdup'd "LCFI<n>" label; caller owns the string.  */
249 dwarf2out_cfi_label (void)
251 int num = dwarf2out_cfi_label_num++;
254 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
256 return xstrdup (label);
/* NOTE(review): return types, braces, and the trailing add_cfi () calls of
   add_cfi_args_size / add_cfi_restore are missing (numbering gaps 272->275,
   284->288, 293->299, 304->309).  Restore from upstream.  */
259 /* Add CFI either to the current insn stream or to a vector, or both. */
262 add_cfi (dw_cfi_ref cfi)
/* A pending DW_CFA_remember_state is flushed first, before CFI, via a
   recursive call; the flag is cleared to stop the recursion.  */
264 if (emit_cfa_remember)
266 dw_cfi_ref cfi_remember;
268 /* Emit the state save. */
269 emit_cfa_remember = false;
270 cfi_remember = new_cfi ();
271 cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
272 add_cfi (cfi_remember);
275 any_cfis_emitted = true;
/* Attach the CFI as a NOTE_INSN_CFI after the current insertion point,
   advancing the insertion point past the new note.  */
277 if (add_cfi_insn != NULL)
279 add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
280 NOTE_CFI (add_cfi_insn) = cfi;
283 if (add_cfi_vec != NULL)
284 VEC_safe_push (dw_cfi_ref, gc, *add_cfi_vec, cfi);
/* Emit a DW_CFA_GNU_args_size CFI carrying SIZE.  */
288 add_cfi_args_size (HOST_WIDE_INT size)
290 dw_cfi_ref cfi = new_cfi ();
292 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
293 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
/* Emit a restore for REG; registers above 0x3f need the extended form
   because DW_CFA_restore packs the register into the opcode byte.  */
299 add_cfi_restore (unsigned reg)
301 dw_cfi_ref cfi = new_cfi ();
303 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
304 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
/* NOTE(review): return type, braces, and blank lines missing
   (numbering gaps 310->313, 313->315).  Restore from upstream.  */
309 /* Perform ROW->REG_SAVE[COLUMN] = CFI. CFI may be null, indicating
310 that the register column is no longer saved. */
313 update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
/* Grow the vector (zero-filled) on demand so COLUMN is addressable.  */
315 if (VEC_length (dw_cfi_ref, row->reg_save) <= column)
316 VEC_safe_grow_cleared (dw_cfi_ref, gc, row->reg_save, column + 1);
317 VEC_replace (dw_cfi_ref, row->reg_save, column, cfi);
/* NOTE(review): this function is heavily truncated — the switch statement,
   most of its ~70 case labels (DW_OP_reg0..reg31, bregs, consts, etc.),
   braces, and default handling are gone (numbering gaps 334->370, 373->407,
   408->411, 412->417, 418->426).  Only representative case bodies survive.
   Restore from upstream before compiling.  */
320 /* This function fills in aa dw_cfa_location structure from a dwarf location
321 descriptor sequence. */
324 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
326 struct dw_loc_descr_struct *ptr;
328 cfa->base_offset = 0;
/* Walk the location expression, interpreting each atom into the
   register/offset/indirect fields of *CFA.  */
332 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
334 enum dwarf_location_atom op = ptr->dw_loc_opc;
/* DW_OP_reg<n>: register encoded in the opcode itself.  */
370 cfa->reg = op - DW_OP_reg0;
/* DW_OP_regx: register number in the first operand.  */
373 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
/* DW_OP_breg<n>: base register from opcode, offset from operand.  */
407 cfa->reg = op - DW_OP_breg0;
408 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
/* DW_OP_bregx: register and offset both from operands.  */
411 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
412 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
417 case DW_OP_plus_uconst:
418 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
/* NOTE(review): return type, braces, `break`s, the DW_CFA_def_cfa case
   label, the remember/restore save-and-swap statements, and the default
   case are missing (numbering gaps 428->431, 437->439, 440->443, 445->447,
   448->451, 452->454, 454->456, 457->459, 459->467).  Restore from
   upstream before compiling.  */
426 /* Find the previous value for the CFA, iteratively. CFI is the opcode
427 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
428 one level of remember/restore state processing. */
431 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
433 switch (cfi->dw_cfi_opc)
435 case DW_CFA_def_cfa_offset:
436 case DW_CFA_def_cfa_offset_sf:
437 loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
439 case DW_CFA_def_cfa_register:
440 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
443 case DW_CFA_def_cfa_sf:
444 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
445 loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
447 case DW_CFA_def_cfa_expression:
448 get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
/* Exactly one level of remember/restore nesting is supported, tracked
   by remember->in_use.  */
451 case DW_CFA_remember_state:
452 gcc_assert (!remember->in_use);
454 remember->in_use = 1;
456 case DW_CFA_restore_state:
457 gcc_assert (remember->in_use);
459 remember->in_use = 0;
/* NOTE(review): cfa_equal_p's `static bool` line, braces, and parts of the
   cfa_store comment are missing (gaps 473->475, 484->489).  Restore from
   upstream.  */
467 /* The current, i.e. most recently generated, row of the CFI table. */
468 static dw_cfi_row *cur_row;
470 /* The row state from a preceeding DW_CFA_remember_state. */
471 static dw_cfi_row *remember_row;
473 /* The register used for saving registers to the stack, and its offset
475 static dw_cfa_location cfa_store;
477 /* A temporary register holding an integral value used in adjusting SP
478 or setting up the store_reg. The "offset" field holds the integer
479 value, not an offset. */
480 static dw_cfa_location cfa_temp;
482 /* The (really) current value for DW_CFA_GNU_args_size. We delay actually
483 emitting this data, i.e. updating CUR_ROW, without async unwind. */
484 static HOST_WIDE_INT args_size;
486 /* Determine if two dw_cfa_location structures define the same data. */
489 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
/* base_offset is only meaningful when the location is indirect, so it is
   compared only in that case.  */
491 return (loc1->reg == loc2->reg
492 && loc1->offset == loc2->offset
493 && loc1->indirect == loc2->indirect
494 && (loc1->indirect == 0
495 || loc1->base_offset == loc2->base_offset));
/* NOTE(review): the switch header of cfi_oprnd_equal_p, `return true;` after
   dw_cfi_oprnd_unused, gcc_unreachable for the default, and several lines of
   cfi_equal_p (NULL checks, `opc = a->dw_cfi_opc;`) are missing (gaps
   501->505, 505->507, 515->520, 527->530, 530->533, 535->538).  Restore
   from upstream before compiling.  */
498 /* Determine if two CFI operands are identical. */
501 cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
505 case dw_cfi_oprnd_unused:
507 case dw_cfi_oprnd_reg_num:
508 return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
509 case dw_cfi_oprnd_offset:
510 return a->dw_cfi_offset == b->dw_cfi_offset;
/* Addresses compare equal by pointer identity or string content.  */
511 case dw_cfi_oprnd_addr:
512 return (a->dw_cfi_addr == b->dw_cfi_addr
513 || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
514 case dw_cfi_oprnd_loc:
515 return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
520 /* Determine if two CFI entries are identical. */
523 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
525 enum dwarf_call_frame_info opc;
527 /* Make things easier for our callers, including missing operands. */
530 if (a == NULL || b == NULL)
533 /* Obviously, the opcodes must match. */
535 if (opc != b->dw_cfi_opc)
538 /* Compare the two operands, re-using the type of the operands as
539 already exposed elsewhere. */
540 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
541 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
542 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
543 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
/* NOTE(review): def_cfa_0 has lost its return type, the `dw_cfi_ref cfi =
   new_cfi ();` allocation, `else` keywords, the MIPS `#endif`, braces, and
   the final `return cfi;` (gaps 548->551, 551->555, 556->561, 568->570,
   571->574, 584->588, 593->595, 596->598, 600->604, 611->617); def_cfa_1
   has lost its return type, the null-check around the add_cfi call, and
   braces (617->620, 620->624, 625->627, 627->630, 632->638).  Restore from
   upstream before compiling.  */
546 /* The CFA is now calculated from NEW_CFA. Consider OLD_CFA in determining
547 what opcode to emit. Returns the CFI opcode to effect the change, or
548 NULL if NEW_CFA == OLD_CFA. */
551 def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
555 /* If nothing changed, no need to issue any call frame instructions. */
556 if (cfa_equal_p (old_cfa, new_cfa))
561 if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
563 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
564 the CFA register did not change but the offset did. The data
565 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
566 in the assembler via the .cfi_def_cfa_offset directive. */
567 if (new_cfa->offset < 0)
568 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
570 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
571 cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
574 #ifndef MIPS_DEBUGGING_INFO /* SGI dbx thinks this means no offset. */
575 else if (new_cfa->offset == old_cfa->offset
576 && old_cfa->reg != INVALID_REGNUM
577 && !new_cfa->indirect
578 && !old_cfa->indirect)
580 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
581 indicating the CFA register has changed to <register> but the
582 offset has not changed. */
583 cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
584 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
588 else if (new_cfa->indirect == 0)
590 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
591 indicating the CFA register has changed to <register> with
592 the specified offset. The data factoring for DW_CFA_def_cfa_sf
593 happens in output_cfi, or in the assembler via the .cfi_def_cfa
595 if (new_cfa->offset < 0)
596 cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
598 cfi->dw_cfi_opc = DW_CFA_def_cfa;
599 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
600 cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
604 /* Construct a DW_CFA_def_cfa_expression instruction to
605 calculate the CFA using a full location expression since no
606 register-offset pair is available. */
607 struct dw_loc_descr_struct *loc_list;
609 cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
610 loc_list = build_cfa_loc (new_cfa, 0);
611 cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
617 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact. */
620 def_cfa_1 (dw_cfa_location *new_cfa)
/* Keep cfa_store's offset in sync when the CFA is the store register.  */
624 if (cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
625 cfa_store.offset = new_cfa->offset;
627 cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
630 cur_row->cfa = *new_cfa;
/* Cache a full-expression CFI on the row for later comparison.  */
631 if (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression)
632 cur_row->cfa_cfi = cfi;
/* NOTE(review): the function's return type, braces, the `if (fde` opening
   of the stack-realign condition, the gcc_unreachable () in the sreg==reg
   arm, and `else` keywords are missing (gaps 640->643, 648->650, 650->652,
   659->661, 666->668, 669->671, 676->681, 682->686).  Restore from
   upstream before compiling.  */
638 /* Add the CFI for saving a register. REG is the CFA column number.
639 If SREG is -1, the register is saved at OFFSET from the CFA;
640 otherwise it is saved in SREG. */
643 reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
645 dw_fde_ref fde = cfun ? cfun->fde : NULL;
646 dw_cfi_ref cfi = new_cfi ();
648 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
650 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
652 && fde->stack_realign
653 && sreg == INVALID_REGNUM
655 cfi->dw_cfi_opc = DW_CFA_expression;
656 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
657 cfi->dw_cfi_oprnd2.dw_cfi_loc
658 = build_cfa_aligned_loc (&cur_row->cfa, offset,
659 fde->stack_realignment);
/* Plain save at OFFSET from the CFA; pick the narrowest opcode form.  */
661 else if (sreg == INVALID_REGNUM)
663 if (need_data_align_sf_opcode (offset))
664 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
665 else if (reg & ~0x3f)
666 cfi->dw_cfi_opc = DW_CFA_offset_extended;
668 cfi->dw_cfi_opc = DW_CFA_offset;
669 cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
671 else if (sreg == reg)
673 /* While we could emit something like DW_CFA_same_value or
674 DW_CFA_restore, we never expect to see something like that
675 in a prologue. This is more likely to be a bug. A backend
676 can always bypass this by using REG_CFA_RESTORE directly. */
681 cfi->dw_cfi_opc = DW_CFA_register;
682 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
686 update_row_reg_save (cur_row, reg, cfi);
/* NOTE(review): the static return-type line, braces, the declaration of
   CODE, `return 0;` fall-throughs, the PRE_MODIFY/POST_MODIFY/PRE_DEC/
   POST_DEC case labels of the inner switch, and the final return are all
   missing (gaps 689->693, 698->701, 701->703, 710->713, 722->728, 734->740,
   746->753, 755->762, 764 onward).  Restore from upstream before
   compiling.  */
689 /* Given a SET, calculate the amount of stack adjustment it
/* PATTERN is a SET rtx; CUR_ARGS_SIZE / CUR_OFFSET thread the running
   totals through compound-insn scans.  */
693 stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
694 HOST_WIDE_INT cur_offset)
696 const_rtx src = SET_SRC (pattern);
697 const_rtx dest = SET_DEST (pattern);
698 HOST_WIDE_INT offset = 0;
701 if (dest == stack_pointer_rtx)
703 code = GET_CODE (src);
705 /* Assume (set (reg sp) (reg whatever)) sets args_size
707 if (code == REG && src != stack_pointer_rtx)
709 offset = -cur_args_size;
710 #ifndef STACK_GROWS_DOWNWARD
713 return offset - cur_offset;
716 if (! (code == PLUS || code == MINUS)
717 || XEXP (src, 0) != stack_pointer_rtx
718 || !CONST_INT_P (XEXP (src, 1)))
721 /* (set (reg sp) (plus (reg sp) (const_int))) */
722 offset = INTVAL (XEXP (src, 1));
728 if (MEM_P (src) && !MEM_P (dest))
732 /* (set (mem (pre_dec (reg sp))) (foo)) */
733 src = XEXP (dest, 0);
734 code = GET_CODE (src);
/* PRE_MODIFY/POST_MODIFY arm (case labels lost): only constant
   adjustments of the form (plus sp const) are handled.  */
740 if (XEXP (src, 0) == stack_pointer_rtx)
742 rtx val = XEXP (XEXP (src, 1), 1);
743 /* We handle only adjustments by constant amount. */
744 gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
745 && CONST_INT_P (val));
746 offset = -INTVAL (val);
/* PRE_DEC/POST_DEC arm: stack grows by the size of the stored mode.  */
753 if (XEXP (src, 0) == stack_pointer_rtx)
755 offset = GET_MODE_SIZE (GET_MODE (dest));
/* PRE_INC/POST_INC arm: inverse sign of the DEC case.  */
762 if (XEXP (src, 0) == stack_pointer_rtx)
764 offset = -GET_MODE_SIZE (GET_MODE (dest));
/* NOTE(review): the helper's static return-type line, braces, declaration
   of I, `return cur_args_size;` early exits, the JUMP_P handling around
   embedded lines 835-850, and the STACK_GROWS_DOWNWARD negation are missing
   (gaps 782->784, 788->790, 793->795, 795->797, 807->812, 812->816,
   824->829, 829->833, 834->839, 846->851).  Restore from upstream.  */
779 /* Precomputed args_size for CODE_LABELs and BARRIERs preceeding them,
780 indexed by INSN_UID. */
/* -1 in a slot means "not yet computed" (see compute_barrier_args_size).  */
782 static HOST_WIDE_INT *barrier_args_size;
784 /* Helper function for compute_barrier_args_size. Handle one insn. */
787 compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
788 VEC (rtx, heap) **next)
790 HOST_WIDE_INT offset = 0;
793 if (! RTX_FRAME_RELATED_P (insn))
795 if (prologue_epilogue_contains (insn))
797 else if (GET_CODE (PATTERN (insn)) == SET)
798 offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
799 else if (GET_CODE (PATTERN (insn)) == PARALLEL
800 || GET_CODE (PATTERN (insn)) == SEQUENCE)
802 /* There may be stack adjustments inside compound insns. Search
804 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
805 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
806 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
807 cur_args_size, offset);
/* Frame-related insns take the adjustment from the REG_FRAME_RELATED_EXPR
   note when present.  */
812 rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
816 expr = XEXP (expr, 0);
817 if (GET_CODE (expr) == PARALLEL
818 || GET_CODE (expr) == SEQUENCE)
819 for (i = 1; i < XVECLEN (expr, 0); i++)
821 rtx elem = XVECEXP (expr, 0, i);
823 if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
824 offset += stack_adjust_offset (elem, cur_args_size, offset);
829 #ifndef STACK_GROWS_DOWNWARD
833 cur_args_size += offset;
834 if (cur_args_size < 0)
/* Propagate the computed args_size to the jump target and queue it.  */
839 rtx dest = JUMP_LABEL (insn);
843 if (barrier_args_size [INSN_UID (dest)] < 0)
845 barrier_args_size [INSN_UID (dest)] = cur_args_size;
846 VEC_safe_push (rtx, heap, *next, dest);
851 return cur_args_size;
/* NOTE(review): the function's static return-type line, braces, the
   `rtx insn` declaration and get_insns () seed, `continue`/`break`
   statements, label handling, and the worklist/next swap lines are missing
   (gaps 857->859, 859->861, 868->870, 871->874, 877->879, 883->885,
   887->889, 889->894, 898->903, 904->909, 915->917, 918->921, 922->924,
   926->929, 930->934, 934->938, 941->945, 945->948).  Restore from
   upstream before compiling.  */
854 /* Walk the whole function and compute args_size on BARRIERs. */
857 compute_barrier_args_size (void)
859 int max_uid = get_max_uid (), i;
861 VEC (rtx, heap) *worklist, *next, *tmp;
/* Initialize every slot to -1 = "unknown".  */
863 barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
864 for (i = 0; i < max_uid; i++)
865 barrier_args_size[i] = -1;
867 worklist = VEC_alloc (rtx, heap, 20);
868 next = VEC_alloc (rtx, heap, 20);
/* Seed the worklist (with the function entry; seeding insn lost above).  */
870 barrier_args_size[INSN_UID (insn)] = 0;
871 VEC_quick_push (rtx, worklist, insn);
/* Breadth-first propagation over branch targets until a fixed point.  */
874 while (!VEC_empty (rtx, worklist))
876 rtx prev, body, first_insn;
877 HOST_WIDE_INT cur_args_size;
879 first_insn = insn = VEC_pop (rtx, worklist);
880 cur_args_size = barrier_args_size[INSN_UID (insn)];
881 prev = prev_nonnote_insn (insn);
882 if (prev && BARRIER_P (prev))
883 barrier_args_size[INSN_UID (prev)] = cur_args_size;
885 for (; insn; insn = NEXT_INSN (insn))
887 if (INSN_DELETED_P (insn) || NOTE_P (insn))
889 if (BARRIER_P (insn))
894 if (insn == first_insn)
896 else if (barrier_args_size[INSN_UID (insn)] < 0)
898 barrier_args_size[INSN_UID (insn)] = cur_args_size;
903 /* The insns starting with this label have been
904 already scanned or are in the worklist. */
909 body = PATTERN (insn);
/* Delay-slot SEQUENCEs: annulled-branch slots propagate to the branch
   target's args_size, non-annulled slots to the fall-through.  */
910 if (GET_CODE (body) == SEQUENCE)
912 HOST_WIDE_INT dest_args_size = cur_args_size;
913 for (i = 1; i < XVECLEN (body, 0); i++)
914 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
915 && INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
917 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
918 dest_args_size, &next);
921 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
922 cur_args_size, &next);
924 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
925 compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
926 dest_args_size, &next);
929 = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
930 cur_args_size, &next);
934 = compute_barrier_args_size_1 (insn, cur_args_size, &next);
938 if (VEC_empty (rtx, next))
941 /* Swap WORKLIST with NEXT and truncate NEXT for next iteration. */
945 VEC_truncate (rtx, next, 0);
948 VEC_free (rtx, heap, worklist);
949 VEC_free (rtx, heap, next);
/* NOTE(review): return types, braces, an early `return;`, and the
   STACK_GROWS_DOWNWARD / args_size-update lines of dwarf2out_stack_adjust
   are missing (gaps 953->956, 958->961, 962->965, 968->970, 970->972,
   973->975, 976->978, 983->986, 986->995, 996 onward).  Restore from
   upstream before compiling.  */
952 /* Add a CFI to update the running total of the size of arguments
953 pushed onto the stack. */
956 dwarf2out_args_size (HOST_WIDE_INT size)
/* Avoid redundant DW_CFA_GNU_args_size when the row already matches.  */
958 if (size == cur_row->args_size)
961 cur_row->args_size = size;
962 add_cfi_args_size (size);
965 /* Record a stack adjustment of OFFSET bytes. */
968 dwarf2out_stack_adjust (HOST_WIDE_INT offset)
970 dw_cfa_location loc = cur_row->cfa;
/* Only shift the CFA/store offsets if they track the stack pointer.  */
972 if (loc.reg == dw_stack_pointer_regnum)
973 loc.offset += offset;
975 if (cfa_store.reg == dw_stack_pointer_regnum)
976 cfa_store.offset += offset;
978 /* ??? The assumption seems to be that if A_O_A, the only CFA adjustments
979 involving the stack pointer are inside the prologue and marked as
980 RTX_FRAME_RELATED_P. That said, should we not verify this assumption
981 by *asserting* A_O_A at this point? Why else would we have a change
982 to the stack pointer? */
983 if (ACCUMULATE_OUTGOING_ARGS)
986 #ifndef STACK_GROWS_DOWNWARD
/* With async unwind tables the new args_size must be emitted at once.  */
995 if (flag_asynchronous_unwind_tables)
996 dwarf2out_args_size (args_size);
/* NOTE(review): the return type, braces, multiple early `return;`
   statements, the `if (final_sequence` opener of the annulled-branch test,
   sign handling after the barrier lookup, and the final filtering before
   dwarf2out_stack_adjust are missing (gaps 1001->1004, 1006->1009,
   1013->1016, 1018->1020, 1021->1024, 1027->1029, 1038->1043, 1046->1049,
   1054->1055, 1055->1059, 1059->1064, 1065->1069, 1078->1087).  Restore
   from upstream before compiling.  */
999 /* Check INSN to see if it looks like a push or a stack adjustment, and
1000 make a note of it if it does. EH uses this information to find out
1001 how much extra space it needs to pop off the stack. */
1004 dwarf2out_notice_stack_adjust (rtx insn, bool after_p)
1006 HOST_WIDE_INT offset;
1009 /* Don't handle epilogues at all. Certainly it would be wrong to do so
1010 with this function. Proper support would require all frame-related
1011 insns to be marked, and to be able to handle saving state around
1012 epilogues textually in the middle of the function. */
1013 if (prologue_epilogue_contains (insn))
1016 /* If INSN is an instruction from target of an annulled branch, the
1017 effects are for the target only and so current argument size
1018 shouldn't change at all. */
1020 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
1021 && INSN_FROM_TARGET_P (insn)
1024 /* If only calls can throw, and we have a frame pointer,
1025 save up adjustments until we see the CALL_INSN. */
1026 if (!flag_asynchronous_unwind_tables
1027 && cur_row->cfa.reg != dw_stack_pointer_regnum)
1029 if (CALL_P (insn) && !after_p)
1031 /* Extract the size of the args from the CALL rtx itself. */
1032 insn = PATTERN (insn);
1033 if (GET_CODE (insn) == PARALLEL)
1034 insn = XVECEXP (insn, 0, 0);
1035 if (GET_CODE (insn) == SET)
1036 insn = SET_SRC (insn);
1037 gcc_assert (GET_CODE (insn) == CALL);
1038 dwarf2out_args_size (INTVAL (XEXP (insn, 1)));
1043 if (CALL_P (insn) && !after_p)
1045 if (!flag_asynchronous_unwind_tables)
1046 dwarf2out_args_size (args_size);
1049 else if (BARRIER_P (insn))
1051 /* Don't call compute_barrier_args_size () if the only
1052 BARRIER is at the end of function. */
1053 if (barrier_args_size == NULL && next_nonnote_insn (insn))
1054 compute_barrier_args_size ();
1055 if (barrier_args_size == NULL)
/* Use the precomputed args_size at this barrier, relative to current.  */
1059 offset = barrier_args_size[INSN_UID (insn)];
1064 offset -= args_size;
1065 #ifndef STACK_GROWS_DOWNWARD
1069 else if (GET_CODE (PATTERN (insn)) == SET)
1070 offset = stack_adjust_offset (PATTERN (insn), args_size, 0);
1071 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1072 || GET_CODE (PATTERN (insn)) == SEQUENCE)
1074 /* There may be stack adjustments inside compound insns. Search
1076 for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
1077 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1078 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
1087 dwarf2out_stack_adjust (offset);
/* NOTE(review): the queued_reg_save struct has lost its `rtx reg` /
   `rtx saved_reg` members and closing brace (gaps 1095->1097, 1097->1101);
   reg_saved_in_data has lost its two rtx members (1104->1107); both
   dwf_regno and compare_reg_or_pc are missing braces and the final
   `return x == y;` of compare_reg_or_pc (1127 onward, 1136->1140).
   Restore from upstream before compiling.  */
1090 /* We delay emitting a register save until either (a) we reach the end
1091 of the prologue or (b) the register is clobbered. This clusters
1092 register saves so that there are fewer pc advances. */
1094 struct GTY(()) queued_reg_save {
1095 struct queued_reg_save *next;
1097 HOST_WIDE_INT cfa_offset;
/* Singly-linked LIFO list of saves pending until the flush point.  */
1101 static GTY(()) struct queued_reg_save *queued_reg_saves;
1103 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
1104 typedef struct GTY(()) reg_saved_in_data {
1107 } reg_saved_in_data;
1109 DEF_VEC_O (reg_saved_in_data);
1110 DEF_VEC_ALLOC_O (reg_saved_in_data, gc);
1112 /* A set of registers saved in other registers. This is implemented as
1113 a flat array because it normally contains zero or 1 entry, depending
1114 on the target. IA-64 is the big spender here, using a maximum of
1116 static GTY(()) VEC(reg_saved_in_data, gc) *regs_saved_in_regs;
/* CIE-level record of where the return address lives, if precomputed.  */
1118 static GTY(()) reg_saved_in_data *cie_return_save;
1120 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
1121 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
1122 used in places where rtl is prohibited. */
1124 static inline unsigned
1125 dwf_regno (const_rtx reg)
1127 return DWARF_FRAME_REGNUM (REGNO (reg));
1130 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1133 compare_reg_or_pc (rtx x, rtx y)
/* Two REGs match on REGNO; otherwise equality falls back to rtx identity
   (tail lost in extraction).  */
1135 if (REG_P (x) && REG_P (y))
1136 return REGNO (x) == REGNO (y);
/* NOTE(review): return types, braces, loop/early-return lines of all three
   functions below are missing (gaps 1141->1144, 1146->1149, 1150->1153,
   1153->1155, 1155->1162, 1164->1167, 1173->1175, 1178->1183, 1185->1189,
   1190->1193, 1198->1200, 1202->1204, 1204->1206, 1207->1209, 1209->1211,
   1211->1213, 1214->1217).  Restore from upstream before compiling.  */
1140 /* Record SRC as being saved in DEST. DEST may be null to delete an
1141 existing entry. SRC may be a register or PC_RTX. */
1144 record_reg_saved_in_reg (rtx dest, rtx src)
1146 reg_saved_in_data *elt;
/* Update an existing entry for SRC in place; remove it if DEST is null.  */
1149 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, elt)
1150 if (compare_reg_or_pc (elt->orig_reg, src))
1153 VEC_unordered_remove(reg_saved_in_data, regs_saved_in_regs, i);
1155 elt->saved_in_reg = dest;
/* No existing entry: append a fresh one.  */
1162 elt = VEC_safe_push(reg_saved_in_data, gc, regs_saved_in_regs, NULL);
1163 elt->orig_reg = src;
1164 elt->saved_in_reg = dest;
1167 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1168 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1171 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1173 struct queued_reg_save *q;
1175 /* Duplicates waste space, but it's also necessary to remove them
1176 for correctness, since the queue gets output in reverse order. */
1177 for (q = queued_reg_saves; q != NULL; q = q->next)
1178 if (compare_reg_or_pc (q->reg, reg))
/* Not queued yet: push a new entry at the head.  */
1183 q = ggc_alloc_queued_reg_save ();
1184 q->next = queued_reg_saves;
1185 queued_reg_saves = q;
1189 q->cfa_offset = offset;
1190 q->saved_reg = sreg;
1193 /* Output all the entries in QUEUED_REG_SAVES. */
1196 dwarf2out_flush_queued_reg_saves (void)
1198 struct queued_reg_save *q;
1200 for (q = queued_reg_saves; q; q = q->next)
1202 unsigned int reg, sreg;
1204 record_reg_saved_in_reg (q->saved_reg, q->reg);
/* pc_rtx denotes the return-address column.  */
1206 if (q->reg == pc_rtx)
1207 reg = DWARF_FRAME_RETURN_COLUMN;
1209 reg = dwf_regno (q->reg);
1211 sreg = dwf_regno (q->saved_reg);
1213 sreg = INVALID_REGNUM;
1214 reg_save (reg, sreg, q->cfa_offset);
1217 queued_reg_saves = NULL;
/* NOTE(review): return types, braces, `return true;`/`return false;`
   statements, and the trailing `return NULL_RTX;` of reg_saved_in are
   missing (gaps 1223->1226, 1228->1230, 1230->1233, 1235->1238,
   1240->1247, 1247->1250, 1254->1257, 1258->1261, 1263->1268).  Restore
   from upstream before compiling.  */
1220 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1221 location for? Or, does it clobber a register which we've previously
1222 said that some other register is saved in, and for which we now
1223 have a new location for? */
1226 clobbers_queued_reg_save (const_rtx insn)
1228 struct queued_reg_save *q;
1230 for (q = queued_reg_saves; q; q = q->next)
1233 reg_saved_in_data *rir;
/* The queued register itself was clobbered.  */
1235 if (modified_in_p (q->reg, insn))
/* Or the register currently holding it was clobbered.  */
1238 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1239 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1240 && modified_in_p (rir->saved_in_reg, insn))
1247 /* What register, if any, is currently saved in REG? */
1250 reg_saved_in (rtx reg)
1252 unsigned int regn = REGNO (reg);
1253 struct queued_reg_save *q;
1254 reg_saved_in_data *rir;
/* Check the pending queue first, then the committed saved-in-reg list.  */
1257 for (q = queued_reg_saves; q; q = q->next)
1258 if (q->saved_reg && regn == REGNO (q->saved_reg))
1261 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1262 if (regn == REGNO (rir->saved_in_reg))
1263 return rir->orig_reg;
/* NOTE(review): the switch's case labels (PLUS/REG/MEM), `break`s, the
   gcc_unreachable default, the def_cfa_1 call, and in adjust_cfa the
   remaining switch arms plus the closing def_cfa_1/cur_row update are
   missing (gaps 1273->1275, 1277->1280, 1281->1285, 1285->1290, 1294->1296,
   1296->1300, 1300->1307, 1312->1315, 1317->1319, 1323->1333, 1334->1339).
   Restore from upstream before compiling.  */
1268 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1271 dwarf2out_frame_debug_def_cfa (rtx pat)
1273 dw_cfa_location loc;
1275 memset (&loc, 0, sizeof (loc));
1277 switch (GET_CODE (pat))
/* (plus reg const): CFA = reg + offset.  */
1280 loc.reg = dwf_regno (XEXP (pat, 0));
1281 loc.offset = INTVAL (XEXP (pat, 1));
/* (reg): CFA = reg + 0.  */
1285 loc.reg = dwf_regno (pat);
/* (mem ...): indirect CFA; peel an inner PLUS into base_offset.  */
1290 pat = XEXP (pat, 0);
1291 if (GET_CODE (pat) == PLUS)
1293 loc.base_offset = INTVAL (XEXP (pat, 1));
1294 pat = XEXP (pat, 0);
1296 loc.reg = dwf_regno (pat);
1300 /* Recurse and define an expression. */
1307 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1310 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1312 dw_cfa_location loc = cur_row->cfa;
1315 gcc_assert (GET_CODE (pat) == SET);
1316 dest = XEXP (pat, 0);
1317 src = XEXP (pat, 1);
1319 switch (GET_CODE (src))
/* (plus cfa-reg const): adjust the offset, keeping the same register.  */
1322 gcc_assert (dwf_regno (XEXP (src, 0)) == loc.reg);
1323 loc.offset -= INTVAL (XEXP (src, 1));
/* The destination register becomes the new CFA register.  */
1333 loc.reg = dwf_regno (dest);
1334 gcc_assert (loc.indirect == 0);
/* NOTE(review): case labels (REG/PLUS), `break`s, gcc_unreachable, the
   `if (src == pc_rtx)` test before the RETURN_COLUMN assignment, the span
   null-check branching, per-element extraction inside the PARALLEL loop,
   and in _cfa_register the pc_rtx test are missing (gaps 1346->1348,
   1354->1357, 1358->1361, 1362->1371, 1371->1375, 1376->1379, 1382->1385,
   1386->1389, 1389->1391, 1391->1393, 1396->1398, 1400->1405, 1411->1413,
   1414->1416, 1416->1418, 1418->1420, 1420->1422, 1426 onward).  Restore
   from upstream before compiling.  */
1339 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1342 dwarf2out_frame_debug_cfa_offset (rtx set)
1344 HOST_WIDE_INT offset;
1345 rtx src, addr, span;
1346 unsigned int sregno;
1348 src = XEXP (set, 1);
1349 addr = XEXP (set, 0);
1350 gcc_assert (MEM_P (addr));
1351 addr = XEXP (addr, 0);
1353 /* As documented, only consider extremely simple addresses. */
1354 switch (GET_CODE (addr))
/* (reg cfa-reg): save at offset 0 from the CFA.  */
1357 gcc_assert (dwf_regno (addr) == cur_row->cfa.reg);
1358 offset = -cur_row->cfa.offset;
/* (plus cfa-reg const): save at the constant offset from the CFA.  */
1361 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_row->cfa.reg);
1362 offset = INTVAL (XEXP (addr, 1)) - cur_row->cfa.offset;
/* pc_rtx source means the return-address column.  */
1371 sregno = DWARF_FRAME_RETURN_COLUMN;
1375 span = targetm.dwarf_register_span (src);
1376 sregno = dwf_regno (src);
1379 /* ??? We'd like to use queue_reg_save, but we need to come up with
1380 a different flushing heuristic for epilogues. */
1382 reg_save (sregno, INVALID_REGNUM, offset);
1385 /* We have a PARALLEL describing where the contents of SRC live.
1386 Queue register saves for each piece of the PARALLEL. */
1389 HOST_WIDE_INT span_offset = offset;
1391 gcc_assert (GET_CODE (span) == PARALLEL);
1393 limit = XVECLEN (span, 0);
1394 for (par_index = 0; par_index < limit; par_index++)
1396 rtx elem = XVECEXP (span, 0, par_index);
1398 sregno = dwf_regno (src);
1399 reg_save (sregno, INVALID_REGNUM, span_offset);
1400 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1405 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1408 dwarf2out_frame_debug_cfa_register (rtx set)
1411 unsigned sregno, dregno;
1413 src = XEXP (set, 1);
1414 dest = XEXP (set, 0);
/* Remember the save for later clobber tracking, then emit it.  */
1416 record_reg_saved_in_reg (dest, src);
1418 sregno = DWARF_FRAME_RETURN_COLUMN;
1420 sregno = dwf_regno (src);
1422 dregno = dwf_regno (dest);
1424 /* ??? We'd like to use queue_reg_save, but we need to come up with
1425 a different flushing heuristic for epilogues. */
1426 reg_save (sregno, dregno, 0);
1429 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1432 dwarf2out_frame_debug_cfa_expression (rtx set)
1434 rtx src, dest, span;
1435 dw_cfi_ref cfi = new_cfi ();
1438 dest = SET_DEST (set);
1439 src = SET_SRC (set);
1441 gcc_assert (REG_P (src));
1442 gcc_assert (MEM_P (dest));
1444 span = targetm.dwarf_register_span (src);
1447 regno = dwf_regno (src);
1449 cfi->dw_cfi_opc = DW_CFA_expression;
1450 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1451 cfi->dw_cfi_oprnd2.dw_cfi_loc
1452 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1453 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1455 /* ??? We'd like to use queue_reg_save, were the interface different,
1456 and, as above, we could manage flushing for epilogues. */
1458 update_row_reg_save (cur_row, regno, cfi);
1461 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
/* NOTE(review): lossy excerpt; function header/braces elided.  */
1464 dwarf2out_frame_debug_cfa_restore (rtx reg)
1466 unsigned int regno = dwf_regno (reg);
/* Emit DW_CFA_restore for REGNO and drop any recorded save for it
   from the current row (NULL clears the slot).  */
1468 add_cfi_restore (regno);
1469 update_row_reg_save (cur_row, regno, NULL);
1472 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1473 ??? Perhaps we should note in the CIE where windows are saved (instead of
1474 assuming 0(cfa)) and what registers are in the window. */
/* NOTE(review): lossy excerpt; the add_cfi call that presumably follows
   the assignment is elided -- TODO confirm against full source.  */
1477 dwarf2out_frame_debug_cfa_window_save (void)
1479 dw_cfi_ref cfi = new_cfi ();
/* Emit the SPARC register-window save opcode (GNU extension).  */
1481 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1485 /* Record call frame debugging information for an expression EXPR,
1486 which either sets SP or FP (adjusting how we calculate the frame
1487 address) or saves a register to the stack or another register.
1488 LABEL indicates the address of EXPR.
1490 This function encodes a state machine mapping rtxes to actions on
1491 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1492 users need not read the source code.
1494 The High-Level Picture
1496 Changes in the register we use to calculate the CFA: Currently we
1497 assume that if you copy the CFA register into another register, we
1498 should take the other one as the new CFA register; this seems to
1499 work pretty well. If it's wrong for some target, it's simple
1500 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1502 Changes in the register we use for saving registers to the stack:
1503 This is usually SP, but not always. Again, we deduce that if you
1504 copy SP into another register (and SP is not the CFA register),
1505 then the new register is the one we will be using for register
1506 saves. This also seems to work.
1508 Register saves: There's not much guesswork about this one; if
1509 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1510 register save, and the register used to calculate the destination
1511 had better be the one we think we're using for this purpose.
1512 It's also assumed that a copy from a call-saved register to another
1513 register is saving that register if RTX_FRAME_RELATED_P is set on
1514 that instruction. If the copy is from a call-saved register to
1515 the *same* register, that means that the register is now the same
1516 value as in the caller.
1518 Except: If the register being saved is the CFA register, and the
1519 offset is nonzero, we are saving the CFA, so we assume we have to
1520 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1521 the intent is to save the value of SP from the previous frame.
1523 In addition, if a register has previously been saved to a different
1526 Invariants / Summaries of Rules
1528 cfa current rule for calculating the CFA. It usually
1529 consists of a register and an offset. This is
1530 actually stored in cur_row->cfa, but abbreviated
1531 for the purposes of this documentation.
1532 cfa_store register used by prologue code to save things to the stack
1533 cfa_store.offset is the offset from the value of
1534 cfa_store.reg to the actual CFA
1535 cfa_temp register holding an integral value. cfa_temp.offset
1536 stores the value, which will be used to adjust the
1537 stack pointer. cfa_temp is also used like cfa_store,
1538 to track stores to the stack via fp or a temp reg.
1540 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1541 with cfa.reg as the first operand changes the cfa.reg and its
1542 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1545 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1546 expression yielding a constant. This sets cfa_temp.reg
1547 and cfa_temp.offset.
1549 Rule 5: Create a new register cfa_store used to save items to the
1552 Rules 10-14: Save a register to the stack. Define offset as the
1553 difference of the original location and cfa_store's
1554 location (or cfa_temp's location if cfa_temp is used).
1556 Rules 16-20: If AND operation happens on sp in prologue, we assume
1557 stack is realigned. We will use a group of DW_OP_XXX
1558 expressions to represent the location of the stored
1559 register instead of CFA+offset.
1563 "{a,b}" indicates a choice of a xor b.
1564 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1567 (set <reg1> <reg2>:cfa.reg)
1568 effects: cfa.reg = <reg1>
1569 cfa.offset unchanged
1570 cfa_temp.reg = <reg1>
1571 cfa_temp.offset = cfa.offset
1574 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1575 {<const_int>,<reg>:cfa_temp.reg}))
1576 effects: cfa.reg = sp if fp used
1577 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1578 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1579 if cfa_store.reg==sp
1582 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1583 effects: cfa.reg = fp
1584 cfa_offset += +/- <const_int>
1587 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1588 constraints: <reg1> != fp
1590 effects: cfa.reg = <reg1>
1591 cfa_temp.reg = <reg1>
1592 cfa_temp.offset = cfa.offset
1595 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1596 constraints: <reg1> != fp
1598 effects: cfa_store.reg = <reg1>
1599 cfa_store.offset = cfa.offset - cfa_temp.offset
1602 (set <reg> <const_int>)
1603 effects: cfa_temp.reg = <reg>
1604 cfa_temp.offset = <const_int>
1607 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1608 effects: cfa_temp.reg = <reg1>
1609 cfa_temp.offset |= <const_int>
1612 (set <reg> (high <exp>))
1616 (set <reg> (lo_sum <exp> <const_int>))
1617 effects: cfa_temp.reg = <reg>
1618 cfa_temp.offset = <const_int>
1621 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1622 effects: cfa_store.offset -= <const_int>
1623 cfa.offset = cfa_store.offset if cfa.reg == sp
1625 cfa.base_offset = -cfa_store.offset
1628 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1629 effects: cfa_store.offset += -/+ mode_size(mem)
1630 cfa.offset = cfa_store.offset if cfa.reg == sp
1632 cfa.base_offset = -cfa_store.offset
1635 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1638 effects: cfa.reg = <reg1>
1639 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1642 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1643 effects: cfa.reg = <reg1>
1644 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1647 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1648 effects: cfa.reg = <reg1>
1649 cfa.base_offset = -cfa_temp.offset
1650 cfa_temp.offset -= mode_size(mem)
1653 (set <reg> {unspec, unspec_volatile})
1654 effects: target-dependent
1657 (set sp (and: sp <const_int>))
1658 constraints: cfa_store.reg == sp
1659 effects: cfun->fde.stack_realign = 1
1660 cfa_store.offset = 0
1661 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1664 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1665 effects: cfa_store.offset += -/+ mode_size(mem)
1668 (set (mem ({pre_inc, pre_dec} sp)) fp)
1669 constraints: fde->stack_realign == 1
1670 effects: cfa_store.offset = 0
1671 cfa.reg != HARD_FRAME_POINTER_REGNUM
1674 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1675 constraints: fde->stack_realign == 1
1677 && cfa.indirect == 0
1678 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1679 effects: Use DW_CFA_def_cfa_expression to define cfa
1680 cfa.reg == fde->drap_reg */
/* NOTE(review): this excerpt is a lossy sample of dwarf2out_frame_debug_expr;
   the embedded original line numbers are non-contiguous, so many case labels,
   braces and conditions are elided.  The comments added below describe only
   what the visible lines show; see the large rule comment above this
   function for the state-machine semantics (Rules 1-20).  */
1683 dwarf2out_frame_debug_expr (rtx expr)
1685 dw_cfa_location cfa = cur_row->cfa;
1686 rtx src, dest, span;
1687 HOST_WIDE_INT offset;
1690 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1691 the PARALLEL independently. The first element is always processed if
1692 it is a SET. This is for backward compatibility. Other elements
1693 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1694 flag is set in them. */
1695 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1698 int limit = XVECLEN (expr, 0);
1701 /* PARALLELs have strict read-modify-write semantics, so we
1702 ought to evaluate every rvalue before changing any lvalue.
1703 It's cumbersome to do that in general, but there's an
1704 easy approximation that is enough for all current users:
1705 handle register saves before register assignments. */
1706 if (GET_CODE (expr) == PARALLEL)
1707 for (par_index = 0; par_index < limit; par_index++)
1709 elem = XVECEXP (expr, 0, par_index);
1710 if (GET_CODE (elem) == SET
1711 && MEM_P (SET_DEST (elem))
1712 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1713 dwarf2out_frame_debug_expr (elem);
/* Second pass: the non-memory (register assignment) elements.  */
1716 for (par_index = 0; par_index < limit; par_index++)
1718 elem = XVECEXP (expr, 0, par_index);
1719 if (GET_CODE (elem) == SET
1720 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1721 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1722 dwarf2out_frame_debug_expr (elem);
1723 else if (GET_CODE (elem) == SET
1725 && !RTX_FRAME_RELATED_P (elem))
1727 /* Stack adjustment combining might combine some post-prologue
1728 stack adjustment into a prologue stack adjustment. */
1729 HOST_WIDE_INT offset = stack_adjust_offset (elem, args_size, 0);
1732 dwarf2out_stack_adjust (offset);
/* From here on EXPR must be a single SET.  */
1738 gcc_assert (GET_CODE (expr) == SET);
1740 src = SET_SRC (expr);
1741 dest = SET_DEST (expr);
1745 rtx rsi = reg_saved_in (src);
/* Dispatch on the destination: register assignment vs. memory save.  */
1752 switch (GET_CODE (dest))
1755 switch (GET_CODE (src))
1757 /* Setting FP from SP. */
1759 if (cfa.reg == dwf_regno (src))
1762 /* Update the CFA rule wrt SP or FP. Make sure src is
1763 relative to the current CFA register.
1765 We used to require that dest be either SP or FP, but the
1766 ARM copies SP to a temporary register, and from there to
1767 FP. So we just rely on the backends to only set
1768 RTX_FRAME_RELATED_P on appropriate insns. */
1769 cfa.reg = dwf_regno (dest);
1770 cfa_temp.reg = cfa.reg;
1771 cfa_temp.offset = cfa.offset;
1775 /* Saving a register in a register. */
1776 gcc_assert (!fixed_regs [REGNO (dest)]
1777 /* For the SPARC and its register window. */
1778 || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));
1780 /* After stack is aligned, we can only save SP in FP
1781 if drap register is used. In this case, we have
1782 to restore stack pointer with the CFA value and we
1783 don't generate this DWARF information. */
1785 && fde->stack_realign
1786 && REGNO (src) == STACK_POINTER_REGNUM)
1787 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1788 && fde->drap_reg != INVALID_REGNUM
1789 && cfa.reg != dwf_regno (src));
1791 queue_reg_save (src, dest, 0);
/* PLUS/MINUS/LO_SUM arithmetic into a register (Rules 2-5).  */
1798 if (dest == stack_pointer_rtx)
/* Adjusting SP: the adjustment amount is either a constant or the
   value previously tracked in cfa_temp.  */
1802 switch (GET_CODE (XEXP (src, 1)))
1805 offset = INTVAL (XEXP (src, 1));
1808 gcc_assert (dwf_regno (XEXP (src, 1)) == cfa_temp.reg);
1809 offset = cfa_temp.offset;
1815 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1817 /* Restoring SP from FP in the epilogue. */
1818 gcc_assert (cfa.reg == dw_frame_pointer_regnum);
1819 cfa.reg = dw_stack_pointer_regnum;
1821 else if (GET_CODE (src) == LO_SUM)
1822 /* Assume we've set the source reg of the LO_SUM from sp. */
1825 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
/* For PLUS the stack grows downward, so negate on an elided line;
   for MINUS keep OFFSET as-is -- TODO confirm elided negation.  */
1827 if (GET_CODE (src) != MINUS)
1829 if (cfa.reg == dw_stack_pointer_regnum)
1830 cfa.offset += offset;
1831 if (cfa_store.reg == dw_stack_pointer_regnum)
1832 cfa_store.offset += offset;
1834 else if (dest == hard_frame_pointer_rtx)
1837 /* Either setting the FP from an offset of the SP,
1838 or adjusting the FP */
1839 gcc_assert (frame_pointer_needed);
1841 gcc_assert (REG_P (XEXP (src, 0))
1842 && dwf_regno (XEXP (src, 0)) == cfa.reg
1843 && CONST_INT_P (XEXP (src, 1)));
1844 offset = INTVAL (XEXP (src, 1));
1845 if (GET_CODE (src) != MINUS)
1847 cfa.offset += offset;
1848 cfa.reg = dw_frame_pointer_regnum;
/* Other destination register (Rules 4-5, 9).  */
1852 gcc_assert (GET_CODE (src) != MINUS);
1855 if (REG_P (XEXP (src, 0))
1856 && dwf_regno (XEXP (src, 0)) == cfa.reg
1857 && CONST_INT_P (XEXP (src, 1)))
1859 /* Setting a temporary CFA register that will be copied
1860 into the FP later on. */
1861 offset = - INTVAL (XEXP (src, 1));
1862 cfa.offset += offset;
1863 cfa.reg = dwf_regno (dest);
1864 /* Or used to save regs to the stack. */
1865 cfa_temp.reg = cfa.reg;
1866 cfa_temp.offset = cfa.offset;
1870 else if (REG_P (XEXP (src, 0))
1871 && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
1872 && XEXP (src, 1) == stack_pointer_rtx)
1874 /* Setting a scratch register that we will use instead
1875 of SP for saving registers to the stack. */
1876 gcc_assert (cfa.reg == dw_stack_pointer_regnum);
1877 cfa_store.reg = dwf_regno (dest);
1878 cfa_store.offset = cfa.offset - cfa_temp.offset;
1882 else if (GET_CODE (src) == LO_SUM
1883 && CONST_INT_P (XEXP (src, 1)))
1885 cfa_temp.reg = dwf_regno (dest);
1886 cfa_temp.offset = INTVAL (XEXP (src, 1));
/* Rule 6: loading a constant into a register.  */
1895 cfa_temp.reg = dwf_regno (dest);
1896 cfa_temp.offset = INTVAL (src);
/* Rule 7: IOR of a constant into the tracked temp register.  */
1901 gcc_assert (REG_P (XEXP (src, 0))
1902 && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
1903 && CONST_INT_P (XEXP (src, 1)));
1905 cfa_temp.reg = dwf_regno (dest);
1906 cfa_temp.offset |= INTVAL (XEXP (src, 1));
1909 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1910 which will fill in all of the bits. */
1917 case UNSPEC_VOLATILE:
1918 /* All unspecs should be represented by REG_CFA_* notes. */
/* Rule 16 (AND): dynamic stack realignment in the prologue.  */
1924 /* If this AND operation happens on stack pointer in prologue,
1925 we assume the stack is realigned and we extract the
1927 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1929 /* We interpret reg_save differently with stack_realign set.
1930 Thus we must flush whatever we have queued first. */
1931 dwarf2out_flush_queued_reg_saves ();
1933 gcc_assert (cfa_store.reg == dwf_regno (XEXP (src, 0)));
1934 fde->stack_realign = 1;
1935 fde->stack_realignment = INTVAL (XEXP (src, 1));
1936 cfa_store.offset = 0;
1938 if (cfa.reg != dw_stack_pointer_regnum
1939 && cfa.reg != dw_frame_pointer_regnum)
1940 fde->drap_reg = cfa.reg;
/* Destination is a MEM: a register save to the stack (Rules 10-14).  */
1953 /* Saving a register to the stack. Make sure dest is relative to the
1955 switch (GET_CODE (XEXP (dest, 0)))
/* {PRE,POST}_MODIFY addressing.  */
1961 /* We can't handle variable size modifications. */
1962 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1964 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1966 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1967 && cfa_store.reg == dw_stack_pointer_regnum);
1969 cfa_store.offset += offset;
1970 if (cfa.reg == dw_stack_pointer_regnum)
1971 cfa.offset = cfa_store.offset;
1973 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1974 offset -= cfa_store.offset;
1976 offset = -cfa_store.offset;
/* Rule 11: {PRE_INC,PRE_DEC,POST_DEC} of SP.  */
1983 offset = GET_MODE_SIZE (GET_MODE (dest));
1984 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1987 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1988 == STACK_POINTER_REGNUM)
1989 && cfa_store.reg == dw_stack_pointer_regnum);
1991 cfa_store.offset += offset;
1993 /* Rule 18: If stack is aligned, we will use FP as a
1994 reference to represent the address of the stored
1997 && fde->stack_realign
1998 && src == hard_frame_pointer_rtx)
2000 gcc_assert (cfa.reg != dw_frame_pointer_regnum);
2001 cfa_store.offset = 0;
2004 if (cfa.reg == dw_stack_pointer_regnum)
2005 cfa.offset = cfa_store.offset;
2007 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
2008 offset += -cfa_store.offset;
2010 offset = -cfa_store.offset;
/* Rule 12: store at base register + constant offset.  */
2014 /* With an offset. */
2021 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
2022 && REG_P (XEXP (XEXP (dest, 0), 0)));
2023 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
2024 if (GET_CODE (XEXP (dest, 0)) == MINUS)
2027 regno = dwf_regno (XEXP (XEXP (dest, 0), 0));
/* Rebase OFFSET to be CFA-relative depending on which tracked
   register the address was formed from.  */
2029 if (cfa.reg == regno)
2030 offset -= cfa.offset;
2031 else if (cfa_store.reg == regno)
2032 offset -= cfa_store.offset;
2035 gcc_assert (cfa_temp.reg == regno);
2036 offset -= cfa_temp.offset;
/* Rule 13: store at a bare base register.  */
2042 /* Without an offset. */
2045 unsigned int regno = dwf_regno (XEXP (dest, 0));
2047 if (cfa.reg == regno)
2048 offset = -cfa.offset;
2049 else if (cfa_store.reg == regno)
2050 offset = -cfa_store.offset;
2053 gcc_assert (cfa_temp.reg == regno);
2054 offset = -cfa_temp.offset;
/* Rule 14: POST_INC of the temp register.  */
2061 gcc_assert (cfa_temp.reg == dwf_regno (XEXP (XEXP (dest, 0), 0)));
2062 offset = -cfa_temp.offset;
2063 cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
2071 /* If the source operand of this MEM operation is a memory,
2072 we only care how much stack grew. */
/* Storing the CFA register itself to the stack (Rules 17-20);
   the leading conjunct(s) of this condition are elided.  */
2077 && REGNO (src) != STACK_POINTER_REGNUM
2078 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
2079 && dwf_regno (src) == cfa.reg)
2081 /* We're storing the current CFA reg into the stack. */
2083 if (cfa.offset == 0)
2086 /* If stack is aligned, putting CFA reg into stack means
2087 we can no longer use reg + offset to represent CFA.
2088 Here we use DW_CFA_def_cfa_expression instead. The
2089 result of this expression equals to the original CFA
2092 && fde->stack_realign
2093 && cfa.indirect == 0
2094 && cfa.reg != dw_frame_pointer_regnum)
2096 dw_cfa_location cfa_exp;
2098 gcc_assert (fde->drap_reg == cfa.reg);
2100 cfa_exp.indirect = 1;
2101 cfa_exp.reg = dw_frame_pointer_regnum;
2102 cfa_exp.base_offset = offset;
2105 fde->drap_reg_saved = 1;
2107 def_cfa_1 (&cfa_exp);
2111 /* If the source register is exactly the CFA, assume
2112 we're saving SP like any other register; this happens
2115 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
2120 /* Otherwise, we'll need to look in the stack to
2121 calculate the CFA. */
2122 rtx x = XEXP (dest, 0);
2126 gcc_assert (REG_P (x));
2128 cfa.reg = dwf_regno (x);
2129 cfa.base_offset = offset;
/* Ordinary register save: let the target describe how SRC is
   spread across hard registers, then queue the save(s).  */
2140 span = targetm.dwarf_register_span (src);
2142 queue_reg_save (src, NULL_RTX, offset);
2145 /* We have a PARALLEL describing where the contents of SRC live.
2146 Queue register saves for each piece of the PARALLEL. */
2149 HOST_WIDE_INT span_offset = offset;
2151 gcc_assert (GET_CODE (span) == PARALLEL);
2153 limit = XVECLEN (span, 0);
2154 for (par_index = 0; par_index < limit; par_index++)
2156 rtx elem = XVECEXP (span, 0, par_index);
2157 queue_reg_save (elem, NULL_RTX, span_offset);
2158 span_offset += GET_MODE_SIZE (GET_MODE (elem));
2168 /* Record call frame debugging information for INSN, which either
2169 sets SP or FP (adjusting how we calculate the frame address) or saves a
2170 register to the stack. If INSN is NULL_RTX, initialize our state.
2172 If AFTER_P is false, we're being called before the insn is emitted,
2173 otherwise after. Call instructions get invoked twice. */
/* NOTE(review): lossy excerpt -- several `break`s, `handled_one = true`
   markers and brace lines are elided (non-contiguous numbering).  */
2176 dwarf2out_frame_debug (rtx insn, bool after_p)
2179 bool handled_one = false;
2180 bool need_flush = false;
/* Flush queued saves when the insn could invalidate them.  */
2182 if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
2183 dwarf2out_flush_queued_reg_saves ();
2185 if (!RTX_FRAME_RELATED_P (insn))
2187 /* ??? This should be done unconditionally since stack adjustments
2188 matter if the stack pointer is not the CFA register anymore but
2189 is still used to save registers. */
2190 if (!ACCUMULATE_OUTGOING_ARGS)
2191 dwarf2out_notice_stack_adjust (insn, after_p)<!-- -->;
2195 any_cfis_emitted = false;
/* Prefer explicit REG_CFA_* notes over pattern analysis; walk the
   notes and dispatch each to its handler.  */
2197 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2198 switch (REG_NOTE_KIND (note))
2200 case REG_FRAME_RELATED_EXPR:
2201 insn = XEXP (note, 0);
2204 case REG_CFA_DEF_CFA:
2205 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2209 case REG_CFA_ADJUST_CFA:
2214 if (GET_CODE (n) == PARALLEL)
2215 n = XVECEXP (n, 0, 0);
2217 dwarf2out_frame_debug_adjust_cfa (n);
2221 case REG_CFA_OFFSET:
2224 n = single_set (insn);
2225 dwarf2out_frame_debug_cfa_offset (n);
2229 case REG_CFA_REGISTER:
2234 if (GET_CODE (n) == PARALLEL)
2235 n = XVECEXP (n, 0, 0);
2237 dwarf2out_frame_debug_cfa_register (n);
2241 case REG_CFA_EXPRESSION:
2244 n = single_set (insn);
2245 dwarf2out_frame_debug_cfa_expression (n);
2249 case REG_CFA_RESTORE:
2254 if (GET_CODE (n) == PARALLEL)
2255 n = XVECEXP (n, 0, 0);
2258 dwarf2out_frame_debug_cfa_restore (n);
2262 case REG_CFA_SET_VDRAP:
2266 dw_fde_ref fde = cfun->fde;
2269 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2271 fde->vdrap_reg = dwf_regno (n);
2277 case REG_CFA_WINDOW_SAVE:
2278 dwarf2out_frame_debug_cfa_window_save ();
2282 case REG_CFA_FLUSH_QUEUE:
2283 /* The actual flush happens below. */
2294 /* Minimize the number of advances by emitting the entire queue
2295 once anything is emitted. */
2296 need_flush |= any_cfis_emitted;
/* No usable notes: fall back to analyzing the insn pattern.  */
2300 insn = PATTERN (insn);
2302 dwarf2out_frame_debug_expr (insn);
2304 /* Check again. A parallel can save and update the same register.
2305 We could probably check just once, here, but this is safer than
2306 removing the check at the start of the function. */
2307 if (any_cfis_emitted || clobbers_queued_reg_save (insn))
2312 dwarf2out_flush_queued_reg_saves ();
2315 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
/* NOTE(review): lossy excerpt -- bounds checks inside the loop and some
   add_cfi calls are elided (non-contiguous numbering).  */
2318 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2320 size_t i, n_old, n_new, n_max;
/* First reconcile the CFA: re-emit the new row's CFA CFI if it
   differs from the old row's.  */
2323 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2324 add_cfi (new_row->cfa_cfi);
2327 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2332 if (old_row->args_size != new_row->args_size)
2333 add_cfi_args_size (new_row->args_size);
/* Then diff the per-register save vectors, emitting restores for
   registers only saved in the old row and updates for changed ones.  */
2335 n_old = VEC_length (dw_cfi_ref, old_row->reg_save);
2336 n_new = VEC_length (dw_cfi_ref, new_row->reg_save);
2337 n_max = MAX (n_old, n_new);
2339 for (i = 0; i < n_max; ++i)
2341 dw_cfi_ref r_old = NULL, r_new = NULL;
2344 r_old = VEC_index (dw_cfi_ref, old_row->reg_save, i);
2346 r_new = VEC_index (dw_cfi_ref, new_row->reg_save, i);
2350 else if (r_new == NULL)
2351 add_cfi_restore (i);
2352 else if (!cfi_equal_p (r_old, r_new))
2357 /* Examine CFI and return true if a cfi label and set_loc is needed
2358 beforehand. Even when generating CFI assembler instructions, we
2359 still have to add the cfi to the list so that lookup_cfa_1 works
2360 later on. When -g2 and above we even need to force emitting of
2361 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2362 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2363 and so don't use convert_cfa_to_fb_loc_list. */
/* NOTE(review): lossy excerpt; return statements elided.  */
2366 cfi_label_required_p (dw_cfi_ref cfi)
/* When not emitting .cfi_* assembler directives, a label is always
   needed (handling on the elided line that follows).  */
2368 if (!dwarf2out_do_cfi_asm ())
/* DWARF2 + full debug info: CFA-changing opcodes need a label so the
   frame-base location list can be built.  */
2371 if (dwarf_version == 2
2372 && debug_info_level > DINFO_LEVEL_TERSE
2373 && (write_symbols == DWARF2_DEBUG
2374 || write_symbols == VMS_AND_DWARF2_DEBUG))
2376 switch (cfi->dw_cfi_opc)
2378 case DW_CFA_def_cfa_offset:
2379 case DW_CFA_def_cfa_offset_sf:
2380 case DW_CFA_def_cfa_register:
2381 case DW_CFA_def_cfa:
2382 case DW_CFA_def_cfa_sf:
2383 case DW_CFA_def_cfa_expression:
2384 case DW_CFA_restore_state:
2393 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2394 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
/* NOTE(review): lossy excerpt -- declarations (insn/next/first), the
   `first` bookkeeping, and several brace lines are elided.  */
2397 add_cfis_to_fde (void)
2399 dw_fde_ref fde = cfun->fde;
2401 /* We always start with a function_begin label. */
2404 for (insn = get_insns (); insn; insn = next)
2406 next = NEXT_INSN (insn);
/* Remember where the FDE switches text sections; advance_loc cannot
   cross a section boundary.  */
2408 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2410 fde->dw_fde_switch_cfi_index
2411 = VEC_length (dw_cfi_ref, fde->dw_fde_cfi);
2412 /* Don't attempt to advance_loc4 between labels
2413 in different sections. */
2417 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2419 bool required = cfi_label_required_p (NOTE_CFI (insn));
/* Coalesce a run of consecutive CFI notes under one label.  */
2420 while (next && NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2422 required |= cfi_label_required_p (NOTE_CFI (next));
2423 next = NEXT_INSN (next);
2427 int num = dwarf2out_cfi_label_num;
2428 const char *label = dwarf2out_cfi_label ();
2432 /* Set the location counter to the new label. */
2434 xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2435 : DW_CFA_advance_loc4);
2436 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2437 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, xcfi);
2439 tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2440 NOTE_LABEL_NUMBER (tmp) = num;
/* Push the coalesced CFIs themselves onto the FDE.  */
2445 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, NOTE_CFI (insn));
2446 insn = NEXT_INSN (insn);
2448 while (insn != next);
2454 /* Scan the function and create the initial set of CFI notes. */
/* NOTE(review): lossy excerpt -- declarations, `continue`s and some
   conditions are elided (non-contiguous numbering).  */
2457 create_cfi_notes (void)
2461 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
/* New CFI notes are emitted after ADD_CFI_INSN; default to just
   before the current insn.  */
2465 add_cfi_insn = PREV_INSN (insn);
2467 if (BARRIER_P (insn))
2469 dwarf2out_frame_debug (insn, false);
2475 switch (NOTE_KIND (insn))
2477 case NOTE_INSN_PROLOGUE_END:
2478 dwarf2out_flush_queued_reg_saves ();
2481 case NOTE_INSN_EPILOGUE_BEG:
2482 #if defined(HAVE_epilogue)
2483 dwarf2out_cfi_begin_epilogue (insn);
2487 case NOTE_INSN_CFA_RESTORE_STATE:
2488 add_cfi_insn = insn;
2489 dwarf2out_frame_debug_restore_state ();
2492 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
2493 /* In dwarf2out_switch_text_section, we'll begin a new FDE
2494 for the portion of the function in the alternate text
2495 section. The row state at the very beginning of that
2496 new FDE will be exactly the row state from the CIE.
2497 Emit whatever CFIs are necessary to make CUR_ROW current. */
2498 add_cfi_insn = insn;
2499 change_cfi_row (cie_cfi_row, cur_row);
2505 if (!NONDEBUG_INSN_P (insn))
2508 pat = PATTERN (insn);
2509 if (asm_noperands (pat) >= 0)
2511 dwarf2out_frame_debug (insn, false);
/* Delay slot SEQUENCEs: process each slotted insn individually.  */
2515 if (GET_CODE (pat) == SEQUENCE)
2517 int i, n = XVECLEN (pat, 0);
2518 for (i = 1; i < n; ++i)
2519 dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
2523 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL)<!-- -->)
2524 dwarf2out_frame_debug (insn, false);
2526 /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
2527 Putting the note after the VEC should be ok. */
2528 if (!tablejump_p (insn, NULL, &add_cfi_insn))
2529 add_cfi_insn = insn;
/* Second (after) invocation for calls and normal insns.  */
2531 dwarf2out_frame_debug (insn, true);
2534 add_cfi_insn = NULL;
2537 /* Determine if we need to save and restore CFI information around this
2538 epilogue. If SIBCALL is true, then this is a sibcall epilogue. If
2539 we do need to save/restore, then emit the save now, and insert a
2540 NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream. */
/* NOTE(review): lossy excerpt -- several `break`s, early returns and
   brace lines are elided (non-contiguous numbering).  */
2543 dwarf2out_cfi_begin_epilogue (rtx insn)
2545 bool saw_frp = false;
2548 /* Scan forward to the return insn, noticing if there are possible
2549 frame related insns. */
2550 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
2555 /* Look for both regular and sibcalls to end the block. */
2556 if (returnjump_p (i))
2558 if (CALL_P (i) && SIBLING_CALL_P (i))
/* A delay-slot SEQUENCE may hide the return/sibcall; also scan its
   elements for frame-related insns.  */
2561 if (GET_CODE (PATTERN (i)) == SEQUENCE)
2564 rtx seq = PATTERN (i);
2566 if (returnjump_p (XVECEXP (seq, 0, 0)))
2568 if (CALL_P (XVECEXP (seq, 0, 0))
2569 && SIBLING_CALL_P (XVECEXP (seq, 0, 0)))
2572 for (idx = 0; idx < XVECLEN (seq, 0); idx++)
2573 if (RTX_FRAME_RELATED_P (XVECEXP (seq, 0, idx)))
2577 if (RTX_FRAME_RELATED_P (i))
2581 /* If the port doesn't emit epilogue unwind info, we don't need a
2582 save/restore pair. */
2586 /* Otherwise, search forward to see if the return insn was the last
2587 basic block of the function. If so, we don't need save/restore. */
2588 gcc_assert (i != NULL);
2589 i = next_real_insn (i);
2593 /* Insert the restore before that next real insn in the stream, and before
2594 a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
2595 properly nested. This should be after any label or alignment. This
2596 will be pushed into the CFI stream by the function below. */
2599 rtx p = PREV_INSN (i);
2602 if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
2606 emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);
/* Ask for DW_CFA_remember_state before the next CFI is output.  */
2608 emit_cfa_remember = true;
2610 /* And emulate the state save. */
2611 gcc_assert (remember_row == NULL);
2612 remember_row = copy_cfi_row (cur_row);
2615 /* A "subroutine" of dwarf2out_cfi_begin_epilogue. Emit the restore
/* NOTE(review): lossy excerpt; the add_cfi call presumably between the
   assignment and the assert is elided -- TODO confirm.  */
2619 dwarf2out_frame_debug_restore_state (void)
2621 dw_cfi_ref cfi = new_cfi ();
/* Emit DW_CFA_restore_state to undo the earlier remember_state.  */
2623 cfi->dw_cfi_opc = DW_CFA_restore_state;
/* Swap back to the row snapshot taken in dwarf2out_cfi_begin_epilogue.  */
2626 gcc_assert (remember_row != NULL);
2627 free_cfi_row (cur_row);
2628 cur_row = remember_row;
2629 remember_row = NULL;
2632 /* Record the initial position of the return address. RTL is
2633 INCOMING_RETURN_ADDR_RTX. */
/* NOTE(review): lossy excerpt -- case labels (REG/MEM/PLUS etc.) and
   gcc_unreachable defaults are elided (non-contiguous numbering).  */
2636 initial_return_save (rtx rtl)
2638 unsigned int reg = INVALID_REGNUM;
2639 HOST_WIDE_INT offset = 0;
2641 switch (GET_CODE (rtl))
2644 /* RA is in a register. */
2645 reg = dwf_regno (rtl);
2649 /* RA is on the stack. */
2650 rtl = XEXP (rtl, 0);
/* The stack address must be SP-relative: bare SP, SP+const or
   SP-const.  */
2651 switch (GET_CODE (rtl))
2654 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
2659 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2660 offset = INTVAL (XEXP (rtl, 1));
2664 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2665 offset = -INTVAL (XEXP (rtl, 1));
2675 /* The return address is at some offset from any value we can
2676 actually load. For instance, on the SPARC it is in %i7+8. Just
2677 ignore the offset for now; it doesn't matter for unwinding frames. */
2678 gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
2679 initial_return_save (XEXP (rtl, 0));
/* Record the save against the return column, CFA-relative.  */
2686 if (reg != DWARF_FRAME_RETURN_COLUMN)
2688 if (reg != INVALID_REGNUM)
2689 record_reg_saved_in_reg (rtl, pc_rtx);
2690 reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
2694 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2695 state at each location within the function. These notes will be
2696 emitted during pass_final. */
/* NOTE(review): lossy excerpt -- the def_cfa_1 call after setting up
   LOC, some case labels, and the return value are elided.  */
2699 execute_dwarf2_frame (void)
2701 /* The first time we're called, compute the incoming frame state. */
2702 if (cie_cfi_vec == NULL)
2704 dw_cfa_location loc;
/* Cache the DWARF column numbers for SP and FP once.  */
2706 dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
2707 dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
/* CIE construction: CFIs emitted now go into the CIE vector.  */
2709 add_cfi_vec = &cie_cfi_vec;
2710 cie_cfi_row = cur_row = new_cfi_row ();
2712 /* On entry, the Canonical Frame Address is at SP. */
2713 memset(&loc, 0, sizeof (loc));
2714 loc.reg = dw_stack_pointer_regnum;
2715 loc.offset = INCOMING_FRAME_SP_OFFSET;
2718 if (targetm.debug_unwind_info () == UI_DWARF2
2719 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2721 initial_return_save (INCOMING_RETURN_ADDR_RTX);
2723 /* For a few targets, we have the return address incoming into a
2724 register, but choose a different return column. This will result
2725 in a DW_CFA_register for the return, and an entry in
2726 regs_saved_in_regs to match. If the target later stores that
2727 return address register to the stack, we want to be able to emit
2728 the DW_CFA_offset against the return column, not the intermediate
2729 save register. Save the contents of regs_saved_in_regs so that
2730 we can re-initialize it at the start of each function. */
2731 switch (VEC_length (reg_saved_in_data, regs_saved_in_regs))
2736 cie_return_save = ggc_alloc_reg_saved_in_data ();
2737 *cie_return_save = *VEC_index (reg_saved_in_data,
2738 regs_saved_in_regs, 0);
2739 regs_saved_in_regs = NULL;
2749 /* Set up state for generating call frame debug info. */
2750 gcc_checking_assert (queued_reg_saves == NULL);
2751 gcc_checking_assert (regs_saved_in_regs == NULL);
/* Per-function state starts from a copy of the CIE row.  */
2753 cur_row = copy_cfi_row (cie_cfi_row);
2754 if (cie_return_save)
2755 VEC_safe_push (reg_saved_in_data, gc, regs_saved_in_regs, cie_return_save);
2757 cfa_store = cur_row->cfa;
2760 memset (&cfa_temp, 0, sizeof(cfa_temp));
2761 cfa_temp.reg = INVALID_REGNUM;
2763 dwarf2out_alloc_current_fde ();
/* Do the real work, then clear per-function state for GC.  */
2766 create_cfi_notes ();
2769 /* Reset all function-specific information, particularly for GC. */
2770 XDELETEVEC (barrier_args_size);
2771 barrier_args_size = NULL;
2772 regs_saved_in_regs = NULL;
2773 queued_reg_saves = NULL;
2775 free_cfi_row (cur_row);
2781 /* Convert a DWARF call frame info. operation to its string name */
/* NOTE(review): lossy excerpt -- the switch header and a few case
   labels (e.g. for DW_CFA_offset, DW_CFA_nop) are elided.  Purely a
   debug/dump helper mapping opcode -> printable name.  */
2784 dwarf_cfi_name (unsigned int cfi_opc)
2788 case DW_CFA_advance_loc:
2789 return "DW_CFA_advance_loc";
2791 return "DW_CFA_offset";
2792 case DW_CFA_restore:
2793 return "DW_CFA_restore";
2795 return "DW_CFA_nop";
2796 case DW_CFA_set_loc:
2797 return "DW_CFA_set_loc";
2798 case DW_CFA_advance_loc1:
2799 return "DW_CFA_advance_loc1";
2800 case DW_CFA_advance_loc2:
2801 return "DW_CFA_advance_loc2";
2802 case DW_CFA_advance_loc4:
2803 return "DW_CFA_advance_loc4";
2804 case DW_CFA_offset_extended:
2805 return "DW_CFA_offset_extended";
2806 case DW_CFA_restore_extended:
2807 return "DW_CFA_restore_extended";
2808 case DW_CFA_undefined:
2809 return "DW_CFA_undefined";
2810 case DW_CFA_same_value:
2811 return "DW_CFA_same_value";
2812 case DW_CFA_register:
2813 return "DW_CFA_register";
2814 case DW_CFA_remember_state:
2815 return "DW_CFA_remember_state";
2816 case DW_CFA_restore_state:
2817 return "DW_CFA_restore_state";
2818 case DW_CFA_def_cfa:
2819 return "DW_CFA_def_cfa";
2820 case DW_CFA_def_cfa_register:
2821 return "DW_CFA_def_cfa_register";
2822 case DW_CFA_def_cfa_offset:
2823 return "DW_CFA_def_cfa_offset";
2826 case DW_CFA_def_cfa_expression:
2827 return "DW_CFA_def_cfa_expression";
2828 case DW_CFA_expression:
2829 return "DW_CFA_expression";
2830 case DW_CFA_offset_extended_sf:
2831 return "DW_CFA_offset_extended_sf";
2832 case DW_CFA_def_cfa_sf:
2833 return "DW_CFA_def_cfa_sf";
2834 case DW_CFA_def_cfa_offset_sf:
2835 return "DW_CFA_def_cfa_offset_sf";
2837 /* SGI/MIPS specific */
2838 case DW_CFA_MIPS_advance_loc8:
2839 return "DW_CFA_MIPS_advance_loc8";
2841 /* GNU extensions */
2842 case DW_CFA_GNU_window_save:
2843 return "DW_CFA_GNU_window_save";
2844 case DW_CFA_GNU_args_size:
2845 return "DW_CFA_GNU_args_size";
2846 case DW_CFA_GNU_negative_offset_extended:
2847 return "DW_CFA_GNU_negative_offset_extended";
2850 return "DW_CFA_<unknown>";
/* This routine will generate the correct assembly data for a location
   description based on a cfi entry with a complex address.
   CFI is the CFA- or register-expression entry to emit; FOR_EH selects
   the EH register numbering via DWARF2_FRAME_REG_OUT.  */
output_cfa_loc (dw_cfi_ref cfi, int for_eh)
  dw_loc_descr_ref loc;

  if (cfi->dw_cfi_opc == DW_CFA_expression)
      /* DW_CFA_expression: emit the target register number first; the
	 location expression itself lives in operand 2.  */
      DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, r, NULL);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    /* Otherwise the expression is operand 1.  */
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128 (size, NULL);

  /* Now output the operations themselves.  */
  output_loc_sequence (loc, for_eh);
/* Similar, but used for .cfi_escape: writes the operand bytes as raw
   comma-separated text to asm_out_file rather than via dw2_asm_output_*
   annotated data.  */
output_cfa_loc_raw (dw_cfi_ref cfi)
  dw_loc_descr_ref loc;

  if (cfi->dw_cfi_opc == DW_CFA_expression)
      /* Register number first, then the expression from operand 2.
	 The constant 1 is the FOR_EH argument (cf. output_cfa_loc).  */
      DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (asm_out_file, "%#x,", r);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    /* Otherwise the expression is operand 1.  */
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128_raw (size);
  fputc (',', asm_out_file);

  /* Now output the operations themselves.  */
  output_loc_sequence_raw (loc);
/* Output a Call Frame Information opcode and its operand(s).
   CFI is the instruction to emit, FDE the enclosing FDE (used to track
   the current label for advance_loc deltas), and FOR_EH selects EH
   versus debug register numbering.  */
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
  /* The "primary" opcodes pack their operand into the low six bits of
     the opcode byte itself; handle those forms first.  */
  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      /* The stored offset is scaled down by the data alignment factor.  */
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
      /* All remaining opcodes occupy a full byte, followed by their
	 explicit operands.  */
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	case DW_CFA_set_loc:
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  /* Remember the label so that subsequent advance_loc deltas are
	     computed relative to it.  */
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;

	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;

	case DW_CFA_offset_extended:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);

	case DW_CFA_def_cfa:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  /* Note: the CFA offset is emitted unscaled (no div_data_align).  */
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);

	case DW_CFA_offset_extended_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);

	/* These four take a single register operand.  */
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);

	case DW_CFA_register:
	  /* Two register operands: the saved register and its new home.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);

	case DW_CFA_GNU_window_save:

	/* Complex location expressions are delegated to output_cfa_loc.  */
	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	  output_cfa_loc (cfi, for_eh);

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
/* Similar, but do it via assembler directives instead.  F is either
   asm_out_file (real output) or a debugging-dump stream; some cases
   below behave differently for the two.  */
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routine is also used for debugging dumps, so
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");

    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");

    case DW_CFA_GNU_args_size:
      /* No assembler directive exists for this opcode: for real output
	 emit it as a raw .cfi_escape byte sequence; for dumps print a
	 human-readable form.  */
      if (f == asm_out_file)
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fprintf (f, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC,
		   ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fprintf (f, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");

    case DW_CFA_def_cfa_expression:
      if (f != asm_out_file)
	  /* Dump streams get a placeholder; the expression bytes are only
	     produced for real assembly output below.  */
	  fprintf (f, "\t.cfi_def_cfa_expression ...\n");
    case DW_CFA_expression:
      if (f != asm_out_file)
	  fprintf (f, "\t.cfi_cfa_expression ...\n");
      /* Real output: escape the opcode byte and the raw expression.  */
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
/* Emit the CFI instruction CFI.  When the directive-based output mode
   is in use, write it as a .cfi_* assembler directive.  */
dwarf2out_emit_cfi (dw_cfi_ref cfi)
  if (dwarf2out_do_cfi_asm ())
    output_cfi_directive (asm_out_file, cfi);
/* Save the result of dwarf2out_do_cfi_asm across PCH.
   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
/* Decide whether we want to emit frame unwind information for the current
   translation unit.  */
dwarf2out_do_frame (void)
  /* We want to emit correct CFA location expressions or lists, so we
     have to return true if we're going to output debug info, even if
     we're not going to output frame or unwind info.  */
  if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)

  /* A prior positive dwarf2out_do_cfi_asm decision implies frame info.  */
  if (saved_do_cfi_asm > 0)

  /* The target wants dwarf2 for its debug unwind tables...  */
  if (targetm.debug_unwind_info () == UI_DWARF2)

  /* ... or exception unwinding needs dwarf2.  */
  if ((flag_unwind_tables || flag_exceptions)
      && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
/* Decide whether to emit frame unwind via assembler directives
   (.cfi_*).  The result is cached in SAVED_DO_CFI_ASM, which is
   tri-state (0 unset, >0 true, <0 false) and survives PCH.  */
dwarf2out_do_cfi_asm (void)
#ifdef MIPS_DEBUGGING_INFO

  /* Return the cached decision if one was already made.  */
  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  /* Directives require both the command-line option and frame output.  */
  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled: only absolute or
     pc-relative modifier bits (enc & 0x70) are accepted, for both the
     personality (code 2) and the plain-code (code 0) encodings.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)

  /* Every check passed: record the positive decision.  */
  saved_do_cfi_asm = 1;
/* Gate function for the dwarf2 CFI pass: run it only when frame info
   will actually be emitted.  */
gate_dwarf2_frame (void)
#ifndef HAVE_prologue
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
/* Pass descriptor for the dwarf2 CFI-note generation pass; gate and
   execute callbacks are gate_dwarf2_frame and execute_dwarf2_frame.  */
struct rtl_opt_pass pass_dwarf2_frame =
  "dwarf2",				/* name */
  gate_dwarf2_frame,			/* gate */
  execute_dwarf2_frame,			/* execute */
  0,					/* static_pass_number */
  TV_FINAL,				/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
3286 #include "gt-dwarf2cfi.h"