1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
58 /* Maximum size (in bytes) of an artificially generated label. */
59 #define MAX_ARTIFICIAL_LABEL_BYTES 30
61 /* A collected description of an entire row of the abstract CFI table. */
62 typedef struct GTY(()) dw_cfi_row_struct
64 /* The expression that computes the CFA, expressed in two different ways.
65 The CFA member for the simple cases, and the full CFI expression for
66 the complex cases. The later will be a DW_CFA_cfa_expression. */
70 /* The expressions for any register column that is saved. */
73 /* The value of any DW_CFA_GNU_args_size. */
74 HOST_WIDE_INT args_size;
77 typedef dw_cfi_row *dw_cfi_row_ref;
79 /* A vector of call frame insns for the CIE. */
82 /* The state of the first row of the FDE table, which includes the
83 state provided by the CIE. */
84 static GTY(()) dw_cfi_row_ref cie_cfi_row;
86 static GTY(()) unsigned long dwarf2out_cfi_label_num;
88 /* The insn after which a new CFI note should be emitted. */
91 /* When non-null, add_cfi will add the CFI to this vector. */
92 static cfi_vec *add_cfi_vec;
94 /* True if remember_state should be emitted before following CFI directive. */
95 static bool emit_cfa_remember;
97 /* True if any CFI directives were emitted at the current insn. */
98 static bool any_cfis_emitted;
100 /* Short-hand for commonly used register numbers. */
101 static unsigned dw_stack_pointer_regnum;
102 static unsigned dw_frame_pointer_regnum;
105 static void dwarf2out_cfi_begin_epilogue (rtx insn);
106 static void dwarf2out_frame_debug_restore_state (void);
109 /* Hook used by __throw. */
112 expand_builtin_dwarf_sp_column (void)
114 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
115 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
118 /* MEM is a memory reference for the register size table, each element of
119 which has mode MODE. Initialize column C as a return address column. */
122 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
124 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
125 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
126 emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
129 /* Generate code to initialize the register size table. */
132 expand_builtin_init_dwarf_reg_sizes (tree address)
135 enum machine_mode mode = TYPE_MODE (char_type_node);
136 rtx addr = expand_normal (address);
137 rtx mem = gen_rtx_MEM (BLKmode, addr);
138 bool wrote_return_column = false;
140 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
142 unsigned int dnum = DWARF_FRAME_REGNUM (i);
143 unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
145 if (rnum < DWARF_FRAME_REGISTERS)
147 HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
148 enum machine_mode save_mode = reg_raw_mode[i];
151 if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
152 save_mode = choose_hard_reg_mode (i, 1, true);
153 if (dnum == DWARF_FRAME_RETURN_COLUMN)
155 if (save_mode == VOIDmode)
157 wrote_return_column = true;
159 size = GET_MODE_SIZE (save_mode);
163 emit_move_insn (adjust_address (mem, mode, offset),
164 gen_int_mode (size, mode));
168 if (!wrote_return_column)
169 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
171 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
172 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
175 targetm.init_dwarf_reg_sizes_extra (address);
178 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
180 static inline HOST_WIDE_INT
181 div_data_align (HOST_WIDE_INT off)
183 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
184 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
188 /* Return true if we need a signed version of a given opcode
189 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
192 need_data_align_sf_opcode (HOST_WIDE_INT off)
194 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
197 /* Return a pointer to a newly allocated Call Frame Instruction. */
199 static inline dw_cfi_ref
202 dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();
204 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
205 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
210 /* Return a newly allocated CFI row, with no defined data. */
212 static dw_cfi_row_ref
215 dw_cfi_row_ref row = ggc_alloc_cleared_dw_cfi_row ();
217 row->cfa.reg = INVALID_REGNUM;
222 /* Return a copy of an existing CFI row. */
224 static dw_cfi_row_ref
225 copy_cfi_row (dw_cfi_row_ref src)
227 dw_cfi_row_ref dst = ggc_alloc_dw_cfi_row ();
230 dst->reg_save = VEC_copy (dw_cfi_ref, gc, src->reg_save);
235 /* Free an allocated CFI row. */
238 free_cfi_row (dw_cfi_row_ref row)
242 VEC_free (dw_cfi_ref, gc, row->reg_save);
247 /* Generate a new label for the CFI info to refer to. */
250 dwarf2out_cfi_label (void)
252 int num = dwarf2out_cfi_label_num++;
255 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
257 return xstrdup (label);
260 /* Add CFI either to the current insn stream or to a vector, or both. */
263 add_cfi (dw_cfi_ref cfi)
265 if (emit_cfa_remember)
267 dw_cfi_ref cfi_remember;
269 /* Emit the state save. */
270 emit_cfa_remember = false;
271 cfi_remember = new_cfi ();
272 cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
273 add_cfi (cfi_remember);
276 any_cfis_emitted = true;
277 if (cfi_insn != NULL)
279 cfi_insn = emit_note_after (NOTE_INSN_CFI, cfi_insn);
280 NOTE_CFI (cfi_insn) = cfi;
282 if (add_cfi_vec != NULL)
283 VEC_safe_push (dw_cfi_ref, gc, *add_cfi_vec, cfi);
286 /* This function fills in aa dw_cfa_location structure from a dwarf location
287 descriptor sequence. */
290 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
292 struct dw_loc_descr_struct *ptr;
294 cfa->base_offset = 0;
298 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
300 enum dwarf_location_atom op = ptr->dw_loc_opc;
336 cfa->reg = op - DW_OP_reg0;
339 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
373 cfa->reg = op - DW_OP_breg0;
374 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
377 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
378 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
383 case DW_OP_plus_uconst:
384 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
392 /* Find the previous value for the CFA, iteratively. CFI is the opcode
393 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
394 one level of remember/restore state processing. */
397 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
399 switch (cfi->dw_cfi_opc)
401 case DW_CFA_def_cfa_offset:
402 case DW_CFA_def_cfa_offset_sf:
403 loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
405 case DW_CFA_def_cfa_register:
406 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
409 case DW_CFA_def_cfa_sf:
410 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
411 loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
413 case DW_CFA_def_cfa_expression:
414 get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
417 case DW_CFA_remember_state:
418 gcc_assert (!remember->in_use);
420 remember->in_use = 1;
422 case DW_CFA_restore_state:
423 gcc_assert (remember->in_use);
425 remember->in_use = 0;
433 /* The current, i.e. most recently generated, row of the CFI table. */
434 static dw_cfi_row_ref cur_row;
436 /* The row state from a preceeding DW_CFA_remember_state. */
437 static dw_cfi_row_ref remember_row;
439 /* The register used for saving registers to the stack, and its offset
441 static dw_cfa_location cfa_store;
443 /* A temporary register holding an integral value used in adjusting SP
444 or setting up the store_reg. The "offset" field holds the integer
445 value, not an offset. */
446 static dw_cfa_location cfa_temp;
448 /* The (really) current value for DW_CFA_GNU_args_size. We delay actually
449 emitting this data, i.e. updating CUR_ROW, without async unwind. */
450 static HOST_WIDE_INT args_size;
452 /* Determine if two dw_cfa_location structures define the same data. */
455 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
457 return (loc1->reg == loc2->reg
458 && loc1->offset == loc2->offset
459 && loc1->indirect == loc2->indirect
460 && (loc1->indirect == 0
461 || loc1->base_offset == loc2->base_offset));
464 /* This routine does the actual work. The CFA is now calculated from
465 the dw_cfa_location structure. */
468 def_cfa_1 (dw_cfa_location *loc_p)
471 dw_cfa_location loc = *loc_p;
473 if (cfa_store.reg == loc.reg && loc.indirect == 0)
474 cfa_store.offset = loc.offset;
476 /* If nothing changed, no need to issue any call frame instructions. */
477 if (cfa_equal_p (&loc, &cur_row->cfa))
482 if (loc.reg == cur_row->cfa.reg && !loc.indirect && !cur_row->cfa.indirect)
484 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
485 the CFA register did not change but the offset did. The data
486 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
487 in the assembler via the .cfi_def_cfa_offset directive. */
489 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
491 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
492 cfi->dw_cfi_oprnd1.dw_cfi_offset = loc.offset;
495 #ifndef MIPS_DEBUGGING_INFO /* SGI dbx thinks this means no offset. */
496 else if (loc.offset == cur_row->cfa.offset
497 && cur_row->cfa.reg != INVALID_REGNUM
499 && !cur_row->cfa.indirect)
501 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
502 indicating the CFA register has changed to <register> but the
503 offset has not changed. */
504 cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
505 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
509 else if (loc.indirect == 0)
511 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
512 indicating the CFA register has changed to <register> with
513 the specified offset. The data factoring for DW_CFA_def_cfa_sf
514 happens in output_cfi, or in the assembler via the .cfi_def_cfa
517 cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
519 cfi->dw_cfi_opc = DW_CFA_def_cfa;
520 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
521 cfi->dw_cfi_oprnd2.dw_cfi_offset = loc.offset;
525 /* Construct a DW_CFA_def_cfa_expression instruction to
526 calculate the CFA using a full location expression since no
527 register-offset pair is available. */
528 struct dw_loc_descr_struct *loc_list;
530 cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
531 loc_list = build_cfa_loc (&loc, 0);
532 cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
534 cur_row->cfa_cfi = cfi;
541 /* Add the CFI for saving a register. REG is the CFA column number.
542 If SREG is -1, the register is saved at OFFSET from the CFA;
543 otherwise it is saved in SREG. */
546 reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
548 dw_fde_ref fde = cfun ? cfun->fde : NULL;
549 dw_cfi_ref cfi = new_cfi ();
551 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
553 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
555 && fde->stack_realign
556 && sreg == INVALID_REGNUM)
558 cfi->dw_cfi_opc = DW_CFA_expression;
559 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
560 cfi->dw_cfi_oprnd2.dw_cfi_loc
561 = build_cfa_aligned_loc (&cur_row->cfa, offset,
562 fde->stack_realignment);
564 else if (sreg == INVALID_REGNUM)
566 if (need_data_align_sf_opcode (offset))
567 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
568 else if (reg & ~0x3f)
569 cfi->dw_cfi_opc = DW_CFA_offset_extended;
571 cfi->dw_cfi_opc = DW_CFA_offset;
572 cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
574 else if (sreg == reg)
575 cfi->dw_cfi_opc = DW_CFA_same_value;
578 cfi->dw_cfi_opc = DW_CFA_register;
579 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
585 /* Given a SET, calculate the amount of stack adjustment it
589 stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
590 HOST_WIDE_INT cur_offset)
592 const_rtx src = SET_SRC (pattern);
593 const_rtx dest = SET_DEST (pattern);
594 HOST_WIDE_INT offset = 0;
597 if (dest == stack_pointer_rtx)
599 code = GET_CODE (src);
601 /* Assume (set (reg sp) (reg whatever)) sets args_size
603 if (code == REG && src != stack_pointer_rtx)
605 offset = -cur_args_size;
606 #ifndef STACK_GROWS_DOWNWARD
609 return offset - cur_offset;
612 if (! (code == PLUS || code == MINUS)
613 || XEXP (src, 0) != stack_pointer_rtx
614 || !CONST_INT_P (XEXP (src, 1)))
617 /* (set (reg sp) (plus (reg sp) (const_int))) */
618 offset = INTVAL (XEXP (src, 1));
624 if (MEM_P (src) && !MEM_P (dest))
628 /* (set (mem (pre_dec (reg sp))) (foo)) */
629 src = XEXP (dest, 0);
630 code = GET_CODE (src);
636 if (XEXP (src, 0) == stack_pointer_rtx)
638 rtx val = XEXP (XEXP (src, 1), 1);
639 /* We handle only adjustments by constant amount. */
640 gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
641 && CONST_INT_P (val));
642 offset = -INTVAL (val);
649 if (XEXP (src, 0) == stack_pointer_rtx)
651 offset = GET_MODE_SIZE (GET_MODE (dest));
658 if (XEXP (src, 0) == stack_pointer_rtx)
660 offset = -GET_MODE_SIZE (GET_MODE (dest));
675 /* Precomputed args_size for CODE_LABELs and BARRIERs preceeding them,
676 indexed by INSN_UID. */
678 static HOST_WIDE_INT *barrier_args_size;
680 /* Helper function for compute_barrier_args_size. Handle one insn. */
683 compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
684 VEC (rtx, heap) **next)
686 HOST_WIDE_INT offset = 0;
689 if (! RTX_FRAME_RELATED_P (insn))
691 if (prologue_epilogue_contains (insn))
693 else if (GET_CODE (PATTERN (insn)) == SET)
694 offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
695 else if (GET_CODE (PATTERN (insn)) == PARALLEL
696 || GET_CODE (PATTERN (insn)) == SEQUENCE)
698 /* There may be stack adjustments inside compound insns. Search
700 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
701 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
702 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
703 cur_args_size, offset);
708 rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
712 expr = XEXP (expr, 0);
713 if (GET_CODE (expr) == PARALLEL
714 || GET_CODE (expr) == SEQUENCE)
715 for (i = 1; i < XVECLEN (expr, 0); i++)
717 rtx elem = XVECEXP (expr, 0, i);
719 if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
720 offset += stack_adjust_offset (elem, cur_args_size, offset);
725 #ifndef STACK_GROWS_DOWNWARD
729 cur_args_size += offset;
730 if (cur_args_size < 0)
735 rtx dest = JUMP_LABEL (insn);
739 if (barrier_args_size [INSN_UID (dest)] < 0)
741 barrier_args_size [INSN_UID (dest)] = cur_args_size;
742 VEC_safe_push (rtx, heap, *next, dest);
747 return cur_args_size;
750 /* Walk the whole function and compute args_size on BARRIERs. */
753 compute_barrier_args_size (void)
755 int max_uid = get_max_uid (), i;
757 VEC (rtx, heap) *worklist, *next, *tmp;
759 barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
760 for (i = 0; i < max_uid; i++)
761 barrier_args_size[i] = -1;
763 worklist = VEC_alloc (rtx, heap, 20);
764 next = VEC_alloc (rtx, heap, 20);
766 barrier_args_size[INSN_UID (insn)] = 0;
767 VEC_quick_push (rtx, worklist, insn);
770 while (!VEC_empty (rtx, worklist))
772 rtx prev, body, first_insn;
773 HOST_WIDE_INT cur_args_size;
775 first_insn = insn = VEC_pop (rtx, worklist);
776 cur_args_size = barrier_args_size[INSN_UID (insn)];
777 prev = prev_nonnote_insn (insn);
778 if (prev && BARRIER_P (prev))
779 barrier_args_size[INSN_UID (prev)] = cur_args_size;
781 for (; insn; insn = NEXT_INSN (insn))
783 if (INSN_DELETED_P (insn) || NOTE_P (insn))
785 if (BARRIER_P (insn))
790 if (insn == first_insn)
792 else if (barrier_args_size[INSN_UID (insn)] < 0)
794 barrier_args_size[INSN_UID (insn)] = cur_args_size;
799 /* The insns starting with this label have been
800 already scanned or are in the worklist. */
805 body = PATTERN (insn);
806 if (GET_CODE (body) == SEQUENCE)
808 HOST_WIDE_INT dest_args_size = cur_args_size;
809 for (i = 1; i < XVECLEN (body, 0); i++)
810 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
811 && INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
813 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
814 dest_args_size, &next);
817 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
818 cur_args_size, &next);
820 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
821 compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
822 dest_args_size, &next);
825 = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
826 cur_args_size, &next);
830 = compute_barrier_args_size_1 (insn, cur_args_size, &next);
834 if (VEC_empty (rtx, next))
837 /* Swap WORKLIST with NEXT and truncate NEXT for next iteration. */
841 VEC_truncate (rtx, next, 0);
844 VEC_free (rtx, heap, worklist);
845 VEC_free (rtx, heap, next);
848 /* Add a CFI to update the running total of the size of arguments
849 pushed onto the stack. */
852 dwarf2out_args_size (HOST_WIDE_INT size)
856 if (size == cur_row->args_size)
859 cur_row->args_size = size;
862 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
863 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
867 /* Record a stack adjustment of OFFSET bytes. */
870 dwarf2out_stack_adjust (HOST_WIDE_INT offset)
872 dw_cfa_location loc = cur_row->cfa;
874 if (loc.reg == dw_stack_pointer_regnum)
875 loc.offset += offset;
877 if (cfa_store.reg == dw_stack_pointer_regnum)
878 cfa_store.offset += offset;
880 /* ??? The assumption seems to be that if A_O_A, the only CFA adjustments
881 involving the stack pointer are inside the prologue and marked as
882 RTX_FRAME_RELATED_P. That said, should we not verify this assumption
883 by *asserting* A_O_A at this point? Why else would we have a change
884 to the stack pointer? */
885 if (ACCUMULATE_OUTGOING_ARGS)
888 #ifndef STACK_GROWS_DOWNWARD
897 if (flag_asynchronous_unwind_tables)
898 dwarf2out_args_size (args_size);
901 /* Check INSN to see if it looks like a push or a stack adjustment, and
902 make a note of it if it does. EH uses this information to find out
903 how much extra space it needs to pop off the stack. */
906 dwarf2out_notice_stack_adjust (rtx insn, bool after_p)
908 HOST_WIDE_INT offset;
911 /* Don't handle epilogues at all. Certainly it would be wrong to do so
912 with this function. Proper support would require all frame-related
913 insns to be marked, and to be able to handle saving state around
914 epilogues textually in the middle of the function. */
915 if (prologue_epilogue_contains (insn))
918 /* If INSN is an instruction from target of an annulled branch, the
919 effects are for the target only and so current argument size
920 shouldn't change at all. */
922 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
923 && INSN_FROM_TARGET_P (insn))
926 /* If only calls can throw, and we have a frame pointer,
927 save up adjustments until we see the CALL_INSN. */
928 if (!flag_asynchronous_unwind_tables
929 && cur_row->cfa.reg != dw_stack_pointer_regnum)
931 if (CALL_P (insn) && !after_p)
933 /* Extract the size of the args from the CALL rtx itself. */
934 insn = PATTERN (insn);
935 if (GET_CODE (insn) == PARALLEL)
936 insn = XVECEXP (insn, 0, 0);
937 if (GET_CODE (insn) == SET)
938 insn = SET_SRC (insn);
939 gcc_assert (GET_CODE (insn) == CALL);
940 dwarf2out_args_size (INTVAL (XEXP (insn, 1)));
945 if (CALL_P (insn) && !after_p)
947 if (!flag_asynchronous_unwind_tables)
948 dwarf2out_args_size (args_size);
951 else if (BARRIER_P (insn))
953 /* Don't call compute_barrier_args_size () if the only
954 BARRIER is at the end of function. */
955 if (barrier_args_size == NULL && next_nonnote_insn (insn))
956 compute_barrier_args_size ();
957 if (barrier_args_size == NULL)
961 offset = barrier_args_size[INSN_UID (insn)];
967 #ifndef STACK_GROWS_DOWNWARD
971 else if (GET_CODE (PATTERN (insn)) == SET)
972 offset = stack_adjust_offset (PATTERN (insn), args_size, 0);
973 else if (GET_CODE (PATTERN (insn)) == PARALLEL
974 || GET_CODE (PATTERN (insn)) == SEQUENCE)
976 /* There may be stack adjustments inside compound insns. Search
978 for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
979 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
980 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
989 dwarf2out_stack_adjust (offset);
992 /* We delay emitting a register save until either (a) we reach the end
993 of the prologue or (b) the register is clobbered. This clusters
994 register saves so that there are fewer pc advances. */
996 struct GTY(()) queued_reg_save {
997 struct queued_reg_save *next;
999 HOST_WIDE_INT cfa_offset;
1003 static GTY(()) struct queued_reg_save *queued_reg_saves;
1005 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
1006 typedef struct GTY(()) reg_saved_in_data {
1009 } reg_saved_in_data;
1011 DEF_VEC_O (reg_saved_in_data);
1012 DEF_VEC_ALLOC_O (reg_saved_in_data, gc);
1014 /* A set of registers saved in other registers. This is implemented as
1015 a flat array because it normally contains zero or 1 entry, depending
1016 on the target. IA-64 is the big spender here, using a maximum of
1018 static GTY(()) VEC(reg_saved_in_data, gc) *regs_saved_in_regs;
1020 static GTY(()) reg_saved_in_data *cie_return_save;
1022 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
1023 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
1024 used in places where rtl is prohibited. */
1026 static inline unsigned
1027 dwf_regno (const_rtx reg)
1029 return DWARF_FRAME_REGNUM (REGNO (reg));
1032 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1035 compare_reg_or_pc (rtx x, rtx y)
1037 if (REG_P (x) && REG_P (y))
1038 return REGNO (x) == REGNO (y);
1042 /* Record SRC as being saved in DEST. DEST may be null to delete an
1043 existing entry. SRC may be a register or PC_RTX. */
1046 record_reg_saved_in_reg (rtx dest, rtx src)
1048 reg_saved_in_data *elt;
1051 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, elt)
1052 if (compare_reg_or_pc (elt->orig_reg, src))
1055 VEC_unordered_remove(reg_saved_in_data, regs_saved_in_regs, i);
1057 elt->saved_in_reg = dest;
1064 elt = VEC_safe_push(reg_saved_in_data, gc, regs_saved_in_regs, NULL);
1065 elt->orig_reg = src;
1066 elt->saved_in_reg = dest;
1069 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1070 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1073 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1075 struct queued_reg_save *q;
1077 /* Duplicates waste space, but it's also necessary to remove them
1078 for correctness, since the queue gets output in reverse order. */
1079 for (q = queued_reg_saves; q != NULL; q = q->next)
1080 if (compare_reg_or_pc (q->reg, reg))
1085 q = ggc_alloc_queued_reg_save ();
1086 q->next = queued_reg_saves;
1087 queued_reg_saves = q;
1091 q->cfa_offset = offset;
1092 q->saved_reg = sreg;
1095 /* Output all the entries in QUEUED_REG_SAVES. */
1098 dwarf2out_flush_queued_reg_saves (void)
1100 struct queued_reg_save *q;
1102 for (q = queued_reg_saves; q; q = q->next)
1104 unsigned int reg, sreg;
1106 record_reg_saved_in_reg (q->saved_reg, q->reg);
1108 if (q->reg == pc_rtx)
1109 reg = DWARF_FRAME_RETURN_COLUMN;
1111 reg = dwf_regno (q->reg);
1113 sreg = dwf_regno (q->saved_reg);
1115 sreg = INVALID_REGNUM;
1116 reg_save (reg, sreg, q->cfa_offset);
1119 queued_reg_saves = NULL;
1122 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1123 location for? Or, does it clobber a register which we've previously
1124 said that some other register is saved in, and for which we now
1125 have a new location for? */
1128 clobbers_queued_reg_save (const_rtx insn)
1130 struct queued_reg_save *q;
1132 for (q = queued_reg_saves; q; q = q->next)
1135 reg_saved_in_data *rir;
1137 if (modified_in_p (q->reg, insn))
1140 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1141 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1142 && modified_in_p (rir->saved_in_reg, insn))
1149 /* What register, if any, is currently saved in REG? */
1152 reg_saved_in (rtx reg)
1154 unsigned int regn = REGNO (reg);
1155 struct queued_reg_save *q;
1156 reg_saved_in_data *rir;
1159 for (q = queued_reg_saves; q; q = q->next)
1160 if (q->saved_reg && regn == REGNO (q->saved_reg))
1163 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1164 if (regn == REGNO (rir->saved_in_reg))
1165 return rir->orig_reg;
1170 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1173 dwarf2out_frame_debug_def_cfa (rtx pat)
1175 dw_cfa_location loc;
1177 memset (&loc, 0, sizeof (loc));
1179 switch (GET_CODE (pat))
1182 loc.reg = dwf_regno (XEXP (pat, 0));
1183 loc.offset = INTVAL (XEXP (pat, 1));
1187 loc.reg = dwf_regno (pat);
1192 pat = XEXP (pat, 0);
1193 if (GET_CODE (pat) == PLUS)
1195 loc.base_offset = INTVAL (XEXP (pat, 1));
1196 pat = XEXP (pat, 0);
1198 loc.reg = dwf_regno (pat);
1202 /* Recurse and define an expression. */
1209 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1212 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1214 dw_cfa_location loc = cur_row->cfa;
1217 gcc_assert (GET_CODE (pat) == SET);
1218 dest = XEXP (pat, 0);
1219 src = XEXP (pat, 1);
1221 switch (GET_CODE (src))
1224 gcc_assert (dwf_regno (XEXP (src, 0)) == loc.reg);
1225 loc.offset -= INTVAL (XEXP (src, 1));
1235 loc.reg = dwf_regno (dest);
1236 gcc_assert (loc.indirect == 0);
1241 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1244 dwarf2out_frame_debug_cfa_offset (rtx set)
1246 HOST_WIDE_INT offset;
1247 rtx src, addr, span;
1248 unsigned int sregno;
1250 src = XEXP (set, 1);
1251 addr = XEXP (set, 0);
1252 gcc_assert (MEM_P (addr));
1253 addr = XEXP (addr, 0);
1255 /* As documented, only consider extremely simple addresses. */
1256 switch (GET_CODE (addr))
1259 gcc_assert (dwf_regno (addr) == cur_row->cfa.reg);
1260 offset = -cur_row->cfa.offset;
1263 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_row->cfa.reg);
1264 offset = INTVAL (XEXP (addr, 1)) - cur_row->cfa.offset;
1273 sregno = DWARF_FRAME_RETURN_COLUMN;
1277 span = targetm.dwarf_register_span (src);
1278 sregno = dwf_regno (src);
1281 /* ??? We'd like to use queue_reg_save, but we need to come up with
1282 a different flushing heuristic for epilogues. */
1284 reg_save (sregno, INVALID_REGNUM, offset);
1287 /* We have a PARALLEL describing where the contents of SRC live.
1288 Queue register saves for each piece of the PARALLEL. */
1291 HOST_WIDE_INT span_offset = offset;
1293 gcc_assert (GET_CODE (span) == PARALLEL);
1295 limit = XVECLEN (span, 0);
1296 for (par_index = 0; par_index < limit; par_index++)
1298 rtx elem = XVECEXP (span, 0, par_index);
1300 sregno = dwf_regno (src);
1301 reg_save (sregno, INVALID_REGNUM, span_offset);
1302 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1307 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1310 dwarf2out_frame_debug_cfa_register (rtx set)
1313 unsigned sregno, dregno;
1315 src = XEXP (set, 1);
1316 dest = XEXP (set, 0);
1318 record_reg_saved_in_reg (dest, src);
1320 sregno = DWARF_FRAME_RETURN_COLUMN;
1322 sregno = dwf_regno (src);
1324 dregno = dwf_regno (dest);
1326 /* ??? We'd like to use queue_reg_save, but we need to come up with
1327 a different flushing heuristic for epilogues. */
1328 reg_save (sregno, dregno, 0);
1331 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1334 dwarf2out_frame_debug_cfa_expression (rtx set)
1336 rtx src, dest, span;
1337 dw_cfi_ref cfi = new_cfi ();
1339 dest = SET_DEST (set);
1340 src = SET_SRC (set);
1342 gcc_assert (REG_P (src));
1343 gcc_assert (MEM_P (dest));
1345 span = targetm.dwarf_register_span (src);
1348 cfi->dw_cfi_opc = DW_CFA_expression;
1349 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (src);
1350 cfi->dw_cfi_oprnd2.dw_cfi_loc
1351 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1352 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1354 /* ??? We'd like to use queue_reg_save, were the interface different,
1355 and, as above, we could manage flushing for epilogues. */
1359 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1362 dwarf2out_frame_debug_cfa_restore (rtx reg)
1364 dw_cfi_ref cfi = new_cfi ();
1365 unsigned int regno = dwf_regno (reg);
1367 cfi->dw_cfi_opc = (regno & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
1368 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1373 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1374 ??? Perhaps we should note in the CIE where windows are saved (instead of
1375 assuming 0(cfa)) and what registers are in the window. */
1378 dwarf2out_frame_debug_cfa_window_save (void)
1380 dw_cfi_ref cfi = new_cfi ();
1382 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1386 /* Record call frame debugging information for an expression EXPR,
1387 which either sets SP or FP (adjusting how we calculate the frame
1388 address) or saves a register to the stack or another register.
1389 LABEL indicates the address of EXPR.
1391 This function encodes a state machine mapping rtxes to actions on
1392 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1393 users need not read the source code.
1395 The High-Level Picture
1397 Changes in the register we use to calculate the CFA: Currently we
1398 assume that if you copy the CFA register into another register, we
1399 should take the other one as the new CFA register; this seems to
1400 work pretty well. If it's wrong for some target, it's simple
1401 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1403 Changes in the register we use for saving registers to the stack:
1404 This is usually SP, but not always. Again, we deduce that if you
1405 copy SP into another register (and SP is not the CFA register),
1406 then the new register is the one we will be using for register
1407 saves. This also seems to work.
1409 Register saves: There's not much guesswork about this one; if
1410 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1411 register save, and the register used to calculate the destination
1412 had better be the one we think we're using for this purpose.
1413 It's also assumed that a copy from a call-saved register to another
1414 register is saving that register if RTX_FRAME_RELATED_P is set on
1415 that instruction. If the copy is from a call-saved register to
1416 the *same* register, that means that the register is now the same
1417 value as in the caller.
1419 Except: If the register being saved is the CFA register, and the
1420 offset is nonzero, we are saving the CFA, so we assume we have to
1421 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1422 the intent is to save the value of SP from the previous frame.
1424 In addition, if a register has previously been saved to a different
1427 Invariants / Summaries of Rules
1429 cfa current rule for calculating the CFA. It usually
1430 consists of a register and an offset. This is
1431 actually stored in cur_row->cfa, but abbreviated
1432 for the purposes of this documentation.
1433 cfa_store register used by prologue code to save things to the stack
1434 cfa_store.offset is the offset from the value of
1435 cfa_store.reg to the actual CFA
1436 cfa_temp register holding an integral value. cfa_temp.offset
1437 stores the value, which will be used to adjust the
1438 stack pointer. cfa_temp is also used like cfa_store,
1439 to track stores to the stack via fp or a temp reg.
1441 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1442 with cfa.reg as the first operand changes the cfa.reg and its
1443 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1446 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1447 expression yielding a constant. This sets cfa_temp.reg
1448 and cfa_temp.offset.
1450 Rule 5: Create a new register cfa_store used to save items to the
1453 Rules 10-14: Save a register to the stack. Define offset as the
1454 difference of the original location and cfa_store's
1455 location (or cfa_temp's location if cfa_temp is used).
1457 Rules 16-20: If AND operation happens on sp in prologue, we assume
1458 stack is realigned. We will use a group of DW_OP_XXX
1459 expressions to represent the location of the stored
1460 register instead of CFA+offset.
1464 "{a,b}" indicates a choice of a xor b.
1465 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1468 (set <reg1> <reg2>:cfa.reg)
1469 effects: cfa.reg = <reg1>
1470 cfa.offset unchanged
1471 cfa_temp.reg = <reg1>
1472 cfa_temp.offset = cfa.offset
1475 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1476 {<const_int>,<reg>:cfa_temp.reg}))
1477 effects: cfa.reg = sp if fp used
1478 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1479 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1480 if cfa_store.reg==sp
1483 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1484 effects: cfa.reg = fp
1485 cfa_offset += +/- <const_int>
1488 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1489 constraints: <reg1> != fp
1491 effects: cfa.reg = <reg1>
1492 cfa_temp.reg = <reg1>
1493 cfa_temp.offset = cfa.offset
1496 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1497 constraints: <reg1> != fp
1499 effects: cfa_store.reg = <reg1>
1500 cfa_store.offset = cfa.offset - cfa_temp.offset
1503 (set <reg> <const_int>)
1504 effects: cfa_temp.reg = <reg>
1505 cfa_temp.offset = <const_int>
1508 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1509 effects: cfa_temp.reg = <reg1>
1510 cfa_temp.offset |= <const_int>
1513 (set <reg> (high <exp>))
1517 (set <reg> (lo_sum <exp> <const_int>))
1518 effects: cfa_temp.reg = <reg>
1519 cfa_temp.offset = <const_int>
1522 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1523 effects: cfa_store.offset -= <const_int>
1524 cfa.offset = cfa_store.offset if cfa.reg == sp
1526 cfa.base_offset = -cfa_store.offset
1529 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1530 effects: cfa_store.offset += -/+ mode_size(mem)
1531 cfa.offset = cfa_store.offset if cfa.reg == sp
1533 cfa.base_offset = -cfa_store.offset
1536 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1539 effects: cfa.reg = <reg1>
1540 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1543 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1544 effects: cfa.reg = <reg1>
1545 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1548 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1549 effects: cfa.reg = <reg1>
1550 cfa.base_offset = -cfa_temp.offset
1551 cfa_temp.offset -= mode_size(mem)
1554 (set <reg> {unspec, unspec_volatile})
1555 effects: target-dependent
1558 (set sp (and: sp <const_int>))
1559 constraints: cfa_store.reg == sp
1560 effects: cfun->fde.stack_realign = 1
1561 cfa_store.offset = 0
1562 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1565 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1566 effects: cfa_store.offset += -/+ mode_size(mem)
1569 (set (mem ({pre_inc, pre_dec} sp)) fp)
1570 constraints: fde->stack_realign == 1
1571 effects: cfa_store.offset = 0
1572 cfa.reg != HARD_FRAME_POINTER_REGNUM
1575 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1576 constraints: fde->stack_realign == 1
1578 && cfa.indirect == 0
1579 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1580 effects: Use DW_CFA_def_cfa_expression to define cfa
1581 cfa.reg == fde->drap_reg */
1584 dwarf2out_frame_debug_expr (rtx expr)
1586 dw_cfa_location cfa = cur_row->cfa;
1587 rtx src, dest, span;
1588 HOST_WIDE_INT offset;
1591 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1592 the PARALLEL independently. The first element is always processed if
1593 it is a SET. This is for backward compatibility. Other elements
1594 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1595 flag is set in them. */
1596 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1599 int limit = XVECLEN (expr, 0);
1602 /* PARALLELs have strict read-modify-write semantics, so we
1603 ought to evaluate every rvalue before changing any lvalue.
1604 It's cumbersome to do that in general, but there's an
1605 easy approximation that is enough for all current users:
1606 handle register saves before register assignments. */
1607 if (GET_CODE (expr) == PARALLEL)
1608 for (par_index = 0; par_index < limit; par_index++)
1610 elem = XVECEXP (expr, 0, par_index);
1611 if (GET_CODE (elem) == SET
1612 && MEM_P (SET_DEST (elem))
1613 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1614 dwarf2out_frame_debug_expr (elem);
1617 for (par_index = 0; par_index < limit; par_index++)
1619 elem = XVECEXP (expr, 0, par_index);
1620 if (GET_CODE (elem) == SET
1621 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1622 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1623 dwarf2out_frame_debug_expr (elem);
1624 else if (GET_CODE (elem) == SET
1626 && !RTX_FRAME_RELATED_P (elem))
1628 /* Stack adjustment combining might combine some post-prologue
1629 stack adjustment into a prologue stack adjustment. */
1630 HOST_WIDE_INT offset = stack_adjust_offset (elem, args_size, 0);
1633 dwarf2out_stack_adjust (offset);
1639 gcc_assert (GET_CODE (expr) == SET);
1641 src = SET_SRC (expr);
1642 dest = SET_DEST (expr);
1646 rtx rsi = reg_saved_in (src);
1653 switch (GET_CODE (dest))
1656 switch (GET_CODE (src))
1658 /* Setting FP from SP. */
1660 if (cfa.reg == dwf_regno (src))
1663 /* Update the CFA rule wrt SP or FP. Make sure src is
1664 relative to the current CFA register.
1666 We used to require that dest be either SP or FP, but the
1667 ARM copies SP to a temporary register, and from there to
1668 FP. So we just rely on the backends to only set
1669 RTX_FRAME_RELATED_P on appropriate insns. */
1670 cfa.reg = dwf_regno (dest);
1671 cfa_temp.reg = cfa.reg;
1672 cfa_temp.offset = cfa.offset;
1676 /* Saving a register in a register. */
1677 gcc_assert (!fixed_regs [REGNO (dest)]
1678 /* For the SPARC and its register window. */
1679 || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));
1681 /* After stack is aligned, we can only save SP in FP
1682 if drap register is used. In this case, we have
1683 to restore stack pointer with the CFA value and we
1684 don't generate this DWARF information. */
1686 && fde->stack_realign
1687 && REGNO (src) == STACK_POINTER_REGNUM)
1688 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1689 && fde->drap_reg != INVALID_REGNUM
1690 && cfa.reg != dwf_regno (src));
1692 queue_reg_save (src, dest, 0);
1699 if (dest == stack_pointer_rtx)
1703 switch (GET_CODE (XEXP (src, 1)))
1706 offset = INTVAL (XEXP (src, 1));
1709 gcc_assert (dwf_regno (XEXP (src, 1)) == cfa_temp.reg);
1710 offset = cfa_temp.offset;
1716 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1718 /* Restoring SP from FP in the epilogue. */
1719 gcc_assert (cfa.reg == dw_frame_pointer_regnum);
1720 cfa.reg = dw_stack_pointer_regnum;
1722 else if (GET_CODE (src) == LO_SUM)
1723 /* Assume we've set the source reg of the LO_SUM from sp. */
1726 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
1728 if (GET_CODE (src) != MINUS)
1730 if (cfa.reg == dw_stack_pointer_regnum)
1731 cfa.offset += offset;
1732 if (cfa_store.reg == dw_stack_pointer_regnum)
1733 cfa_store.offset += offset;
1735 else if (dest == hard_frame_pointer_rtx)
1738 /* Either setting the FP from an offset of the SP,
1739 or adjusting the FP */
1740 gcc_assert (frame_pointer_needed);
1742 gcc_assert (REG_P (XEXP (src, 0))
1743 && dwf_regno (XEXP (src, 0)) == cfa.reg
1744 && CONST_INT_P (XEXP (src, 1)));
1745 offset = INTVAL (XEXP (src, 1));
1746 if (GET_CODE (src) != MINUS)
1748 cfa.offset += offset;
1749 cfa.reg = dw_frame_pointer_regnum;
1753 gcc_assert (GET_CODE (src) != MINUS);
1756 if (REG_P (XEXP (src, 0))
1757 && dwf_regno (XEXP (src, 0)) == cfa.reg
1758 && CONST_INT_P (XEXP (src, 1)))
1760 /* Setting a temporary CFA register that will be copied
1761 into the FP later on. */
1762 offset = - INTVAL (XEXP (src, 1));
1763 cfa.offset += offset;
1764 cfa.reg = dwf_regno (dest);
1765 /* Or used to save regs to the stack. */
1766 cfa_temp.reg = cfa.reg;
1767 cfa_temp.offset = cfa.offset;
1771 else if (REG_P (XEXP (src, 0))
1772 && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
1773 && XEXP (src, 1) == stack_pointer_rtx)
1775 /* Setting a scratch register that we will use instead
1776 of SP for saving registers to the stack. */
1777 gcc_assert (cfa.reg == dw_stack_pointer_regnum);
1778 cfa_store.reg = dwf_regno (dest);
1779 cfa_store.offset = cfa.offset - cfa_temp.offset;
1783 else if (GET_CODE (src) == LO_SUM
1784 && CONST_INT_P (XEXP (src, 1)))
1786 cfa_temp.reg = dwf_regno (dest);
1787 cfa_temp.offset = INTVAL (XEXP (src, 1));
1796 cfa_temp.reg = dwf_regno (dest);
1797 cfa_temp.offset = INTVAL (src);
1802 gcc_assert (REG_P (XEXP (src, 0))
1803 && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
1804 && CONST_INT_P (XEXP (src, 1)));
1806 cfa_temp.reg = dwf_regno (dest);
1807 cfa_temp.offset |= INTVAL (XEXP (src, 1));
1810 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1811 which will fill in all of the bits. */
1818 case UNSPEC_VOLATILE:
1819 /* All unspecs should be represented by REG_CFA_* notes. */
1825 /* If this AND operation happens on stack pointer in prologue,
1826 we assume the stack is realigned and we extract the
1828 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1830 /* We interpret reg_save differently with stack_realign set.
1831 Thus we must flush whatever we have queued first. */
1832 dwarf2out_flush_queued_reg_saves ();
1834 gcc_assert (cfa_store.reg == dwf_regno (XEXP (src, 0)));
1835 fde->stack_realign = 1;
1836 fde->stack_realignment = INTVAL (XEXP (src, 1));
1837 cfa_store.offset = 0;
1839 if (cfa.reg != dw_stack_pointer_regnum
1840 && cfa.reg != dw_frame_pointer_regnum)
1841 fde->drap_reg = cfa.reg;
1854 /* Saving a register to the stack. Make sure dest is relative to the
1856 switch (GET_CODE (XEXP (dest, 0)))
1862 /* We can't handle variable size modifications. */
1863 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1865 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1867 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1868 && cfa_store.reg == dw_stack_pointer_regnum);
1870 cfa_store.offset += offset;
1871 if (cfa.reg == dw_stack_pointer_regnum)
1872 cfa.offset = cfa_store.offset;
1874 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1875 offset -= cfa_store.offset;
1877 offset = -cfa_store.offset;
1884 offset = GET_MODE_SIZE (GET_MODE (dest));
1885 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1888 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1889 == STACK_POINTER_REGNUM)
1890 && cfa_store.reg == dw_stack_pointer_regnum);
1892 cfa_store.offset += offset;
1894 /* Rule 18: If stack is aligned, we will use FP as a
1895 reference to represent the address of the stored
1898 && fde->stack_realign
1899 && src == hard_frame_pointer_rtx)
1901 gcc_assert (cfa.reg != dw_frame_pointer_regnum);
1902 cfa_store.offset = 0;
1905 if (cfa.reg == dw_stack_pointer_regnum)
1906 cfa.offset = cfa_store.offset;
1908 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
1909 offset += -cfa_store.offset;
1911 offset = -cfa_store.offset;
1915 /* With an offset. */
1922 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
1923 && REG_P (XEXP (XEXP (dest, 0), 0)));
1924 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
1925 if (GET_CODE (XEXP (dest, 0)) == MINUS)
1928 regno = dwf_regno (XEXP (XEXP (dest, 0), 0));
1930 if (cfa.reg == regno)
1931 offset -= cfa.offset;
1932 else if (cfa_store.reg == regno)
1933 offset -= cfa_store.offset;
1936 gcc_assert (cfa_temp.reg == regno);
1937 offset -= cfa_temp.offset;
1943 /* Without an offset. */
1946 unsigned int regno = dwf_regno (XEXP (dest, 0));
1948 if (cfa.reg == regno)
1949 offset = -cfa.offset;
1950 else if (cfa_store.reg == regno)
1951 offset = -cfa_store.offset;
1954 gcc_assert (cfa_temp.reg == regno);
1955 offset = -cfa_temp.offset;
1962 gcc_assert (cfa_temp.reg == dwf_regno (XEXP (XEXP (dest, 0), 0)));
1963 offset = -cfa_temp.offset;
1964 cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
1972 /* If the source operand of this MEM operation is a memory,
1973 we only care how much stack grew. */
1978 && REGNO (src) != STACK_POINTER_REGNUM
1979 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
1980 && dwf_regno (src) == cfa.reg)
1982 /* We're storing the current CFA reg into the stack. */
1984 if (cfa.offset == 0)
1987 /* If stack is aligned, putting CFA reg into stack means
1988 we can no longer use reg + offset to represent CFA.
1989 Here we use DW_CFA_def_cfa_expression instead. The
1990 result of this expression equals to the original CFA
1993 && fde->stack_realign
1994 && cfa.indirect == 0
1995 && cfa.reg != dw_frame_pointer_regnum)
1997 dw_cfa_location cfa_exp;
1999 gcc_assert (fde->drap_reg == cfa.reg);
2001 cfa_exp.indirect = 1;
2002 cfa_exp.reg = dw_frame_pointer_regnum;
2003 cfa_exp.base_offset = offset;
2006 fde->drap_reg_saved = 1;
2008 def_cfa_1 (&cfa_exp);
2012 /* If the source register is exactly the CFA, assume
2013 we're saving SP like any other register; this happens
2016 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
2021 /* Otherwise, we'll need to look in the stack to
2022 calculate the CFA. */
2023 rtx x = XEXP (dest, 0);
2027 gcc_assert (REG_P (x));
2029 cfa.reg = dwf_regno (x);
2030 cfa.base_offset = offset;
2041 span = targetm.dwarf_register_span (src);
2043 queue_reg_save (src, NULL_RTX, offset);
2046 /* We have a PARALLEL describing where the contents of SRC live.
2047 Queue register saves for each piece of the PARALLEL. */
2050 HOST_WIDE_INT span_offset = offset;
2052 gcc_assert (GET_CODE (span) == PARALLEL);
2054 limit = XVECLEN (span, 0);
2055 for (par_index = 0; par_index < limit; par_index++)
2057 rtx elem = XVECEXP (span, 0, par_index);
2058 queue_reg_save (elem, NULL_RTX, span_offset);
2059 span_offset += GET_MODE_SIZE (GET_MODE (elem));
2069 /* Record call frame debugging information for INSN, which either
2070 sets SP or FP (adjusting how we calculate the frame address) or saves a
2071 register to the stack. If INSN is NULL_RTX, initialize our state.
2073 If AFTER_P is false, we're being called before the insn is emitted,
2074 otherwise after. Call instructions get invoked twice. */
2077 dwarf2out_frame_debug (rtx insn, bool after_p)
2080 bool handled_one = false;
2081 bool need_flush = false;
2083 if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
2084 dwarf2out_flush_queued_reg_saves ();
2086 if (!RTX_FRAME_RELATED_P (insn))
2088 /* ??? This should be done unconditionally since stack adjustments
2089 matter if the stack pointer is not the CFA register anymore but
2090 is still used to save registers. */
2091 if (!ACCUMULATE_OUTGOING_ARGS)
2092 dwarf2out_notice_stack_adjust (insn, after_p);
2096 any_cfis_emitted = false;
2098 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2099 switch (REG_NOTE_KIND (note))
2101 case REG_FRAME_RELATED_EXPR:
2102 insn = XEXP (note, 0);
2105 case REG_CFA_DEF_CFA:
2106 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2110 case REG_CFA_ADJUST_CFA:
2115 if (GET_CODE (n) == PARALLEL)
2116 n = XVECEXP (n, 0, 0);
2118 dwarf2out_frame_debug_adjust_cfa (n);
2122 case REG_CFA_OFFSET:
2125 n = single_set (insn);
2126 dwarf2out_frame_debug_cfa_offset (n);
2130 case REG_CFA_REGISTER:
2135 if (GET_CODE (n) == PARALLEL)
2136 n = XVECEXP (n, 0, 0);
2138 dwarf2out_frame_debug_cfa_register (n);
2142 case REG_CFA_EXPRESSION:
2145 n = single_set (insn);
2146 dwarf2out_frame_debug_cfa_expression (n);
2150 case REG_CFA_RESTORE:
2155 if (GET_CODE (n) == PARALLEL)
2156 n = XVECEXP (n, 0, 0);
2159 dwarf2out_frame_debug_cfa_restore (n);
2163 case REG_CFA_SET_VDRAP:
2167 dw_fde_ref fde = cfun->fde;
2170 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2172 fde->vdrap_reg = dwf_regno (n);
2178 case REG_CFA_WINDOW_SAVE:
2179 dwarf2out_frame_debug_cfa_window_save ();
2183 case REG_CFA_FLUSH_QUEUE:
2184 /* The actual flush happens below. */
2195 /* Minimize the number of advances by emitting the entire queue
2196 once anything is emitted. */
2197 need_flush |= any_cfis_emitted;
2201 insn = PATTERN (insn);
2203 dwarf2out_frame_debug_expr (insn);
2205 /* Check again. A parallel can save and update the same register.
2206 We could probably check just once, here, but this is safer than
2207 removing the check at the start of the function. */
2208 if (any_cfis_emitted || clobbers_queued_reg_save (insn))
2213 dwarf2out_flush_queued_reg_saves ();
2216 /* Examine CFI and return true if a cfi label and set_loc is needed
2217 beforehand. Even when generating CFI assembler instructions, we
2218 still have to add the cfi to the list so that lookup_cfa_1 works
2219 later on. When -g2 and above we even need to force emitting of
2220 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2221 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2222 and so don't use convert_cfa_to_fb_loc_list. */
2225 cfi_label_required_p (dw_cfi_ref cfi)
2227 if (!dwarf2out_do_cfi_asm ())
2230 if (dwarf_version == 2
2231 && debug_info_level > DINFO_LEVEL_TERSE
2232 && (write_symbols == DWARF2_DEBUG
2233 || write_symbols == VMS_AND_DWARF2_DEBUG))
2235 switch (cfi->dw_cfi_opc)
2237 case DW_CFA_def_cfa_offset:
2238 case DW_CFA_def_cfa_offset_sf:
2239 case DW_CFA_def_cfa_register:
2240 case DW_CFA_def_cfa:
2241 case DW_CFA_def_cfa_sf:
2242 case DW_CFA_def_cfa_expression:
2243 case DW_CFA_restore_state:
2252 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2253 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2256 add_cfis_to_fde (void)
2258 dw_fde_ref fde = cfun->fde;
2260 /* We always start with a function_begin label. */
2263 for (insn = get_insns (); insn; insn = next)
2265 next = NEXT_INSN (insn);
2267 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2269 /* Don't attempt to advance_loc4 between labels
2270 in different sections. */
2274 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2276 bool required = cfi_label_required_p (NOTE_CFI (insn));
2277 while (next && NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2279 required |= cfi_label_required_p (NOTE_CFI (next));
2280 next = NEXT_INSN (next);
2284 int num = dwarf2out_cfi_label_num;
2285 const char *label = dwarf2out_cfi_label ();
2289 /* Set the location counter to the new label. */
2291 xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2292 : DW_CFA_advance_loc4);
2293 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2294 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, xcfi);
2296 tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2297 NOTE_LABEL_NUMBER (tmp) = num;
2302 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, NOTE_CFI (insn));
2303 insn = NEXT_INSN (insn);
2305 while (insn != next);
2311 /* Scan the function and create the initial set of CFI notes. */
2314 create_cfi_notes (void)
2318 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
2322 cfi_insn = PREV_INSN (insn);
2324 if (BARRIER_P (insn))
2326 dwarf2out_frame_debug (insn, false);
2332 switch (NOTE_KIND (insn))
2334 case NOTE_INSN_PROLOGUE_END:
2335 dwarf2out_flush_queued_reg_saves ();
2338 case NOTE_INSN_EPILOGUE_BEG:
2339 #if defined(HAVE_epilogue)
2340 dwarf2out_cfi_begin_epilogue (insn);
2344 case NOTE_INSN_CFA_RESTORE_STATE:
2346 dwarf2out_frame_debug_restore_state ();
2352 if (!NONDEBUG_INSN_P (insn))
2355 pat = PATTERN (insn);
2356 if (asm_noperands (pat) >= 0)
2358 dwarf2out_frame_debug (insn, false);
2362 if (GET_CODE (pat) == SEQUENCE)
2364 int i, n = XVECLEN (pat, 0);
2365 for (i = 1; i < n; ++i)
2366 dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
2370 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2371 dwarf2out_frame_debug (insn, false);
2373 /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
2374 Putting the note after the VEC should be ok. */
2375 if (!tablejump_p (insn, NULL, &cfi_insn))
2378 dwarf2out_frame_debug (insn, true);
2384 /* Determine if we need to save and restore CFI information around this
2385 epilogue. If SIBCALL is true, then this is a sibcall epilogue. If
2386 we do need to save/restore, then emit the save now, and insert a
2387 NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream. */
2390 dwarf2out_cfi_begin_epilogue (rtx insn)
2392 bool saw_frp = false;
2395 /* Scan forward to the return insn, noticing if there are possible
2396 frame related insns. */
2397 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
2402 /* Look for both regular and sibcalls to end the block. */
2403 if (returnjump_p (i))
2405 if (CALL_P (i) && SIBLING_CALL_P (i))
2408 if (GET_CODE (PATTERN (i)) == SEQUENCE)
2411 rtx seq = PATTERN (i);
2413 if (returnjump_p (XVECEXP (seq, 0, 0)))
2415 if (CALL_P (XVECEXP (seq, 0, 0))
2416 && SIBLING_CALL_P (XVECEXP (seq, 0, 0)))
2419 for (idx = 0; idx < XVECLEN (seq, 0); idx++)
2420 if (RTX_FRAME_RELATED_P (XVECEXP (seq, 0, idx)))
2424 if (RTX_FRAME_RELATED_P (i))
2428 /* If the port doesn't emit epilogue unwind info, we don't need a
2429 save/restore pair. */
2433 /* Otherwise, search forward to see if the return insn was the last
2434 basic block of the function. If so, we don't need save/restore. */
2435 gcc_assert (i != NULL);
2436 i = next_real_insn (i);
2440 /* Insert the restore before that next real insn in the stream, and before
2441 a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
2442 properly nested. This should be after any label or alignment. This
2443 will be pushed into the CFI stream by the function below. */
2446 rtx p = PREV_INSN (i);
2449 if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
2453 emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);
2455 emit_cfa_remember = true;
2457 /* And emulate the state save. */
2458 gcc_assert (remember_row == NULL);
2459 remember_row = copy_cfi_row (cur_row);
2462 /* A "subroutine" of dwarf2out_cfi_begin_epilogue. Emit the restore
2466 dwarf2out_frame_debug_restore_state (void)
2468 dw_cfi_ref cfi = new_cfi ();
2470 cfi->dw_cfi_opc = DW_CFA_restore_state;
2473 gcc_assert (remember_row != NULL);
2474 free_cfi_row (cur_row);
2475 cur_row = remember_row;
2476 remember_row = NULL;
2479 /* Record the initial position of the return address. RTL is
2480 INCOMING_RETURN_ADDR_RTX. */
2483 initial_return_save (rtx rtl)
2485 unsigned int reg = INVALID_REGNUM;
2486 HOST_WIDE_INT offset = 0;
2488 switch (GET_CODE (rtl))
2491 /* RA is in a register. */
2492 reg = dwf_regno (rtl);
2496 /* RA is on the stack. */
2497 rtl = XEXP (rtl, 0);
2498 switch (GET_CODE (rtl))
2501 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
2506 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2507 offset = INTVAL (XEXP (rtl, 1));
2511 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2512 offset = -INTVAL (XEXP (rtl, 1));
2522 /* The return address is at some offset from any value we can
2523 actually load. For instance, on the SPARC it is in %i7+8. Just
2524 ignore the offset for now; it doesn't matter for unwinding frames. */
2525 gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
2526 initial_return_save (XEXP (rtl, 0));
2533 if (reg != DWARF_FRAME_RETURN_COLUMN)
2535 if (reg != INVALID_REGNUM)
2536 record_reg_saved_in_reg (rtl, pc_rtx);
2537 reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
2541 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2542 state at each location within the function. These notes will be
2543 emitted during pass_final. */
2546 execute_dwarf2_frame (void)
2548 /* The first time we're called, compute the incoming frame state. */
2549 if (cie_cfi_vec == NULL)
2551 dw_cfa_location loc;
2553 dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
2554 dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
2556 add_cfi_vec = &cie_cfi_vec;
2557 cie_cfi_row = cur_row = new_cfi_row ();
2559 /* On entry, the Canonical Frame Address is at SP. */
2560 memset(&loc, 0, sizeof (loc));
2561 loc.reg = dw_stack_pointer_regnum;
2562 loc.offset = INCOMING_FRAME_SP_OFFSET;
2565 if (targetm.debug_unwind_info () == UI_DWARF2
2566 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2568 initial_return_save (INCOMING_RETURN_ADDR_RTX);
2570 /* For a few targets, we have the return address incoming into a
2571 register, but choose a different return column. This will result
2572 in a DW_CFA_register for the return, and an entry in
2573 regs_saved_in_regs to match. If the target later stores that
2574 return address register to the stack, we want to be able to emit
2575 the DW_CFA_offset against the return column, not the intermediate
2576 save register. Save the contents of regs_saved_in_regs so that
2577 we can re-initialize it at the start of each function. */
2578 switch (VEC_length (reg_saved_in_data, regs_saved_in_regs))
2583 cie_return_save = ggc_alloc_reg_saved_in_data ();
2584 *cie_return_save = *VEC_index (reg_saved_in_data,
2585 regs_saved_in_regs, 0);
2586 regs_saved_in_regs = NULL;
2596 /* Set up state for generating call frame debug info. */
2597 gcc_checking_assert (queued_reg_saves == NULL);
2598 gcc_checking_assert (regs_saved_in_regs == NULL);
2600 cur_row = copy_cfi_row (cie_cfi_row);
2601 if (cie_return_save)
2602 VEC_safe_push (reg_saved_in_data, gc, regs_saved_in_regs, cie_return_save);
2604 cfa_store = cur_row->cfa;
2607 memset (&cfa_temp, 0, sizeof(cfa_temp));
2608 cfa_temp.reg = INVALID_REGNUM;
2610 dwarf2out_alloc_current_fde ();
2613 create_cfi_notes ();
2616 /* Reset all function-specific information, particularly for GC. */
2617 XDELETEVEC (barrier_args_size);
2618 barrier_args_size = NULL;
2619 regs_saved_in_regs = NULL;
2620 queued_reg_saves = NULL;
2622 free_cfi_row (cur_row);
2628 /* Convert a DWARF call frame info. operation to its string name */
2631 dwarf_cfi_name (unsigned int cfi_opc)
2635 case DW_CFA_advance_loc:
2636 return "DW_CFA_advance_loc";
2638 return "DW_CFA_offset";
2639 case DW_CFA_restore:
2640 return "DW_CFA_restore";
2642 return "DW_CFA_nop";
2643 case DW_CFA_set_loc:
2644 return "DW_CFA_set_loc";
2645 case DW_CFA_advance_loc1:
2646 return "DW_CFA_advance_loc1";
2647 case DW_CFA_advance_loc2:
2648 return "DW_CFA_advance_loc2";
2649 case DW_CFA_advance_loc4:
2650 return "DW_CFA_advance_loc4";
2651 case DW_CFA_offset_extended:
2652 return "DW_CFA_offset_extended";
2653 case DW_CFA_restore_extended:
2654 return "DW_CFA_restore_extended";
2655 case DW_CFA_undefined:
2656 return "DW_CFA_undefined";
2657 case DW_CFA_same_value:
2658 return "DW_CFA_same_value";
2659 case DW_CFA_register:
2660 return "DW_CFA_register";
2661 case DW_CFA_remember_state:
2662 return "DW_CFA_remember_state";
2663 case DW_CFA_restore_state:
2664 return "DW_CFA_restore_state";
2665 case DW_CFA_def_cfa:
2666 return "DW_CFA_def_cfa";
2667 case DW_CFA_def_cfa_register:
2668 return "DW_CFA_def_cfa_register";
2669 case DW_CFA_def_cfa_offset:
2670 return "DW_CFA_def_cfa_offset";
2673 case DW_CFA_def_cfa_expression:
2674 return "DW_CFA_def_cfa_expression";
2675 case DW_CFA_expression:
2676 return "DW_CFA_expression";
2677 case DW_CFA_offset_extended_sf:
2678 return "DW_CFA_offset_extended_sf";
2679 case DW_CFA_def_cfa_sf:
2680 return "DW_CFA_def_cfa_sf";
2681 case DW_CFA_def_cfa_offset_sf:
2682 return "DW_CFA_def_cfa_offset_sf";
2684 /* SGI/MIPS specific */
2685 case DW_CFA_MIPS_advance_loc8:
2686 return "DW_CFA_MIPS_advance_loc8";
2688 /* GNU extensions */
2689 case DW_CFA_GNU_window_save:
2690 return "DW_CFA_GNU_window_save";
2691 case DW_CFA_GNU_args_size:
2692 return "DW_CFA_GNU_args_size";
2693 case DW_CFA_GNU_negative_offset_extended:
2694 return "DW_CFA_GNU_negative_offset_extended";
2697 return "DW_CFA_<unknown>";
2701 /* This routine will generate the correct assembly data for a location
2702 description based on a cfi entry with a complex address. */
2705 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
2707 dw_loc_descr_ref loc;
2710 if (cfi->dw_cfi_opc == DW_CFA_expression)
2713 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2714 dw2_asm_output_data (1, r, NULL);
2715 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2718 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2720 /* Output the size of the block. */
2721 size = size_of_locs (loc);
2722 dw2_asm_output_data_uleb128 (size, NULL);
2724 /* Now output the operations themselves. */
2725 output_loc_sequence (loc, for_eh);
2728 /* Similar, but used for .cfi_escape. */
2731 output_cfa_loc_raw (dw_cfi_ref cfi)
2733 dw_loc_descr_ref loc;
2736 if (cfi->dw_cfi_opc == DW_CFA_expression)
2739 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2740 fprintf (asm_out_file, "%#x,", r);
2741 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2744 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2746 /* Output the size of the block. */
2747 size = size_of_locs (loc);
2748 dw2_asm_output_data_uleb128_raw (size);
2749 fputc (',', asm_out_file);
2751 /* Now output the operations themselves. */
2752 output_loc_sequence_raw (loc);
2755 /* Output a Call Frame Information opcode and its operand(s). */
2758 output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
2763 if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
2764 dw2_asm_output_data (1, (cfi->dw_cfi_opc
2765 | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
2766 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
2767 ((unsigned HOST_WIDE_INT)
2768 cfi->dw_cfi_oprnd1.dw_cfi_offset));
2769 else if (cfi->dw_cfi_opc == DW_CFA_offset)
2771 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2772 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
2773 "DW_CFA_offset, column %#lx", r);
2774 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
2775 dw2_asm_output_data_uleb128 (off, NULL);
2777 else if (cfi->dw_cfi_opc == DW_CFA_restore)
2779 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2780 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
2781 "DW_CFA_restore, column %#lx", r);
2785 dw2_asm_output_data (1, cfi->dw_cfi_opc,
2786 "%s", dwarf_cfi_name (cfi->dw_cfi_opc));
2788 switch (cfi->dw_cfi_opc)
2790 case DW_CFA_set_loc:
2792 dw2_asm_output_encoded_addr_rtx (
2793 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
2794 gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
2797 dw2_asm_output_addr (DWARF2_ADDR_SIZE,
2798 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
2799 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2802 case DW_CFA_advance_loc1:
2803 dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2804 fde->dw_fde_current_label, NULL);
2805 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2808 case DW_CFA_advance_loc2:
2809 dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2810 fde->dw_fde_current_label, NULL);
2811 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2814 case DW_CFA_advance_loc4:
2815 dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2816 fde->dw_fde_current_label, NULL);
2817 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2820 case DW_CFA_MIPS_advance_loc8:
2821 dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2822 fde->dw_fde_current_label, NULL);
2823 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2826 case DW_CFA_offset_extended:
2827 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2828 dw2_asm_output_data_uleb128 (r, NULL);
2829 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
2830 dw2_asm_output_data_uleb128 (off, NULL);
2833 case DW_CFA_def_cfa:
2834 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2835 dw2_asm_output_data_uleb128 (r, NULL);
2836 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
2839 case DW_CFA_offset_extended_sf:
2840 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2841 dw2_asm_output_data_uleb128 (r, NULL);
2842 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
2843 dw2_asm_output_data_sleb128 (off, NULL);
2846 case DW_CFA_def_cfa_sf:
2847 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2848 dw2_asm_output_data_uleb128 (r, NULL);
2849 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
2850 dw2_asm_output_data_sleb128 (off, NULL);
2853 case DW_CFA_restore_extended:
2854 case DW_CFA_undefined:
2855 case DW_CFA_same_value:
2856 case DW_CFA_def_cfa_register:
2857 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2858 dw2_asm_output_data_uleb128 (r, NULL);
2861 case DW_CFA_register:
2862 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2863 dw2_asm_output_data_uleb128 (r, NULL);
2864 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
2865 dw2_asm_output_data_uleb128 (r, NULL);
2868 case DW_CFA_def_cfa_offset:
2869 case DW_CFA_GNU_args_size:
2870 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
2873 case DW_CFA_def_cfa_offset_sf:
2874 off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
2875 dw2_asm_output_data_sleb128 (off, NULL);
2878 case DW_CFA_GNU_window_save:
2881 case DW_CFA_def_cfa_expression:
2882 case DW_CFA_expression:
2883 output_cfa_loc (cfi, for_eh);
2886 case DW_CFA_GNU_negative_offset_extended:
2887 /* Obsoleted by DW_CFA_offset_extended_sf. */
2896 /* Similar, but do it via assembler directives instead. */
2899 output_cfi_directive (FILE *f, dw_cfi_ref cfi)
2901 unsigned long r, r2;
2903 switch (cfi->dw_cfi_opc)
2905 case DW_CFA_advance_loc:
2906 case DW_CFA_advance_loc1:
2907 case DW_CFA_advance_loc2:
2908 case DW_CFA_advance_loc4:
2909 case DW_CFA_MIPS_advance_loc8:
2910 case DW_CFA_set_loc:
2911 /* Should only be created in a code path not followed when emitting
2912 via directives. The assembler is going to take care of this for
2913 us. But this routines is also used for debugging dumps, so
2915 gcc_assert (f != asm_out_file);
2916 fprintf (f, "\t.cfi_advance_loc\n");
2920 case DW_CFA_offset_extended:
2921 case DW_CFA_offset_extended_sf:
2922 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2923 fprintf (f, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
2924 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
2927 case DW_CFA_restore:
2928 case DW_CFA_restore_extended:
2929 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2930 fprintf (f, "\t.cfi_restore %lu\n", r);
2933 case DW_CFA_undefined:
2934 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2935 fprintf (f, "\t.cfi_undefined %lu\n", r);
2938 case DW_CFA_same_value:
2939 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2940 fprintf (f, "\t.cfi_same_value %lu\n", r);
2943 case DW_CFA_def_cfa:
2944 case DW_CFA_def_cfa_sf:
2945 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2946 fprintf (f, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
2947 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
2950 case DW_CFA_def_cfa_register:
2951 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2952 fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
2955 case DW_CFA_register:
2956 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2957 r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
2958 fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
2961 case DW_CFA_def_cfa_offset:
2962 case DW_CFA_def_cfa_offset_sf:
2963 fprintf (f, "\t.cfi_def_cfa_offset "
2964 HOST_WIDE_INT_PRINT_DEC"\n",
2965 cfi->dw_cfi_oprnd1.dw_cfi_offset);
2968 case DW_CFA_remember_state:
2969 fprintf (f, "\t.cfi_remember_state\n");
2971 case DW_CFA_restore_state:
2972 fprintf (f, "\t.cfi_restore_state\n");
2975 case DW_CFA_GNU_args_size:
2976 if (f == asm_out_file)
2978 fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
2979 dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
2981 fprintf (f, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC,
2982 ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
2987 fprintf (f, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC "\n",
2988 cfi->dw_cfi_oprnd1.dw_cfi_offset);
2992 case DW_CFA_GNU_window_save:
2993 fprintf (f, "\t.cfi_window_save\n");
2996 case DW_CFA_def_cfa_expression:
2997 if (f != asm_out_file)
2999 fprintf (f, "\t.cfi_def_cfa_expression ...\n");
3003 case DW_CFA_expression:
3004 if (f != asm_out_file)
3006 fprintf (f, "\t.cfi_cfa_expression ...\n");
3009 fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
3010 output_cfa_loc_raw (cfi);
3020 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3022 if (dwarf2out_do_cfi_asm ())
3023 output_cfi_directive (asm_out_file, cfi);
3026 /* Output CFIs from VEC, up to index UPTO, to bring current FDE to the
3027 same state as after executing CFIs in CFI chain. DO_CFI_ASM is
3028 true if .cfi_* directives shall be emitted, false otherwise. If it
3029 is false, FDE and FOR_EH are the other arguments to pass to
3033 output_cfis (cfi_vec vec, int upto, bool do_cfi_asm,
3034 dw_fde_ref fde, bool for_eh)
3037 struct dw_cfi_struct cfi_buf;
3039 dw_cfi_ref cfi_args_size = NULL, cfi_cfa = NULL, cfi_cfa_offset = NULL;
3040 VEC(dw_cfi_ref, heap) *regs = VEC_alloc (dw_cfi_ref, heap, 32);
3041 unsigned int len, idx;
3043 for (ix = 0; ix < upto + 1; ix++)
3045 dw_cfi_ref cfi = ix < upto ? VEC_index (dw_cfi_ref, vec, ix) : NULL;
3046 switch (cfi ? cfi->dw_cfi_opc : DW_CFA_nop)
3048 case DW_CFA_advance_loc:
3049 case DW_CFA_advance_loc1:
3050 case DW_CFA_advance_loc2:
3051 case DW_CFA_advance_loc4:
3052 case DW_CFA_MIPS_advance_loc8:
3053 case DW_CFA_set_loc:
3054 /* All advances should be ignored. */
3056 case DW_CFA_remember_state:
3058 dw_cfi_ref args_size = cfi_args_size;
3060 /* Skip everything between .cfi_remember_state and
3061 .cfi_restore_state. */
3066 for (; ix < upto; ix++)
3068 cfi2 = VEC_index (dw_cfi_ref, vec, ix);
3069 if (cfi2->dw_cfi_opc == DW_CFA_restore_state)
3071 else if (cfi2->dw_cfi_opc == DW_CFA_GNU_args_size)
3074 gcc_assert (cfi2->dw_cfi_opc != DW_CFA_remember_state);
3077 cfi_args_size = args_size;
3080 case DW_CFA_GNU_args_size:
3081 cfi_args_size = cfi;
3083 case DW_CFA_GNU_window_save:
3086 case DW_CFA_offset_extended:
3087 case DW_CFA_offset_extended_sf:
3088 case DW_CFA_restore:
3089 case DW_CFA_restore_extended:
3090 case DW_CFA_undefined:
3091 case DW_CFA_same_value:
3092 case DW_CFA_register:
3093 case DW_CFA_val_offset:
3094 case DW_CFA_val_offset_sf:
3095 case DW_CFA_expression:
3096 case DW_CFA_val_expression:
3097 case DW_CFA_GNU_negative_offset_extended:
3098 if (VEC_length (dw_cfi_ref, regs)
3099 <= cfi->dw_cfi_oprnd1.dw_cfi_reg_num)
3100 VEC_safe_grow_cleared (dw_cfi_ref, heap, regs,
3101 cfi->dw_cfi_oprnd1.dw_cfi_reg_num + 1);
3102 VEC_replace (dw_cfi_ref, regs, cfi->dw_cfi_oprnd1.dw_cfi_reg_num,
3105 case DW_CFA_def_cfa:
3106 case DW_CFA_def_cfa_sf:
3107 case DW_CFA_def_cfa_expression:
3109 cfi_cfa_offset = cfi;
3111 case DW_CFA_def_cfa_register:
3114 case DW_CFA_def_cfa_offset:
3115 case DW_CFA_def_cfa_offset_sf:
3116 cfi_cfa_offset = cfi;
3119 gcc_assert (cfi == NULL);
3121 len = VEC_length (dw_cfi_ref, regs);
3122 for (idx = 0; idx < len; idx++)
3124 cfi2 = VEC_replace (dw_cfi_ref, regs, idx, NULL);
3126 && cfi2->dw_cfi_opc != DW_CFA_restore
3127 && cfi2->dw_cfi_opc != DW_CFA_restore_extended)
3130 output_cfi_directive (asm_out_file, cfi2);
3132 output_cfi (cfi2, fde, for_eh);
3135 if (cfi_cfa && cfi_cfa_offset && cfi_cfa_offset != cfi_cfa)
3137 gcc_assert (cfi_cfa->dw_cfi_opc != DW_CFA_def_cfa_expression);
3139 switch (cfi_cfa_offset->dw_cfi_opc)
3141 case DW_CFA_def_cfa_offset:
3142 cfi_buf.dw_cfi_opc = DW_CFA_def_cfa;
3143 cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd1;
3145 case DW_CFA_def_cfa_offset_sf:
3146 cfi_buf.dw_cfi_opc = DW_CFA_def_cfa_sf;
3147 cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd1;
3149 case DW_CFA_def_cfa:
3150 case DW_CFA_def_cfa_sf:
3151 cfi_buf.dw_cfi_opc = cfi_cfa_offset->dw_cfi_opc;
3152 cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd2;
3159 else if (cfi_cfa_offset)
3160 cfi_cfa = cfi_cfa_offset;
3164 output_cfi_directive (asm_out_file, cfi_cfa);
3166 output_cfi (cfi_cfa, fde, for_eh);
3169 cfi_cfa_offset = NULL;
3171 && cfi_args_size->dw_cfi_oprnd1.dw_cfi_offset)
3174 output_cfi_directive (asm_out_file, cfi_args_size);
3176 output_cfi (cfi_args_size, fde, for_eh);
3178 cfi_args_size = NULL;
3181 VEC_free (dw_cfi_ref, heap, regs);
3184 else if (do_cfi_asm)
3185 output_cfi_directive (asm_out_file, cfi);
3187 output_cfi (cfi, fde, for_eh);
3196 /* Save the result of dwarf2out_do_frame across PCH.
3197 This variable is tri-state, with 0 unset, >0 true, <0 false. */
3198 static GTY(()) signed char saved_do_cfi_asm = 0;
3200 /* Decide whether we want to emit frame unwind information for the current
3201 translation unit. */
3204 dwarf2out_do_frame (void)
3206 /* We want to emit correct CFA location expressions or lists, so we
3207 have to return true if we're going to output debug info, even if
3208 we're not going to output frame or unwind info. */
3209 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3212 if (saved_do_cfi_asm > 0)
3215 if (targetm.debug_unwind_info () == UI_DWARF2)
3218 if ((flag_unwind_tables || flag_exceptions)
3219 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3225 /* Decide whether to emit frame unwind via assembler directives. */
3228 dwarf2out_do_cfi_asm (void)
3232 #ifdef MIPS_DEBUGGING_INFO
3236 if (saved_do_cfi_asm != 0)
3237 return saved_do_cfi_asm > 0;
3239 /* Assume failure for a moment. */
3240 saved_do_cfi_asm = -1;
3242 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
3244 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
3247 /* Make sure the personality encoding is one the assembler can support.
3248 In particular, aligned addresses can't be handled. */
3249 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3250 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3252 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3253 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3256 /* If we can't get the assembler to emit only .debug_frame, and we don't need
3257 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
3258 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
3259 && !flag_unwind_tables && !flag_exceptions
3260 && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
3264 saved_do_cfi_asm = 1;
3269 gate_dwarf2_frame (void)
3271 #ifndef HAVE_prologue
3272 /* Targets which still implement the prologue in assembler text
3273 cannot use the generic dwarf2 unwinding. */
3277 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3278 from the optimized shrink-wrapping annotations that we will compute.
3279 For now, only produce the CFI notes for dwarf2. */
3280 return dwarf2out_do_frame ();
3283 struct rtl_opt_pass pass_dwarf2_frame =
3287 "dwarf2", /* name */
3288 gate_dwarf2_frame, /* gate */
3289 execute_dwarf2_frame, /* execute */
3292 0, /* static_pass_number */
3293 TV_FINAL, /* tv_id */
3294 0, /* properties_required */
3295 0, /* properties_provided */
3296 0, /* properties_destroyed */
3297 0, /* todo_flags_start */
3298 0 /* todo_flags_finish */
3302 #include "gt-dwarf2cfi.h"