1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
58 /* The size of the target's pointer type. */
60 #define PTR_SIZE (POINTER_SIZE / BITS_PER_UNIT)
63 /* Maximum size (in bytes) of an artificially generated label. */
64 #define MAX_ARTIFICIAL_LABEL_BYTES 30
66 /* The size of addresses as they appear in the Dwarf 2 data.
67 Some architectures use word addresses to refer to code locations,
68 but Dwarf 2 info always uses byte addresses. On such machines,
69 Dwarf 2 addresses need to be larger than the architecture's
71 #ifndef DWARF2_ADDR_SIZE
72 #define DWARF2_ADDR_SIZE (POINTER_SIZE / BITS_PER_UNIT)
75 /* The size in bytes of a DWARF field indicating an offset or length
76 relative to a debug info section, specified to be 4 bytes in the
77 DWARF-2 specification. The SGI/MIPS ABI defines it to be the same
80 #ifndef DWARF_OFFSET_SIZE
81 #define DWARF_OFFSET_SIZE 4
84 /* According to the (draft) DWARF 3 specification, the initial length
85 should either be 4 or 12 bytes. When it's 12 bytes, the first 4
86 bytes are 0xffffffff, followed by the length stored in the next 8
89 However, the SGI/MIPS ABI uses an initial length which is equal to
90 DWARF_OFFSET_SIZE. It is defined (elsewhere) accordingly. */
92 #ifndef DWARF_INITIAL_LENGTH_SIZE
93 #define DWARF_INITIAL_LENGTH_SIZE (DWARF_OFFSET_SIZE == 4 ? 4 : 12)
96 /* Round SIZE up to the nearest BOUNDARY. */
97 #define DWARF_ROUND(SIZE,BOUNDARY) \
98 ((((SIZE) + (BOUNDARY) - 1) / (BOUNDARY)) * (BOUNDARY))
100 /* Offsets recorded in opcodes are a multiple of this alignment factor. */
101 #ifndef DWARF_CIE_DATA_ALIGNMENT
102 #ifdef STACK_GROWS_DOWNWARD
103 #define DWARF_CIE_DATA_ALIGNMENT (-((int) UNITS_PER_WORD))
105 #define DWARF_CIE_DATA_ALIGNMENT ((int) UNITS_PER_WORD)
109 /* CIE identifier. */
110 #if HOST_BITS_PER_WIDE_INT >= 64
111 #define DWARF_CIE_ID \
112 (unsigned HOST_WIDE_INT) (DWARF_OFFSET_SIZE == 4 ? DW_CIE_ID : DW64_CIE_ID)
114 #define DWARF_CIE_ID DW_CIE_ID
117 /* The DWARF 2 CFA column which tracks the return address. Normally this
118 is the column for PC, or the first column after all of the hard
120 #ifndef DWARF_FRAME_RETURN_COLUMN
122 #define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (PC_REGNUM)
124 #define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGISTERS
128 /* The mapping from gcc register number to DWARF 2 CFA column number. By
129 default, we just provide columns for all registers. */
130 #ifndef DWARF_FRAME_REGNUM
131 #define DWARF_FRAME_REGNUM(REG) DBX_REGISTER_NUMBER (REG)
134 /* Map register numbers held in the call frame info that gcc has
135 collected using DWARF_FRAME_REGNUM to those that should be output in
136 .debug_frame and .eh_frame. */
137 #ifndef DWARF2_FRAME_REG_OUT
138 #define DWARF2_FRAME_REG_OUT(REGNO, FOR_EH) (REGNO)
141 /* A vector of call frame insns for the CIE. */
144 static GTY(()) unsigned long dwarf2out_cfi_label_num;
146 /* The insn after which a new CFI note should be emitted. */
149 /* True if remember_state should be emitted before following CFI directive. */
150 static bool emit_cfa_remember;
152 /* True if any CFI directives were emitted at the current insn. */
153 static bool any_cfis_emitted;
156 static void dwarf2out_cfi_begin_epilogue (rtx insn);
157 static void dwarf2out_frame_debug_restore_state (void);
160 /* Hook used by __throw. */
163 expand_builtin_dwarf_sp_column (void)
165 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
166 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
169 /* MEM is a memory reference for the register size table, each element of
170 which has mode MODE. Initialize column C as a return address column. */
173 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
175 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
176 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
177 emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
180 /* Generate code to initialize the register size table. */
183 expand_builtin_init_dwarf_reg_sizes (tree address)
186 enum machine_mode mode = TYPE_MODE (char_type_node);
187 rtx addr = expand_normal (address);
188 rtx mem = gen_rtx_MEM (BLKmode, addr);
189 bool wrote_return_column = false;
191 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
193 int rnum = DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), 1);
195 if (rnum < DWARF_FRAME_REGISTERS)
197 HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
198 enum machine_mode save_mode = reg_raw_mode[i];
201 if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
202 save_mode = choose_hard_reg_mode (i, 1, true);
203 if (DWARF_FRAME_REGNUM (i) == DWARF_FRAME_RETURN_COLUMN)
205 if (save_mode == VOIDmode)
207 wrote_return_column = true;
209 size = GET_MODE_SIZE (save_mode);
213 emit_move_insn (adjust_address (mem, mode, offset),
214 gen_int_mode (size, mode));
218 if (!wrote_return_column)
219 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
221 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
222 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
225 targetm.init_dwarf_reg_sizes_extra (address);
228 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
230 static inline HOST_WIDE_INT
231 div_data_align (HOST_WIDE_INT off)
233 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
234 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
238 /* Return true if we need a signed version of a given opcode
239 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
242 need_data_align_sf_opcode (HOST_WIDE_INT off)
244 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
247 /* Return a pointer to a newly allocated Call Frame Instruction. */
249 static inline dw_cfi_ref
252 dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();
254 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
255 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
260 /* Generate a new label for the CFI info to refer to. */
263 dwarf2out_cfi_label (void)
265 int num = dwarf2out_cfi_label_num++;
268 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
270 return xstrdup (label);
273 /* Add CFI to the current fde. */
276 add_fde_cfi (dw_cfi_ref cfi)
278 if (emit_cfa_remember)
280 dw_cfi_ref cfi_remember;
282 /* Emit the state save. */
283 emit_cfa_remember = false;
284 cfi_remember = new_cfi ();
285 cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
286 add_fde_cfi (cfi_remember);
289 any_cfis_emitted = true;
290 if (cfi_insn != NULL)
292 cfi_insn = emit_note_after (NOTE_INSN_CFI, cfi_insn);
293 NOTE_CFI (cfi_insn) = cfi;
297 dw_fde_ref fde = cfun->fde;
298 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, cfi);
299 dwarf2out_emit_cfi (cfi);
304 add_cie_cfi (dw_cfi_ref cfi)
306 VEC_safe_push (dw_cfi_ref, gc, cie_cfi_vec, cfi);
309 /* This function fills in a dw_cfa_location structure from a dwarf location
310 descriptor sequence. */
/* NOTE(review): fragmentary excerpt of a line-numbered paste — the
   internal numbering jumps, so the switch head, the remaining DW_OP_*
   case labels, 'break's and braces are elided.  Kept verbatim.  */
313 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
315 struct dw_loc_descr_struct *ptr;
317 cfa->base_offset = 0;
/* Walk the expression, decoding the few operators that can describe a
   CFA: register, register+offset, and a constant addend.  */
321 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
323 enum dwarf_location_atom op = ptr->dw_loc_opc;
/* DW_OP_reg0..reg31: register number is encoded in the opcode itself.  */
359 cfa->reg = op - DW_OP_reg0;
/* Presumably DW_OP_regx: register number in the first operand —
   surrounding case labels elided; confirm against full source.  */
362 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
/* DW_OP_breg0..breg31: base register plus signed offset operand.  */
396 cfa->reg = op - DW_OP_breg0;
397 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
/* Presumably DW_OP_bregx: register and offset both as operands.  */
400 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
401 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
406 case DW_OP_plus_uconst:
407 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
415 /* Subroutine of lookup_cfa. */
/* NOTE(review): fragmentary excerpt — 'break' statements, braces, and
   the loc<->remember structure copies for remember/restore_state are
   elided by the paste.  Applies the effect of one CFI to *LOC, with
   *REMEMBER acting as the one-deep save slot.  */
418 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
420 switch (cfi->dw_cfi_opc)
/* Offset-only redefinition: CFA register unchanged.  */
422 case DW_CFA_def_cfa_offset:
423 case DW_CFA_def_cfa_offset_sf:
424 loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
/* Register-only redefinition: offset unchanged.  */
426 case DW_CFA_def_cfa_register:
427 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
/* Full redefinition: both register and offset.  */
430 case DW_CFA_def_cfa_sf:
431 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
432 loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
/* Arbitrary expression: decode it back into a dw_cfa_location.  */
434 case DW_CFA_def_cfa_expression:
435 get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
/* remember_state must not nest — only one level of saved state.  */
438 case DW_CFA_remember_state:
439 gcc_assert (!remember->in_use);
441 remember->in_use = 1;
443 case DW_CFA_restore_state:
444 gcc_assert (remember->in_use);
446 remember->in_use = 0;
454 /* Find the previous value for the CFA. */
/* NOTE(review): fragmentary excerpt — declarations of the iteration
   variables and the clearing of 'remember' are elided.  Replays the
   CIE's CFIs and then the current FDE's, accumulating into *LOC.  */
457 lookup_cfa (dw_cfa_location *loc)
462 dw_cfa_location remember;
/* Start from "no CFA defined".  */
464 memset (loc, 0, sizeof (*loc));
465 loc->reg = INVALID_REGNUM;
468 FOR_EACH_VEC_ELT (dw_cfi_ref, cie_cfi_vec, ix, cfi)
469 lookup_cfa_1 (cfi, loc, &remember)
473 FOR_EACH_VEC_ELT (dw_cfi_ref, fde->dw_fde_cfi, ix, cfi)
474 lookup_cfa_1 (cfi, loc, &remember);
477 /* The current rule for calculating the DWARF2 canonical frame address. */
478 static dw_cfa_location cfa;
480 /* A copy of the CFA, for comparison purposes. */
481 static dw_cfa_location old_cfa;
483 /* The register used for saving registers to the stack, and its offset
485 static dw_cfa_location cfa_store;
487 /* The current save location around an epilogue. */
488 static dw_cfa_location cfa_remember;
490 /* Like cfa_remember, but a copy of old_cfa. */
491 static dw_cfa_location old_cfa_remember;
493 /* The running total of the size of arguments pushed onto the stack. */
494 static HOST_WIDE_INT args_size;
496 /* The last args_size we actually output. */
497 static HOST_WIDE_INT old_args_size;
499 /* Determine if two dw_cfa_location structures define the same data. */
502 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
504 return (loc1->reg == loc2->reg
505 && loc1->offset == loc2->offset
506 && loc1->indirect == loc2->indirect
507 && (loc1->indirect == 0
508 || loc1->base_offset == loc2->base_offset));
511 /* This routine does the actual work. The CFA is now calculated from
512 the dw_cfa_location structure. */
/* NOTE(review): fragmentary excerpt — the function body's braces, the
   'cfi = new_cfi ()' allocation, the conditions selecting the _sf
   variants, and the final add_fde_cfi/add_cie_cfi dispatch are elided
   by the paste.  Chooses the most compact DW_CFA_def_cfa_* opcode for
   the transition old_cfa -> *LOC_P.  */
515 def_cfa_1 (bool for_cie, dw_cfa_location *loc_p)
/* Keep cfa_store in sync when it tracks the same register.  */
523 if (cfa_store.reg == loc.reg && loc.indirect == 0)
524 cfa_store.offset = loc.offset;
526 loc.reg = DWARF_FRAME_REGNUM (loc.reg);
528 /* If nothing changed, no need to issue any call frame instructions. */
529 if (cfa_equal_p (&loc, &old_cfa))
534 if (loc.reg == old_cfa.reg && !loc.indirect && !old_cfa.indirect)
536 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
537 the CFA register did not change but the offset did. The data
538 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
539 in the assembler via the .cfi_def_cfa_offset directive. */
541 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
543 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
544 cfi->dw_cfi_oprnd1.dw_cfi_offset = loc.offset;
547 #ifndef MIPS_DEBUGGING_INFO /* SGI dbx thinks this means no offset. */
548 else if (loc.offset == old_cfa.offset
549 && old_cfa.reg != INVALID_REGNUM
551 && !old_cfa.indirect)
553 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
554 indicating the CFA register has changed to <register> but the
555 offset has not changed. */
556 cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
557 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
561 else if (loc.indirect == 0)
563 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
564 indicating the CFA register has changed to <register> with
565 the specified offset. The data factoring for DW_CFA_def_cfa_sf
566 happens in output_cfi, or in the assembler via the .cfi_def_cfa
569 cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
571 cfi->dw_cfi_opc = DW_CFA_def_cfa;
572 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
573 cfi->dw_cfi_oprnd2.dw_cfi_offset = loc.offset;
/* Fallback: indirect CFA needs a full location expression.  */
577 /* Construct a DW_CFA_def_cfa_expression instruction to
578 calculate the CFA using a full location expression since no
579 register-offset pair is available. */
580 struct dw_loc_descr_struct *loc_list;
582 cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
583 loc_list = build_cfa_loc (&loc, 0);
584 cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
594 /* Add the CFI for saving a register. REG is the CFA column number.
595 If SREG is -1, the register is saved at OFFSET from the CFA;
596 otherwise it is saved in SREG. */
/* NOTE(review): fragmentary excerpt — the condition head guarding the
   stack-realign branch, the div_data_align factoring of OFFSET, and
   the final add_fde_cfi/add_cie_cfi dispatch are elided by the paste.
   Chooses among DW_CFA_expression / DW_CFA_offset* / DW_CFA_same_value
   / DW_CFA_register depending on where the register is saved.  */
599 reg_save (bool for_cie, unsigned int reg, unsigned int sreg,
600 HOST_WIDE_INT offset)
602 dw_fde_ref fde = for_cie ? NULL : cfun->fde;
603 dw_cfi_ref cfi = new_cfi ();
605 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
607 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
609 && fde->stack_realign
610 && sreg == INVALID_REGNUM)
612 cfi->dw_cfi_opc = DW_CFA_expression;
613 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
614 cfi->dw_cfi_oprnd2.dw_cfi_loc
615 = build_cfa_aligned_loc (&cfa, offset, fde->stack_realignment);
617 else if (sreg == INVALID_REGNUM)
/* Saved at an offset from the CFA: pick the narrowest encoding.  */
619 if (need_data_align_sf_opcode (offset))
620 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
621 else if (reg & ~0x3f)
622 cfi->dw_cfi_opc = DW_CFA_offset_extended;
624 cfi->dw_cfi_opc = DW_CFA_offset;
625 cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
/* "Saved" in itself means the register still holds its entry value.  */
627 else if (sreg == reg)
628 cfi->dw_cfi_opc = DW_CFA_same_value;
631 cfi->dw_cfi_opc = DW_CFA_register;
632 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
641 /* Record the initial position of the return address. RTL is
642 INCOMING_RETURN_ADDR_RTX. */
/* NOTE(review): fragmentary excerpt — the case labels (REG/MEM/PLUS,
   and the inner REG/PLUS/MINUS of the address), 'break's and braces
   are elided by the paste.  Emits a CIE-level save of the return
   column unless the RA already lives there.  */
645 initial_return_save (rtx rtl)
647 unsigned int reg = INVALID_REGNUM;
648 HOST_WIDE_INT offset = 0;
650 switch (GET_CODE (rtl))
653 /* RA is in a register. */
654 reg = DWARF_FRAME_REGNUM (REGNO (rtl));
658 /* RA is on the stack. */
660 switch (GET_CODE (rtl))
/* Address is exactly the stack pointer: offset 0.  */
663 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
/* sp + const: positive offset.  */
668 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
669 offset = INTVAL (XEXP (rtl, 1));
/* sp - const: negative offset.  */
673 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
674 offset = -INTVAL (XEXP (rtl, 1));
684 /* The return address is at some offset from any value we can
685 actually load. For instance, on the SPARC it is in %i7+8. Just
686 ignore the offset for now; it doesn't matter for unwinding frames. */
687 gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
688 initial_return_save (XEXP (rtl, 0));
695 if (reg != DWARF_FRAME_RETURN_COLUMN)
696 reg_save (true, DWARF_FRAME_RETURN_COLUMN, reg, offset - cfa.offset);
699 /* Given a SET, calculate the amount of stack adjustment it
/* NOTE(review): fragmentary excerpt — the rest of the header comment,
   the 'code' declaration, case labels (PRE_MODIFY/PRE_DEC/POST_DEC
   etc. per the visible bodies), returns and braces are elided by the
   paste.  Returns the adjustment (in bytes) this SET makes to the
   stack pointer, given the running args size and offset.  */
703 stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
704 HOST_WIDE_INT cur_offset)
706 const_rtx src = SET_SRC (pattern);
707 const_rtx dest = SET_DEST (pattern);
708 HOST_WIDE_INT offset = 0;
/* Case 1: the SET writes the stack pointer directly.  */
711 if (dest == stack_pointer_rtx)
713 code = GET_CODE (src);
715 /* Assume (set (reg sp) (reg whatever)) sets args_size
717 if (code == REG && src != stack_pointer_rtx)
719 offset = -cur_args_size;
720 #ifndef STACK_GROWS_DOWNWARD
723 return offset - cur_offset;
726 if (! (code == PLUS || code == MINUS)
727 || XEXP (src, 0) != stack_pointer_rtx
728 || !CONST_INT_P (XEXP (src, 1)))
731 /* (set (reg sp) (plus (reg sp) (const_int))) */
732 offset = INTVAL (XEXP (src, 1));
/* Case 2: a push/pop through an auto-modified sp address.  */
738 if (MEM_P (src) && !MEM_P (dest))
742 /* (set (mem (pre_dec (reg sp))) (foo)) */
743 src = XEXP (dest, 0);
744 code = GET_CODE (src);
/* Auto-modify with explicit new address (pre/post_modify).  */
750 if (XEXP (src, 0) == stack_pointer_rtx)
752 rtx val = XEXP (XEXP (src, 1), 1);
753 /* We handle only adjustments by constant amount. */
754 gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
755 && CONST_INT_P (val));
756 offset = -INTVAL (val);
/* Decrement forms push by the mode size of the stored value...  */
763 if (XEXP (src, 0) == stack_pointer_rtx)
765 offset = GET_MODE_SIZE (GET_MODE (dest));
/* ...increment forms pop by it.  */
772 if (XEXP (src, 0) == stack_pointer_rtx)
774 offset = -GET_MODE_SIZE (GET_MODE (dest));
789 /* Precomputed args_size for CODE_LABELs and BARRIERs preceeding them,
790 indexed by INSN_UID. */
792 static HOST_WIDE_INT *barrier_args_size;
794 /* Helper function for compute_barrier_args_size. Handle one insn. */
/* NOTE(review): fragmentary excerpt — 'i' declaration, braces, the
   early-return guards and the jump-target bookkeeping conditions are
   elided by the paste.  Propagates CUR_ARGS_SIZE across INSN and
   records newly reached jump targets in *NEXT.  */
797 compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
798 VEC (rtx, heap) **next)
800 HOST_WIDE_INT offset = 0;
803 if (! RTX_FRAME_RELATED_P (insn))
805 if (prologue_epilogue_contains (insn))
807 else if (GET_CODE (PATTERN (insn)) == SET)
808 offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
809 else if (GET_CODE (PATTERN (insn)) == PARALLEL
810 || GET_CODE (PATTERN (insn)) == SEQUENCE)
812 /* There may be stack adjustments inside compound insns. Search
814 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
815 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
816 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
817 cur_args_size, offset);
/* Frame-related insns: honor a REG_FRAME_RELATED_EXPR override.  */
822 rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
826 expr = XEXP (expr, 0);
827 if (GET_CODE (expr) == PARALLEL
828 || GET_CODE (expr) == SEQUENCE)
829 for (i = 1; i < XVECLEN (expr, 0); i++)
831 rtx elem = XVECEXP (expr, 0, i);
833 if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
834 offset += stack_adjust_offset (elem, cur_args_size, offset);
839 #ifndef STACK_GROWS_DOWNWARD
843 cur_args_size += offset;
/* args_size can never be negative.  */
844 if (cur_args_size < 0)
849 rtx dest = JUMP_LABEL (insn);
/* Queue an unvisited jump target with the current args size.  */
853 if (barrier_args_size [INSN_UID (dest)] < 0)
855 barrier_args_size [INSN_UID (dest)] = cur_args_size;
856 VEC_safe_push (rtx, heap, *next, dest);
861 return cur_args_size;
864 /* Walk the whole function and compute args_size on BARRIERs. */
/* NOTE(review): fragmentary excerpt — the 'insn' declaration, loop
   braces, 'continue'/'break' statements and the worklist/next swap
   are elided by the paste.  Worklist-based forward propagation of
   args_size, seeding from the function start.  */
867 compute_barrier_args_size (void)
869 int max_uid = get_max_uid (), i;
871 VEC (rtx, heap) *worklist, *next, *tmp;
/* -1 marks "not yet computed" for every insn uid.  */
873 barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
874 for (i = 0; i < max_uid; i++)
875 barrier_args_size[i] = -1;
877 worklist = VEC_alloc (rtx, heap, 20);
878 next = VEC_alloc (rtx, heap, 20);
880 barrier_args_size[INSN_UID (insn)] = 0;
881 VEC_quick_push (rtx, worklist, insn);
884 while (!VEC_empty (rtx, worklist))
886 rtx prev, body, first_insn;
887 HOST_WIDE_INT cur_args_size;
889 first_insn = insn = VEC_pop (rtx, worklist);
890 cur_args_size = barrier_args_size[INSN_UID (insn)];
/* A barrier just before the start point gets the same size.  */
891 prev = prev_nonnote_insn (insn);
892 if (prev && BARRIER_P (prev))
893 barrier_args_size[INSN_UID (prev)] = cur_args_size;
895 for (; insn; insn = NEXT_INSN (insn))
897 if (INSN_DELETED_P (insn) || NOTE_P (insn))
899 if (BARRIER_P (insn))
904 if (insn == first_insn)
906 else if (barrier_args_size[INSN_UID (insn)] < 0)
908 barrier_args_size[INSN_UID (insn)] = cur_args_size;
913 /* The insns starting with this label have been
914 already scanned or are in the worklist. */
919 body = PATTERN (insn);
/* Delay-slot SEQUENCEs: annulled-branch slots propagate along the
   branch edge only.  */
920 if (GET_CODE (body) == SEQUENCE)
922 HOST_WIDE_INT dest_args_size = cur_args_size;
923 for (i = 1; i < XVECLEN (body, 0); i++)
924 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
925 && INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
927 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
928 dest_args_size, &next);
931 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
932 cur_args_size, &next);
934 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
935 compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
936 dest_args_size, &next);
939 = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
940 cur_args_size, &next);
944 = compute_barrier_args_size_1 (insn, cur_args_size, &next);
948 if (VEC_empty (rtx, next))
951 /* Swap WORKLIST with NEXT and truncate NEXT for next iteration. */
955 VEC_truncate (rtx, next, 0);
958 VEC_free (rtx, heap, worklist);
959 VEC_free (rtx, heap, next);
962 /* Add a CFI to update the running total of the size of arguments
963 pushed onto the stack. */
966 dwarf2out_args_size (HOST_WIDE_INT size)
970 if (size == old_args_size)
973 old_args_size = size;
976 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
977 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
981 /* Record a stack adjustment of OFFSET bytes. */
984 dwarf2out_stack_adjust (HOST_WIDE_INT offset)
986 if (cfa.reg == STACK_POINTER_REGNUM)
987 cfa.offset += offset;
989 if (cfa_store.reg == STACK_POINTER_REGNUM)
990 cfa_store.offset += offset;
992 if (ACCUMULATE_OUTGOING_ARGS)
995 #ifndef STACK_GROWS_DOWNWARD
1003 def_cfa_1 (false, &cfa);
1004 if (flag_asynchronous_unwind_tables)
1005 dwarf2out_args_size (args_size);
1008 /* Check INSN to see if it looks like a push or a stack adjustment, and
1009 make a note of it if it does. EH uses this information to find out
1010 how much extra space it needs to pop off the stack. */
/* NOTE(review): fragmentary excerpt — the 'i' declaration, 'return'
   statements, braces and some guard conditions are elided by the
   paste.  Classifies INSN (call / barrier / SET / compound) and feeds
   the resulting byte adjustment to dwarf2out_stack_adjust.  */
1013 dwarf2out_notice_stack_adjust (rtx insn, bool after_p)
1015 HOST_WIDE_INT offset;
1018 /* Don't handle epilogues at all. Certainly it would be wrong to do so
1019 with this function. Proper support would require all frame-related
1020 insns to be marked, and to be able to handle saving state around
1021 epilogues textually in the middle of the function. */
1022 if (prologue_epilogue_contains (insn))
1025 /* If INSN is an instruction from target of an annulled branch, the
1026 effects are for the target only and so current argument size
1027 shouldn't change at all. */
1029 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
1030 && INSN_FROM_TARGET_P (insn))
1033 /* If only calls can throw, and we have a frame pointer,
1034 save up adjustments until we see the CALL_INSN. */
1035 if (!flag_asynchronous_unwind_tables && cfa.reg != STACK_POINTER_REGNUM)
1037 if (CALL_P (insn) && !after_p)
1039 /* Extract the size of the args from the CALL rtx itself. */
1040 insn = PATTERN (insn);
1041 if (GET_CODE (insn) == PARALLEL)
1042 insn = XVECEXP (insn, 0, 0);
1043 if (GET_CODE (insn) == SET)
1044 insn = SET_SRC (insn);
1045 gcc_assert (GET_CODE (insn) == CALL);
1046 dwarf2out_args_size (INTVAL (XEXP (insn, 1)));
1051 if (CALL_P (insn) && !after_p)
1053 if (!flag_asynchronous_unwind_tables)
1054 dwarf2out_args_size (args_size);
1057 else if (BARRIER_P (insn))
1059 /* Don't call compute_barrier_args_size () if the only
1060 BARRIER is at the end of function. */
1061 if (barrier_args_size == NULL && next_nonnote_insn (insn))
1062 compute_barrier_args_size ();
1063 if (barrier_args_size == NULL)
1067 offset = barrier_args_size[INSN_UID (insn)];
1072 offset -= args_size;
1073 #ifndef STACK_GROWS_DOWNWARD
1077 else if (GET_CODE (PATTERN (insn)) == SET)
1078 offset = stack_adjust_offset (PATTERN (insn), args_size, 0);
1079 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1080 || GET_CODE (PATTERN (insn)) == SEQUENCE)
1082 /* There may be stack adjustments inside compound insns. Search
1084 for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
1085 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1086 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
1095 dwarf2out_stack_adjust (offset);
1098 /* We delay emitting a register save until either (a) we reach the end
1099 of the prologue or (b) the register is clobbered. This clusters
1100 register saves so that there are fewer pc advances. */
1102 struct GTY(()) queued_reg_save {
1103 struct queued_reg_save *next;
1105 HOST_WIDE_INT cfa_offset;
1109 static GTY(()) struct queued_reg_save *queued_reg_saves;
1111 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
1112 typedef struct GTY(()) reg_saved_in_data {
1115 } reg_saved_in_data;
1117 DEF_VEC_O (reg_saved_in_data);
1118 DEF_VEC_ALLOC_O (reg_saved_in_data, gc);
1120 /* A set of registers saved in other registers. This is implemented as
1121 a flat array because it normally contains zero or 1 entry, depending
1122 on the target. IA-64 is the big spender here, using a maximum of
1124 static GTY(()) VEC(reg_saved_in_data, gc) *regs_saved_in_regs;
1126 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1129 compare_reg_or_pc (rtx x, rtx y)
1131 if (REG_P (x) && REG_P (y))
1132 return REGNO (x) == REGNO (y);
1136 /* Record SRC as being saved in DEST. DEST may be null to delete an
1137 existing entry. SRC may be a register or PC_RTX. */
1140 record_reg_saved_in_reg (rtx dest, rtx src)
1142 reg_saved_in_data *elt;
1145 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, elt)
1146 if (compare_reg_or_pc (elt->orig_reg, src))
1149 VEC_unordered_remove(reg_saved_in_data, regs_saved_in_regs, i);
1151 elt->saved_in_reg = dest;
1158 elt = VEC_safe_push(reg_saved_in_data, gc, regs_saved_in_regs, NULL);
1159 elt->orig_reg = src;
1160 elt->saved_in_reg = dest;
1163 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1164 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1167 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1169 struct queued_reg_save *q;
1171 /* Duplicates waste space, but it's also necessary to remove them
1172 for correctness, since the queue gets output in reverse
1174 for (q = queued_reg_saves; q != NULL; q = q->next)
1175 if (REGNO (q->reg) == REGNO (reg))
1180 q = ggc_alloc_queued_reg_save ();
1181 q->next = queued_reg_saves;
1182 queued_reg_saves = q;
1186 q->cfa_offset = offset;
1187 q->saved_reg = sreg;
1190 /* Output all the entries in QUEUED_REG_SAVES. */
1193 dwarf2out_flush_queued_reg_saves (void)
1195 struct queued_reg_save *q;
1197 for (q = queued_reg_saves; q; q = q->next)
1199 unsigned int reg, sreg;
1201 record_reg_saved_in_reg (q->saved_reg, q->reg);
1203 reg = DWARF_FRAME_REGNUM (REGNO (q->reg));
1205 sreg = DWARF_FRAME_REGNUM (REGNO (q->saved_reg));
1207 sreg = INVALID_REGNUM;
1208 reg_save (false, reg, sreg, q->cfa_offset);
1211 queued_reg_saves = NULL;
1214 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1215 location for? Or, does it clobber a register which we've previously
1216 said that some other register is saved in, and for which we now
1217 have a new location for? */
1220 clobbers_queued_reg_save (const_rtx insn)
1222 struct queued_reg_save *q;
1224 for (q = queued_reg_saves; q; q = q->next)
1227 reg_saved_in_data *rir;
1229 if (modified_in_p (q->reg, insn))
1232 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1233 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1234 && modified_in_p (rir->saved_in_reg, insn))
1241 /* What register, if any, is currently saved in REG? */
1244 reg_saved_in (rtx reg)
1246 unsigned int regn = REGNO (reg);
1247 struct queued_reg_save *q;
1248 reg_saved_in_data *rir;
1251 for (q = queued_reg_saves; q; q = q->next)
1252 if (q->saved_reg && regn == REGNO (q->saved_reg))
1255 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1256 if (regn == REGNO (rir->saved_in_reg))
1257 return rir->orig_reg;
1263 /* A temporary register holding an integral value used in adjusting SP
1264 or setting up the store_reg. The "offset" field holds the integer
1265 value, not an offset. */
1266 static dw_cfa_location cfa_temp;
1268 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1271 dwarf2out_frame_debug_def_cfa (rtx pat)
1273 memset (&cfa, 0, sizeof (cfa));
1275 switch (GET_CODE (pat))
1278 cfa.reg = REGNO (XEXP (pat, 0));
1279 cfa.offset = INTVAL (XEXP (pat, 1));
1283 cfa.reg = REGNO (pat);
1288 pat = XEXP (pat, 0);
1289 if (GET_CODE (pat) == PLUS)
1291 cfa.base_offset = INTVAL (XEXP (pat, 1));
1292 pat = XEXP (pat, 0);
1294 cfa.reg = REGNO (pat);
1298 /* Recurse and define an expression. */
1302 def_cfa_1 (false, &cfa);
1305 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1308 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1312 gcc_assert (GET_CODE (pat) == SET);
1313 dest = XEXP (pat, 0);
1314 src = XEXP (pat, 1);
1316 switch (GET_CODE (src))
1319 gcc_assert (REGNO (XEXP (src, 0)) == cfa.reg);
1320 cfa.offset -= INTVAL (XEXP (src, 1));
1330 cfa.reg = REGNO (dest);
1331 gcc_assert (cfa.indirect == 0);
1333 def_cfa_1 (false, &cfa);
1336 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
/* NOTE(review): fragmentary excerpt — case labels (REG/PLUS of the
   address, and presumably the PC_RTX test selecting the return
   column), the span == NULL branch selection, 'break's and braces are
   elided by the paste.  Records a save of SRC at a CFA-relative
   offset, expanding a target-provided register span piecewise.  */
1339 dwarf2out_frame_debug_cfa_offset (rtx set)
1341 HOST_WIDE_INT offset;
1342 rtx src, addr, span;
1343 unsigned int sregno;
1345 src = XEXP (set, 1);
1346 addr = XEXP (set, 0);
1347 gcc_assert (MEM_P (addr));
1348 addr = XEXP (addr, 0);
1350 /* As documented, only consider extremely simple addresses. */
1351 switch (GET_CODE (addr))
/* Plain CFA register: offset 0 relative to the CFA.  */
1354 gcc_assert (REGNO (addr) == cfa.reg);
1355 offset = -cfa.offset;
/* CFA register + constant.  */
1358 gcc_assert (REGNO (XEXP (addr, 0)) == cfa.reg);
1359 offset = INTVAL (XEXP (addr, 1)) - cfa.offset;
/* Saving the return address (presumably src == pc_rtx — confirm).  */
1368 sregno = DWARF_FRAME_RETURN_COLUMN;
1372 span = targetm.dwarf_register_span (src);
1373 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1376 /* ??? We'd like to use queue_reg_save, but we need to come up with
1377 a different flushing heuristic for epilogues. */
1379 reg_save (false, sregno, INVALID_REGNUM, offset);
1382 /* We have a PARALLEL describing where the contents of SRC live.
1383 Queue register saves for each piece of the PARALLEL. */
1386 HOST_WIDE_INT span_offset = offset;
1388 gcc_assert (GET_CODE (span) == PARALLEL);
1390 limit = XVECLEN (span, 0);
1391 for (par_index = 0; par_index < limit; par_index++)
1393 rtx elem = XVECEXP (span, 0, par_index);
1395 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1396 reg_save (false, sregno, INVALID_REGNUM, span_offset);
1397 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1402 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1405 dwarf2out_frame_debug_cfa_register (rtx set)
1408 unsigned sregno, dregno;
1410 src = XEXP (set, 1);
1411 dest = XEXP (set, 0);
1414 sregno = DWARF_FRAME_RETURN_COLUMN;
1417 record_reg_saved_in_reg (dest, src);
1418 sregno = DWARF_FRAME_REGNUM (REGNO (src));
1421 dregno = DWARF_FRAME_REGNUM (REGNO (dest));
1423 /* ??? We'd like to use queue_reg_save, but we need to come up with
1424 a different flushing heuristic for epilogues. */
1425 reg_save (false, sregno, dregno, 0);
1428 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1431 dwarf2out_frame_debug_cfa_expression (rtx set)
1433 rtx src, dest, span;
1434 dw_cfi_ref cfi = new_cfi ();
1436 dest = SET_DEST (set);
1437 src = SET_SRC (set);
1439 gcc_assert (REG_P (src));
1440 gcc_assert (MEM_P (dest));
1442 span = targetm.dwarf_register_span (src);
1445 cfi->dw_cfi_opc = DW_CFA_expression;
1446 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = DWARF_FRAME_REGNUM (REGNO (src));
1447 cfi->dw_cfi_oprnd2.dw_cfi_loc
1448 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1449 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1451 /* ??? We'd like to use queue_reg_save, were the interface different,
1452 and, as above, we could manage flushing for epilogues. */
1456 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1459 dwarf2out_frame_debug_cfa_restore (rtx reg)
1461 dw_cfi_ref cfi = new_cfi ();
1462 unsigned int regno = DWARF_FRAME_REGNUM (REGNO (reg));
1464 cfi->dw_cfi_opc = (regno & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
1465 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1470 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1471 ??? Perhaps we should note in the CIE where windows are saved (instead of
1472 assuming 0(cfa)) and what registers are in the window. */
1475 dwarf2out_frame_debug_cfa_window_save (void)
1477 dw_cfi_ref cfi = new_cfi ();
/* Emit the GNU extension opcode recording a register-window save
   (used by SPARC-style register windows).  */
1479 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1483 /* Record call frame debugging information for an expression EXPR,
1484 which either sets SP or FP (adjusting how we calculate the frame
1485 address) or saves a register to the stack or another register.
1486 LABEL indicates the address of EXPR.
1488 This function encodes a state machine mapping rtxes to actions on
1489 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1490 users need not read the source code.
1492 The High-Level Picture
1494 Changes in the register we use to calculate the CFA: Currently we
1495 assume that if you copy the CFA register into another register, we
1496 should take the other one as the new CFA register; this seems to
1497 work pretty well. If it's wrong for some target, it's simple
1498 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1500 Changes in the register we use for saving registers to the stack:
1501 This is usually SP, but not always. Again, we deduce that if you
1502 copy SP into another register (and SP is not the CFA register),
1503 then the new register is the one we will be using for register
1504 saves. This also seems to work.
1506 Register saves: There's not much guesswork about this one; if
1507 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1508 register save, and the register used to calculate the destination
1509 had better be the one we think we're using for this purpose.
1510 It's also assumed that a copy from a call-saved register to another
1511 register is saving that register if RTX_FRAME_RELATED_P is set on
1512 that instruction. If the copy is from a call-saved register to
1513 the *same* register, that means that the register is now the same
1514 value as in the caller.
1516 Except: If the register being saved is the CFA register, and the
1517 offset is nonzero, we are saving the CFA, so we assume we have to
1518 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1519 the intent is to save the value of SP from the previous frame.
1521 In addition, if a register has previously been saved to a different
1524 Invariants / Summaries of Rules
1526 cfa current rule for calculating the CFA. It usually
1527 consists of a register and an offset.
1528 cfa_store register used by prologue code to save things to the stack
1529 cfa_store.offset is the offset from the value of
1530 cfa_store.reg to the actual CFA
1531 cfa_temp register holding an integral value. cfa_temp.offset
1532 stores the value, which will be used to adjust the
1533 stack pointer. cfa_temp is also used like cfa_store,
1534 to track stores to the stack via fp or a temp reg.
1536 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1537 with cfa.reg as the first operand changes the cfa.reg and its
1538 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1541 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1542 expression yielding a constant. This sets cfa_temp.reg
1543 and cfa_temp.offset.
1545 Rule 5: Create a new register cfa_store used to save items to the
1548 Rules 10-14: Save a register to the stack. Define offset as the
1549 difference of the original location and cfa_store's
1550 location (or cfa_temp's location if cfa_temp is used).
1552 Rules 16-20: If AND operation happens on sp in prologue, we assume
1553 stack is realigned. We will use a group of DW_OP_XXX
1554 expressions to represent the location of the stored
1555 register instead of CFA+offset.
1559 "{a,b}" indicates a choice of a xor b.
1560 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1563 (set <reg1> <reg2>:cfa.reg)
1564 effects: cfa.reg = <reg1>
1565 cfa.offset unchanged
1566 cfa_temp.reg = <reg1>
1567 cfa_temp.offset = cfa.offset
1570 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1571 {<const_int>,<reg>:cfa_temp.reg}))
1572 effects: cfa.reg = sp if fp used
1573 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1574 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1575 if cfa_store.reg==sp
1578 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1579 effects: cfa.reg = fp
1580 cfa_offset += +/- <const_int>
1583 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1584 constraints: <reg1> != fp
1586 effects: cfa.reg = <reg1>
1587 cfa_temp.reg = <reg1>
1588 cfa_temp.offset = cfa.offset
1591 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1592 constraints: <reg1> != fp
1594 effects: cfa_store.reg = <reg1>
1595 cfa_store.offset = cfa.offset - cfa_temp.offset
1598 (set <reg> <const_int>)
1599 effects: cfa_temp.reg = <reg>
1600 cfa_temp.offset = <const_int>
1603 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1604 effects: cfa_temp.reg = <reg1>
1605 cfa_temp.offset |= <const_int>
1608 (set <reg> (high <exp>))
1612 (set <reg> (lo_sum <exp> <const_int>))
1613 effects: cfa_temp.reg = <reg>
1614 cfa_temp.offset = <const_int>
1617 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1618 effects: cfa_store.offset -= <const_int>
1619 cfa.offset = cfa_store.offset if cfa.reg == sp
1621 cfa.base_offset = -cfa_store.offset
1624 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1625 effects: cfa_store.offset += -/+ mode_size(mem)
1626 cfa.offset = cfa_store.offset if cfa.reg == sp
1628 cfa.base_offset = -cfa_store.offset
1631 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1634 effects: cfa.reg = <reg1>
1635 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1638 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1639 effects: cfa.reg = <reg1>
1640 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1643 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1644 effects: cfa.reg = <reg1>
1645 cfa.base_offset = -cfa_temp.offset
1646 cfa_temp.offset -= mode_size(mem)
1649 (set <reg> {unspec, unspec_volatile})
1650 effects: target-dependent
1653 (set sp (and: sp <const_int>))
1654 constraints: cfa_store.reg == sp
1655 effects: cfun->fde.stack_realign = 1
1656 cfa_store.offset = 0
1657 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1660 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1661 effects: cfa_store.offset += -/+ mode_size(mem)
1664 (set (mem ({pre_inc, pre_dec} sp)) fp)
1665 constraints: fde->stack_realign == 1
1666 effects: cfa_store.offset = 0
1667 cfa.reg != HARD_FRAME_POINTER_REGNUM
1670 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1671 constraints: fde->stack_realign == 1
1673 && cfa.indirect == 0
1674 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1675 effects: Use DW_CFA_def_cfa_expression to define cfa
1676 cfa.reg == fde->drap_reg */
/* Interpret EXPR (a frame-related SET, PARALLEL or SEQUENCE) and update the
   cfa / cfa_store / cfa_temp state machine; the rule numbers referenced in
   comments below are the Rules 1-20 described in the block comment above.
   NOTE(review): many original lines are elided from this view (braces,
   case labels, some declarations); comments on elided logic are hedged.  */
1679 dwarf2out_frame_debug_expr (rtx expr)
1681 rtx src, dest, span;
1682 HOST_WIDE_INT offset;
1685 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1686 the PARALLEL independently. The first element is always processed if
1687 it is a SET. This is for backward compatibility. Other elements
1688 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1689 flag is set in them. */
1690 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1693 int limit = XVECLEN (expr, 0);
1696 /* PARALLELs have strict read-modify-write semantics, so we
1697 ought to evaluate every rvalue before changing any lvalue.
1698 It's cumbersome to do that in general, but there's an
1699 easy approximation that is enough for all current users:
1700 handle register saves before register assignments. */
1701 if (GET_CODE (expr) == PARALLEL)
1702 for (par_index = 0; par_index < limit; par_index++)
1704 elem = XVECEXP (expr, 0, par_index);
1705 if (GET_CODE (elem) == SET
1706 && MEM_P (SET_DEST (elem))
1707 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1708 dwarf2out_frame_debug_expr (elem);
/* Second pass: register assignments (non-MEM destinations); SEQUENCEs also
   take MEM destinations here since they were not handled above.  */
1711 for (par_index = 0; par_index < limit; par_index++)
1713 elem = XVECEXP (expr, 0, par_index);
1714 if (GET_CODE (elem) == SET
1715 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1716 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1717 dwarf2out_frame_debug_expr (elem);
1718 else if (GET_CODE (elem) == SET
1720 && !RTX_FRAME_RELATED_P (elem))
1722 /* Stack adjustment combining might combine some post-prologue
1723 stack adjustment into a prologue stack adjustment. */
1724 HOST_WIDE_INT offset = stack_adjust_offset (elem, args_size, 0);
1727 dwarf2out_stack_adjust (offset);
/* From here on EXPR must be a single SET.  */
1733 gcc_assert (GET_CODE (expr) == SET);
1735 src = SET_SRC (expr);
1736 dest = SET_DEST (expr);
/* If SRC was previously noted as saved in another register, presumably the
   elided code redirects SRC to that register — TODO confirm.  */
1740 rtx rsi = reg_saved_in (src);
1747 switch (GET_CODE (dest))
1750 switch (GET_CODE (src))
1752 /* Setting FP from SP. */
1754 if (cfa.reg == (unsigned) REGNO (src))
1757 /* Update the CFA rule wrt SP or FP. Make sure src is
1758 relative to the current CFA register.
1760 We used to require that dest be either SP or FP, but the
1761 ARM copies SP to a temporary register, and from there to
1762 FP. So we just rely on the backends to only set
1763 RTX_FRAME_RELATED_P on appropriate insns. */
/* Rule 1: a copy of cfa.reg makes DEST the new CFA register.  */
1764 cfa.reg = REGNO (dest);
1765 cfa_temp.reg = cfa.reg;
1766 cfa_temp.offset = cfa.offset;
1770 /* Saving a register in a register. */
1771 gcc_assert (!fixed_regs [REGNO (dest)]
1772 /* For the SPARC and its register window. */
1773 || (DWARF_FRAME_REGNUM (REGNO (src))
1774 == DWARF_FRAME_RETURN_COLUMN));
1776 /* After stack is aligned, we can only save SP in FP
1777 if drap register is used. In this case, we have
1778 to restore stack pointer with the CFA value and we
1779 don't generate this DWARF information. */
1781 && fde->stack_realign
1782 && REGNO (src) == STACK_POINTER_REGNUM)
1783 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1784 && fde->drap_reg != INVALID_REGNUM
1785 && cfa.reg != REGNO (src));
/* Delay emitting the save; the queue is flushed by the caller.  */
1787 queue_reg_save (src, dest, 0);
/* Arithmetic sources: Rules 2-5 depending on DEST and operand shape.  */
1794 if (dest == stack_pointer_rtx)
1798 switch (GET_CODE (XEXP (src, 1)))
1801 offset = INTVAL (XEXP (src, 1));
/* Second operand is a register: it must be cfa_temp (assert partially
   elided) and its tracked constant is the adjustment.  */
1804 gcc_assert ((unsigned) REGNO (XEXP (src, 1))
1806 offset = cfa_temp.offset;
1812 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1814 /* Restoring SP from FP in the epilogue. */
1815 gcc_assert (cfa.reg == (unsigned) HARD_FRAME_POINTER_REGNUM);
1816 cfa.reg = STACK_POINTER_REGNUM;
1818 else if (GET_CODE (src) == LO_SUM)
1819 /* Assume we've set the source reg of the LO_SUM from sp. */
1822 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
/* Rule 2: adjust offsets tracked against SP.  Presumably OFFSET was
   negated for non-MINUS codes in an elided line — TODO confirm.  */
1824 if (GET_CODE (src) != MINUS)
1826 if (cfa.reg == STACK_POINTER_REGNUM)
1827 cfa.offset += offset;
1828 if (cfa_store.reg == STACK_POINTER_REGNUM)
1829 cfa_store.offset += offset;
1831 else if (dest == hard_frame_pointer_rtx)
1834 /* Rule 3: Either setting the FP from an offset of the SP,
1835 or adjusting the FP */
1836 gcc_assert (frame_pointer_needed);
1838 gcc_assert (REG_P (XEXP (src, 0))
1839 && (unsigned) REGNO (XEXP (src, 0)) == cfa.reg
1840 && CONST_INT_P (XEXP (src, 1)));
1841 offset = INTVAL (XEXP (src, 1));
1842 if (GET_CODE (src) != MINUS)
1844 cfa.offset += offset;
1845 cfa.reg = HARD_FRAME_POINTER_REGNUM;
1849 gcc_assert (GET_CODE (src) != MINUS);
/* Rule 4: <reg1> = <reg2>:cfa.reg + const_int.  */
1852 if (REG_P (XEXP (src, 0))
1853 && REGNO (XEXP (src, 0)) == cfa.reg
1854 && CONST_INT_P (XEXP (src, 1)))
1856 /* Setting a temporary CFA register that will be copied
1857 into the FP later on. */
1858 offset = - INTVAL (XEXP (src, 1));
1859 cfa.offset += offset;
1860 cfa.reg = REGNO (dest);
1861 /* Or used to save regs to the stack. */
1862 cfa_temp.reg = cfa.reg;
1863 cfa_temp.offset = cfa.offset;
/* Rule 5: <reg1> = <reg2>:cfa_temp.reg + sp becomes the new cfa_store.  */
1867 else if (REG_P (XEXP (src, 0))
1868 && REGNO (XEXP (src, 0)) == cfa_temp.reg
1869 && XEXP (src, 1) == stack_pointer_rtx)
1871 /* Setting a scratch register that we will use instead
1872 of SP for saving registers to the stack. */
1873 gcc_assert (cfa.reg == STACK_POINTER_REGNUM)
1874 cfa_store.reg = REGNO (dest);
1875 cfa_store.offset = cfa.offset - cfa_temp.offset;
/* Rule 9: LO_SUM with a constant records the constant in cfa_temp.  */
1879 else if (GET_CODE (src) == LO_SUM
1880 && CONST_INT_P (XEXP (src, 1)))
1882 cfa_temp.reg = REGNO (dest);
1883 cfa_temp.offset = INTVAL (XEXP (src, 1));
/* Rule 6: a constant load records the value in cfa_temp.  */
1892 cfa_temp.reg = REGNO (dest);
1893 cfa_temp.offset = INTVAL (src);
/* Rule 7: IOR of cfa_temp.reg with a constant.  */
1898 gcc_assert (REG_P (XEXP (src, 0))
1899 && (unsigned) REGNO (XEXP (src, 0)) == cfa_temp.reg
1900 && CONST_INT_P (XEXP (src, 1)));
1902 if ((unsigned) REGNO (dest) != cfa_temp.reg)
1903 cfa_temp.reg = REGNO (dest);
1904 cfa_temp.offset |= INTVAL (XEXP (src, 1));
1907 /* Rule 8: Skip over HIGH, assuming it will be followed by a LO_SUM,
1908 which will fill in all of the bits. */
1915 case UNSPEC_VOLATILE:
1916 /* Rule 15: All unspecs should be represented by REG_CFA_* notes. */
1922 /* Rule 16: If this AND operation happens on stack pointer in prologue,
1923 we assume the stack is realigned and we extract the
1925 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1927 /* We interpret reg_save differently with stack_realign set.
1928 Thus we must flush whatever we have queued first. */
1929 dwarf2out_flush_queued_reg_saves ();
1931 gcc_assert (cfa_store.reg == REGNO (XEXP (src, 0)));
1932 fde->stack_realign = 1;
1933 fde->stack_realignment = INTVAL (XEXP (src, 1));
1934 cfa_store.offset = 0;
/* If the CFA lives in neither SP nor FP, that register is the DRAP.  */
1936 if (cfa.reg != STACK_POINTER_REGNUM
1937 && cfa.reg != HARD_FRAME_POINTER_REGNUM)
1938 fde->drap_reg = cfa.reg;
/* Record the CFA rule computed by the REG arm above.  */
1946 def_cfa_1 (false, &cfa);
1951 /* Saving a register to the stack. Make sure dest is relative to the
1953 switch (GET_CODE (XEXP (dest, 0)))
1959 /* Rule 10: We can't handle variable size modifications. */
1960 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1962 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1964 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1965 && cfa_store.reg == STACK_POINTER_REGNUM);
1967 cfa_store.offset += offset;
1968 if (cfa.reg == STACK_POINTER_REGNUM)
1969 cfa.offset = cfa_store.offset;
/* For POST_MODIFY the register save itself is at the pre-modified
   address, hence the different offset computation.  */
1971 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1972 offset -= cfa_store.offset;
1974 offset = -cfa_store.offset;
/* Rule 11: push via {pre_inc,pre_dec,post_dec} of SP.  */
1981 offset = GET_MODE_SIZE (GET_MODE (dest));
1982 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1985 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1986 == STACK_POINTER_REGNUM)
1987 && cfa_store.reg == STACK_POINTER_REGNUM);
1989 cfa_store.offset += offset;
1991 /* Rule 18: If stack is aligned, we will use FP as a
1992 reference to represent the address of the stored
1995 && fde->stack_realign
1996 && src == hard_frame_pointer_rtx)
1998 gcc_assert (cfa.reg != HARD_FRAME_POINTER_REGNUM);
1999 cfa_store.offset = 0;
2002 if (cfa.reg == STACK_POINTER_REGNUM)
2003 cfa.offset = cfa_store.offset;
2005 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
2006 offset += -cfa_store.offset;
2008 offset = -cfa_store.offset;
2012 /* Rule 12: store at register + offset. */
2019 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
2020 && REG_P (XEXP (XEXP (dest, 0), 0)));
2021 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
2022 if (GET_CODE (XEXP (dest, 0)) == MINUS)
2025 regno = REGNO (XEXP (XEXP (dest, 0), 0));
/* Convert the address's base-register offset into a CFA-relative one.  */
2027 if (cfa.reg == (unsigned) regno)
2028 offset -= cfa.offset;
2029 else if (cfa_store.reg == (unsigned) regno)
2030 offset -= cfa_store.offset;
2033 gcc_assert (cfa_temp.reg == (unsigned) regno);
2034 offset -= cfa_temp.offset;
2040 /* Rule 13: store at a bare register, without an offset. */
2043 int regno = REGNO (XEXP (dest, 0));
2045 if (cfa.reg == (unsigned) regno)
2046 offset = -cfa.offset;
2047 else if (cfa_store.reg == (unsigned) regno)
2048 offset = -cfa_store.offset;
2051 gcc_assert (cfa_temp.reg == (unsigned) regno);
2052 offset = -cfa_temp.offset;
/* Rule 14: store through post_inc of cfa_temp.  */
2059 gcc_assert (cfa_temp.reg
2060 == (unsigned) REGNO (XEXP (XEXP (dest, 0), 0)));
2061 offset = -cfa_temp.offset;
2062 cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
2070 /* If the source operand of this MEM operation is not a
2071 register, basically the source is return address. Here
2072 we only care how much stack grew and we don't save it. */
2076 if (REGNO (src) != STACK_POINTER_REGNUM
2077 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
2078 && (unsigned) REGNO (src) == cfa.reg)
2080 /* We're storing the current CFA reg into the stack. */
2082 if (cfa.offset == 0)
2085 /* Rule 19: If stack is aligned, putting CFA reg into stack means
2086 we can no longer use reg + offset to represent CFA.
2087 Here we use DW_CFA_def_cfa_expression instead. The
2088 result of this expression equals to the original CFA
2091 && fde->stack_realign
2092 && cfa.indirect == 0
2093 && cfa.reg != HARD_FRAME_POINTER_REGNUM)
2095 dw_cfa_location cfa_exp;
2097 gcc_assert (fde->drap_reg == cfa.reg);
2099 cfa_exp.indirect = 1;
2100 cfa_exp.reg = HARD_FRAME_POINTER_REGNUM;
2101 cfa_exp.base_offset = offset;
2104 fde->drap_reg_saved = 1;
2106 def_cfa_1 (false, &cfa_exp);
2110 /* If the source register is exactly the CFA, assume
2111 we're saving SP like any other register; this happens
2113 def_cfa_1 (false, &cfa);
2114 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
2119 /* Otherwise, we'll need to look in the stack to
2120 calculate the CFA. */
2121 rtx x = XEXP (dest, 0);
2125 gcc_assert (REG_P (x));
2127 cfa.reg = REGNO (x);
2128 cfa.base_offset = offset;
2130 def_cfa_1 (false, &cfa);
/* An ordinary register save: commit the current CFA rule, then queue a
   save of SRC at OFFSET from the CFA.  */
2135 def_cfa_1 (false, &cfa);
2137 span = targetm.dwarf_register_span (src);
2140 queue_reg_save (src, NULL_RTX, offset);
2143 /* We have a PARALLEL describing where the contents of SRC
2144 live. Queue register saves for each piece of the
2148 HOST_WIDE_INT span_offset = offset;
2150 gcc_assert (GET_CODE (span) == PARALLEL);
2152 limit = XVECLEN (span, 0);
2153 for (par_index = 0; par_index < limit; par_index++)
2155 rtx elem = XVECEXP (span, 0, par_index);
2157 queue_reg_save (elem, NULL_RTX, span_offset);
2158 span_offset += GET_MODE_SIZE (GET_MODE (elem));
2169 /* Record call frame debugging information for INSN, which either
2170 sets SP or FP (adjusting how we calculate the frame address) or saves a
2171 register to the stack. If INSN is NULL_RTX, initialize our state.
2173 If AFTER_P is false, we're being called before the insn is emitted,
2174 otherwise after. Call instructions get invoked twice.
   NOTE(review): interior lines (braces, `break;`s, some guards) are elided
   from this view; per-note comments below are hedged where that matters.  */
2177 dwarf2out_frame_debug (rtx insn, bool after_p)
2180 bool handled_one = false;
2181 bool need_flush = false;
2183 /* Remember where we are to insert notes. Do not separate tablejump
2184 insns from their ADDR_DIFF_VEC. Putting the note after the VEC
2188 if (!tablejump_p (insn, NULL, &cfi_insn))
2192 cfi_insn = PREV_INSN (insn);
/* Anything that isn't a plain insn, or that clobbers a register we have a
   queued save for, forces the queue out now.  */
2194 if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
2195 dwarf2out_flush_queued_reg_saves ();
2197 if (!RTX_FRAME_RELATED_P (insn))
2199 /* ??? This should be done unconditionally since stack adjustments
2200 matter if the stack pointer is not the CFA register anymore but
2201 is still used to save registers. */
2202 if (!ACCUMULATE_OUTGOING_ARGS)
2203 dwarf2out_notice_stack_adjust (insn, after_p);
2208 any_cfis_emitted = false;
/* Dispatch on each REG_CFA_* note attached to the insn; each note kind has
   a dedicated subroutine.  */
2210 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2211 switch (REG_NOTE_KIND (note))
2213 case REG_FRAME_RELATED_EXPR:
/* The note supplies a substitute pattern to interpret below.  */
2214 insn = XEXP (note, 0);
2217 case REG_CFA_DEF_CFA:
2218 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2222 case REG_CFA_ADJUST_CFA:
2227 if (GET_CODE (n) == PARALLEL)
2228 n = XVECEXP (n, 0, 0);
2230 dwarf2out_frame_debug_adjust_cfa (n);
2234 case REG_CFA_OFFSET:
2237 n = single_set (insn);
2238 dwarf2out_frame_debug_cfa_offset (n);
2242 case REG_CFA_REGISTER:
2247 if (GET_CODE (n) == PARALLEL)
2248 n = XVECEXP (n, 0, 0);
2250 dwarf2out_frame_debug_cfa_register (n);
2254 case REG_CFA_EXPRESSION:
2257 n = single_set (insn);
2258 dwarf2out_frame_debug_cfa_expression (n);
2262 case REG_CFA_RESTORE:
2267 if (GET_CODE (n) == PARALLEL)
2268 n = XVECEXP (n, 0, 0);
2271 dwarf2out_frame_debug_cfa_restore (n);
2275 case REG_CFA_SET_VDRAP:
2279 dw_fde_ref fde = cfun->fde;
/* The virtual DRAP register may be recorded only once per FDE.  */
2282 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2284 fde->vdrap_reg = REGNO (n);
2290 case REG_CFA_WINDOW_SAVE:
2291 dwarf2out_frame_debug_cfa_window_save ();
2295 case REG_CFA_FLUSH_QUEUE:
2296 /* The actual flush happens below. */
2307 /* Minimize the number of advances by emitting the entire queue
2308 once anything is emitted. */
2309 need_flush |= any_cfis_emitted;
/* No REG_CFA_* note handled the insn: interpret its pattern directly.  */
2313 insn = PATTERN (insn);
2315 dwarf2out_frame_debug_expr (insn);
2317 /* Check again. A parallel can save and update the same register.
2318 We could probably check just once, here, but this is safer than
2319 removing the check at the start of the function. */
2320 if (any_cfis_emitted || clobbers_queued_reg_save (insn))
2325 dwarf2out_flush_queued_reg_saves ();
2329 /* Examine CFI and return true if a cfi label and set_loc is needed
2330 beforehand. Even when generating CFI assembler instructions, we
2331 still have to add the cfi to the list so that lookup_cfa works
2332 later on. When -g2 and above we even need to force emitting of
2333 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2334 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2335 and so don't use convert_cfa_to_fb_loc_list. */
2338 cfi_label_required_p (dw_cfi_ref cfi)
/* Without .cfi_* directives every CFI needs a label (the elided return is
   presumably `true` — TODO confirm).  */
2340 if (!dwarf2out_do_cfi_asm ())
/* DWARF2 + -g2-or-better: CFA-changing opcodes need labels for
   convert_cfa_to_fb_loc_list.  */
2343 if (dwarf_version == 2
2344 && debug_info_level > DINFO_LEVEL_TERSE
2345 && (write_symbols == DWARF2_DEBUG
2346 || write_symbols == VMS_AND_DWARF2_DEBUG))
2348 switch (cfi->dw_cfi_opc)
2350 case DW_CFA_def_cfa_offset:
2351 case DW_CFA_def_cfa_offset_sf:
2352 case DW_CFA_def_cfa_register:
2353 case DW_CFA_def_cfa:
2354 case DW_CFA_def_cfa_sf:
2355 case DW_CFA_def_cfa_expression:
2356 case DW_CFA_restore_state:
2365 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2366 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2369 add_cfis_to_fde (void)
2371 dw_fde_ref fde = cfun->fde;
2373 /* We always start with a function_begin label. */
2376 for (insn = get_insns (); insn; insn = next)
2378 next = NEXT_INSN (insn);
2380 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2382 /* Don't attempt to advance_loc4 between labels
2383 in different sections. */
2387 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
/* Coalesce a run of consecutive CFI notes; one label suffices for all
   of them if any one requires it.  */
2389 bool required = cfi_label_required_p (NOTE_CFI (insn));
2390 while (next && NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2392 required |= cfi_label_required_p (NOTE_CFI (next));
2393 next = NEXT_INSN (next);
2397 int num = dwarf2out_cfi_label_num;
2398 const char *label = dwarf2out_cfi_label ();
2402 /* Set the location counter to the new label. */
/* `first` presumably means first label in the (sub)section, where a
   full set_loc rather than a delta is needed — TODO confirm.  */
2404 xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2405 : DW_CFA_advance_loc4);
2406 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2407 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, xcfi);
/* Materialize the label in the insn stream so final() emits it.  */
2409 tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2410 NOTE_LABEL_NUMBER (tmp) = num;
/* Push every CFI of the coalesced run onto the FDE's vector.  */
2415 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, NOTE_CFI (insn));
2416 insn = NEXT_INSN (insn);
2418 while (insn != next);
2424 /* Scan the function and create the initial set of CFI notes. */
2427 create_cfi_notes (void)
2431 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
/* A barrier still needs frame-debug processing (e.g. to flush state).  */
2435 if (BARRIER_P (insn))
2437 dwarf2out_frame_debug (insn, false);
2443 switch (NOTE_KIND (insn))
2445 case NOTE_INSN_EPILOGUE_BEG:
2446 #if defined(HAVE_epilogue)
2447 dwarf2out_cfi_begin_epilogue (insn);
2450 case NOTE_INSN_CFA_RESTORE_STATE:
2452 dwarf2out_frame_debug_restore_state ();
2459 if (!NONDEBUG_INSN_P (insn))
2462 pat = PATTERN (insn);
/* inline asm is processed as-is; it cannot be a delay-slot SEQUENCE.  */
2463 if (asm_noperands (pat) >= 0)
2465 dwarf2out_frame_debug (insn, false);
/* Delay-slot SEQUENCE: process the slot insns (indices 1..n-1); the
   branch itself (index 0) is presumably handled by elided code.  */
2469 if (GET_CODE (pat) == SEQUENCE)
2471 int i, n = XVECLEN (pat, 0);
2472 for (i = 1; i < n; ++i)
2473 dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
/* Calls (and insns with an explicit flush note) are processed both
   before and after emission; AFTER_P distinguishes the two calls.  */
2477 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2478 dwarf2out_frame_debug (insn, false);
2480 dwarf2out_frame_debug (insn, true);
2484 /* Determine if we need to save and restore CFI information around this
2485 epilogue. If SIBCALL is true, then this is a sibcall epilogue. If
2486 we do need to save/restore, then emit the save now, and insert a
2487 NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream. */
2490 dwarf2out_cfi_begin_epilogue (rtx insn)
2492 bool saw_frp = false;
2495 /* Scan forward to the return insn, noticing if there are possible
2496 frame related insns. */
2497 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
2502 /* Look for both regular and sibcalls to end the block. */
2503 if (returnjump_p (i))
2505 if (CALL_P (i) && SIBLING_CALL_P (i))
/* The return may be hidden inside a delay-slot SEQUENCE; inspect its
   first element and each slot for frame-relatedness.  */
2508 if (GET_CODE (PATTERN (i)) == SEQUENCE)
2511 rtx seq = PATTERN (i);
2513 if (returnjump_p (XVECEXP (seq, 0, 0)))
2515 if (CALL_P (XVECEXP (seq, 0, 0))
2516 && SIBLING_CALL_P (XVECEXP (seq, 0, 0)))
2519 for (idx = 0; idx < XVECLEN (seq, 0); idx++)
2520 if (RTX_FRAME_RELATED_P (XVECEXP (seq, 0, idx)))
2524 if (RTX_FRAME_RELATED_P (i))
2528 /* If the port doesn't emit epilogue unwind info, we don't need a
2529 save/restore pair. */
2533 /* Otherwise, search forward to see if the return insn was the last
2534 basic block of the function. If so, we don't need save/restore. */
2535 gcc_assert (i != NULL);
2536 i = next_real_insn (i);
2540 /* Insert the restore before that next real insn in the stream, and before
2541 a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
2542 properly nested. This should be after any label or alignment. This
2543 will be pushed into the CFI stream by the function below. */
2546 rtx p = PREV_INSN (i);
2549 if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
2553 emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);
/* Tell the CFI emitter to output a DW_CFA_remember_state first.  */
2555 emit_cfa_remember = true;
2557 /* And emulate the state save. */
2558 gcc_assert (!cfa_remember.in_use);
2560 old_cfa_remember = old_cfa;
2561 cfa_remember.in_use = 1;
2564 /* A "subroutine" of dwarf2out_cfi_begin_epilogue. Emit the restore
   of the CFA state saved by dwarf2out_cfi_begin_epilogue: emit a
   DW_CFA_restore_state opcode and roll our tracked state back to the
   remembered values.  */
2568 dwarf2out_frame_debug_restore_state (void)
2570 dw_cfi_ref cfi = new_cfi ();
2572 cfi->dw_cfi_opc = DW_CFA_restore_state;
/* Pair-wise with the remember in dwarf2out_cfi_begin_epilogue.  */
2575 gcc_assert (cfa_remember.in_use);
2577 old_cfa = old_cfa_remember;
2578 cfa_remember.in_use = 0;
2582 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2583 state at each location within the function. These notes will be
2584 emitted during pass_final. */
2587 execute_dwarf2_frame (void)
2589 /* The first time we're called, compute the incoming frame state. */
2590 if (cie_cfi_vec == NULL)
2592 dw_cfa_location loc;
2594 memset(&old_cfa, 0, sizeof (old_cfa));
2595 old_cfa.reg = INVALID_REGNUM;
2597 /* On entry, the Canonical Frame Address is at SP. */
2598 memset(&loc, 0, sizeof (loc));
2599 loc.reg = STACK_POINTER_REGNUM;
2600 loc.offset = INCOMING_FRAME_SP_OFFSET;
/* Record the incoming CFA rule in the CIE (for_cie == true).  */
2601 def_cfa_1 (true, &loc);
2603 if (targetm.debug_unwind_info () == UI_DWARF2
2604 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2605 initial_return_save (INCOMING_RETURN_ADDR_RTX);
2608 /* Set up state for generating call frame debug info. */
2611 == (unsigned long)DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM));
2614 cfa.reg = STACK_POINTER_REGNUM;
2617 cfa_temp.offset = 0;
2619 dwarf2out_alloc_current_fde ();
/* Do the walk; add_cfis_to_fde (elided here) presumably follows.  */
2622 create_cfi_notes ();
2625 /* Reset all function-specific information, particularly for GC. */
2626 XDELETEVEC (barrier_args_size);
2627 barrier_args_size = NULL;
2628 regs_saved_in_regs = NULL;
2629 queued_reg_saves = NULL;
2630 args_size = old_args_size = 0;
2636 /* Save the result of dwarf2out_do_frame across PCH.
2637 This variable is tri-state, with 0 unset, >0 true, <0 false.
   NOTE(review): despite the wording above, this caches the result of
   dwarf2out_do_cfi_asm — it is read and written only there (below).  */
2638 static GTY(()) signed char saved_do_cfi_asm = 0;
2640 /* Decide whether we want to emit frame unwind information for the current
2641 translation unit.  Returns true (the returns themselves are elided from
   this view) when any consumer needs CFA information: debug info, a
   cached positive .cfi answer, target-requested dwarf2 unwind, or
   exception/unwind tables in dwarf2 format.  */
2644 dwarf2out_do_frame (void)
2646 /* We want to emit correct CFA location expressions or lists, so we
2647 have to return true if we're going to output debug info, even if
2648 we're not going to output frame or unwind info. */
2649 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
2652 if (saved_do_cfi_asm > 0)
2655 if (targetm.debug_unwind_info () == UI_DWARF2)
2658 if ((flag_unwind_tables || flag_exceptions)
2659 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2665 /* Decide whether to emit frame unwind via assembler directives
   (.cfi_*).  The answer is computed once and cached in saved_do_cfi_asm;
   the early-return lines are elided from this view.  */
2668 dwarf2out_do_cfi_asm (void)
/* MIPS debugging info is incompatible with .cfi directives (the guarded
   return is elided).  */
2672 #ifdef MIPS_DEBUGGING_INFO
/* Cached answer from a previous call.  */
2676 if (saved_do_cfi_asm != 0)
2677 return saved_do_cfi_asm > 0;
2679 /* Assume failure for a moment. */
2680 saved_do_cfi_asm = -1;
2682 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
2684 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
2687 /* Make sure the personality encoding is one the assembler can support.
2688 In particular, aligned addresses can't be handled. */
2689 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
2690 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
2692 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
2693 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
2696 /* If we can't get the assembler to emit only .debug_frame, and we don't need
2697 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
2698 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
2699 && !flag_unwind_tables && !flag_exceptions
2700 && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
/* All checks passed: remember the positive answer.  */
2704 saved_do_cfi_asm = 1;
/* Gate function for pass_dwarf2_frame: run the pass only when the target
   has an RTL prologue and dwarf2 frame info is wanted.  */
2709 gate_dwarf2_frame (void)
2711 #ifndef HAVE_prologue
2712 /* Targets which still implement the prologue in assembler text
2713 cannot use the generic dwarf2 unwinding. */
2717 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
2718 from the optimized shrink-wrapping annotations that we will compute.
2719 For now, only produce the CFI notes for dwarf2. */
2720 return dwarf2out_do_frame ();
/* RTL pass descriptor: annotates each function with NOTE_INSN_CFI notes
   (gate_dwarf2_frame / execute_dwarf2_frame above).  */
2723 struct rtl_opt_pass pass_dwarf2_frame =
2727 "dwarf2", /* name */
2728 gate_dwarf2_frame, /* gate */
2729 execute_dwarf2_frame, /* execute */
2732 0, /* static_pass_number */
2733 TV_FINAL, /* tv_id */
2734 0, /* properties_required */
2735 0, /* properties_provided */
2736 0, /* properties_destroyed */
2737 0, /* todo_flags_start */
2738 0 /* todo_flags_finish */
2742 #include "gt-dwarf2cfi.h"