1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
58 /* Maximum size (in bytes) of an artificially generated label. */
59 #define MAX_ARTIFICIAL_LABEL_BYTES 30
61 /* A collected description of an entire row of the abstract CFI table. */
62 typedef struct GTY(()) dw_cfi_row_struct
64 /* The expression that computes the CFA, expressed in two different ways.
65 The CFA member for the simple cases, and the full CFI expression for
66 the complex cases. The later will be a DW_CFA_cfa_expression. */
70 /* The expressions for any register column that is saved. */
73 /* The value of any DW_CFA_GNU_args_size. */
74 HOST_WIDE_INT args_size;
77 typedef dw_cfi_row *dw_cfi_row_ref;
79 /* A vector of call frame insns for the CIE. */
82 /* The state of the first row of the FDE table, which includes the
83 state provided by the CIE. */
84 static GTY(()) dw_cfi_row_ref cie_cfi_row;
86 static GTY(()) unsigned long dwarf2out_cfi_label_num;
88 /* The insn after which a new CFI note should be emitted. */
89 static rtx add_cfi_insn;
91 /* When non-null, add_cfi will add the CFI to this vector. */
92 static cfi_vec *add_cfi_vec;
94 /* True if remember_state should be emitted before following CFI directive. */
95 static bool emit_cfa_remember;
97 /* True if any CFI directives were emitted at the current insn. */
98 static bool any_cfis_emitted;
100 /* Short-hand for commonly used register numbers. */
101 static unsigned dw_stack_pointer_regnum;
102 static unsigned dw_frame_pointer_regnum;
105 static void dwarf2out_cfi_begin_epilogue (rtx insn);
106 static void dwarf2out_frame_debug_restore_state (void);
109 /* Hook used by __throw. */
112 expand_builtin_dwarf_sp_column (void)
114 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
115 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
118 /* MEM is a memory reference for the register size table, each element of
119 which has mode MODE. Initialize column C as a return address column. */
122 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
124 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
125 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
126 emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
129 /* Generate code to initialize the register size table. */
132 expand_builtin_init_dwarf_reg_sizes (tree address)
135 enum machine_mode mode = TYPE_MODE (char_type_node);
136 rtx addr = expand_normal (address);
137 rtx mem = gen_rtx_MEM (BLKmode, addr);
138 bool wrote_return_column = false;
140 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
142 unsigned int dnum = DWARF_FRAME_REGNUM (i);
143 unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
145 if (rnum < DWARF_FRAME_REGISTERS)
147 HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
148 enum machine_mode save_mode = reg_raw_mode[i];
151 if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
152 save_mode = choose_hard_reg_mode (i, 1, true);
153 if (dnum == DWARF_FRAME_RETURN_COLUMN)
155 if (save_mode == VOIDmode)
157 wrote_return_column = true;
159 size = GET_MODE_SIZE (save_mode);
163 emit_move_insn (adjust_address (mem, mode, offset),
164 gen_int_mode (size, mode));
168 if (!wrote_return_column)
169 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
171 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
172 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
175 targetm.init_dwarf_reg_sizes_extra (address);
178 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
180 static inline HOST_WIDE_INT
181 div_data_align (HOST_WIDE_INT off)
183 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
184 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
188 /* Return true if we need a signed version of a given opcode
189 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
192 need_data_align_sf_opcode (HOST_WIDE_INT off)
194 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
197 /* Return a pointer to a newly allocated Call Frame Instruction. */
199 static inline dw_cfi_ref
202 dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();
204 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
205 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
210 /* Return a newly allocated CFI row, with no defined data. */
212 static dw_cfi_row_ref
215 dw_cfi_row_ref row = ggc_alloc_cleared_dw_cfi_row ();
217 row->cfa.reg = INVALID_REGNUM;
222 /* Return a copy of an existing CFI row. */
224 static dw_cfi_row_ref
225 copy_cfi_row (dw_cfi_row_ref src)
227 dw_cfi_row_ref dst = ggc_alloc_dw_cfi_row ();
230 dst->reg_save = VEC_copy (dw_cfi_ref, gc, src->reg_save);
235 /* Free an allocated CFI row. */
238 free_cfi_row (dw_cfi_row_ref row)
242 VEC_free (dw_cfi_ref, gc, row->reg_save);
247 /* Generate a new label for the CFI info to refer to. */
250 dwarf2out_cfi_label (void)
252 int num = dwarf2out_cfi_label_num++;
255 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
257 return xstrdup (label);
260 /* Add CFI either to the current insn stream or to a vector, or both. */
263 add_cfi (dw_cfi_ref cfi)
265 if (emit_cfa_remember)
267 dw_cfi_ref cfi_remember;
269 /* Emit the state save. */
270 emit_cfa_remember = false;
271 cfi_remember = new_cfi ();
272 cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
273 add_cfi (cfi_remember);
276 any_cfis_emitted = true;
278 if (add_cfi_insn != NULL)
280 add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
281 NOTE_CFI (add_cfi_insn) = cfi;
284 if (add_cfi_vec != NULL)
285 VEC_safe_push (dw_cfi_ref, gc, *add_cfi_vec, cfi);
288 /* Perform ROW->REG_SAVE[COLUMN] = CFI. CFI may be null, indicating
289 that the register column is no longer saved. */
292 update_row_reg_save (dw_cfi_row_ref row, unsigned column, dw_cfi_ref cfi)
294 if (VEC_length (dw_cfi_ref, row->reg_save) <= column)
295 VEC_safe_grow_cleared (dw_cfi_ref, gc, row->reg_save, column + 1);
296 VEC_replace (dw_cfi_ref, row->reg_save, column, cfi);
299 /* This function fills in aa dw_cfa_location structure from a dwarf location
300 descriptor sequence. */
303 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
305 struct dw_loc_descr_struct *ptr;
307 cfa->base_offset = 0;
311 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
313 enum dwarf_location_atom op = ptr->dw_loc_opc;
349 cfa->reg = op - DW_OP_reg0;
352 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
386 cfa->reg = op - DW_OP_breg0;
387 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
390 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
391 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
396 case DW_OP_plus_uconst:
397 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
405 /* Find the previous value for the CFA, iteratively. CFI is the opcode
406 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
407 one level of remember/restore state processing. */
410 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
412 switch (cfi->dw_cfi_opc)
414 case DW_CFA_def_cfa_offset:
415 case DW_CFA_def_cfa_offset_sf:
416 loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
418 case DW_CFA_def_cfa_register:
419 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
422 case DW_CFA_def_cfa_sf:
423 loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
424 loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
426 case DW_CFA_def_cfa_expression:
427 get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
430 case DW_CFA_remember_state:
431 gcc_assert (!remember->in_use);
433 remember->in_use = 1;
435 case DW_CFA_restore_state:
436 gcc_assert (remember->in_use);
438 remember->in_use = 0;
446 /* The current, i.e. most recently generated, row of the CFI table. */
447 static dw_cfi_row_ref cur_row;
449 /* The row state from a preceeding DW_CFA_remember_state. */
450 static dw_cfi_row_ref remember_row;
452 /* The register used for saving registers to the stack, and its offset
454 static dw_cfa_location cfa_store;
456 /* A temporary register holding an integral value used in adjusting SP
457 or setting up the store_reg. The "offset" field holds the integer
458 value, not an offset. */
459 static dw_cfa_location cfa_temp;
461 /* The (really) current value for DW_CFA_GNU_args_size. We delay actually
462 emitting this data, i.e. updating CUR_ROW, without async unwind. */
463 static HOST_WIDE_INT args_size;
465 /* Determine if two dw_cfa_location structures define the same data. */
468 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
470 return (loc1->reg == loc2->reg
471 && loc1->offset == loc2->offset
472 && loc1->indirect == loc2->indirect
473 && (loc1->indirect == 0
474 || loc1->base_offset == loc2->base_offset));
477 /* This routine does the actual work. The CFA is now calculated from
478 the dw_cfa_location structure. */
481 def_cfa_1 (dw_cfa_location *loc_p)
484 dw_cfa_location loc = *loc_p;
486 if (cfa_store.reg == loc.reg && loc.indirect == 0)
487 cfa_store.offset = loc.offset;
489 /* If nothing changed, no need to issue any call frame instructions. */
490 if (cfa_equal_p (&loc, &cur_row->cfa))
495 if (loc.reg == cur_row->cfa.reg && !loc.indirect && !cur_row->cfa.indirect)
497 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
498 the CFA register did not change but the offset did. The data
499 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
500 in the assembler via the .cfi_def_cfa_offset directive. */
502 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
504 cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
505 cfi->dw_cfi_oprnd1.dw_cfi_offset = loc.offset;
508 #ifndef MIPS_DEBUGGING_INFO /* SGI dbx thinks this means no offset. */
509 else if (loc.offset == cur_row->cfa.offset
510 && cur_row->cfa.reg != INVALID_REGNUM
512 && !cur_row->cfa.indirect)
514 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
515 indicating the CFA register has changed to <register> but the
516 offset has not changed. */
517 cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
518 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
522 else if (loc.indirect == 0)
524 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
525 indicating the CFA register has changed to <register> with
526 the specified offset. The data factoring for DW_CFA_def_cfa_sf
527 happens in output_cfi, or in the assembler via the .cfi_def_cfa
530 cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
532 cfi->dw_cfi_opc = DW_CFA_def_cfa;
533 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
534 cfi->dw_cfi_oprnd2.dw_cfi_offset = loc.offset;
538 /* Construct a DW_CFA_def_cfa_expression instruction to
539 calculate the CFA using a full location expression since no
540 register-offset pair is available. */
541 struct dw_loc_descr_struct *loc_list;
543 cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
544 loc_list = build_cfa_loc (&loc, 0);
545 cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
547 cur_row->cfa_cfi = cfi;
554 /* Add the CFI for saving a register. REG is the CFA column number.
555 If SREG is -1, the register is saved at OFFSET from the CFA;
556 otherwise it is saved in SREG. */
559 reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
561 dw_fde_ref fde = cfun ? cfun->fde : NULL;
562 dw_cfi_ref cfi = new_cfi ();
564 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
566 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
568 && fde->stack_realign
569 && sreg == INVALID_REGNUM)
571 cfi->dw_cfi_opc = DW_CFA_expression;
572 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
573 cfi->dw_cfi_oprnd2.dw_cfi_loc
574 = build_cfa_aligned_loc (&cur_row->cfa, offset,
575 fde->stack_realignment);
577 else if (sreg == INVALID_REGNUM)
579 if (need_data_align_sf_opcode (offset))
580 cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
581 else if (reg & ~0x3f)
582 cfi->dw_cfi_opc = DW_CFA_offset_extended;
584 cfi->dw_cfi_opc = DW_CFA_offset;
585 cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
587 else if (sreg == reg)
589 /* While we could emit something like DW_CFA_same_value or
590 DW_CFA_restore, we never expect to see something like that
591 in a prologue. This is more likely to be a bug. A backend
592 can always bypass this by using REG_CFA_RESTORE directly. */
597 cfi->dw_cfi_opc = DW_CFA_register;
598 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
602 update_row_reg_save (cur_row, reg, cfi);
605 /* Given a SET, calculate the amount of stack adjustment it
609 stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
610 HOST_WIDE_INT cur_offset)
612 const_rtx src = SET_SRC (pattern);
613 const_rtx dest = SET_DEST (pattern);
614 HOST_WIDE_INT offset = 0;
617 if (dest == stack_pointer_rtx)
619 code = GET_CODE (src);
621 /* Assume (set (reg sp) (reg whatever)) sets args_size
623 if (code == REG && src != stack_pointer_rtx)
625 offset = -cur_args_size;
626 #ifndef STACK_GROWS_DOWNWARD
629 return offset - cur_offset;
632 if (! (code == PLUS || code == MINUS)
633 || XEXP (src, 0) != stack_pointer_rtx
634 || !CONST_INT_P (XEXP (src, 1)))
637 /* (set (reg sp) (plus (reg sp) (const_int))) */
638 offset = INTVAL (XEXP (src, 1));
644 if (MEM_P (src) && !MEM_P (dest))
648 /* (set (mem (pre_dec (reg sp))) (foo)) */
649 src = XEXP (dest, 0);
650 code = GET_CODE (src);
656 if (XEXP (src, 0) == stack_pointer_rtx)
658 rtx val = XEXP (XEXP (src, 1), 1);
659 /* We handle only adjustments by constant amount. */
660 gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
661 && CONST_INT_P (val));
662 offset = -INTVAL (val);
669 if (XEXP (src, 0) == stack_pointer_rtx)
671 offset = GET_MODE_SIZE (GET_MODE (dest));
678 if (XEXP (src, 0) == stack_pointer_rtx)
680 offset = -GET_MODE_SIZE (GET_MODE (dest));
695 /* Precomputed args_size for CODE_LABELs and BARRIERs preceeding them,
696 indexed by INSN_UID. */
698 static HOST_WIDE_INT *barrier_args_size;
700 /* Helper function for compute_barrier_args_size. Handle one insn. */
703 compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
704 VEC (rtx, heap) **next)
706 HOST_WIDE_INT offset = 0;
709 if (! RTX_FRAME_RELATED_P (insn))
711 if (prologue_epilogue_contains (insn))
713 else if (GET_CODE (PATTERN (insn)) == SET)
714 offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
715 else if (GET_CODE (PATTERN (insn)) == PARALLEL
716 || GET_CODE (PATTERN (insn)) == SEQUENCE)
718 /* There may be stack adjustments inside compound insns. Search
720 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
721 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
722 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
723 cur_args_size, offset);
728 rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
732 expr = XEXP (expr, 0);
733 if (GET_CODE (expr) == PARALLEL
734 || GET_CODE (expr) == SEQUENCE)
735 for (i = 1; i < XVECLEN (expr, 0); i++)
737 rtx elem = XVECEXP (expr, 0, i);
739 if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
740 offset += stack_adjust_offset (elem, cur_args_size, offset);
745 #ifndef STACK_GROWS_DOWNWARD
749 cur_args_size += offset;
750 if (cur_args_size < 0)
755 rtx dest = JUMP_LABEL (insn);
759 if (barrier_args_size [INSN_UID (dest)] < 0)
761 barrier_args_size [INSN_UID (dest)] = cur_args_size;
762 VEC_safe_push (rtx, heap, *next, dest);
767 return cur_args_size;
770 /* Walk the whole function and compute args_size on BARRIERs. */
773 compute_barrier_args_size (void)
775 int max_uid = get_max_uid (), i;
777 VEC (rtx, heap) *worklist, *next, *tmp;
779 barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
780 for (i = 0; i < max_uid; i++)
781 barrier_args_size[i] = -1;
783 worklist = VEC_alloc (rtx, heap, 20);
784 next = VEC_alloc (rtx, heap, 20);
786 barrier_args_size[INSN_UID (insn)] = 0;
787 VEC_quick_push (rtx, worklist, insn);
790 while (!VEC_empty (rtx, worklist))
792 rtx prev, body, first_insn;
793 HOST_WIDE_INT cur_args_size;
795 first_insn = insn = VEC_pop (rtx, worklist);
796 cur_args_size = barrier_args_size[INSN_UID (insn)];
797 prev = prev_nonnote_insn (insn);
798 if (prev && BARRIER_P (prev))
799 barrier_args_size[INSN_UID (prev)] = cur_args_size;
801 for (; insn; insn = NEXT_INSN (insn))
803 if (INSN_DELETED_P (insn) || NOTE_P (insn))
805 if (BARRIER_P (insn))
810 if (insn == first_insn)
812 else if (barrier_args_size[INSN_UID (insn)] < 0)
814 barrier_args_size[INSN_UID (insn)] = cur_args_size;
819 /* The insns starting with this label have been
820 already scanned or are in the worklist. */
825 body = PATTERN (insn);
826 if (GET_CODE (body) == SEQUENCE)
828 HOST_WIDE_INT dest_args_size = cur_args_size;
829 for (i = 1; i < XVECLEN (body, 0); i++)
830 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
831 && INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
833 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
834 dest_args_size, &next);
837 = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
838 cur_args_size, &next);
840 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
841 compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
842 dest_args_size, &next);
845 = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
846 cur_args_size, &next);
850 = compute_barrier_args_size_1 (insn, cur_args_size, &next);
854 if (VEC_empty (rtx, next))
857 /* Swap WORKLIST with NEXT and truncate NEXT for next iteration. */
861 VEC_truncate (rtx, next, 0);
864 VEC_free (rtx, heap, worklist);
865 VEC_free (rtx, heap, next);
868 /* Add a CFI to update the running total of the size of arguments
869 pushed onto the stack. */
872 dwarf2out_args_size (HOST_WIDE_INT size)
876 if (size == cur_row->args_size)
879 cur_row->args_size = size;
882 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
883 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
887 /* Record a stack adjustment of OFFSET bytes. */
890 dwarf2out_stack_adjust (HOST_WIDE_INT offset)
892 dw_cfa_location loc = cur_row->cfa;
894 if (loc.reg == dw_stack_pointer_regnum)
895 loc.offset += offset;
897 if (cfa_store.reg == dw_stack_pointer_regnum)
898 cfa_store.offset += offset;
900 /* ??? The assumption seems to be that if A_O_A, the only CFA adjustments
901 involving the stack pointer are inside the prologue and marked as
902 RTX_FRAME_RELATED_P. That said, should we not verify this assumption
903 by *asserting* A_O_A at this point? Why else would we have a change
904 to the stack pointer? */
905 if (ACCUMULATE_OUTGOING_ARGS)
908 #ifndef STACK_GROWS_DOWNWARD
917 if (flag_asynchronous_unwind_tables)
918 dwarf2out_args_size (args_size);
921 /* Check INSN to see if it looks like a push or a stack adjustment, and
922 make a note of it if it does. EH uses this information to find out
923 how much extra space it needs to pop off the stack. */
926 dwarf2out_notice_stack_adjust (rtx insn, bool after_p)
928 HOST_WIDE_INT offset;
931 /* Don't handle epilogues at all. Certainly it would be wrong to do so
932 with this function. Proper support would require all frame-related
933 insns to be marked, and to be able to handle saving state around
934 epilogues textually in the middle of the function. */
935 if (prologue_epilogue_contains (insn))
938 /* If INSN is an instruction from target of an annulled branch, the
939 effects are for the target only and so current argument size
940 shouldn't change at all. */
942 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
943 && INSN_FROM_TARGET_P (insn))
946 /* If only calls can throw, and we have a frame pointer,
947 save up adjustments until we see the CALL_INSN. */
948 if (!flag_asynchronous_unwind_tables
949 && cur_row->cfa.reg != dw_stack_pointer_regnum)
951 if (CALL_P (insn) && !after_p)
953 /* Extract the size of the args from the CALL rtx itself. */
954 insn = PATTERN (insn);
955 if (GET_CODE (insn) == PARALLEL)
956 insn = XVECEXP (insn, 0, 0);
957 if (GET_CODE (insn) == SET)
958 insn = SET_SRC (insn);
959 gcc_assert (GET_CODE (insn) == CALL);
960 dwarf2out_args_size (INTVAL (XEXP (insn, 1)));
965 if (CALL_P (insn) && !after_p)
967 if (!flag_asynchronous_unwind_tables)
968 dwarf2out_args_size (args_size);
971 else if (BARRIER_P (insn))
973 /* Don't call compute_barrier_args_size () if the only
974 BARRIER is at the end of function. */
975 if (barrier_args_size == NULL && next_nonnote_insn (insn))
976 compute_barrier_args_size ();
977 if (barrier_args_size == NULL)
981 offset = barrier_args_size[INSN_UID (insn)];
987 #ifndef STACK_GROWS_DOWNWARD
991 else if (GET_CODE (PATTERN (insn)) == SET)
992 offset = stack_adjust_offset (PATTERN (insn), args_size, 0);
993 else if (GET_CODE (PATTERN (insn)) == PARALLEL
994 || GET_CODE (PATTERN (insn)) == SEQUENCE)
996 /* There may be stack adjustments inside compound insns. Search
998 for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
999 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1000 offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
1009 dwarf2out_stack_adjust (offset);
1012 /* We delay emitting a register save until either (a) we reach the end
1013 of the prologue or (b) the register is clobbered. This clusters
1014 register saves so that there are fewer pc advances. */
1016 struct GTY(()) queued_reg_save {
1017 struct queued_reg_save *next;
1019 HOST_WIDE_INT cfa_offset;
1023 static GTY(()) struct queued_reg_save *queued_reg_saves;
1025 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
1026 typedef struct GTY(()) reg_saved_in_data {
1029 } reg_saved_in_data;
1031 DEF_VEC_O (reg_saved_in_data);
1032 DEF_VEC_ALLOC_O (reg_saved_in_data, gc);
1034 /* A set of registers saved in other registers. This is implemented as
1035 a flat array because it normally contains zero or 1 entry, depending
1036 on the target. IA-64 is the big spender here, using a maximum of
1038 static GTY(()) VEC(reg_saved_in_data, gc) *regs_saved_in_regs;
1040 static GTY(()) reg_saved_in_data *cie_return_save;
1042 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
1043 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
1044 used in places where rtl is prohibited. */
1046 static inline unsigned
1047 dwf_regno (const_rtx reg)
1049 return DWARF_FRAME_REGNUM (REGNO (reg));
1052 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1055 compare_reg_or_pc (rtx x, rtx y)
1057 if (REG_P (x) && REG_P (y))
1058 return REGNO (x) == REGNO (y);
1062 /* Record SRC as being saved in DEST. DEST may be null to delete an
1063 existing entry. SRC may be a register or PC_RTX. */
1066 record_reg_saved_in_reg (rtx dest, rtx src)
1068 reg_saved_in_data *elt;
1071 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, elt)
1072 if (compare_reg_or_pc (elt->orig_reg, src))
1075 VEC_unordered_remove(reg_saved_in_data, regs_saved_in_regs, i);
1077 elt->saved_in_reg = dest;
1084 elt = VEC_safe_push(reg_saved_in_data, gc, regs_saved_in_regs, NULL);
1085 elt->orig_reg = src;
1086 elt->saved_in_reg = dest;
1089 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1090 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1093 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1095 struct queued_reg_save *q;
1097 /* Duplicates waste space, but it's also necessary to remove them
1098 for correctness, since the queue gets output in reverse order. */
1099 for (q = queued_reg_saves; q != NULL; q = q->next)
1100 if (compare_reg_or_pc (q->reg, reg))
1105 q = ggc_alloc_queued_reg_save ();
1106 q->next = queued_reg_saves;
1107 queued_reg_saves = q;
1111 q->cfa_offset = offset;
1112 q->saved_reg = sreg;
1115 /* Output all the entries in QUEUED_REG_SAVES. */
1118 dwarf2out_flush_queued_reg_saves (void)
1120 struct queued_reg_save *q;
1122 for (q = queued_reg_saves; q; q = q->next)
1124 unsigned int reg, sreg;
1126 record_reg_saved_in_reg (q->saved_reg, q->reg);
1128 if (q->reg == pc_rtx)
1129 reg = DWARF_FRAME_RETURN_COLUMN;
1131 reg = dwf_regno (q->reg);
1133 sreg = dwf_regno (q->saved_reg);
1135 sreg = INVALID_REGNUM;
1136 reg_save (reg, sreg, q->cfa_offset);
1139 queued_reg_saves = NULL;
1142 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1143 location for? Or, does it clobber a register which we've previously
1144 said that some other register is saved in, and for which we now
1145 have a new location for? */
1148 clobbers_queued_reg_save (const_rtx insn)
1150 struct queued_reg_save *q;
1152 for (q = queued_reg_saves; q; q = q->next)
1155 reg_saved_in_data *rir;
1157 if (modified_in_p (q->reg, insn))
1160 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1161 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1162 && modified_in_p (rir->saved_in_reg, insn))
1169 /* What register, if any, is currently saved in REG? */
1172 reg_saved_in (rtx reg)
1174 unsigned int regn = REGNO (reg);
1175 struct queued_reg_save *q;
1176 reg_saved_in_data *rir;
1179 for (q = queued_reg_saves; q; q = q->next)
1180 if (q->saved_reg && regn == REGNO (q->saved_reg))
1183 FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
1184 if (regn == REGNO (rir->saved_in_reg))
1185 return rir->orig_reg;
1190 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1193 dwarf2out_frame_debug_def_cfa (rtx pat)
1195 dw_cfa_location loc;
1197 memset (&loc, 0, sizeof (loc));
1199 switch (GET_CODE (pat))
1202 loc.reg = dwf_regno (XEXP (pat, 0));
1203 loc.offset = INTVAL (XEXP (pat, 1));
1207 loc.reg = dwf_regno (pat);
1212 pat = XEXP (pat, 0);
1213 if (GET_CODE (pat) == PLUS)
1215 loc.base_offset = INTVAL (XEXP (pat, 1));
1216 pat = XEXP (pat, 0);
1218 loc.reg = dwf_regno (pat);
1222 /* Recurse and define an expression. */
1229 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1232 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1234 dw_cfa_location loc = cur_row->cfa;
1237 gcc_assert (GET_CODE (pat) == SET);
1238 dest = XEXP (pat, 0);
1239 src = XEXP (pat, 1);
1241 switch (GET_CODE (src))
1244 gcc_assert (dwf_regno (XEXP (src, 0)) == loc.reg);
1245 loc.offset -= INTVAL (XEXP (src, 1));
1255 loc.reg = dwf_regno (dest);
1256 gcc_assert (loc.indirect == 0);
1261 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1264 dwarf2out_frame_debug_cfa_offset (rtx set)
1266 HOST_WIDE_INT offset;
1267 rtx src, addr, span;
1268 unsigned int sregno;
1270 src = XEXP (set, 1);
1271 addr = XEXP (set, 0);
1272 gcc_assert (MEM_P (addr));
1273 addr = XEXP (addr, 0);
1275 /* As documented, only consider extremely simple addresses. */
1276 switch (GET_CODE (addr))
1279 gcc_assert (dwf_regno (addr) == cur_row->cfa.reg);
1280 offset = -cur_row->cfa.offset;
1283 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_row->cfa.reg);
1284 offset = INTVAL (XEXP (addr, 1)) - cur_row->cfa.offset;
1293 sregno = DWARF_FRAME_RETURN_COLUMN;
1297 span = targetm.dwarf_register_span (src);
1298 sregno = dwf_regno (src);
1301 /* ??? We'd like to use queue_reg_save, but we need to come up with
1302 a different flushing heuristic for epilogues. */
1304 reg_save (sregno, INVALID_REGNUM, offset);
1307 /* We have a PARALLEL describing where the contents of SRC live.
1308 Queue register saves for each piece of the PARALLEL. */
1311 HOST_WIDE_INT span_offset = offset;
1313 gcc_assert (GET_CODE (span) == PARALLEL);
1315 limit = XVECLEN (span, 0);
1316 for (par_index = 0; par_index < limit; par_index++)
1318 rtx elem = XVECEXP (span, 0, par_index);
1320 sregno = dwf_regno (src);
1321 reg_save (sregno, INVALID_REGNUM, span_offset);
1322 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1327 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1330 dwarf2out_frame_debug_cfa_register (rtx set)
1333 unsigned sregno, dregno;
1335 src = XEXP (set, 1);
1336 dest = XEXP (set, 0);
1338 record_reg_saved_in_reg (dest, src);
1340 sregno = DWARF_FRAME_RETURN_COLUMN;
1342 sregno = dwf_regno (src);
1344 dregno = dwf_regno (dest);
1346 /* ??? We'd like to use queue_reg_save, but we need to come up with
1347 a different flushing heuristic for epilogues. */
1348 reg_save (sregno, dregno, 0);
1351 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1354 dwarf2out_frame_debug_cfa_expression (rtx set)
1356 rtx src, dest, span;
1357 dw_cfi_ref cfi = new_cfi ();
1360 dest = SET_DEST (set);
1361 src = SET_SRC (set);
1363 gcc_assert (REG_P (src));
1364 gcc_assert (MEM_P (dest));
1366 span = targetm.dwarf_register_span (src);
1369 regno = dwf_regno (src);
1371 cfi->dw_cfi_opc = DW_CFA_expression;
1372 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1373 cfi->dw_cfi_oprnd2.dw_cfi_loc
1374 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1375 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1377 /* ??? We'd like to use queue_reg_save, were the interface different,
1378 and, as above, we could manage flushing for epilogues. */
1380 update_row_reg_save (cur_row, regno, cfi);
1383 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1386 dwarf2out_frame_debug_cfa_restore (rtx reg)
1388 dw_cfi_ref cfi = new_cfi ();
1389 unsigned int regno = dwf_regno (reg);
1391 cfi->dw_cfi_opc = (regno & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
1392 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1395 update_row_reg_save (cur_row, regno, NULL);
1398 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1399 ??? Perhaps we should note in the CIE where windows are saved (instead of
1400 assuming 0(cfa)) and what registers are in the window. */
1403 dwarf2out_frame_debug_cfa_window_save (void)
1405 dw_cfi_ref cfi = new_cfi ();
1407 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1411 /* Record call frame debugging information for an expression EXPR,
1412 which either sets SP or FP (adjusting how we calculate the frame
1413 address) or saves a register to the stack or another register.
1414 LABEL indicates the address of EXPR.
1416 This function encodes a state machine mapping rtxes to actions on
1417 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1418 users need not read the source code.
1420 The High-Level Picture
1422 Changes in the register we use to calculate the CFA: Currently we
1423 assume that if you copy the CFA register into another register, we
1424 should take the other one as the new CFA register; this seems to
1425 work pretty well. If it's wrong for some target, it's simple
1426 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1428 Changes in the register we use for saving registers to the stack:
1429 This is usually SP, but not always. Again, we deduce that if you
1430 copy SP into another register (and SP is not the CFA register),
1431 then the new register is the one we will be using for register
1432 saves. This also seems to work.
1434 Register saves: There's not much guesswork about this one; if
1435 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1436 register save, and the register used to calculate the destination
1437 had better be the one we think we're using for this purpose.
1438 It's also assumed that a copy from a call-saved register to another
1439 register is saving that register if RTX_FRAME_RELATED_P is set on
1440 that instruction. If the copy is from a call-saved register to
1441 the *same* register, that means that the register is now the same
1442 value as in the caller.
1444 Except: If the register being saved is the CFA register, and the
1445 offset is nonzero, we are saving the CFA, so we assume we have to
1446 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1447 the intent is to save the value of SP from the previous frame.
1449 In addition, if a register has previously been saved to a different
1452 Invariants / Summaries of Rules
1454 cfa current rule for calculating the CFA. It usually
1455 consists of a register and an offset. This is
1456 actually stored in cur_row->cfa, but abbreviated
1457 for the purposes of this documentation.
1458 cfa_store register used by prologue code to save things to the stack
1459 cfa_store.offset is the offset from the value of
1460 cfa_store.reg to the actual CFA
1461 cfa_temp register holding an integral value. cfa_temp.offset
1462 stores the value, which will be used to adjust the
1463 stack pointer. cfa_temp is also used like cfa_store,
1464 to track stores to the stack via fp or a temp reg.
1466 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1467 with cfa.reg as the first operand changes the cfa.reg and its
1468 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1471 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1472 expression yielding a constant. This sets cfa_temp.reg
1473 and cfa_temp.offset.
1475 Rule 5: Create a new register cfa_store used to save items to the
1478 Rules 10-14: Save a register to the stack. Define offset as the
1479 difference of the original location and cfa_store's
1480 location (or cfa_temp's location if cfa_temp is used).
1482 Rules 16-20: If AND operation happens on sp in prologue, we assume
1483 stack is realigned. We will use a group of DW_OP_XXX
1484 expressions to represent the location of the stored
1485 register instead of CFA+offset.
1489 "{a,b}" indicates a choice of a xor b.
1490 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1493 (set <reg1> <reg2>:cfa.reg)
1494 effects: cfa.reg = <reg1>
1495 cfa.offset unchanged
1496 cfa_temp.reg = <reg1>
1497 cfa_temp.offset = cfa.offset
1500 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1501 {<const_int>,<reg>:cfa_temp.reg}))
1502 effects: cfa.reg = sp if fp used
1503 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1504 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1505 if cfa_store.reg==sp
1508 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1509 effects: cfa.reg = fp
1510 cfa_offset += +/- <const_int>
1513 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1514 constraints: <reg1> != fp
1516 effects: cfa.reg = <reg1>
1517 cfa_temp.reg = <reg1>
1518 cfa_temp.offset = cfa.offset
1521 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1522 constraints: <reg1> != fp
1524 effects: cfa_store.reg = <reg1>
1525 cfa_store.offset = cfa.offset - cfa_temp.offset
1528 (set <reg> <const_int>)
1529 effects: cfa_temp.reg = <reg>
1530 cfa_temp.offset = <const_int>
1533 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1534 effects: cfa_temp.reg = <reg1>
1535 cfa_temp.offset |= <const_int>
1538 (set <reg> (high <exp>))
1542 (set <reg> (lo_sum <exp> <const_int>))
1543 effects: cfa_temp.reg = <reg>
1544 cfa_temp.offset = <const_int>
1547 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1548 effects: cfa_store.offset -= <const_int>
1549 cfa.offset = cfa_store.offset if cfa.reg == sp
1551 cfa.base_offset = -cfa_store.offset
1554 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1555 effects: cfa_store.offset += -/+ mode_size(mem)
1556 cfa.offset = cfa_store.offset if cfa.reg == sp
1558 cfa.base_offset = -cfa_store.offset
1561 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1564 effects: cfa.reg = <reg1>
1565 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1568 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1569 effects: cfa.reg = <reg1>
1570 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1573 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1574 effects: cfa.reg = <reg1>
1575 cfa.base_offset = -cfa_temp.offset
1576 cfa_temp.offset -= mode_size(mem)
1579 (set <reg> {unspec, unspec_volatile})
1580 effects: target-dependent
1583 (set sp (and: sp <const_int>))
1584 constraints: cfa_store.reg == sp
1585 effects: cfun->fde.stack_realign = 1
1586 cfa_store.offset = 0
1587 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1590 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1591 effects: cfa_store.offset += -/+ mode_size(mem)
1594 (set (mem ({pre_inc, pre_dec} sp)) fp)
1595 constraints: fde->stack_realign == 1
1596 effects: cfa_store.offset = 0
1597 cfa.reg != HARD_FRAME_POINTER_REGNUM
1600 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1601 constraints: fde->stack_realign == 1
1603 && cfa.indirect == 0
1604 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1605 effects: Use DW_CFA_def_cfa_expression to define cfa
1606 cfa.reg == fde->drap_reg */
1609 dwarf2out_frame_debug_expr (rtx expr)
1611 dw_cfa_location cfa = cur_row->cfa;
1612 rtx src, dest, span;
1613 HOST_WIDE_INT offset;
1616 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1617 the PARALLEL independently. The first element is always processed if
1618 it is a SET. This is for backward compatibility. Other elements
1619 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1620 flag is set in them. */
1621 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1624 int limit = XVECLEN (expr, 0);
1627 /* PARALLELs have strict read-modify-write semantics, so we
1628 ought to evaluate every rvalue before changing any lvalue.
1629 It's cumbersome to do that in general, but there's an
1630 easy approximation that is enough for all current users:
1631 handle register saves before register assignments. */
1632 if (GET_CODE (expr) == PARALLEL)
1633 for (par_index = 0; par_index < limit; par_index++)
1635 elem = XVECEXP (expr, 0, par_index);
1636 if (GET_CODE (elem) == SET
1637 && MEM_P (SET_DEST (elem))
1638 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1639 dwarf2out_frame_debug_expr (elem);
1642 for (par_index = 0; par_index < limit; par_index++)
1644 elem = XVECEXP (expr, 0, par_index);
1645 if (GET_CODE (elem) == SET
1646 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1647 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1648 dwarf2out_frame_debug_expr (elem);
1649 else if (GET_CODE (elem) == SET
1651 && !RTX_FRAME_RELATED_P (elem))
1653 /* Stack adjustment combining might combine some post-prologue
1654 stack adjustment into a prologue stack adjustment. */
1655 HOST_WIDE_INT offset = stack_adjust_offset (elem, args_size, 0);
1658 dwarf2out_stack_adjust (offset);
1664 gcc_assert (GET_CODE (expr) == SET);
1666 src = SET_SRC (expr);
1667 dest = SET_DEST (expr);
1671 rtx rsi = reg_saved_in (src);
1678 switch (GET_CODE (dest))
1681 switch (GET_CODE (src))
1683 /* Setting FP from SP. */
1685 if (cfa.reg == dwf_regno (src))
1688 /* Update the CFA rule wrt SP or FP. Make sure src is
1689 relative to the current CFA register.
1691 We used to require that dest be either SP or FP, but the
1692 ARM copies SP to a temporary register, and from there to
1693 FP. So we just rely on the backends to only set
1694 RTX_FRAME_RELATED_P on appropriate insns. */
1695 cfa.reg = dwf_regno (dest);
1696 cfa_temp.reg = cfa.reg;
1697 cfa_temp.offset = cfa.offset;
1701 /* Saving a register in a register. */
1702 gcc_assert (!fixed_regs [REGNO (dest)]
1703 /* For the SPARC and its register window. */
1704 || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));
1706 /* After stack is aligned, we can only save SP in FP
1707 if drap register is used. In this case, we have
1708 to restore stack pointer with the CFA value and we
1709 don't generate this DWARF information. */
1711 && fde->stack_realign
1712 && REGNO (src) == STACK_POINTER_REGNUM)
1713 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1714 && fde->drap_reg != INVALID_REGNUM
1715 && cfa.reg != dwf_regno (src));
1717 queue_reg_save (src, dest, 0);
1724 if (dest == stack_pointer_rtx)
1728 switch (GET_CODE (XEXP (src, 1)))
1731 offset = INTVAL (XEXP (src, 1));
1734 gcc_assert (dwf_regno (XEXP (src, 1)) == cfa_temp.reg);
1735 offset = cfa_temp.offset;
1741 if (XEXP (src, 0) == hard_frame_pointer_rtx)
1743 /* Restoring SP from FP in the epilogue. */
1744 gcc_assert (cfa.reg == dw_frame_pointer_regnum);
1745 cfa.reg = dw_stack_pointer_regnum;
1747 else if (GET_CODE (src) == LO_SUM)
1748 /* Assume we've set the source reg of the LO_SUM from sp. */
1751 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
1753 if (GET_CODE (src) != MINUS)
1755 if (cfa.reg == dw_stack_pointer_regnum)
1756 cfa.offset += offset;
1757 if (cfa_store.reg == dw_stack_pointer_regnum)
1758 cfa_store.offset += offset;
1760 else if (dest == hard_frame_pointer_rtx)
1763 /* Either setting the FP from an offset of the SP,
1764 or adjusting the FP */
1765 gcc_assert (frame_pointer_needed);
1767 gcc_assert (REG_P (XEXP (src, 0))
1768 && dwf_regno (XEXP (src, 0)) == cfa.reg
1769 && CONST_INT_P (XEXP (src, 1)));
1770 offset = INTVAL (XEXP (src, 1));
1771 if (GET_CODE (src) != MINUS)
1773 cfa.offset += offset;
1774 cfa.reg = dw_frame_pointer_regnum;
1778 gcc_assert (GET_CODE (src) != MINUS);
1781 if (REG_P (XEXP (src, 0))
1782 && dwf_regno (XEXP (src, 0)) == cfa.reg
1783 && CONST_INT_P (XEXP (src, 1)))
1785 /* Setting a temporary CFA register that will be copied
1786 into the FP later on. */
1787 offset = - INTVAL (XEXP (src, 1));
1788 cfa.offset += offset;
1789 cfa.reg = dwf_regno (dest);
1790 /* Or used to save regs to the stack. */
1791 cfa_temp.reg = cfa.reg;
1792 cfa_temp.offset = cfa.offset;
1796 else if (REG_P (XEXP (src, 0))
1797 && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
1798 && XEXP (src, 1) == stack_pointer_rtx)
1800 /* Setting a scratch register that we will use instead
1801 of SP for saving registers to the stack. */
1802 gcc_assert (cfa.reg == dw_stack_pointer_regnum);
1803 cfa_store.reg = dwf_regno (dest);
1804 cfa_store.offset = cfa.offset - cfa_temp.offset;
1808 else if (GET_CODE (src) == LO_SUM
1809 && CONST_INT_P (XEXP (src, 1)))
1811 cfa_temp.reg = dwf_regno (dest);
1812 cfa_temp.offset = INTVAL (XEXP (src, 1));
1821 cfa_temp.reg = dwf_regno (dest);
1822 cfa_temp.offset = INTVAL (src);
1827 gcc_assert (REG_P (XEXP (src, 0))
1828 && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
1829 && CONST_INT_P (XEXP (src, 1)));
1831 cfa_temp.reg = dwf_regno (dest);
1832 cfa_temp.offset |= INTVAL (XEXP (src, 1));
1835 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1836 which will fill in all of the bits. */
1843 case UNSPEC_VOLATILE:
1844 /* All unspecs should be represented by REG_CFA_* notes. */
1850 /* If this AND operation happens on stack pointer in prologue,
1851 we assume the stack is realigned and we extract the
1853 if (fde && XEXP (src, 0) == stack_pointer_rtx)
1855 /* We interpret reg_save differently with stack_realign set.
1856 Thus we must flush whatever we have queued first. */
1857 dwarf2out_flush_queued_reg_saves ();
1859 gcc_assert (cfa_store.reg == dwf_regno (XEXP (src, 0)));
1860 fde->stack_realign = 1;
1861 fde->stack_realignment = INTVAL (XEXP (src, 1));
1862 cfa_store.offset = 0;
1864 if (cfa.reg != dw_stack_pointer_regnum
1865 && cfa.reg != dw_frame_pointer_regnum)
1866 fde->drap_reg = cfa.reg;
1879 /* Saving a register to the stack. Make sure dest is relative to the
1881 switch (GET_CODE (XEXP (dest, 0)))
1887 /* We can't handle variable size modifications. */
1888 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1890 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1892 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1893 && cfa_store.reg == dw_stack_pointer_regnum);
1895 cfa_store.offset += offset;
1896 if (cfa.reg == dw_stack_pointer_regnum)
1897 cfa.offset = cfa_store.offset;
1899 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1900 offset -= cfa_store.offset;
1902 offset = -cfa_store.offset;
1909 offset = GET_MODE_SIZE (GET_MODE (dest));
1910 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1913 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1914 == STACK_POINTER_REGNUM)
1915 && cfa_store.reg == dw_stack_pointer_regnum);
1917 cfa_store.offset += offset;
1919 /* Rule 18: If stack is aligned, we will use FP as a
1920 reference to represent the address of the stored
1923 && fde->stack_realign
1924 && src == hard_frame_pointer_rtx)
1926 gcc_assert (cfa.reg != dw_frame_pointer_regnum);
1927 cfa_store.offset = 0;
1930 if (cfa.reg == dw_stack_pointer_regnum)
1931 cfa.offset = cfa_store.offset;
1933 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
1934 offset += -cfa_store.offset;
1936 offset = -cfa_store.offset;
1940 /* With an offset. */
1947 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
1948 && REG_P (XEXP (XEXP (dest, 0), 0)));
1949 offset = INTVAL (XEXP (XEXP (dest, 0), 1));
1950 if (GET_CODE (XEXP (dest, 0)) == MINUS)
1953 regno = dwf_regno (XEXP (XEXP (dest, 0), 0));
1955 if (cfa.reg == regno)
1956 offset -= cfa.offset;
1957 else if (cfa_store.reg == regno)
1958 offset -= cfa_store.offset;
1961 gcc_assert (cfa_temp.reg == regno);
1962 offset -= cfa_temp.offset;
1968 /* Without an offset. */
1971 unsigned int regno = dwf_regno (XEXP (dest, 0));
1973 if (cfa.reg == regno)
1974 offset = -cfa.offset;
1975 else if (cfa_store.reg == regno)
1976 offset = -cfa_store.offset;
1979 gcc_assert (cfa_temp.reg == regno);
1980 offset = -cfa_temp.offset;
1987 gcc_assert (cfa_temp.reg == dwf_regno (XEXP (XEXP (dest, 0), 0)));
1988 offset = -cfa_temp.offset;
1989 cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
1997 /* If the source operand of this MEM operation is a memory,
1998 we only care how much stack grew. */
2003 && REGNO (src) != STACK_POINTER_REGNUM
2004 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
2005 && dwf_regno (src) == cfa.reg)
2007 /* We're storing the current CFA reg into the stack. */
2009 if (cfa.offset == 0)
2012 /* If stack is aligned, putting CFA reg into stack means
2013 we can no longer use reg + offset to represent CFA.
2014 Here we use DW_CFA_def_cfa_expression instead. The
2015 result of this expression equals to the original CFA
2018 && fde->stack_realign
2019 && cfa.indirect == 0
2020 && cfa.reg != dw_frame_pointer_regnum)
2022 dw_cfa_location cfa_exp;
2024 gcc_assert (fde->drap_reg == cfa.reg);
2026 cfa_exp.indirect = 1;
2027 cfa_exp.reg = dw_frame_pointer_regnum;
2028 cfa_exp.base_offset = offset;
2031 fde->drap_reg_saved = 1;
2033 def_cfa_1 (&cfa_exp);
2037 /* If the source register is exactly the CFA, assume
2038 we're saving SP like any other register; this happens
2041 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
2046 /* Otherwise, we'll need to look in the stack to
2047 calculate the CFA. */
2048 rtx x = XEXP (dest, 0);
2052 gcc_assert (REG_P (x));
2054 cfa.reg = dwf_regno (x);
2055 cfa.base_offset = offset;
2066 span = targetm.dwarf_register_span (src);
2068 queue_reg_save (src, NULL_RTX, offset);
2071 /* We have a PARALLEL describing where the contents of SRC live.
2072 Queue register saves for each piece of the PARALLEL. */
2075 HOST_WIDE_INT span_offset = offset;
2077 gcc_assert (GET_CODE (span) == PARALLEL);
2079 limit = XVECLEN (span, 0);
2080 for (par_index = 0; par_index < limit; par_index++)
2082 rtx elem = XVECEXP (span, 0, par_index);
2083 queue_reg_save (elem, NULL_RTX, span_offset);
2084 span_offset += GET_MODE_SIZE (GET_MODE (elem));
2094 /* Record call frame debugging information for INSN, which either
2095 sets SP or FP (adjusting how we calculate the frame address) or saves a
2096 register to the stack. If INSN is NULL_RTX, initialize our state.
2098 If AFTER_P is false, we're being called before the insn is emitted,
2099 otherwise after. Call instructions get invoked twice. */
2102 dwarf2out_frame_debug (rtx insn, bool after_p)
2105 bool handled_one = false;
2106 bool need_flush = false;
2108 if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
2109 dwarf2out_flush_queued_reg_saves ();
2111 if (!RTX_FRAME_RELATED_P (insn))
2113 /* ??? This should be done unconditionally since stack adjustments
2114 matter if the stack pointer is not the CFA register anymore but
2115 is still used to save registers. */
2116 if (!ACCUMULATE_OUTGOING_ARGS)
2117 dwarf2out_notice_stack_adjust (insn, after_p);
2121 any_cfis_emitted = false;
2123 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2124 switch (REG_NOTE_KIND (note))
2126 case REG_FRAME_RELATED_EXPR:
2127 insn = XEXP (note, 0);
2130 case REG_CFA_DEF_CFA:
2131 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2135 case REG_CFA_ADJUST_CFA:
2140 if (GET_CODE (n) == PARALLEL)
2141 n = XVECEXP (n, 0, 0);
2143 dwarf2out_frame_debug_adjust_cfa (n);
2147 case REG_CFA_OFFSET:
2150 n = single_set (insn);
2151 dwarf2out_frame_debug_cfa_offset (n);
2155 case REG_CFA_REGISTER:
2160 if (GET_CODE (n) == PARALLEL)
2161 n = XVECEXP (n, 0, 0);
2163 dwarf2out_frame_debug_cfa_register (n);
2167 case REG_CFA_EXPRESSION:
2170 n = single_set (insn);
2171 dwarf2out_frame_debug_cfa_expression (n);
2175 case REG_CFA_RESTORE:
2180 if (GET_CODE (n) == PARALLEL)
2181 n = XVECEXP (n, 0, 0);
2184 dwarf2out_frame_debug_cfa_restore (n);
2188 case REG_CFA_SET_VDRAP:
2192 dw_fde_ref fde = cfun->fde;
2195 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2197 fde->vdrap_reg = dwf_regno (n);
2203 case REG_CFA_WINDOW_SAVE:
2204 dwarf2out_frame_debug_cfa_window_save ();
2208 case REG_CFA_FLUSH_QUEUE:
2209 /* The actual flush happens below. */
2220 /* Minimize the number of advances by emitting the entire queue
2221 once anything is emitted. */
2222 need_flush |= any_cfis_emitted;
2226 insn = PATTERN (insn);
2228 dwarf2out_frame_debug_expr (insn);
2230 /* Check again. A parallel can save and update the same register.
2231 We could probably check just once, here, but this is safer than
2232 removing the check at the start of the function. */
2233 if (any_cfis_emitted || clobbers_queued_reg_save (insn))
2238 dwarf2out_flush_queued_reg_saves ();
2241 /* Examine CFI and return true if a cfi label and set_loc is needed
2242 beforehand. Even when generating CFI assembler instructions, we
2243 still have to add the cfi to the list so that lookup_cfa_1 works
2244 later on. When -g2 and above we even need to force emitting of
2245 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2246 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2247 and so don't use convert_cfa_to_fb_loc_list. */
2250 cfi_label_required_p (dw_cfi_ref cfi)
2252 if (!dwarf2out_do_cfi_asm ())
2255 if (dwarf_version == 2
2256 && debug_info_level > DINFO_LEVEL_TERSE
2257 && (write_symbols == DWARF2_DEBUG
2258 || write_symbols == VMS_AND_DWARF2_DEBUG))
2260 switch (cfi->dw_cfi_opc)
2262 case DW_CFA_def_cfa_offset:
2263 case DW_CFA_def_cfa_offset_sf:
2264 case DW_CFA_def_cfa_register:
2265 case DW_CFA_def_cfa:
2266 case DW_CFA_def_cfa_sf:
2267 case DW_CFA_def_cfa_expression:
2268 case DW_CFA_restore_state:
2277 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2278 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2281 add_cfis_to_fde (void)
2283 dw_fde_ref fde = cfun->fde;
2285 /* We always start with a function_begin label. */
2288 for (insn = get_insns (); insn; insn = next)
2290 next = NEXT_INSN (insn);
2292 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2294 /* Don't attempt to advance_loc4 between labels
2295 in different sections. */
2299 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2301 bool required = cfi_label_required_p (NOTE_CFI (insn));
2302 while (next && NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2304 required |= cfi_label_required_p (NOTE_CFI (next));
2305 next = NEXT_INSN (next);
2309 int num = dwarf2out_cfi_label_num;
2310 const char *label = dwarf2out_cfi_label ();
2314 /* Set the location counter to the new label. */
2316 xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2317 : DW_CFA_advance_loc4);
2318 xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2319 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, xcfi);
2321 tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2322 NOTE_LABEL_NUMBER (tmp) = num;
2327 VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, NOTE_CFI (insn));
2328 insn = NEXT_INSN (insn);
2330 while (insn != next);
2336 /* Scan the function and create the initial set of CFI notes. */
2339 create_cfi_notes (void)
2343 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
2347 add_cfi_insn = PREV_INSN (insn);
2349 if (BARRIER_P (insn))
2351 dwarf2out_frame_debug (insn, false);
2357 switch (NOTE_KIND (insn))
2359 case NOTE_INSN_PROLOGUE_END:
2360 dwarf2out_flush_queued_reg_saves ();
2363 case NOTE_INSN_EPILOGUE_BEG:
2364 #if defined(HAVE_epilogue)
2365 dwarf2out_cfi_begin_epilogue (insn);
2369 case NOTE_INSN_CFA_RESTORE_STATE:
2370 add_cfi_insn = insn;
2371 dwarf2out_frame_debug_restore_state ();
2377 if (!NONDEBUG_INSN_P (insn))
2380 pat = PATTERN (insn);
2381 if (asm_noperands (pat) >= 0)
2383 dwarf2out_frame_debug (insn, false);
2387 if (GET_CODE (pat) == SEQUENCE)
2389 int i, n = XVECLEN (pat, 0);
2390 for (i = 1; i < n; ++i)
2391 dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
2395 || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2396 dwarf2out_frame_debug (insn, false);
2398 /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
2399 Putting the note after the VEC should be ok. */
2400 if (!tablejump_p (insn, NULL, &add_cfi_insn))
2401 add_cfi_insn = insn;
2403 dwarf2out_frame_debug (insn, true);
2406 add_cfi_insn = NULL;
2409 /* Determine if we need to save and restore CFI information around this
2410 epilogue. If SIBCALL is true, then this is a sibcall epilogue. If
2411 we do need to save/restore, then emit the save now, and insert a
2412 NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream. */
2415 dwarf2out_cfi_begin_epilogue (rtx insn)
2417 bool saw_frp = false;
2420 /* Scan forward to the return insn, noticing if there are possible
2421 frame related insns. */
2422 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
2427 /* Look for both regular and sibcalls to end the block. */
2428 if (returnjump_p (i))
2430 if (CALL_P (i) && SIBLING_CALL_P (i))
2433 if (GET_CODE (PATTERN (i)) == SEQUENCE)
2436 rtx seq = PATTERN (i);
2438 if (returnjump_p (XVECEXP (seq, 0, 0)))
2440 if (CALL_P (XVECEXP (seq, 0, 0))
2441 && SIBLING_CALL_P (XVECEXP (seq, 0, 0)))
2444 for (idx = 0; idx < XVECLEN (seq, 0); idx++)
2445 if (RTX_FRAME_RELATED_P (XVECEXP (seq, 0, idx)))
2449 if (RTX_FRAME_RELATED_P (i))
2453 /* If the port doesn't emit epilogue unwind info, we don't need a
2454 save/restore pair. */
2458 /* Otherwise, search forward to see if the return insn was the last
2459 basic block of the function. If so, we don't need save/restore. */
2460 gcc_assert (i != NULL);
2461 i = next_real_insn (i);
2465 /* Insert the restore before that next real insn in the stream, and before
2466 a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
2467 properly nested. This should be after any label or alignment. This
2468 will be pushed into the CFI stream by the function below. */
2471 rtx p = PREV_INSN (i);
2474 if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
2478 emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);
2480 emit_cfa_remember = true;
2482 /* And emulate the state save. */
2483 gcc_assert (remember_row == NULL);
2484 remember_row = copy_cfi_row (cur_row);
2487 /* A "subroutine" of dwarf2out_cfi_begin_epilogue. Emit the restore
2491 dwarf2out_frame_debug_restore_state (void)
2493 dw_cfi_ref cfi = new_cfi ();
2495 cfi->dw_cfi_opc = DW_CFA_restore_state;
2498 gcc_assert (remember_row != NULL);
2499 free_cfi_row (cur_row);
2500 cur_row = remember_row;
2501 remember_row = NULL;
2504 /* Record the initial position of the return address. RTL is
2505 INCOMING_RETURN_ADDR_RTX. */
2508 initial_return_save (rtx rtl)
2510 unsigned int reg = INVALID_REGNUM;
2511 HOST_WIDE_INT offset = 0;
2513 switch (GET_CODE (rtl))
2516 /* RA is in a register. */
2517 reg = dwf_regno (rtl);
2521 /* RA is on the stack. */
2522 rtl = XEXP (rtl, 0);
2523 switch (GET_CODE (rtl))
2526 gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
2531 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2532 offset = INTVAL (XEXP (rtl, 1));
2536 gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2537 offset = -INTVAL (XEXP (rtl, 1));
2547 /* The return address is at some offset from any value we can
2548 actually load. For instance, on the SPARC it is in %i7+8. Just
2549 ignore the offset for now; it doesn't matter for unwinding frames. */
2550 gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
2551 initial_return_save (XEXP (rtl, 0));
2558 if (reg != DWARF_FRAME_RETURN_COLUMN)
2560 if (reg != INVALID_REGNUM)
2561 record_reg_saved_in_reg (rtl, pc_rtx);
2562 reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
2566 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2567 state at each location within the function. These notes will be
2568 emitted during pass_final. */
2571 execute_dwarf2_frame (void)
2573 /* The first time we're called, compute the incoming frame state. */
2574 if (cie_cfi_vec == NULL)
2576 dw_cfa_location loc;
2578 dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
2579 dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
2581 add_cfi_vec = &cie_cfi_vec;
2582 cie_cfi_row = cur_row = new_cfi_row ();
2584 /* On entry, the Canonical Frame Address is at SP. */
2585 memset(&loc, 0, sizeof (loc));
2586 loc.reg = dw_stack_pointer_regnum;
2587 loc.offset = INCOMING_FRAME_SP_OFFSET;
2590 if (targetm.debug_unwind_info () == UI_DWARF2
2591 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2593 initial_return_save (INCOMING_RETURN_ADDR_RTX);
2595 /* For a few targets, we have the return address incoming into a
2596 register, but choose a different return column. This will result
2597 in a DW_CFA_register for the return, and an entry in
2598 regs_saved_in_regs to match. If the target later stores that
2599 return address register to the stack, we want to be able to emit
2600 the DW_CFA_offset against the return column, not the intermediate
2601 save register. Save the contents of regs_saved_in_regs so that
2602 we can re-initialize it at the start of each function. */
2603 switch (VEC_length (reg_saved_in_data, regs_saved_in_regs))
2608 cie_return_save = ggc_alloc_reg_saved_in_data ();
2609 *cie_return_save = *VEC_index (reg_saved_in_data,
2610 regs_saved_in_regs, 0);
2611 regs_saved_in_regs = NULL;
2621 /* Set up state for generating call frame debug info. */
2622 gcc_checking_assert (queued_reg_saves == NULL);
2623 gcc_checking_assert (regs_saved_in_regs == NULL);
2625 cur_row = copy_cfi_row (cie_cfi_row);
2626 if (cie_return_save)
2627 VEC_safe_push (reg_saved_in_data, gc, regs_saved_in_regs, cie_return_save);
2629 cfa_store = cur_row->cfa;
2632 memset (&cfa_temp, 0, sizeof(cfa_temp));
2633 cfa_temp.reg = INVALID_REGNUM;
2635 dwarf2out_alloc_current_fde ();
2638 create_cfi_notes ();
2641 /* Reset all function-specific information, particularly for GC. */
2642 XDELETEVEC (barrier_args_size);
2643 barrier_args_size = NULL;
2644 regs_saved_in_regs = NULL;
2645 queued_reg_saves = NULL;
2647 free_cfi_row (cur_row);
2653 /* Convert a DWARF call frame info. operation to its string name */
2656 dwarf_cfi_name (unsigned int cfi_opc)
2660 case DW_CFA_advance_loc:
2661 return "DW_CFA_advance_loc";
2663 return "DW_CFA_offset";
2664 case DW_CFA_restore:
2665 return "DW_CFA_restore";
2667 return "DW_CFA_nop";
2668 case DW_CFA_set_loc:
2669 return "DW_CFA_set_loc";
2670 case DW_CFA_advance_loc1:
2671 return "DW_CFA_advance_loc1";
2672 case DW_CFA_advance_loc2:
2673 return "DW_CFA_advance_loc2";
2674 case DW_CFA_advance_loc4:
2675 return "DW_CFA_advance_loc4";
2676 case DW_CFA_offset_extended:
2677 return "DW_CFA_offset_extended";
2678 case DW_CFA_restore_extended:
2679 return "DW_CFA_restore_extended";
2680 case DW_CFA_undefined:
2681 return "DW_CFA_undefined";
2682 case DW_CFA_same_value:
2683 return "DW_CFA_same_value";
2684 case DW_CFA_register:
2685 return "DW_CFA_register";
2686 case DW_CFA_remember_state:
2687 return "DW_CFA_remember_state";
2688 case DW_CFA_restore_state:
2689 return "DW_CFA_restore_state";
2690 case DW_CFA_def_cfa:
2691 return "DW_CFA_def_cfa";
2692 case DW_CFA_def_cfa_register:
2693 return "DW_CFA_def_cfa_register";
2694 case DW_CFA_def_cfa_offset:
2695 return "DW_CFA_def_cfa_offset";
2698 case DW_CFA_def_cfa_expression:
2699 return "DW_CFA_def_cfa_expression";
2700 case DW_CFA_expression:
2701 return "DW_CFA_expression";
2702 case DW_CFA_offset_extended_sf:
2703 return "DW_CFA_offset_extended_sf";
2704 case DW_CFA_def_cfa_sf:
2705 return "DW_CFA_def_cfa_sf";
2706 case DW_CFA_def_cfa_offset_sf:
2707 return "DW_CFA_def_cfa_offset_sf";
2709 /* SGI/MIPS specific */
2710 case DW_CFA_MIPS_advance_loc8:
2711 return "DW_CFA_MIPS_advance_loc8";
2713 /* GNU extensions */
2714 case DW_CFA_GNU_window_save:
2715 return "DW_CFA_GNU_window_save";
2716 case DW_CFA_GNU_args_size:
2717 return "DW_CFA_GNU_args_size";
2718 case DW_CFA_GNU_negative_offset_extended:
2719 return "DW_CFA_GNU_negative_offset_extended";
2722 return "DW_CFA_<unknown>";
2726 /* This routine will generate the correct assembly data for a location
2727 description based on a cfi entry with a complex address. */
2730 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
2732 dw_loc_descr_ref loc;
2735 if (cfi->dw_cfi_opc == DW_CFA_expression)
2738 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2739 dw2_asm_output_data (1, r, NULL);
2740 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2743 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2745 /* Output the size of the block. */
2746 size = size_of_locs (loc);
2747 dw2_asm_output_data_uleb128 (size, NULL);
2749 /* Now output the operations themselves. */
2750 output_loc_sequence (loc, for_eh);
2753 /* Similar, but used for .cfi_escape. */
2756 output_cfa_loc_raw (dw_cfi_ref cfi)
2758 dw_loc_descr_ref loc;
2761 if (cfi->dw_cfi_opc == DW_CFA_expression)
2764 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2765 fprintf (asm_out_file, "%#x,", r);
2766 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2769 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2771 /* Output the size of the block. */
2772 size = size_of_locs (loc);
2773 dw2_asm_output_data_uleb128_raw (size);
2774 fputc (',', asm_out_file);
2776 /* Now output the operations themselves. */
2777 output_loc_sequence_raw (loc);
2780 /* Output a Call Frame Information opcode and its operand(s). */
2783 output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
2788 if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
2789 dw2_asm_output_data (1, (cfi->dw_cfi_opc
2790 | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
2791 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
2792 ((unsigned HOST_WIDE_INT)
2793 cfi->dw_cfi_oprnd1.dw_cfi_offset));
2794 else if (cfi->dw_cfi_opc == DW_CFA_offset)
2796 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2797 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
2798 "DW_CFA_offset, column %#lx", r);
2799 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
2800 dw2_asm_output_data_uleb128 (off, NULL);
2802 else if (cfi->dw_cfi_opc == DW_CFA_restore)
2804 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2805 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
2806 "DW_CFA_restore, column %#lx", r);
2810 dw2_asm_output_data (1, cfi->dw_cfi_opc,
2811 "%s", dwarf_cfi_name (cfi->dw_cfi_opc));
2813 switch (cfi->dw_cfi_opc)
2815 case DW_CFA_set_loc:
2817 dw2_asm_output_encoded_addr_rtx (
2818 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
2819 gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
2822 dw2_asm_output_addr (DWARF2_ADDR_SIZE,
2823 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
2824 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2827 case DW_CFA_advance_loc1:
2828 dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2829 fde->dw_fde_current_label, NULL);
2830 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2833 case DW_CFA_advance_loc2:
2834 dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2835 fde->dw_fde_current_label, NULL);
2836 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2839 case DW_CFA_advance_loc4:
2840 dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2841 fde->dw_fde_current_label, NULL);
2842 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2845 case DW_CFA_MIPS_advance_loc8:
2846 dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
2847 fde->dw_fde_current_label, NULL);
2848 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
2851 case DW_CFA_offset_extended:
2852 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2853 dw2_asm_output_data_uleb128 (r, NULL);
2854 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
2855 dw2_asm_output_data_uleb128 (off, NULL);
2858 case DW_CFA_def_cfa:
2859 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2860 dw2_asm_output_data_uleb128 (r, NULL);
2861 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
2864 case DW_CFA_offset_extended_sf:
2865 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2866 dw2_asm_output_data_uleb128 (r, NULL);
2867 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
2868 dw2_asm_output_data_sleb128 (off, NULL);
2871 case DW_CFA_def_cfa_sf:
2872 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2873 dw2_asm_output_data_uleb128 (r, NULL);
2874 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
2875 dw2_asm_output_data_sleb128 (off, NULL);
2878 case DW_CFA_restore_extended:
2879 case DW_CFA_undefined:
2880 case DW_CFA_same_value:
2881 case DW_CFA_def_cfa_register:
2882 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2883 dw2_asm_output_data_uleb128 (r, NULL);
2886 case DW_CFA_register:
2887 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2888 dw2_asm_output_data_uleb128 (r, NULL);
2889 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
2890 dw2_asm_output_data_uleb128 (r, NULL);
2893 case DW_CFA_def_cfa_offset:
2894 case DW_CFA_GNU_args_size:
2895 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
2898 case DW_CFA_def_cfa_offset_sf:
2899 off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
2900 dw2_asm_output_data_sleb128 (off, NULL);
2903 case DW_CFA_GNU_window_save:
2906 case DW_CFA_def_cfa_expression:
2907 case DW_CFA_expression:
2908 output_cfa_loc (cfi, for_eh);
2911 case DW_CFA_GNU_negative_offset_extended:
2912 /* Obsoleted by DW_CFA_offset_extended_sf. */
2921 /* Similar, but do it via assembler directives instead. */
2924 output_cfi_directive (FILE *f, dw_cfi_ref cfi)
2926 unsigned long r, r2;
2928 switch (cfi->dw_cfi_opc)
2930 case DW_CFA_advance_loc:
2931 case DW_CFA_advance_loc1:
2932 case DW_CFA_advance_loc2:
2933 case DW_CFA_advance_loc4:
2934 case DW_CFA_MIPS_advance_loc8:
2935 case DW_CFA_set_loc:
2936 /* Should only be created in a code path not followed when emitting
2937 via directives. The assembler is going to take care of this for
2938 us. But this routines is also used for debugging dumps, so
2940 gcc_assert (f != asm_out_file);
2941 fprintf (f, "\t.cfi_advance_loc\n");
2945 case DW_CFA_offset_extended:
2946 case DW_CFA_offset_extended_sf:
2947 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2948 fprintf (f, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
2949 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
2952 case DW_CFA_restore:
2953 case DW_CFA_restore_extended:
2954 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2955 fprintf (f, "\t.cfi_restore %lu\n", r);
2958 case DW_CFA_undefined:
2959 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2960 fprintf (f, "\t.cfi_undefined %lu\n", r);
2963 case DW_CFA_same_value:
2964 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2965 fprintf (f, "\t.cfi_same_value %lu\n", r);
2968 case DW_CFA_def_cfa:
2969 case DW_CFA_def_cfa_sf:
2970 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2971 fprintf (f, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
2972 r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
2975 case DW_CFA_def_cfa_register:
2976 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2977 fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
2980 case DW_CFA_register:
2981 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2982 r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
2983 fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
2986 case DW_CFA_def_cfa_offset:
2987 case DW_CFA_def_cfa_offset_sf:
2988 fprintf (f, "\t.cfi_def_cfa_offset "
2989 HOST_WIDE_INT_PRINT_DEC"\n",
2990 cfi->dw_cfi_oprnd1.dw_cfi_offset);
2993 case DW_CFA_remember_state:
2994 fprintf (f, "\t.cfi_remember_state\n");
2996 case DW_CFA_restore_state:
2997 fprintf (f, "\t.cfi_restore_state\n");
3000 case DW_CFA_GNU_args_size:
3001 if (f == asm_out_file)
3003 fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
3004 dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3006 fprintf (f, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC,
3007 ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
3012 fprintf (f, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC "\n",
3013 cfi->dw_cfi_oprnd1.dw_cfi_offset);
3017 case DW_CFA_GNU_window_save:
3018 fprintf (f, "\t.cfi_window_save\n");
3021 case DW_CFA_def_cfa_expression:
3022 if (f != asm_out_file)
3024 fprintf (f, "\t.cfi_def_cfa_expression ...\n");
3028 case DW_CFA_expression:
3029 if (f != asm_out_file)
3031 fprintf (f, "\t.cfi_cfa_expression ...\n");
3034 fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
3035 output_cfa_loc_raw (cfi);
3045 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3047 if (dwarf2out_do_cfi_asm ())
3048 output_cfi_directive (asm_out_file, cfi);
3051 /* Output CFIs from VEC, up to index UPTO, to bring current FDE to the
3052 same state as after executing CFIs in CFI chain. DO_CFI_ASM is
3053 true if .cfi_* directives shall be emitted, false otherwise. If it
3054 is false, FDE and FOR_EH are the other arguments to pass to
3058 output_cfis (cfi_vec vec, int upto, bool do_cfi_asm,
3059 dw_fde_ref fde, bool for_eh)
3062 struct dw_cfi_struct cfi_buf;
3064 dw_cfi_ref cfi_args_size = NULL, cfi_cfa = NULL, cfi_cfa_offset = NULL;
3065 VEC(dw_cfi_ref, heap) *regs = VEC_alloc (dw_cfi_ref, heap, 32);
3066 unsigned int len, idx;
3068 for (ix = 0; ix < upto + 1; ix++)
3070 dw_cfi_ref cfi = ix < upto ? VEC_index (dw_cfi_ref, vec, ix) : NULL;
3071 switch (cfi ? cfi->dw_cfi_opc : DW_CFA_nop)
3073 case DW_CFA_advance_loc:
3074 case DW_CFA_advance_loc1:
3075 case DW_CFA_advance_loc2:
3076 case DW_CFA_advance_loc4:
3077 case DW_CFA_MIPS_advance_loc8:
3078 case DW_CFA_set_loc:
3079 /* All advances should be ignored. */
3081 case DW_CFA_remember_state:
3083 dw_cfi_ref args_size = cfi_args_size;
3085 /* Skip everything between .cfi_remember_state and
3086 .cfi_restore_state. */
3091 for (; ix < upto; ix++)
3093 cfi2 = VEC_index (dw_cfi_ref, vec, ix);
3094 if (cfi2->dw_cfi_opc == DW_CFA_restore_state)
3096 else if (cfi2->dw_cfi_opc == DW_CFA_GNU_args_size)
3099 gcc_assert (cfi2->dw_cfi_opc != DW_CFA_remember_state);
3102 cfi_args_size = args_size;
3105 case DW_CFA_GNU_args_size:
3106 cfi_args_size = cfi;
3108 case DW_CFA_GNU_window_save:
3111 case DW_CFA_offset_extended:
3112 case DW_CFA_offset_extended_sf:
3113 case DW_CFA_restore:
3114 case DW_CFA_restore_extended:
3115 case DW_CFA_undefined:
3116 case DW_CFA_same_value:
3117 case DW_CFA_register:
3118 case DW_CFA_val_offset:
3119 case DW_CFA_val_offset_sf:
3120 case DW_CFA_expression:
3121 case DW_CFA_val_expression:
3122 case DW_CFA_GNU_negative_offset_extended:
3123 if (VEC_length (dw_cfi_ref, regs)
3124 <= cfi->dw_cfi_oprnd1.dw_cfi_reg_num)
3125 VEC_safe_grow_cleared (dw_cfi_ref, heap, regs,
3126 cfi->dw_cfi_oprnd1.dw_cfi_reg_num + 1);
3127 VEC_replace (dw_cfi_ref, regs, cfi->dw_cfi_oprnd1.dw_cfi_reg_num,
3130 case DW_CFA_def_cfa:
3131 case DW_CFA_def_cfa_sf:
3132 case DW_CFA_def_cfa_expression:
3134 cfi_cfa_offset = cfi;
3136 case DW_CFA_def_cfa_register:
3139 case DW_CFA_def_cfa_offset:
3140 case DW_CFA_def_cfa_offset_sf:
3141 cfi_cfa_offset = cfi;
3144 gcc_assert (cfi == NULL);
3146 len = VEC_length (dw_cfi_ref, regs);
3147 for (idx = 0; idx < len; idx++)
3149 cfi2 = VEC_replace (dw_cfi_ref, regs, idx, NULL);
3151 && cfi2->dw_cfi_opc != DW_CFA_restore
3152 && cfi2->dw_cfi_opc != DW_CFA_restore_extended)
3155 output_cfi_directive (asm_out_file, cfi2);
3157 output_cfi (cfi2, fde, for_eh);
3160 if (cfi_cfa && cfi_cfa_offset && cfi_cfa_offset != cfi_cfa)
3162 gcc_assert (cfi_cfa->dw_cfi_opc != DW_CFA_def_cfa_expression);
3164 switch (cfi_cfa_offset->dw_cfi_opc)
3166 case DW_CFA_def_cfa_offset:
3167 cfi_buf.dw_cfi_opc = DW_CFA_def_cfa;
3168 cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd1;
3170 case DW_CFA_def_cfa_offset_sf:
3171 cfi_buf.dw_cfi_opc = DW_CFA_def_cfa_sf;
3172 cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd1;
3174 case DW_CFA_def_cfa:
3175 case DW_CFA_def_cfa_sf:
3176 cfi_buf.dw_cfi_opc = cfi_cfa_offset->dw_cfi_opc;
3177 cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd2;
3184 else if (cfi_cfa_offset)
3185 cfi_cfa = cfi_cfa_offset;
3189 output_cfi_directive (asm_out_file, cfi_cfa);
3191 output_cfi (cfi_cfa, fde, for_eh);
3194 cfi_cfa_offset = NULL;
3196 && cfi_args_size->dw_cfi_oprnd1.dw_cfi_offset)
3199 output_cfi_directive (asm_out_file, cfi_args_size);
3201 output_cfi (cfi_args_size, fde, for_eh);
3203 cfi_args_size = NULL;
3206 VEC_free (dw_cfi_ref, heap, regs);
3209 else if (do_cfi_asm)
3210 output_cfi_directive (asm_out_file, cfi);
3212 output_cfi (cfi, fde, for_eh);
3221 /* Save the result of dwarf2out_do_frame across PCH.
3222 This variable is tri-state, with 0 unset, >0 true, <0 false. */
3223 static GTY(()) signed char saved_do_cfi_asm = 0;
3225 /* Decide whether we want to emit frame unwind information for the current
3226 translation unit. */
3229 dwarf2out_do_frame (void)
3231 /* We want to emit correct CFA location expressions or lists, so we
3232 have to return true if we're going to output debug info, even if
3233 we're not going to output frame or unwind info. */
3234 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3237 if (saved_do_cfi_asm > 0)
3240 if (targetm.debug_unwind_info () == UI_DWARF2)
3243 if ((flag_unwind_tables || flag_exceptions)
3244 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3250 /* Decide whether to emit frame unwind via assembler directives. */
3253 dwarf2out_do_cfi_asm (void)
3257 #ifdef MIPS_DEBUGGING_INFO
3261 if (saved_do_cfi_asm != 0)
3262 return saved_do_cfi_asm > 0;
3264 /* Assume failure for a moment. */
3265 saved_do_cfi_asm = -1;
3267 if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
3269 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
3272 /* Make sure the personality encoding is one the assembler can support.
3273 In particular, aligned addresses can't be handled. */
3274 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3275 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3277 enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3278 if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
3281 /* If we can't get the assembler to emit only .debug_frame, and we don't need
3282 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
3283 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
3284 && !flag_unwind_tables && !flag_exceptions
3285 && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
3289 saved_do_cfi_asm = 1;
3294 gate_dwarf2_frame (void)
3296 #ifndef HAVE_prologue
3297 /* Targets which still implement the prologue in assembler text
3298 cannot use the generic dwarf2 unwinding. */
3302 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3303 from the optimized shrink-wrapping annotations that we will compute.
3304 For now, only produce the CFI notes for dwarf2. */
3305 return dwarf2out_do_frame ();
3308 struct rtl_opt_pass pass_dwarf2_frame =
3312 "dwarf2", /* name */
3313 gate_dwarf2_frame, /* gate */
3314 execute_dwarf2_frame, /* execute */
3317 0, /* static_pass_number */
3318 TV_FINAL, /* tv_id */
3319 0, /* properties_required */
3320 0, /* properties_provided */
3321 0, /* properties_destroyed */
3322 0, /* todo_flags_start */
3323 0 /* todo_flags_finish */
3327 #include "gt-dwarf2cfi.h"