/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-pass.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "cfgcleanup.h"
#include "gimple-iterator.h"
#include "tree-into-ssa.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "gimple-low.h"
/* In some instances a tree and a gimple need to be stored in the same
   table, i.e. in hash tables.  This is a structure to do this.  */
typedef union {tree *tp; tree t; gimple *g;} treemple;
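/* Illustrative note (a sketch, not part of the original file): the same
   treemple variable can carry either kind of key, e.g.

     treemple tm;
     tm.t = label_decl;   <- key by a LABEL_DECL (a tree)
     tm.g = try_stmt;     <- or key by a GIMPLE_TRY (a gimple)

   so a single hash table can index both; see finally_tree_node below.
   "label_decl" and "try_stmt" are hypothetical locals.  */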
/* Misc functions used in this file.  */

/* Remember and lookup EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
   we get to rtl.  Once we're done with lowering here, if we lose
   the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
   compared to those that can.  We should be saving some amount
   of space by only allocating memory for those that can throw.  */
/* Add statement T in function IFUN to landing pad NUM.  */

static void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple *t, int num)
{
  gcc_assert (num != 0);

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, hash_map<gimple *, int>::create_ggc (31));

  gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
}

/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

void
add_stmt_to_eh_lp (gimple *t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}
/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple *t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
        lp = gen_eh_landing_pad (region);
      else
        gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}
/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple *t)
{
  if (!get_eh_throw_stmt_table (ifun))
    return false;

  if (!get_eh_throw_stmt_table (ifun)->get (t))
    return false;

  get_eh_throw_stmt_table (ifun)->remove (t);
  return true;
}

/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple *t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}
/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */

int
lookup_stmt_eh_lp_fn (struct function *ifun, gimple *t)
{
  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  int *lp_nr = ifun->eh->throw_stmt_table->get (t);
  return lp_nr ? *lp_nr : 0;
}

/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (gimple *t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (!cfun)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}
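/* Usage sketch (illustrative, not part of the original file): a caller
   that knows statement CALL can reach landing pad LP would do

     add_stmt_to_eh_lp (call, lp->index);
     gcc_assert (lookup_stmt_eh_lp (call) == lp->index);
     ...
     remove_stmt_from_eh_lp (call);   <- e.g. once CALL is proved not to throw

   where "call" and "lp" are hypothetical; a negative number names a
   MUST_NOT_THROW region instead of a landing pad, per the comment above.  */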
/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */
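/* For example (an illustrative sketch, not from the original file), in

     try
       {
         if (cond)
           goto out;            <- escapes the TRY_FINALLY body
       }
     finally
       {
         cleanup ();
       }
     out:;

   the first pass records the label "out" (a LABEL_DECL, hence a tree)
   and the GIMPLE_TRY_FINALLY node itself, so the second pass can ask
   whether the goto leaves the try body.  */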
struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gtry *parent;
};
/* Hashtable helpers.  */

struct finally_tree_hasher : free_ptr_hash <finally_tree_node>
{
  static inline hashval_t hash (const finally_tree_node *);
  static inline bool equal (const finally_tree_node *,
                            const finally_tree_node *);
};

inline hashval_t
finally_tree_hasher::hash (const finally_tree_node *v)
{
  return (intptr_t)v->child.t >> 4;
}

inline bool
finally_tree_hasher::equal (const finally_tree_node *v,
                            const finally_tree_node *c)
{
  return v->child.t == c->child.t;
}

/* Note that this table is *not* marked GTY.  It is short-lived.  */
static hash_table<finally_tree_hasher> *finally_tree;
static void
record_in_finally_tree (treemple child, gtry *parent)
{
  struct finally_tree_node *n;
  finally_tree_node **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = finally_tree->find_slot (n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}

static void
collect_finally_tree (gimple *stmt, gtry *region);

/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gtry *region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}
static void
collect_finally_tree (gimple *stmt, gtry *region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (as_a <glabel *> (stmt));
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
        {
          temp.g = stmt;
          record_in_finally_tree (temp, region);
          collect_finally_tree_1 (gimple_try_eval (stmt),
                                  as_a <gtry *> (stmt));
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
        {
          collect_finally_tree_1 (gimple_try_eval (stmt), region);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (
                                as_a <gcatch *> (stmt)),
                              region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      {
        geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
        collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
        collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
      }
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
         interested in.  Don't walk them.  */
      break;
    }
}
/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple *target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = finally_tree->find (&n);
      if (!p)
        return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}
/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straightforward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;
/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;
/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */

struct goto_queue_node
{
  treemple stmt;
  location_t location;
  gimple_seq repl_stmt;
  gimple *cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  bool is_label;
};
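/* Worked example (illustrative, not from the original file): for

     try { if (p) goto done; } finally { f (); }
     done:;

   the escaping goto gets a queue entry whose REPL_STMT branches into
   the finally block and whose CONT_STMT is "goto done;", emitted after
   the finally code so control still reaches the original destination.  */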
/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;
};
struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gtry *try_finally_expr;
  gtry *top_p;

  /* While lowering, top_p is usually expanded into multiple statements;
     the following field stores them.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  hash_map<gimple *, goto_queue_node *> *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  vec<tree> dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};

static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);
/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);

static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
        if (tf->goto_queue[i].stmt.g == stmt.g)
          return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = new hash_map<gimple *, goto_queue_node *>;
      for (i = 0; i < tf->goto_queue_active; i++)
        {
          bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
                                                  &tf->goto_queue[i]);
          gcc_assert (!existed);
        }
    }

  goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
  if (slot != NULL)
    return ((*slot)->repl_stmt);

  return NULL;
}
/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */

static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
                                gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}
/* The real work of replace_goto_queue.  Returns with GSI updated to
   point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);

static void
replace_goto_queue_1 (gimple *stmt, struct leh_tf_state *tf,
                      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;
  temp.g = NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
        {
          gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
          gsi_remove (gsi, false);
          return;
        }
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
                                      as_a <gcatch *> (stmt)),
                                    tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      {
        geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
        replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
                                      tf);
        replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
                                      tf);
      }
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}
/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (*seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}

/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (&eh_seq, tf);
}
/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
   a gimple statement.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
                      treemple new_stmt,
                      int index,
                      bool is_label,
                      location_t location)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
        = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->location = location;
  q->is_label = is_label;
}
/* Record the LABEL label in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
                            location_t location)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we knew.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array.exists ())
    {
      tf->dest_array.create (10);
      tf->dest_array.quick_push (label);
      index = 0;
    }
  else
    {
      int n = tf->dest_array.length ();
      for (index = 0; index < n; ++index)
        if (tf->dest_array[index] == label)
          break;
      if (index == n)
        tf->dest_array.safe_push (label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have an easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true, location);
}
/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple *stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
        gcond *cond_stmt = as_a <gcond *> (stmt);
        new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
        record_in_goto_queue_label (tf, new_stmt,
                                    gimple_cond_true_label (cond_stmt),
                                    EXPR_LOCATION (*new_stmt.tp));
        new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
        record_in_goto_queue_label (tf, new_stmt,
                                    gimple_cond_false_label (cond_stmt),
                                    EXPR_LOCATION (*new_stmt.tp));
      }
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
                                  gimple_location (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
      break;

    default:
      gcc_unreachable ();
    }
}
/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
   was in fact structured, and we've not yet done jump threading, then none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

#if defined(ENABLE_CHECKING)
static void
verify_norecord_switch_expr (struct leh_state *state,
                             gswitch *switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif
/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple *x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

        int x;
        int foo (void)
        {
          x = 0;
          try {
            return x;
          } finally {
            x++;
          }
        }

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}
/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
                     struct leh_tf_state *tf)
{
  ggoto *x;

  gcc_assert (q->is_label);

  q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}
/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  glabel *x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}

/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gresx *x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}

/* Emit an EH_DISPATCH statement into SEQ for REGION.  */

static void
emit_eh_dispatch (gimple_seq *seq, eh_region region)
{
  geh_dispatch *x = gimple_build_eh_dispatch (region->index);
  gimple_seq_add_stmt (seq, x);
}
/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
      if (region->type == ERT_MUST_NOT_THROW)
        break;
      region = region->outer;
      if (region == NULL)
        break;
    }
}

/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}
/* We want to transform
        try { body; } catch { stuff; }
   to
        normal_sequence:
          body;
          over:
        eh_sequence:
          landing_pad:
          stuff;
          goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */
static gimple_seq
frob_into_branch_around (gtry *tp, eh_region region, tree over)
{
  gimple *x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
        over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_set_location (x, loc);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}
/* A subroutine of lower_try_finally.  Duplicate the gimple sequence SEQ.
   Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
                             location_t loc)
{
  gtry *region = NULL;
  gimple_seq new_seq;
  gimple_stmt_iterator gsi;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      /* We duplicate __builtin_stack_restore at -O0 in the hope of eliminating
         it on the EH paths.  When it is not eliminated, make it transparent in
         the debug info.  */
      if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
        gimple_set_location (stmt, UNKNOWN_LOCATION);
      else if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
        {
          tree block = gimple_block (stmt);
          gimple_set_location (stmt, loc);
          gimple_set_block (stmt, block);
        }
    }

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}
/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
        {
          temp.t = label;
          record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
        }
    }
  return label;
}
/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static inline geh_else *
get_eh_else (gimple_seq finally)
{
  gimple *x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return as_a <geh_else *> (x);
    }
  return NULL;
}
/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.  First we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

        try {
          finally_block;
        } catch {
          if (fintmp == eh_edge)
            protect_cleanup_actions;
        }

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */
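/* Sketch of the first choice (illustrative, not part of the original
   comments): the finally body F is duplicated onto the exception path
   and wrapped so that throwing out of it is diagnosed:

     landing_pad:
       try { F' } catch-must-not-throw { protect_cleanup_actions }
       resx;

   which is what the code below builds via gimple_build_eh_must_not_throw
   and lower_eh_must_not_throw.  */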
static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
                               struct leh_state *this_state,
                               struct leh_tf_state *tf)
{
  gimple_seq finally = gimple_try_cleanup (tf->top_p);

  /* EH_ELSE doesn't come from user code; only compiler generated stuff.
     It does need to be handled here, so as to separate the (different)
     EH path from the normal path.  But we should not attempt to wrap
     it with a must-not-throw node (which indeed gets in the way).  */
  if (geh_else *eh_else = get_eh_else (finally))
    {
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
      finally = gimple_eh_else_e_body (eh_else);

      /* Let the ELSE see the exception that's being processed.  */
      eh_region save_ehp = this_state->ehp_region;
      this_state->ehp_region = this_state->cur_region;
      lower_eh_constructs_1 (this_state, &finally);
      this_state->ehp_region = save_ehp;
    }
  else
    {
      /* First check for nothing to do.  */
      if (lang_hooks.eh_protect_cleanup_actions == NULL)
        return;
      tree actions = lang_hooks.eh_protect_cleanup_actions ();
      if (actions == NULL)
        return;

      if (this_state)
        finally = lower_try_finally_dup_block (finally, outer_state,
                                               gimple_location (tf->try_finally_expr));

      /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
         set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
         to be in an enclosing scope, but needs to be implemented at this level
         to avoid a nesting violation (see wrap_temporary_cleanups in
         cp/decl.c).  Since it's logically at an outer level, we should call
         terminate before we get to it, so strip it away before adding the
         MUST_NOT_THROW filter.  */
      gimple_stmt_iterator gsi = gsi_start (finally);
      gimple *x = gsi_stmt (gsi);
      if (gimple_code (x) == GIMPLE_TRY
          && gimple_try_kind (x) == GIMPLE_TRY_CATCH
          && gimple_try_catch_is_cleanup (x))
        {
          gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
          gsi_remove (&gsi, false);
        }

      /* Wrap the block with protect_cleanup_actions as the action.  */
      geh_mnt *eh_mnt = gimple_build_eh_must_not_throw (actions);
      gtry *try_stmt = gimple_build_try (finally,
                                         gimple_seq_alloc_with_stmt (eh_mnt),
                                         GIMPLE_TRY_CATCH);
      finally = lower_eh_must_not_throw (outer_state, try_stmt);
    }

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (gimple_seq_may_fallthru (finally))
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}
/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_nofallthru (struct leh_state *state,
                              struct leh_tf_state *tf)
{
  tree lab;
  gimple *x;
  geh_else *eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, &finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
        }
    }
  else
    {
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          emit_post_landing_pad (&eh_seq, tf->region);

          x = gimple_build_goto (lab);
          gimple_set_location (x, gimple_location (tf->try_finally_expr));
          gimple_seq_add_stmt (&eh_seq, x);
        }
    }
}
/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  geh_else *eh_else;
  glabel *label_stmt;
  gimple *x;
  gimple_seq finally;
  gimple_stmt_iterator gsi;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);
  location_t finally_loc;

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : loc;

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      if (tf->may_throw)
        finally = gimple_eh_else_e_body (eh_else);
      else
        finally = gimple_eh_else_n_body (eh_else);
    }

  lower_eh_constructs_1 (state, &finally);

  for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
        {
          tree block = gimple_block (stmt);
          gimple_set_location (stmt, finally_loc);
          gimple_set_block (stmt, block);
        }
    }

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
         the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
         the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (finally_loc);
  label_stmt = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (tf->dest_array[0] == tf->fallthru_label)
        {
          /* Reachable by goto to fallthru label only.  Redirect it
             to the new label (already created, sadly), and do not
             emit the final branch out, or the fallthru label.  */
          tf->fallthru_label = NULL;
          return;
        }
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}
/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */

static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple *x;
  geh_else *eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
         since it is only emitted once.  */
      if (eh_else)
        seq = gimple_eh_else_e_body (eh_else);
      else
        seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
        struct goto_queue_node *q;
        tree label;
      } *labels;

      return_index = tf->dest_array.length ();
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
        {
          index = q->index < 0 ? return_index : q->index;

          if (!labels[index].q)
            labels[index].q = q;
        }

      for (index = 0; index < return_index + 1; index++)
        {
          tree lab;

          q = labels[index].q;
          if (! q)
            continue;

          lab = labels[index].label
            = create_artificial_label (tf_loc);

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);

          x = gimple_build_label (lab);
          gimple_seq_add_stmt (&new_stmt, x);

          seq = lower_try_finally_dup_block (finally, state, q->location);
          lower_eh_constructs_1 (state, &seq);
          gimple_seq_add_seq (&new_stmt, seq);

          gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
          maybe_record_in_goto_queue (state, q->cont_stmt);
        }

      for (q = tf->goto_queue; q < qe; q++)
        {
          tree lab;

          index = q->index < 0 ? return_index : q->index;

          if (labels[index].q == q)
            continue;

          lab = labels[index].label;

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);
        }

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link new stmts after running replace_goto_queue due
     to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}
/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */
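/* Illustrative shape of the result (a sketch, not from the original
   file): each incoming edge stores a distinct constant into a temporary
   before entering the shared finally block, and a trailing switch
   dispatches to the original destination:

     finally_tmp = 0;  goto finally_lab;   <- fallthru edge
     finally_tmp = 1;  goto finally_lab;   <- escaping goto
   finally_lab:
     <finally body>
     switch (finally_tmp) { case 0: goto fallthru; case 1: goto dest1; ... }
*/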
static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  auto_vec<tree> case_label_vec;
  gimple_seq switch_body = NULL;
  gimple *x;
  geh_else *eh_else;
  tree tmp;
  gimple *switch_stmt;
  gimple_seq finally;
  hash_map<tree, gimple *> *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Prepare for switch statement generation.  */
  nlabels = tf->dest_array.length ();
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;
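  /* Illustrative layout (not part of the original comments): with two
     escaping goto labels plus a return, a throw and a fallthru, the
     indices are 0 and 1 for the gotos, return_index == 2, eh_index == 3,
     fallthru_index == 4, and ndests == 5.  */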
  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use vec::quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as needed.  */
  case_label_vec.create (ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence the code is
     laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node,
                                              fallthru_index));
      gimple_set_location (x, finally_loc);
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (finally_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, finally_loc);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, &finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
          emit_resx (&eh_seq, tf->region);
        }

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  lower_eh_constructs_1 (state, &finally);
  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed upon the
     entrance through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod = NULL;
      int switch_id;
      unsigned int case_index;

      if (q->index < 0)
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node,
                                                  return_index));
          gimple_seq_add_stmt (&mod, x);
          do_return_redirection (q, finally_label, mod);
          switch_id = return_index;
        }
      else
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node, q->index));
          gimple_seq_add_stmt (&mod, x);
          do_goto_redirection (q, finally_label, mod, tf);
          switch_id = q->index;
        }

      case_index = j + q->index;
      if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
        {
          tree case_lab;
          tmp = build_int_cst (integer_type_node, switch_id);
          case_lab = build_case_label (tmp, NULL,
                                       create_artificial_label (tf_loc));
          /* We store the cont_stmt in the pointer map, so that we can recover
             it in the loop below.  */
          if (!cont_map)
            cont_map = new hash_map<tree, gimple *>;
          cont_map->put (case_lab, q->cont_stmt);
          case_label_vec.quick_push (case_lab);
        }
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple *cont_stmt;

      last_case = case_label_vec[j];

      gcc_assert (last_case);
      gcc_assert (cont_map);

      cont_stmt = *cont_map->get (last_case);

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    delete cont_map;

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  tree tem = case_label_vec.pop ();
  gcc_assert (tem == last_case);
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch (finally_tmp, last_case,
                                     case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue
     due to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}
/* Decide whether or not we are going to duplicate the finally block.
   There are several considerations.

   First, if this is Java, then the finally block contains code
   written by the user.  It has line numbers associated with it,
   so duplicating the block means it's difficult to set a breakpoint.
   Since controlling code generation via -g is verboten, we simply
   never duplicate code without optimization.

   Second, we'd like to prevent egregious code growth.  One way to
   do this is to estimate the size of the finally block, multiply
   that by the number of copies we'd need to make, and compare against
   the estimate of the size of the switch machinery we'd have to add.  */
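/* Quick worked example (illustrative, not part of the original
   comments): a 10-insn finally block with 3 destinations gives
   f_estimate = (10 + 1) * 3 = 33 and sw_estimate = 10 + 2 * 3 = 16,
   so optimizing for size picks the switch, while at -O2 the copy
   still wins through the "f_estimate < 100" test below.  */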
static bool
decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
{
  int f_estimate, sw_estimate;
  geh_else *eh_else;

  /* If there's an EH_ELSE involved, the exception path is separate
     and really doesn't come into play for this computation.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      ndests -= may_throw;
      finally = gimple_eh_else_n_body (eh_else);
    }

  if (!optimize)
    {
      gimple_stmt_iterator gsi;

      if (ndests == 1)
        return true;

      for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          /* Duplicate __builtin_stack_restore in the hope of eliminating it
             on the EH paths and, consequently, useless cleanups.  */
          gimple *stmt = gsi_stmt (gsi);
          if (!is_gimple_debug (stmt)
              && !gimple_clobber_p (stmt)
              && !gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
            return false;
        }
      return true;
    }

  /* Finally estimate N times, plus N gotos.  */
  f_estimate = estimate_num_insns_seq (finally, &eni_size_weights);
  f_estimate = (f_estimate + 1) * ndests;

  /* Switch statement (cost 10), N variable assignments, N gotos.  */
  sw_estimate = 10 + 2 * ndests;

  /* Optimize for size clearly wants our best guess.  */
  if (optimize_function_for_size_p (cfun))
    return f_estimate < sw_estimate;

  /* ??? These numbers are completely made up so far.  */
  if (optimize > 1)
    return f_estimate < 100 || f_estimate < sw_estimate * 2;
  else
    return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
}
/* REG is the enclosing region for a possible cleanup region, or the region
   itself.  Returns TRUE if such a region would be unreachable.

   Cleanup regions within a must-not-throw region aren't actually reachable
   even if there are throwing stmts within them, because the personality
   routine will call terminate before unwinding.  */

static bool
cleanup_is_dead_in (eh_region reg)
{
  while (reg && reg->type == ERT_CLEANUP)
    reg = reg->outer;
  return (reg && reg->type == ERT_MUST_NOT_THROW);
}
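/* E.g. (an illustrative sketch): for a cleanup nested as
   MUST_NOT_THROW { ... CLEANUP { CLEANUP { stmt; } } ... }
   the loop walks past both ERT_CLEANUP regions and lands on the
   MUST_NOT_THROW ancestor, so the inner cleanup is dead.  */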
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY node
   to a sequence of labels and blocks, plus the exception region trees
   that record all the magic.  This is complicated by the need to
   arrange for the FINALLY block to be executed on all exits.  */

static gimple_seq
lower_try_finally (struct leh_state *state, gtry *tp)
{
  struct leh_tf_state this_tf;
  struct leh_state this_state;
  int ndests;
  gimple_seq old_eh_seq;

  /* Process the try block.  */

  memset (&this_tf, 0, sizeof (this_tf));
  this_tf.try_finally_expr = tp;
  this_tf.top_p = tp;
  this_tf.outer = state;
  if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
    {
      this_tf.region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_tf.region;
    }
  else
    {
      this_tf.region = NULL;
      this_state.cur_region = state->cur_region;
    }

  this_state.ehp_region = state->ehp_region;
  this_state.tf = &this_tf;

  old_eh_seq = eh_seq;
  eh_seq = NULL;

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  /* Determine if the try block is escaped through the bottom.  */
  this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));

  /* Determine if any exceptions are possible within the try block.  */
  if (this_tf.region)
    this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
  if (this_tf.may_throw)
    honor_protect_cleanup_actions (state, &this_state, &this_tf);

  /* Determine how many edges (still) reach the finally block.  Or rather,
     how many destinations are reached by the finally block.  Use this to
     determine how we process the finally block itself.  */

  ndests = this_tf.dest_array.length ();
  ndests += this_tf.may_fallthru;
  ndests += this_tf.may_return;
  ndests += this_tf.may_throw;

  /* If the FINALLY block is not reachable, dike it out.  */
  if (ndests == 0)
    {
      gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
      gimple_try_set_cleanup (tp, NULL);
    }
  /* If the finally block doesn't fall through, then any destination
     we might try to impose there isn't reached either.  There may be
     some minor amount of cleanup and redirection still needed.  */
  else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
    lower_try_finally_nofallthru (state, &this_tf);

  /* We can easily special-case redirection to a single destination.  */
  else if (ndests == 1)
    lower_try_finally_onedest (state, &this_tf);
  else if (decide_copy_try_finally (ndests, this_tf.may_throw,
                                    gimple_try_cleanup (tp)))
    lower_try_finally_copy (state, &this_tf);
  else
    lower_try_finally_switch (state, &this_tf);

  /* If someone requested we add a label at the end of the transformed
     sequence, do so.  */
  if (this_tf.fallthru_label)
    {
      /* This must be reached only if ndests == 0.  */
      gimple *x = gimple_build_label (this_tf.fallthru_label);
      gimple_seq_add_stmt (&this_tf.top_p_seq, x);
    }

  this_tf.dest_array.release ();
  free (this_tf.goto_queue);
  if (this_tf.goto_queue_map)
    delete this_tf.goto_queue_map;

  /* If there was an old (aka outer) eh_seq, append the current eh_seq.
     If there was no old eh_seq, then the append is trivially already done.  */
  if (old_eh_seq)
    {
      if (eh_seq == NULL)
        eh_seq = old_eh_seq;
      else
        {
          gimple_seq new_eh_seq = eh_seq;
          eh_seq = old_eh_seq;
          gimple_seq_add_seq (&eh_seq, new_eh_seq);
        }
    }

  return this_tf.top_p_seq;
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
   list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
   exception region trees that record all the magic.  */

static gimple_seq
lower_catch (struct leh_state *state, gtry *tp)
{
  eh_region try_region = NULL;
  struct leh_state this_state = *state;
  gimple_stmt_iterator gsi;
  tree out_label;
  gimple_seq new_seq, cleanup;
  gimple *x;
  location_t try_catch_loc = gimple_location (tp);

  if (flag_exceptions)
    {
      try_region = gen_eh_region_try (state->cur_region);
      this_state.cur_region = try_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (try_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  emit_eh_dispatch (&new_seq, try_region);
  emit_resx (&new_seq, try_region);

  this_state.cur_region = state->cur_region;
  this_state.ehp_region = try_region;

  /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup
     itself, so that e.g. for coverage purposes the nested cleanups don't
     appear before the cleanup body.  See PR64634 for details.  */
  gimple_seq old_eh_seq = eh_seq;
  eh_seq = NULL;

  out_label = NULL;
  cleanup = gimple_try_cleanup (tp);
  for (gsi = gsi_start (cleanup);
       !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      eh_catch c;
      gcatch *catch_stmt;
      gimple_seq handler;

      catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
      c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));

      handler = gimple_catch_handler (catch_stmt);
      lower_eh_constructs_1 (&this_state, &handler);

      c->label = create_artificial_label (UNKNOWN_LOCATION);
      x = gimple_build_label (c->label);
      gimple_seq_add_stmt (&new_seq, x);

      gimple_seq_add_seq (&new_seq, handler);

      if (gimple_seq_may_fallthru (new_seq))
        {
          if (!out_label)
            out_label = create_artificial_label (try_catch_loc);

          x = gimple_build_goto (out_label);
          gimple_seq_add_stmt (&new_seq, x);
        }
      if (!c->type_list)
        break;
    }

  gimple_try_set_cleanup (tp, new_seq);

  gimple_seq new_eh_seq = eh_seq;
  eh_seq = old_eh_seq;
  gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label);
  gimple_seq_add_seq (&eh_seq, new_eh_seq);
  return ret_seq;
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
   GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
   region trees that record all the magic.  */

static gimple_seq
lower_eh_filter (struct leh_state *state, gtry *tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  gimple *inner, *x;
  gimple_seq new_seq;

  inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));

  if (flag_exceptions)
    {
      this_region = gen_eh_region_allowed (state->cur_region,
                                           gimple_eh_filter_types (inner));
      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  this_state.cur_region = state->cur_region;
  this_state.ehp_region = this_region;

  emit_eh_dispatch (&new_seq, this_region);
  emit_resx (&new_seq, this_region);

  this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
  x = gimple_build_label (this_region->u.allowed.label);
  gimple_seq_add_stmt (&new_seq, x);

  lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
  gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));

  gimple_try_set_cleanup (tp, new_seq);

  return frob_into_branch_around (tp, this_region, NULL);
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
   a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
   plus the exception region trees that record all the magic.  */

static gimple_seq
lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
{
  struct leh_state this_state = *state;

  if (flag_exceptions)
    {
      gimple *inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
      eh_region this_region;

      this_region = gen_eh_region_must_not_throw (state->cur_region);
      this_region->u.must_not_throw.failure_decl
        = gimple_eh_must_not_throw_fndecl (
            as_a <geh_mnt *> (inner));
      this_region->u.must_not_throw.failure_loc
        = LOCATION_LOCUS (gimple_location (tp));

      /* In order to get mangling applied to this decl, we must mark it
         used now.  Otherwise, pass_ipa_free_lang_data won't think it
         needs to happen.  */
      TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;

      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  return gimple_try_eval (tp);
}
/* Implement a cleanup expression.  This is similar to try-finally,
   except that we only execute the cleanup block for exception edges.  */

static gimple_seq
lower_cleanup (struct leh_state *state, gtry *tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  struct leh_tf_state fake_tf;
  gimple_seq result;
  bool cleanup_dead = cleanup_is_dead_in (state->cur_region);

  if (flag_exceptions && !cleanup_dead)
    {
      this_region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (cleanup_dead || !eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  /* Build enough of a try-finally state so that we can reuse
     honor_protect_cleanup_actions.  */
  memset (&fake_tf, 0, sizeof (fake_tf));
  fake_tf.top_p = fake_tf.try_finally_expr = tp;
  fake_tf.outer = state;
  fake_tf.region = this_region;
  fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
  fake_tf.may_throw = true;

  honor_protect_cleanup_actions (state, NULL, &fake_tf);

  if (fake_tf.may_throw)
    {
      /* In this case honor_protect_cleanup_actions had nothing to do,
         and we should process this normally.  */
      lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
      result = frob_into_branch_around (tp, this_region,
                                        fake_tf.fallthru_label);
    }
  else
    {
      /* In this case honor_protect_cleanup_actions did nearly all of
         the work.  All we have left is to append the fallthru_label.  */

      result = gimple_try_eval (tp);
      if (fake_tf.fallthru_label)
        {
          gimple *x = gimple_build_label (fake_tf.fallthru_label);
          gimple_seq_add_stmt (&result, x);
        }
    }
  return result;
}
/* Main loop for lowering eh constructs.  Also moves gsi to the next
   statement.  */

static void
lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
{
  gimple_seq replace;
  gimple *x;
  gimple *stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
        tree fndecl = gimple_call_fndecl (stmt);
        tree rhs, lhs;

        if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
          switch (DECL_FUNCTION_CODE (fndecl))
            {
            case BUILT_IN_EH_POINTER:
              /* The front end may have generated a call to
                 __builtin_eh_pointer (0) within a catch region.  Replace
                 this zero argument with the current catch region number.  */
              if (state->ehp_region)
                {
                  tree nr = build_int_cst (integer_type_node,
                                           state->ehp_region->index);
                  gimple_call_set_arg (stmt, 0, nr);
                }
              else
                {
                  /* The user has done something silly.  Remove it.  */
                  rhs = null_pointer_node;
                  goto do_replace;
                }
              break;

            case BUILT_IN_EH_FILTER:
              /* ??? This should never appear, but since it's a builtin it
                 is accessible to abuse by users.  Just remove it and
                 replace the use with the arbitrary value zero.  */
              rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
            do_replace:
              lhs = gimple_call_lhs (stmt);
              x = gimple_build_assign (lhs, rhs);
              gsi_insert_before (gsi, x, GSI_SAME_STMT);
              /* FALLTHRU */

            case BUILT_IN_EH_COPY_VALUES:
              /* Likewise this should not appear.  Remove it.  */
              gsi_remove (gsi, true);
              return;

            default:
              break;
            }
      }
      /* FALLTHRU */

    case GIMPLE_ASSIGN:
      /* If the stmt can throw use a new temporary for the assignment
         to a LHS.  This makes sure the old value of the LHS is
         available on the EH edge.  Only do so for statements that
         potentially fall through (no noreturn calls e.g.), otherwise
         this new assignment might create fake fallthru regions.  */
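      /* E.g. (an illustrative sketch): "x = foo ();" where foo can
         throw becomes "tmp = foo (); x = tmp;" so the EH edge still
         sees the old value of x.  */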
      if (stmt_could_throw_p (stmt)
          && gimple_has_lhs (stmt)
          && gimple_stmt_may_fallthru (stmt)
          && !tree_could_throw_p (gimple_get_lhs (stmt))
          && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
        {
          tree lhs = gimple_get_lhs (stmt);
          tree tmp = create_tmp_var (TREE_TYPE (lhs));
          gimple *s = gimple_build_assign (lhs, tmp);
          gimple_set_location (s, gimple_location (stmt));
          gimple_set_block (s, gimple_block (stmt));
          gimple_set_lhs (stmt, tmp);
          if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
              || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
            DECL_GIMPLE_REG_P (tmp) = 1;
          gsi_insert_after (gsi, s, GSI_SAME_STMT);
        }
      /* Look for things that can throw exceptions, and record them.  */
      if (state->cur_region && stmt_could_throw_p (stmt))
        {
          record_stmt_eh_region (state->cur_region, stmt);
          note_eh_region_may_contain_throw (state->cur_region);
        }
      break;
    case GIMPLE_COND:
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      maybe_record_in_goto_queue (state, stmt);
      break;

    case GIMPLE_SWITCH:
      verify_norecord_switch_expr (state, as_a <gswitch *> (stmt));
      break;
    case GIMPLE_TRY:
      {
        gtry *try_stmt = as_a <gtry *> (stmt);
        if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
          replace = lower_try_finally (state, try_stmt);
        else
          {
            x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
            if (!x)
              {
                replace = gimple_try_eval (try_stmt);
                lower_eh_constructs_1 (state, &replace);
              }
            else
              switch (gimple_code (x))
                {
                case GIMPLE_CATCH:
                  replace = lower_catch (state, try_stmt);
                  break;
                case GIMPLE_EH_FILTER:
                  replace = lower_eh_filter (state, try_stmt);
                  break;
                case GIMPLE_EH_MUST_NOT_THROW:
                  replace = lower_eh_must_not_throw (state, try_stmt);
                  break;
                case GIMPLE_EH_ELSE:
                  /* This code is only valid with GIMPLE_TRY_FINALLY.  */
                  gcc_unreachable ();
                default:
                  replace = lower_cleanup (state, try_stmt);
                  break;
                }
          }
      }

      /* Remove the old stmt and insert the transformed sequence
         instead.  */
      gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
      gsi_remove (gsi, true);

      /* Return since we don't want gsi_next ().  */
      return;

    case GIMPLE_EH_ELSE:
      /* We should be eliminating this in lower_try_finally et al.  */
      gcc_unreachable ();

    default:
      /* A type, a decl, or some kind of statement that we're not
         interested in.  Don't walk them.  */
      break;
    }

  gsi_next (gsi);
}
/* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2.  */

static void
lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
{
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
    lower_eh_constructs_2 (state, &gsi);
}
namespace {

const pass_data pass_data_lower_eh =
{
  GIMPLE_PASS, /* type */
  "eh", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  PROP_gimple_leh, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_eh : public gimple_opt_pass
{
public:
  pass_lower_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_eh, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_lower_eh
unsigned int
pass_lower_eh::execute (function *fun)
{
  struct leh_state null_state;
  gimple_seq bodyp;

  bodyp = gimple_body (current_function_decl);
  if (bodyp == NULL)
    return 0;

  finally_tree = new hash_table<finally_tree_hasher> (31);
  eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
  memset (&null_state, 0, sizeof (null_state));

  collect_finally_tree_1 (bodyp, NULL);
  lower_eh_constructs_1 (&null_state, &bodyp);
  gimple_set_body (current_function_decl, bodyp);

  /* We assume there's a return statement, or something, at the end of
     the function, and thus plopping the EH sequence afterward won't
     change anything.  */
  gcc_assert (!gimple_seq_may_fallthru (bodyp));
  gimple_seq_add_seq (&bodyp, eh_seq);

  /* We assume that since BODYP already existed, adding EH_SEQ to it
     didn't change its value, and we don't have to re-set the function.  */
  gcc_assert (bodyp == gimple_body (current_function_decl));

  delete finally_tree;
  finally_tree = NULL;
  BITMAP_FREE (eh_region_may_contain_throw_map);
  eh_seq = NULL;

  /* If this function needs a language specific EH personality routine
     and the frontend didn't already set one do so now.  */
  if (function_needs_eh_personality (fun) == eh_personality_lang
      && !DECL_FUNCTION_PERSONALITY (current_function_decl))
    DECL_FUNCTION_PERSONALITY (current_function_decl)
      = lang_hooks.eh_personality ();

  return 0;
}

} // anon namespace
2218 make_pass_lower_eh (gcc::context *ctxt)
2220 return new pass_lower_eh (ctxt);
2223 /* Create the multiple edges from an EH_DISPATCH statement to all of
2224 the possible handlers for its EH region. Return true if there's
2225 no fallthru edge; false if there is. */
2228 make_eh_dispatch_edges (geh_dispatch *stmt)
2232 basic_block src, dst;
2234 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2235 src = gimple_bb (stmt);
2240 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2242 dst = label_to_block (c->label);
2243 make_edge (src, dst, 0);
2245 /* A catch-all handler doesn't have a fallthru. */
2246 if (c->type_list == NULL)
2251 case ERT_ALLOWED_EXCEPTIONS:
2252 dst = label_to_block (r->u.allowed.label);
2253 make_edge (src, dst, 0);
2263 /* Create the single EH edge from STMT to its nearest landing pad,
2264 if there is such a landing pad within the current function. */
2267 make_eh_edges (gimple *stmt)
2269 basic_block src, dst;
2273 lp_nr = lookup_stmt_eh_lp (stmt);
2277 lp = get_eh_landing_pad_from_number (lp_nr);
2278 gcc_assert (lp != NULL);
2280 src = gimple_bb (stmt);
2281 dst = label_to_block (lp->post_landing_pad);
2282 make_edge (src, dst, EDGE_EH);
2285 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2286 do not actually perform the final edge redirection.
2288 CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2289 we intend to change the destination EH region as well; this means
2290 EH_LANDING_PAD_NR must already be set on the destination block label.
2291 If false, we're being called from generic cfg manipulation code and we
2292 should preserve our place within the region tree. */
2295 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2297 eh_landing_pad old_lp, new_lp;
2300 int old_lp_nr, new_lp_nr;
2301 tree old_label, new_label;
2305 old_bb = edge_in->dest;
2306 old_label = gimple_block_label (old_bb);
2307 old_lp_nr = EH_LANDING_PAD_NR (old_label);
2308 gcc_assert (old_lp_nr > 0);
2309 old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2311 throw_stmt = last_stmt (edge_in->src);
2312 gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2314 new_label = gimple_block_label (new_bb);
2316 /* Look for an existing region that might be using NEW_BB already. */
2317 new_lp_nr = EH_LANDING_PAD_NR (new_label);
2320 new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2321 gcc_assert (new_lp);
2323 /* Unless CHANGE_REGION is true, the new and old landing pad
2324 had better be associated with the same EH region. */
2325 gcc_assert (change_region || new_lp->region == old_lp->region);
2330 gcc_assert (!change_region);
2333 /* Notice when we redirect the last EH edge away from OLD_BB. */
2334 FOR_EACH_EDGE (e, ei, old_bb->preds)
2335 if (e != edge_in && (e->flags & EDGE_EH))
2340 /* NEW_LP already exists. If there are still edges into OLD_LP,
2341 there's nothing to do with the EH tree. If there are no more
2342 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2343 If CHANGE_REGION is true, then our caller is expecting to remove
2344 the landing pad. */
2345 if (e == NULL && !change_region)
2346 remove_eh_landing_pad (old_lp);
2350 /* No correct landing pad exists. If there are no more edges
2351 into OLD_LP, then we can simply re-use the existing landing pad.
2352 Otherwise, we have to create a new landing pad. */
2355 EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2359 new_lp = gen_eh_landing_pad (old_lp->region);
2360 new_lp->post_landing_pad = new_label;
2361 EH_LANDING_PAD_NR (new_label) = new_lp->index;
2364 /* Maybe move the throwing statement to the new region. */
2365 if (old_lp != new_lp)
2367 remove_stmt_from_eh_lp (throw_stmt);
2368 add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2372 /* Redirect EH edge E to NEW_BB. */
2375 redirect_eh_edge (edge edge_in, basic_block new_bb)
2377 redirect_eh_edge_1 (edge_in, new_bb, false);
2378 return ssa_redirect_edge (edge_in, new_bb);
2381 /* This is a subroutine of gimple_redirect_edge_and_branch. Update the
2382 labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2383 The actual edge update will happen in the caller. */
2386 redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
2388 tree new_lab = gimple_block_label (new_bb);
2389 bool any_changed = false;
2394 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2398 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2400 old_bb = label_to_block (c->label);
2401 if (old_bb == e->dest)
2409 case ERT_ALLOWED_EXCEPTIONS:
2410 old_bb = label_to_block (r->u.allowed.label);
2411 gcc_assert (old_bb == e->dest);
2412 r->u.allowed.label = new_lab;
2420 gcc_assert (any_changed);
2423 /* Helper function for operation_could_trap_p and stmt_could_throw_p. */
2426 operation_could_trap_helper_p (enum tree_code op,
2437 case TRUNC_DIV_EXPR:
2439 case FLOOR_DIV_EXPR:
2440 case ROUND_DIV_EXPR:
2441 case EXACT_DIV_EXPR:
2443 case FLOOR_MOD_EXPR:
2444 case ROUND_MOD_EXPR:
2445 case TRUNC_MOD_EXPR:
2447 if (honor_snans || honor_trapv)
2450 return flag_trapping_math;
2451 if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2460 /* Some floating point comparisons may trap. */
2465 case UNORDERED_EXPR:
2477 /* These operations don't trap with floating point. */
2485 /* Any floating arithmetic may trap. */
2486 if (fp_operation && flag_trapping_math)
2494 /* Constructing an object cannot trap. */
2498 /* Any floating arithmetic may trap. */
2499 if (fp_operation && flag_trapping_math)
2507 /* Return true if operation OP may trap. FP_OPERATION is true if OP is applied
2508 on floating-point values. HONOR_TRAPV is true if OP is applied on integer
2509 type operands that may trap. If OP is a division operator, DIVISOR contains
2510 the value of the divisor. */
2513 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2516 bool honor_nans = (fp_operation && flag_trapping_math
2517 && !flag_finite_math_only);
2518 bool honor_snans = fp_operation && flag_signaling_nans != 0;
2521 if (TREE_CODE_CLASS (op) != tcc_comparison
2522 && TREE_CODE_CLASS (op) != tcc_unary
2523 && TREE_CODE_CLASS (op) != tcc_binary)
2526 return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2527 honor_nans, honor_snans, divisor,
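/* Illustrative behavior (a sketch; DIVISOR stands for the caller's
   second-operand tree):

     operation_could_trap_p (TRUNC_DIV_EXPR, false, false, divisor)

   is true unless DIVISOR is a nonzero constant, while with
   FP_OPERATION set the division cases reduce to flag_trapping_math
   (or true outright when signaling NaNs or trapping overflow are
   honored).  */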
2532 /* Returns true if it is possible to prove that the index of
2533 an array access REF (an ARRAY_REF expression) falls into the
2534 array bounds. */
2537 in_array_bounds_p (tree ref)
2539 tree idx = TREE_OPERAND (ref, 1);
2542 if (TREE_CODE (idx) != INTEGER_CST)
2545 min = array_ref_low_bound (ref);
2546 max = array_ref_up_bound (ref);
2549 || TREE_CODE (min) != INTEGER_CST
2550 || TREE_CODE (max) != INTEGER_CST)
2553 if (tree_int_cst_lt (idx, min)
2554 || tree_int_cst_lt (max, idx))
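/* As an illustration (a sketch): given "int a[10]", the ARRAY_REF for
   a[3] has an INTEGER_CST index, lower bound 0 and upper bound 9, so
   the function returns true; a[i] with variable I, or a[12], returns
   false, and the access is then treated as trapping by
   tree_could_trap_p unless TREE_THIS_NOTRAP is set.  */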
2560 /* Returns true if it is possible to prove that the range of
2561 an array access REF (an ARRAY_RANGE_REF expression) falls
2562 into the array bounds. */
2565 range_in_array_bounds_p (tree ref)
2567 tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2568 tree range_min, range_max, min, max;
2570 range_min = TYPE_MIN_VALUE (domain_type);
2571 range_max = TYPE_MAX_VALUE (domain_type);
2574 || TREE_CODE (range_min) != INTEGER_CST
2575 || TREE_CODE (range_max) != INTEGER_CST)
2578 min = array_ref_low_bound (ref);
2579 max = array_ref_up_bound (ref);
2582 || TREE_CODE (min) != INTEGER_CST
2583 || TREE_CODE (max) != INTEGER_CST)
2586 if (tree_int_cst_lt (range_min, min)
2587 || tree_int_cst_lt (max, range_max))
2593 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2594 location or floating point arithmetic. C.f. the rtl version, may_trap_p.
2595 This routine expects only GIMPLE lhs or rhs input. */
2598 tree_could_trap_p (tree expr)
2600 enum tree_code code;
2601 bool fp_operation = false;
2602 bool honor_trapv = false;
2603 tree t, base, div = NULL_TREE;
2608 code = TREE_CODE (expr);
2609 t = TREE_TYPE (expr);
2613 if (COMPARISON_CLASS_P (expr))
2614 fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2616 fp_operation = FLOAT_TYPE_P (t);
2617 honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2620 if (TREE_CODE_CLASS (code) == tcc_binary)
2621 div = TREE_OPERAND (expr, 1);
2622 if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2632 case VIEW_CONVERT_EXPR:
2633 case WITH_SIZE_EXPR:
2634 expr = TREE_OPERAND (expr, 0);
2635 code = TREE_CODE (expr);
2638 case ARRAY_RANGE_REF:
2639 base = TREE_OPERAND (expr, 0);
2640 if (tree_could_trap_p (base))
2642 if (TREE_THIS_NOTRAP (expr))
2644 return !range_in_array_bounds_p (expr);
2647 base = TREE_OPERAND (expr, 0);
2648 if (tree_could_trap_p (base))
2650 if (TREE_THIS_NOTRAP (expr))
2652 return !in_array_bounds_p (expr);
2654 case TARGET_MEM_REF:
2656 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2657 && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2659 if (TREE_THIS_NOTRAP (expr))
2661 /* We cannot prove that the access is in-bounds when we have
2662 variable-index TARGET_MEM_REFs. */
2663 if (code == TARGET_MEM_REF
2664 && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2666 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2668 tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2669 offset_int off = mem_ref_offset (expr);
2670 if (wi::neg_p (off, SIGNED))
2672 if (TREE_CODE (base) == STRING_CST)
2673 return wi::leu_p (TREE_STRING_LENGTH (base), off);
2674 else if (DECL_SIZE_UNIT (base) == NULL_TREE
2675 || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
2676 || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
2678 /* Now we are sure the first byte of the access is inside
2679 the object. */
2685 return !TREE_THIS_NOTRAP (expr);
2688 return TREE_THIS_VOLATILE (expr);
2691 t = get_callee_fndecl (expr);
2692 /* Assume that calls to weak functions may trap. */
2693 if (!t || !DECL_P (t))
2696 return tree_could_trap_p (t);
2700 /* Assume that accesses to weak functions may trap, unless we know
2701 they are certainly defined in current TU or in some other
2702 LTO partition. */
2703 if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2705 cgraph_node *node = cgraph_node::get (expr);
2707 node = node->function_symbol ();
2708 return !(node && node->in_other_partition);
2713 /* Assume that accesses to weak vars may trap, unless we know
2714 they are certainly defined in current TU or in some other
2715 LTO partition. */
2716 if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2718 varpool_node *node = varpool_node::get (expr);
2720 node = node->ultimate_alias_target ();
2721 return !(node && node->in_other_partition);
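/* A source-level example of the weak-symbol case above (a sketch):

       extern int counter __attribute__ ((weak));
       int get (void) { return counter; }

   If COUNTER is never defined anywhere, its address is null and the
   load traps; hence the access counts as trapping unless the symbol
   is known to be defined in this TU or another LTO partition.  */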
2731 /* Helper for stmt_could_throw_p. Return true if STMT (assumed to be
2732 an assignment or a conditional) may throw. */
2735 stmt_could_throw_1_p (gimple *stmt)
2737 enum tree_code code = gimple_expr_code (stmt);
2738 bool honor_nans = false;
2739 bool honor_snans = false;
2740 bool fp_operation = false;
2741 bool honor_trapv = false;
2746 if (TREE_CODE_CLASS (code) == tcc_comparison
2747 || TREE_CODE_CLASS (code) == tcc_unary
2748 || TREE_CODE_CLASS (code) == tcc_binary)
2750 if (is_gimple_assign (stmt)
2751 && TREE_CODE_CLASS (code) == tcc_comparison)
2752 t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2753 else if (gimple_code (stmt) == GIMPLE_COND)
2754 t = TREE_TYPE (gimple_cond_lhs (stmt));
2756 t = gimple_expr_type (stmt);
2757 fp_operation = FLOAT_TYPE_P (t);
2760 honor_nans = flag_trapping_math && !flag_finite_math_only;
2761 honor_snans = flag_signaling_nans != 0;
2763 else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2767 /* Check if the main expression may trap. */
2768 t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
2769 ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2770 honor_nans, honor_snans, t,
2775 /* If the expression does not trap, see if any of the individual operands may
2776 trap. */
2777 for (i = 0; i < gimple_num_ops (stmt); i++)
2778 if (tree_could_trap_p (gimple_op (stmt, i)))
2785 /* Return true if statement STMT could throw an exception. */
2788 stmt_could_throw_p (gimple *stmt)
2790 if (!flag_exceptions)
2793 /* The only statements that can throw an exception are assignments,
2794 conditionals, calls, resx, and asms. */
2795 switch (gimple_code (stmt))
2801 return !gimple_call_nothrow_p (as_a <gcall *> (stmt));
2805 if (!cfun->can_throw_non_call_exceptions)
2807 return stmt_could_throw_1_p (stmt);
2810 if (!cfun->can_throw_non_call_exceptions)
2812 return gimple_asm_volatile_p (as_a <gasm *> (stmt));
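/* Summarizing with an example (a sketch): a call throws unless it is
   marked nothrow; with -fnon-call-exceptions an assignment such as

       *p = x / y;

   may throw because either the store or the division can trap, and a
   volatile asm is conservatively assumed to throw as well.  */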
2820 /* Return true if expression T could throw an exception. */
2823 tree_could_throw_p (tree t)
2825 if (!flag_exceptions)
2827 if (TREE_CODE (t) == MODIFY_EXPR)
2829 if (cfun->can_throw_non_call_exceptions
2830 && tree_could_trap_p (TREE_OPERAND (t, 0)))
2832 t = TREE_OPERAND (t, 1);
2835 if (TREE_CODE (t) == WITH_SIZE_EXPR)
2836 t = TREE_OPERAND (t, 0);
2837 if (TREE_CODE (t) == CALL_EXPR)
2838 return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2839 if (cfun->can_throw_non_call_exceptions)
2840 return tree_could_trap_p (t);
2844 /* Return true if STMT can throw an exception that is not caught within
2845 the current function (CFUN). */
2848 stmt_can_throw_external (gimple *stmt)
2852 if (!stmt_could_throw_p (stmt))
2855 lp_nr = lookup_stmt_eh_lp (stmt);
2859 /* Return true if STMT can throw an exception that is caught within
2860 the current function (CFUN). */
2863 stmt_can_throw_internal (gimple *stmt)
2867 if (!stmt_could_throw_p (stmt))
2870 lp_nr = lookup_stmt_eh_lp (stmt);
2874 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
2875 remove any entry it might have from the EH table. Return true if
2876 any change was made. */
2879 maybe_clean_eh_stmt_fn (struct function *ifun, gimple *stmt)
2881 if (stmt_could_throw_p (stmt))
2883 return remove_stmt_from_eh_lp_fn (ifun, stmt);
2886 /* Likewise, but always use the current function. */
2889 maybe_clean_eh_stmt (gimple *stmt)
2891 return maybe_clean_eh_stmt_fn (cfun, stmt);
2894 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2895 OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2896 in the table if it should be in there. Return TRUE if a replacement was
2897 done that may require an EH edge purge. */
2900 maybe_clean_or_replace_eh_stmt (gimple *old_stmt, gimple *new_stmt)
2902 int lp_nr = lookup_stmt_eh_lp (old_stmt);
2906 bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
2908 if (new_stmt == old_stmt && new_stmt_could_throw)
2911 remove_stmt_from_eh_lp (old_stmt);
2912 if (new_stmt_could_throw)
2914 add_stmt_to_eh_lp (new_stmt, lp_nr);
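/* A typical caller pattern (a sketch; gsi_replace and
   gimple_purge_dead_eh_edges are defined elsewhere in the middle end):

       gsi_replace (&gsi, new_stmt, false);
       if (maybe_clean_or_replace_eh_stmt (old_stmt, new_stmt))
	 gimple_purge_dead_eh_edges (gimple_bb (new_stmt));

   i.e. when the replacement can no longer throw, the caller must
   purge the EH edges that have become dead.  */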
2924 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
2925 in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT. The MAP
2926 operand is the return value of duplicate_eh_regions. */
2929 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple *new_stmt,
2930 struct function *old_fun, gimple *old_stmt,
2931 hash_map<void *, void *> *map,
2934 int old_lp_nr, new_lp_nr;
2936 if (!stmt_could_throw_p (new_stmt))
2939 old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
2942 if (default_lp_nr == 0)
2944 new_lp_nr = default_lp_nr;
2946 else if (old_lp_nr > 0)
2948 eh_landing_pad old_lp, new_lp;
2950 old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
2951 new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
2952 new_lp_nr = new_lp->index;
2956 eh_region old_r, new_r;
2958 old_r = (*old_fun->eh->region_array)[-old_lp_nr];
2959 new_r = static_cast<eh_region> (*map->get (old_r));
2960 new_lp_nr = -new_r->index;
2963 add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
2967 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
2968 and thus no remapping is required. */
2971 maybe_duplicate_eh_stmt (gimple *new_stmt, gimple *old_stmt)
2975 if (!stmt_could_throw_p (new_stmt))
2978 lp_nr = lookup_stmt_eh_lp (old_stmt);
2982 add_stmt_to_eh_lp (new_stmt, lp_nr);
2986 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
2987 GIMPLE_TRY) that are similar enough to be considered the same. Currently
2988 this only handles handlers consisting of a single call, as that's the
2989 important case for C++: a destructor call for a particular object showing
2990 up in multiple handlers. */
2993 same_handler_p (gimple_seq oneh, gimple_seq twoh)
2995 gimple_stmt_iterator gsi;
2996 gimple *ones, *twos;
2999 gsi = gsi_start (oneh);
3000 if (!gsi_one_before_end_p (gsi))
3002 ones = gsi_stmt (gsi);
3004 gsi = gsi_start (twoh);
3005 if (!gsi_one_before_end_p (gsi))
3007 twos = gsi_stmt (gsi);
3009 if (!is_gimple_call (ones)
3010 || !is_gimple_call (twos)
3011 || gimple_call_lhs (ones)
3012 || gimple_call_lhs (twos)
3013 || gimple_call_chain (ones)
3014 || gimple_call_chain (twos)
3015 || !gimple_call_same_target_p (ones, twos)
3016 || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3019 for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3020 if (!operand_equal_p (gimple_call_arg (ones, ai),
3021 gimple_call_arg (twos, ai), 0))
3027 /* Optimize
3028 try { A() } finally { try { ~B() } catch { ~A() } }
3029 try { ... } finally { ~A() }
3030 into
3031 try { A() } catch { ~B() }
3032 try { ~B() ... } finally { ~A() }
3034 This occurs frequently in C++, where A is a local variable and B is a
3035 temporary used in the initializer for A. */
3038 optimize_double_finally (gtry *one, gtry *two)
3041 gimple_stmt_iterator gsi;
3044 cleanup = gimple_try_cleanup (one);
3045 gsi = gsi_start (cleanup);
3046 if (!gsi_one_before_end_p (gsi))
3049 oneh = gsi_stmt (gsi);
3050 if (gimple_code (oneh) != GIMPLE_TRY
3051 || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3054 if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3056 gimple_seq seq = gimple_try_eval (oneh);
3058 gimple_try_set_cleanup (one, seq);
3059 gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3060 seq = copy_gimple_seq_and_replace_locals (seq);
3061 gimple_seq_add_seq (&seq, gimple_try_eval (two));
3062 gimple_try_set_eval (two, seq);
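/* A C++ shape that tends to produce the pattern above (a sketch with
   hypothetical types):

       struct B { B (); ~B (); };
       struct A { A (const B &); ~A (); };
       void f () { A a ((B ())); }

   ~B must run whether or not the remainder of F throws, and ~A must
   run once A is constructed, yielding the two adjacent cleanups that
   get merged here.  */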
3066 /* Perform EH refactoring optimizations that are simpler to do when code
3067 flow has been lowered but EH structures haven't. */
3070 refactor_eh_r (gimple_seq seq)
3072 gimple_stmt_iterator gsi;
3077 gsi = gsi_start (seq);
3081 if (gsi_end_p (gsi))
3084 two = gsi_stmt (gsi);
3086 if (gtry *try_one = dyn_cast <gtry *> (one))
3087 if (gtry *try_two = dyn_cast <gtry *> (two))
3088 if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
3089 && gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
3090 optimize_double_finally (try_one, try_two);
3092 switch (gimple_code (one))
3095 refactor_eh_r (gimple_try_eval (one));
3096 refactor_eh_r (gimple_try_cleanup (one));
3099 refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
3101 case GIMPLE_EH_FILTER:
3102 refactor_eh_r (gimple_eh_filter_failure (one));
3104 case GIMPLE_EH_ELSE:
3106 geh_else *eh_else_stmt = as_a <geh_else *> (one);
3107 refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
3108 refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
3123 const pass_data pass_data_refactor_eh =
3125 GIMPLE_PASS, /* type */
3127 OPTGROUP_NONE, /* optinfo_flags */
3128 TV_TREE_EH, /* tv_id */
3129 PROP_gimple_lcf, /* properties_required */
3130 0, /* properties_provided */
3131 0, /* properties_destroyed */
3132 0, /* todo_flags_start */
3133 0, /* todo_flags_finish */
3136 class pass_refactor_eh : public gimple_opt_pass
3139 pass_refactor_eh (gcc::context *ctxt)
3140 : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3143 /* opt_pass methods: */
3144 virtual bool gate (function *) { return flag_exceptions != 0; }
3145 virtual unsigned int execute (function *)
3147 refactor_eh_r (gimple_body (current_function_decl));
3151 }; // class pass_refactor_eh
3156 make_pass_refactor_eh (gcc::context *ctxt)
3158 return new pass_refactor_eh (ctxt);
3161 /* At the end of gimple optimization, we can lower RESX. */
3164 lower_resx (basic_block bb, gresx *stmt,
3165 hash_map<eh_region, tree> *mnt_map)
3168 eh_region src_r, dst_r;
3169 gimple_stmt_iterator gsi;
3174 lp_nr = lookup_stmt_eh_lp (stmt);
3176 dst_r = get_eh_region_from_lp_number (lp_nr);
3180 src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3181 gsi = gsi_last_bb (bb);
3185 /* We can wind up with no source region when pass_cleanup_eh shows
3186 that there are no entries into an eh region and deletes it, but
3187 then the block that contains the resx isn't removed. This can
3188 happen without optimization when the switch statement created by
3189 lower_try_finally_switch isn't simplified to remove the eh case.
3191 Resolve this by expanding the resx node to an abort. */
3193 fn = builtin_decl_implicit (BUILT_IN_TRAP);
3194 x = gimple_build_call (fn, 0);
3195 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3197 while (EDGE_COUNT (bb->succs) > 0)
3198 remove_edge (EDGE_SUCC (bb, 0));
3202 /* When we have a destination region, we resolve this by copying
3203 the exc_ptr and filter values into place, and changing the edge
3204 to immediately after the landing pad. */
3212 /* We are resuming into a MUST_NOT_THROW region. Expand a call to
3213 the failure decl into a new block, if needed. */
3214 gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3216 tree *slot = mnt_map->get (dst_r);
3219 gimple_stmt_iterator gsi2;
3221 new_bb = create_empty_bb (bb);
3222 add_bb_to_loop (new_bb, bb->loop_father);
3223 lab = gimple_block_label (new_bb);
3224 gsi2 = gsi_start_bb (new_bb);
3226 fn = dst_r->u.must_not_throw.failure_decl;
3227 x = gimple_build_call (fn, 0);
3228 gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3229 gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3231 mnt_map->put (dst_r, lab);
3236 new_bb = label_to_block (lab);
3239 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3240 e = make_edge (bb, new_bb, EDGE_FALLTHRU);
3241 e->count = bb->count;
3242 e->probability = REG_BR_PROB_BASE;
3247 tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3249 fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3250 src_nr = build_int_cst (integer_type_node, src_r->index);
3251 x = gimple_build_call (fn, 2, dst_nr, src_nr);
3252 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3254 /* Update the flags for the outgoing edge. */
3255 e = single_succ_edge (bb);
3256 gcc_assert (e->flags & EDGE_EH);
3257 e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3259 /* If there are no more EH users of the landing pad, delete it. */
3260 FOR_EACH_EDGE (e, ei, e->dest->preds)
3261 if (e->flags & EDGE_EH)
3265 eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3266 remove_eh_landing_pad (lp);
3276 /* When we don't have a destination region, this exception escapes
3277 up the call chain. We resolve this by generating a call to the
3278 _Unwind_Resume library function. */
3280 /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3281 with no arguments for C++ and Java. Check for that. */
3282 if (src_r->use_cxa_end_cleanup)
3284 fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3285 x = gimple_build_call (fn, 0);
3286 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3290 fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3291 src_nr = build_int_cst (integer_type_node, src_r->index);
3292 x = gimple_build_call (fn, 1, src_nr);
3293 var = create_tmp_var (ptr_type_node);
3294 var = make_ssa_name (var, x);
3295 gimple_call_set_lhs (x, var);
3296 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3298 fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3299 x = gimple_build_call (fn, 1, var);
3300 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3303 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3306 gsi_remove (&gsi, true);
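/* Net effect of the lowering above, in GIMPLE pseudo-code (a sketch):

     resx 2;				<-- no destination region
       ==>
     ptr = __builtin_eh_pointer (2);
     _Unwind_Resume (ptr);		<-- or __cxa_end_cleanup () on ARM EABI

     resx 2;				<-- destination region 1 exists
       ==>
     __builtin_eh_copy_values (1, 2);
     goto <post-landing-pad of region 1>;  */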
3313 const pass_data pass_data_lower_resx =
3315 GIMPLE_PASS, /* type */
3317 OPTGROUP_NONE, /* optinfo_flags */
3318 TV_TREE_EH, /* tv_id */
3319 PROP_gimple_lcf, /* properties_required */
3320 0, /* properties_provided */
3321 0, /* properties_destroyed */
3322 0, /* todo_flags_start */
3323 0, /* todo_flags_finish */
3326 class pass_lower_resx : public gimple_opt_pass
3329 pass_lower_resx (gcc::context *ctxt)
3330 : gimple_opt_pass (pass_data_lower_resx, ctxt)
3333 /* opt_pass methods: */
3334 virtual bool gate (function *) { return flag_exceptions != 0; }
3335 virtual unsigned int execute (function *);
3337 }; // class pass_lower_resx
3340 pass_lower_resx::execute (function *fun)
3343 bool dominance_invalidated = false;
3344 bool any_rewritten = false;
3346 hash_map<eh_region, tree> mnt_map;
3348 FOR_EACH_BB_FN (bb, fun)
3350 gimple *last = last_stmt (bb);
3351 if (last && is_gimple_resx (last))
3353 dominance_invalidated |=
3354 lower_resx (bb, as_a <gresx *> (last), &mnt_map);
3355 any_rewritten = true;
3359 if (dominance_invalidated)
3361 free_dominance_info (CDI_DOMINATORS);
3362 free_dominance_info (CDI_POST_DOMINATORS);
3365 return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3371 make_pass_lower_resx (gcc::context *ctxt)
3373 return new pass_lower_resx (ctxt);
3376 /* Try to optimize var = {v} {CLOBBER} stmts followed just by
3377 external throw. */
3380 optimize_clobbers (basic_block bb)
3382 gimple_stmt_iterator gsi = gsi_last_bb (bb);
3383 bool any_clobbers = false;
3384 bool seen_stack_restore = false;
3388 /* Only optimize anything if the bb contains at least one clobber,
3389 ends with resx (checked by caller), optionally contains some
3390 debug stmts or labels, at most one __builtin_stack_restore
3391 call, and has an incoming EH edge. */
3392 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3394 gimple *stmt = gsi_stmt (gsi);
3395 if (is_gimple_debug (stmt))
3397 if (gimple_clobber_p (stmt))
3399 any_clobbers = true;
3402 if (!seen_stack_restore
3403 && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3405 seen_stack_restore = true;
3408 if (gimple_code (stmt) == GIMPLE_LABEL)
3414 FOR_EACH_EDGE (e, ei, bb->preds)
3415 if (e->flags & EDGE_EH)
3419 gsi = gsi_last_bb (bb);
3420 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3422 gimple *stmt = gsi_stmt (gsi);
3423 if (!gimple_clobber_p (stmt))
3425 unlink_stmt_vdef (stmt);
3426 gsi_remove (&gsi, true);
3427 release_defs (stmt);
3431 /* Try to sink var = {v} {CLOBBER} stmts followed just by
3432 internal throw to successor BB. */
3435 sink_clobbers (basic_block bb)
3439 gimple_stmt_iterator gsi, dgsi;
3441 bool any_clobbers = false;
3444 /* Only optimize if BB has a single EH successor and
3445 all predecessor edges are EH too. */
3446 if (!single_succ_p (bb)
3447 || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3450 FOR_EACH_EDGE (e, ei, bb->preds)
3452 if ((e->flags & EDGE_EH) == 0)
3456 /* And BB contains only CLOBBER stmts before the final
3457 RESX. */
3458 gsi = gsi_last_bb (bb);
3459 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3461 gimple *stmt = gsi_stmt (gsi);
3462 if (is_gimple_debug (stmt))
3464 if (gimple_code (stmt) == GIMPLE_LABEL)
3466 if (!gimple_clobber_p (stmt))
3468 any_clobbers = true;
3473 edge succe = single_succ_edge (bb);
3474 succbb = succe->dest;
3476 /* See if there is a virtual PHI node to take an updated virtual
3477 operand from. */
3479 tree vuse = NULL_TREE;
3480 for (gphi_iterator gpi = gsi_start_phis (succbb);
3481 !gsi_end_p (gpi); gsi_next (&gpi))
3483 tree res = gimple_phi_result (gpi.phi ());
3484 if (virtual_operand_p (res))
3492 dgsi = gsi_after_labels (succbb);
3493 gsi = gsi_last_bb (bb);
3494 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3496 gimple *stmt = gsi_stmt (gsi);
3498 if (is_gimple_debug (stmt))
3500 if (gimple_code (stmt) == GIMPLE_LABEL)
3502 lhs = gimple_assign_lhs (stmt);
3503 /* Unfortunately we don't have dominance info updated at this
3504 point, so checking if
3505 dominated_by_p (CDI_DOMINATORS, succbb,
3506 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0))))
3507 would be too costly. Thus, avoid sinking any clobbers that
3508 refer to non-(D) SSA_NAMEs. */
3509 if (TREE_CODE (lhs) == MEM_REF
3510 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3511 && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3513 unlink_stmt_vdef (stmt);
3514 gsi_remove (&gsi, true);
3515 release_defs (stmt);
3519 /* As we do not change stmt order when sinking across a
3520 forwarder edge we can keep virtual operands in place. */
3521 gsi_remove (&gsi, false);
3522 gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3524 /* But adjust virtual operands if we sunk across a PHI node. */
3528 imm_use_iterator iter;
3529 use_operand_p use_p;
3530 FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
3531 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3532 SET_USE (use_p, gimple_vdef (stmt));
3533 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
3535 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
3536 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
3538 /* Adjust the incoming virtual operand. */
3539 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
3540 SET_USE (gimple_vuse_op (stmt), vuse);
3542 /* If the successor has multiple predecessors but no virtual PHI node,
3543 arrange for virtual operands to be renamed. */
3544 else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
3545 && !single_pred_p (succbb))
3547 /* In this case there will be no use of the VDEF of this stmt.
3548 ??? Unless this is a secondary opportunity and we have not
3549 removed unreachable blocks yet, so we cannot assert this.
3550 Which also means we will end up renaming too many times. */
3551 SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
3552 mark_virtual_operands_for_renaming (cfun);
3553 todo |= TODO_update_ssa_only_virtuals;
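/* Shape of the transformation (a sketch): a block

     <L1>: x = {v} {CLOBBER};  resx 1;

   whose lone successor is reached over an EH edge has the clobber
   moved just after the labels of that successor, with the virtual
   operands kept in place, spliced through the virtual PHI, or marked
   for renaming as handled above.  */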
3560 /* At the end of inlining, we can lower EH_DISPATCH. Return true when
3561 we have found some duplicate labels and removed some edges. */
3564 lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
3566 gimple_stmt_iterator gsi;
3571 bool redirected = false;
3573 region_nr = gimple_eh_dispatch_region (stmt);
3574 r = get_eh_region_from_number (region_nr);
3576 gsi = gsi_last_bb (src);
3582 auto_vec<tree> labels;
3583 tree default_label = NULL;
3587 hash_set<tree> seen_values;
3589 /* Collect the labels for a switch. Zero the post_landing_pad
3590 field because we'll no longer have anything keeping these labels
3591 in existence and the optimizer will be free to merge these
3592 blocks at will. */
3593 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3595 tree tp_node, flt_node, lab = c->label;
3596 bool have_label = false;
3599 tp_node = c->type_list;
3600 flt_node = c->filter_list;
3602 if (tp_node == NULL)
3604 default_label = lab;
3609 /* Filter out duplicate labels that arise when this handler
3610 is shadowed by an earlier one. When no labels are
3611 attached to the handler anymore, we remove
3612 the corresponding edge and then we delete unreachable
3613 blocks at the end of this pass. */
3614 if (! seen_values.contains (TREE_VALUE (flt_node)))
3616 tree t = build_case_label (TREE_VALUE (flt_node),
3618 labels.safe_push (t);
3619 seen_values.add (TREE_VALUE (flt_node));
3623 tp_node = TREE_CHAIN (tp_node);
3624 flt_node = TREE_CHAIN (flt_node);
3629 remove_edge (find_edge (src, label_to_block (lab)));
3634 /* Clean up the edge flags. */
3635 FOR_EACH_EDGE (e, ei, src->succs)
3637 if (e->flags & EDGE_FALLTHRU)
3639 /* If there was no catch-all, use the fallthru edge. */
3640 if (default_label == NULL)
3641 default_label = gimple_block_label (e->dest);
3642 e->flags &= ~EDGE_FALLTHRU;
3645 gcc_assert (default_label != NULL);
3647 /* Don't generate a switch if there's only a default case.
3648 This is common in the form of try { A; } catch (...) { B; }. */
3649 if (!labels.exists ())
3651 e = single_succ_edge (src);
3652 e->flags |= EDGE_FALLTHRU;
3656 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3657 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3659 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3660 filter = make_ssa_name (filter, x);
3661 gimple_call_set_lhs (x, filter);
3662 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3664 /* Turn the default label into a default case. */
3665 default_label = build_case_label (NULL, NULL, default_label);
3666 sort_case_labels (labels);
3668 x = gimple_build_switch (filter, default_label, labels);
3669 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3674 case ERT_ALLOWED_EXCEPTIONS:
3676 edge b_e = BRANCH_EDGE (src);
3677 edge f_e = FALLTHRU_EDGE (src);
3679 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3680 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3682 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3683 filter = make_ssa_name (filter, x);
3684 gimple_call_set_lhs (x, filter);
3685 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3687 r->u.allowed.label = NULL;
3688 x = gimple_build_cond (EQ_EXPR, filter,
3689 build_int_cst (TREE_TYPE (filter),
3690 r->u.allowed.filter),
3691 NULL_TREE, NULL_TREE);
3692 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3694 b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3695 f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3703 /* Replace the EH_DISPATCH with the SWITCH or COND generated above. */
3704 gsi_remove (&gsi, true);
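/* Net effect (a GIMPLE pseudo-code sketch) for an ERT_TRY region:

     eh_dispatch 1;
       ==>
     filter = __builtin_eh_filter (1);
     switch (filter) { case <flt1>: goto <catch1>; ... default: <fallthru> }

   and for an ERT_ALLOWED_EXCEPTIONS region, a plain comparison:

     if (filter == <allowed filter>) goto <handler>; else <fallthru>;  */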
3710 const pass_data pass_data_lower_eh_dispatch =
3712 GIMPLE_PASS, /* type */
3713 "ehdisp", /* name */
3714 OPTGROUP_NONE, /* optinfo_flags */
3715 TV_TREE_EH, /* tv_id */
3716 PROP_gimple_lcf, /* properties_required */
3717 0, /* properties_provided */
3718 0, /* properties_destroyed */
3719 0, /* todo_flags_start */
3720 0, /* todo_flags_finish */
3723 class pass_lower_eh_dispatch : public gimple_opt_pass
3726 pass_lower_eh_dispatch (gcc::context *ctxt)
3727 : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3730 /* opt_pass methods: */
3731 virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3732 virtual unsigned int execute (function *);
3734 }; // class pass_lower_eh_dispatch
3737 pass_lower_eh_dispatch::execute (function *fun)
3741 bool redirected = false;
3743 assign_filter_values ();
3745 FOR_EACH_BB_FN (bb, fun)
3747 gimple *last = last_stmt (bb);
3750 if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3752 redirected |= lower_eh_dispatch (bb,
3753 as_a <geh_dispatch *> (last));
3754 flags |= TODO_update_ssa_only_virtuals;
3756 else if (gimple_code (last) == GIMPLE_RESX)
3758 if (stmt_can_throw_external (last))
3759 optimize_clobbers (bb);
3761 flags |= sink_clobbers (bb);
3766 delete_unreachable_blocks ();
3773 make_pass_lower_eh_dispatch (gcc::context *ctxt)
3775 return new pass_lower_eh_dispatch (ctxt);
3778 /* Walk statements, see what regions and, optionally, landing pads
3779 are really referenced.
3781 Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
3782 and in LP_REACHABLE an sbitmap with bits set for reachable landing pads.
3784 Passing NULL for LP_REACHABLE is valid, in this case only reachable
3785 regions are marked.
3787 The caller is responsible for freeing the returned sbitmaps. */
3790 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
3792 sbitmap r_reachable, lp_reachable;
3794 bool mark_landing_pads = (lp_reachablep != NULL);
3795 gcc_checking_assert (r_reachablep != NULL);
3797 r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
3798 bitmap_clear (r_reachable);
3799 *r_reachablep = r_reachable;
3801 if (mark_landing_pads)
3803 lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
3804 bitmap_clear (lp_reachable);
3805 *lp_reachablep = lp_reachable;
3808 lp_reachable = NULL;
3810 FOR_EACH_BB_FN (bb, cfun)
3812 gimple_stmt_iterator gsi;
3814 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3816 gimple *stmt = gsi_stmt (gsi);
3818 if (mark_landing_pads)
3820 int lp_nr = lookup_stmt_eh_lp (stmt);
3822 /* Negative LP numbers are MUST_NOT_THROW regions which
3823 are not considered BB enders. */
3825 bitmap_set_bit (r_reachable, -lp_nr);
3827 /* Positive LP numbers are real landing pads, and BB enders. */
3830 gcc_assert (gsi_one_before_end_p (gsi));
3831 eh_region region = get_eh_region_from_lp_number (lp_nr);
3832 bitmap_set_bit (r_reachable, region->index);
3833 bitmap_set_bit (lp_reachable, lp_nr);
3837 /* Avoid removing regions referenced from RESX/EH_DISPATCH. */
3838 switch (gimple_code (stmt))
3841 bitmap_set_bit (r_reachable,
3842 gimple_resx_region (as_a <gresx *> (stmt)));
3844 case GIMPLE_EH_DISPATCH:
3845 bitmap_set_bit (r_reachable,
3846 gimple_eh_dispatch_region (
3847 as_a <geh_dispatch *> (stmt)));
3850 if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
3851 for (int i = 0; i < 2; ++i)
3853 tree rt = gimple_call_arg (stmt, i);
3854 HOST_WIDE_INT ri = tree_to_shwi (rt);
3856 gcc_assert (ri == (int)ri);
3857 bitmap_set_bit (r_reachable, ri);
3867 /* Remove unreachable handlers and unreachable landing pads. */
3870 remove_unreachable_handlers (void)
3872 sbitmap r_reachable, lp_reachable;
3877 mark_reachable_handlers (&r_reachable, &lp_reachable);
3881 fprintf (dump_file, "Before removal of unreachable regions:\n");
3882 dump_eh_tree (dump_file, cfun);
3883 fprintf (dump_file, "Reachable regions: ");
3884 dump_bitmap_file (dump_file, r_reachable);
3885 fprintf (dump_file, "Reachable landing pads: ");
3886 dump_bitmap_file (dump_file, lp_reachable);
3891 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3892 if (region && !bitmap_bit_p (r_reachable, region->index))
3894 "Removing unreachable region %d\n",
3898 remove_unreachable_eh_regions (r_reachable);
3900 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3901 if (lp && !bitmap_bit_p (lp_reachable, lp->index))
3905 "Removing unreachable landing pad %d\n",
3907 remove_eh_landing_pad (lp);
3912 fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
3913 dump_eh_tree (dump_file, cfun);
3914 fprintf (dump_file, "\n\n");
3917 sbitmap_free (r_reachable);
3918 sbitmap_free (lp_reachable);
3921 verify_eh_tree (cfun);
3924 /* Remove unreachable handlers if any landing pads have been removed after
3925 the last ehcleanup pass (due to gimple_purge_dead_eh_edges). */
3928 maybe_remove_unreachable_handlers (void)
3933 if (cfun->eh == NULL)
3936 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3937 if (lp && lp->post_landing_pad)
3939 if (label_to_block (lp->post_landing_pad) == NULL)
3941 remove_unreachable_handlers ();
3947 /* Remove regions that do not have landing pads. This assumes
3948 that remove_unreachable_handlers has already been run, and
3949 that we've just manipulated the landing pads since then.
3951 Preserve regions with landing pads and regions that prevent
3952 exceptions from propagating further, even if these regions
3953 are not reachable. */
3956 remove_unreachable_handlers_no_lp (void)
3959 sbitmap r_reachable;
3962 mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
3964 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3969 if (region->landing_pads != NULL
3970 || region->type == ERT_MUST_NOT_THROW)
3971 bitmap_set_bit (r_reachable, region->index);
3974 && !bitmap_bit_p (r_reachable, region->index))
3976 "Removing unreachable region %d\n",
3980 remove_unreachable_eh_regions (r_reachable);
3982 sbitmap_free (r_reachable);
3985 /* Undo critical edge splitting on an EH landing pad. Earlier, we
3986 optimistically split all sorts of edges, including EH edges. The
3987 optimization passes in between may not have needed them; if not,
3988 we should undo the split.
3990 Recognize this case by having one EH edge incoming to the BB and
3991 one normal edge outgoing; BB should be empty apart from the
3992 post_landing_pad label.
3994 Note that this is slightly different from the empty handler case
3995 handled by cleanup_empty_eh, in that the actual handler may yet
3996 have actual code but the landing pad has been separated from the
3997 handler. As such, cleanup_empty_eh relies on this transformation
3998 having been done first. */
4001 unsplit_eh (eh_landing_pad lp)
4003 basic_block bb = label_to_block (lp->post_landing_pad);
4004 gimple_stmt_iterator gsi;
4007 /* Quickly check the edge counts on BB for singularity. */
4008 if (!single_pred_p (bb) || !single_succ_p (bb))
4010 e_in = single_pred_edge (bb);
4011 e_out = single_succ_edge (bb);
4013 /* Input edge must be EH and output edge must be normal. */
4014 if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
4017 /* The block must be empty except for the labels and debug insns. */
4018 gsi = gsi_after_labels (bb);
4019 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4020 gsi_next_nondebug (&gsi);
4021 if (!gsi_end_p (gsi))
4024 /* The destination block must not already have a landing pad
4025 for a different region. */
4026 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4028 glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4034 lab = gimple_label_label (label_stmt);
4035 lp_nr = EH_LANDING_PAD_NR (lab);
4036 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4040 /* The new destination block must not already be a destination of
4041 the source block, lest we merge fallthru and eh edges and get
4042 all sorts of confused. */
4043 if (find_edge (e_in->src, e_out->dest))
4046 /* ??? We can get degenerate phis due to cfg cleanups. I would have
4047 thought this should have been cleaned up by a phicprop pass, but
4048 that doesn't appear to handle virtuals. Propagate by hand. */
4049 if (!gimple_seq_empty_p (phi_nodes (bb)))
4051 for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
4054 gphi *phi = gpi.phi ();
4055 tree lhs = gimple_phi_result (phi);
4056 tree rhs = gimple_phi_arg_def (phi, 0);
4057 use_operand_p use_p;
4058 imm_use_iterator iter;
4060 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4062 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4063 SET_USE (use_p, rhs);
4066 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4067 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4069 remove_phi_node (&gpi, true);
4073 if (dump_file && (dump_flags & TDF_DETAILS))
4074 fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4075 lp->index, e_out->dest->index);
4077 /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving
4078 a successor edge, humor it. But do the real CFG change with the
4079 predecessor of E_OUT in order to preserve the ordering of arguments
4080 to the PHI nodes in E_OUT->DEST. */
4081 redirect_eh_edge_1 (e_in, e_out->dest, false);
4082 redirect_edge_pred (e_out, e_in->src);
4083 e_out->flags = e_in->flags;
4084 e_out->probability = e_in->probability;
4085 e_out->count = e_in->count;
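/* Pictorially (a sketch):

       throwing stmt		      throwing stmt
	  | EH			         | EH
	  v		      ==>        v
       <empty LP block>	              <handler>
	  | fallthru
	  v
       <handler>

   The empty post-landing-pad block is bypassed, and the EH edge now
   targets the handler block directly.  */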
4091 /* Examine each landing pad block and see if it matches unsplit_eh. */
4094 unsplit_all_eh (void)
4096 bool changed = false;
4100 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4102 changed |= unsplit_eh (lp);
4107 /* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming
4108 to OLD_BB to NEW_BB; return true on success, false on failure.
4110 OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4111 PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4112 Virtual PHIs may be deleted and marked for renaming. */
4115 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4116 edge old_bb_out, bool change_region)
4118 gphi_iterator ngsi, ogsi;
4121 bitmap ophi_handled;
4123 /* The destination block must not be a regular successor for any
4124 of the preds of the landing pad. Thus, avoid turning
4134 which CFG verification would choke on. See PR45172 and PR51089. */
4135 FOR_EACH_EDGE (e, ei, old_bb->preds)
4136 if (find_edge (e->src, new_bb))
4139 FOR_EACH_EDGE (e, ei, old_bb->preds)
4140 redirect_edge_var_map_clear (e);
4142 ophi_handled = BITMAP_ALLOC (NULL);
4144 /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4145 for the edges we're going to move. */
4146 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4148 gphi *ophi, *nphi = ngsi.phi ();
4151 nresult = gimple_phi_result (nphi);
4152 nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4154 /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4155 the source ssa_name. */
4157 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4160 if (gimple_phi_result (ophi) == nop)
4165 /* If we did find the corresponding PHI, copy those inputs. */
4168 /* If NOP is used somewhere else beyond phis in new_bb, give up. */
4169 if (!has_single_use (nop))
4171 imm_use_iterator imm_iter;
4172 use_operand_p use_p;
4174 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4176 if (!gimple_debug_bind_p (USE_STMT (use_p))
4177 && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4178 || gimple_bb (USE_STMT (use_p)) != new_bb))
4182 bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4183 FOR_EACH_EDGE (e, ei, old_bb->preds)
4188 if ((e->flags & EDGE_EH) == 0)
4190 oop = gimple_phi_arg_def (ophi, e->dest_idx);
4191 oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4192 redirect_edge_var_map_add (e, nresult, oop, oloc);
4195 /* If we didn't find the PHI, if it's a real variable or a VOP, we know
4196 from the fact that OLD_BB is tree_empty_eh_handler_p that the
4197 variable is unchanged from input to the block and we can simply
4198 re-use the input to NEW_BB from the OLD_BB_OUT edge. */
4202 = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4203 FOR_EACH_EDGE (e, ei, old_bb->preds)
4204 redirect_edge_var_map_add (e, nresult, nop, nloc);
4208 /* Second, verify that all PHIs from OLD_BB have been handled. If not,
4209 we don't know what values from the other edges into NEW_BB to use. */
4210 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4212 gphi *ophi = ogsi.phi ();
4213 tree oresult = gimple_phi_result (ophi);
4214 if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4218 /* Finally, move the edges and update the PHIs. */
4219 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4220 if (e->flags & EDGE_EH)
4222 /* ??? CFG manipulation routines do not try to update loop
4223 form on edge redirection. Do so manually here for now. */
4224 /* Redirecting a loop entry or latch edge will either create
4225 a multiple entry loop or rotate the loop. If the loops merge
4226 we may have created a loop with multiple latches.
4227 All of this isn't easily fixed thus cancel the affected loop
4228 and mark the other loop as possibly having multiple latches. */
4229 if (e->dest == e->dest->loop_father->header)
4231 mark_loop_for_removal (e->dest->loop_father);
4232 new_bb->loop_father->latch = NULL;
4233 loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4235 redirect_eh_edge_1 (e, new_bb, change_region);
4236 redirect_edge_succ (e, new_bb);
4237 flush_pending_stmts (e);
4242 BITMAP_FREE (ophi_handled);
4246 FOR_EACH_EDGE (e, ei, old_bb->preds)
4247 redirect_edge_var_map_clear (e);
4248 BITMAP_FREE (ophi_handled);
4252 /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its
4253 old region to NEW_REGION at BB. */
4256 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4257 eh_landing_pad lp, eh_region new_region)
4259 gimple_stmt_iterator gsi;
4262 for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4266 lp->region = new_region;
4267 lp->next_lp = new_region->landing_pads;
4268 new_region->landing_pads = lp;
4270 /* Delete the RESX that was matched within the empty handler block. */
4271 gsi = gsi_last_bb (bb);
4272 unlink_stmt_vdef (gsi_stmt (gsi));
4273 gsi_remove (&gsi, true);
4275 /* Clean up E_OUT for the fallthru. */
4276 e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4277 e_out->probability = REG_BR_PROB_BASE;
4280 /* A subroutine of cleanup_empty_eh. Handle more complex cases of
4281 unsplitting than unsplit_eh was prepared to handle, e.g. when
4282 multiple incoming edges and phis are involved. */
4285 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4287 gimple_stmt_iterator gsi;
4290 /* We really ought not have totally lost everything following
4291 a landing pad label. Given that BB is empty, there had better
4292 be a successor. */
4293 gcc_assert (e_out != NULL);
4295 /* The destination block must not already have a landing pad
4296 for a different region. */
4298 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4300 glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4305 lab = gimple_label_label (stmt);
4306 lp_nr = EH_LANDING_PAD_NR (lab);
4307 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4311 /* Attempt to move the PHIs into the successor block. */
4312 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4314 if (dump_file && (dump_flags & TDF_DETAILS))
4316 "Unsplit EH landing pad %d to block %i "
4317 "(via cleanup_empty_eh).\n",
4318 lp->index, e_out->dest->index);
4325 /* Return true if edge E_FIRST is part of an empty infinite loop
4326 or leads to such a loop through a series of single successor
4327 blocks. */
4330 infinite_empty_loop_p (edge e_first)
4332 bool inf_loop = false;
4335 if (e_first->dest == e_first->src)
4338 e_first->src->aux = (void *) 1;
4339 for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4341 gimple_stmt_iterator gsi;
4347 e->dest->aux = (void *) 1;
4348 gsi = gsi_after_labels (e->dest);
4349 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4350 gsi_next_nondebug (&gsi);
4351 if (!gsi_end_p (gsi))
4354 e_first->src->aux = NULL;
4355 for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4356 e->dest->aux = NULL;
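/* The degenerate shape being detected (a sketch):

     <L>:
       goto <L>;

   either as a direct self-loop or reached through a chain of empty
   single-successor blocks; the AUX field serves as the visited mark
   while walking that chain.  */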
4361 /* Examine the block associated with LP to determine if it's an empty
4362 handler for its EH region. If so, attempt to redirect EH edges to
4363 an outer region. Return true if the CFG was updated in any way. This
4364 is similar to jump forwarding, just across EH edges. */
4367 cleanup_empty_eh (eh_landing_pad lp)
4369 basic_block bb = label_to_block (lp->post_landing_pad);
4370 gimple_stmt_iterator gsi;
4372 eh_region new_region;
4375 bool has_non_eh_pred;
4379 /* There can be zero or one edges out of BB. This is the quickest test. */
4380 switch (EDGE_COUNT (bb->succs))
4386 e_out = single_succ_edge (bb);
4392 resx = last_stmt (bb);
4393 if (resx && is_gimple_resx (resx))
4395 if (stmt_can_throw_external (resx))
4396 optimize_clobbers (bb);
4397 else if (sink_clobbers (bb))
4401 gsi = gsi_after_labels (bb);
4403 /* Make sure to skip debug statements. */
4404 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4405 gsi_next_nondebug (&gsi);
4407 /* If the block is totally empty, look for more unsplitting cases. */
4408 if (gsi_end_p (gsi))
4410 /* For the degenerate case of an infinite loop bail out.
4411 If bb has no successors and is totally empty, which can happen e.g.
4412 because of incorrect noreturn attribute, bail out too. */
4413 if (e_out == NULL
4414 || infinite_empty_loop_p (e_out))
4417 return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4420 /* The block should consist only of a single RESX statement, modulo a
4421 preceding call to __builtin_stack_restore if there is no outgoing
4422 edge, since the call can be eliminated in this case. */
4423 resx = gsi_stmt (gsi);
4424 if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4427 resx = gsi_stmt (gsi);
4429 if (!is_gimple_resx (resx))
4431 gcc_assert (gsi_one_before_end_p (gsi));
4433 /* Determine if there are non-EH edges, or resx edges into the handler. */
4434 has_non_eh_pred = false;
4435 FOR_EACH_EDGE (e, ei, bb->preds)
4436 if (!(e->flags & EDGE_EH))
4437 has_non_eh_pred = true;
4439 /* Find the handler that's outer of the empty handler by looking at
4440 where the RESX instruction was vectored. */
4441 new_lp_nr = lookup_stmt_eh_lp (resx);
4442 new_region = get_eh_region_from_lp_number (new_lp_nr);
4444 /* If there's no destination region within the current function,
4445 redirection is trivial via removing the throwing statements from
4446 the EH region, removing the EH edges, and allowing the block
4447 to go unreachable. */
4448 if (new_region == NULL)
4450 gcc_assert (e_out == NULL);
4451 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4452 if (e->flags & EDGE_EH)
4454 gimple *stmt = last_stmt (e->src);
4455 remove_stmt_from_eh_lp (stmt);
4463 /* If the destination region is a MUST_NOT_THROW, allow the runtime
4464 to handle the abort and allow the blocks to go unreachable. */
4465 if (new_region->type == ERT_MUST_NOT_THROW)
4467 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4468 if (e->flags & EDGE_EH)
4470 gimple *stmt = last_stmt (e->src);
4471 remove_stmt_from_eh_lp (stmt);
4472 add_stmt_to_eh_lp (stmt, new_lp_nr);
4480 /* Try to redirect the EH edges and merge the PHIs into the destination
4481 landing pad block. If the merge succeeds, we'll already have redirected
4482 all the EH edges. The handler itself will go unreachable if there were
4483 no normal edges. */
4484 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4487 /* Finally, if all input edges are EH edges, then we can (potentially)
4488 reduce the number of transfers from the runtime by moving the landing
4489 pad from the original region to the new region. This is a win when
4490 we remove the last CLEANUP region along a particular exception
4491 propagation path. Since nothing changes except for the region with
4492 which the landing pad is associated, the PHI nodes do not need to be
4493 updated. */
4494 if (!has_non_eh_pred)
4496 cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4497 if (dump_file && (dump_flags & TDF_DETAILS))
4498 fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4499 lp->index, new_region->index);
4501 /* ??? The CFG didn't change, but we may have rendered the
4502 old EH region unreachable. Trigger a cleanup there. */
4509 if (dump_file && (dump_flags & TDF_DETAILS))
4510 fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4511 remove_eh_landing_pad (lp);
4515 /* Do a post-order traversal of the EH region tree. Examine each
4516 post_landing_pad block and see if we can eliminate it as empty. */
4519 cleanup_all_empty_eh (void)
4521 bool changed = false;
4525 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4527 changed |= cleanup_empty_eh (lp);
4532 /* Perform cleanups and lowering of exception handling
4533 1) cleanup regions with handlers doing nothing are optimized out
4534 2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
4535 3) Info about regions containing instructions, and regions
4536 reachable via local EH edges, is collected
4537 4) The EH tree is pruned of regions that are no longer necessary.
4539 TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4540 Unify those that have the same failure decl and locus.
4544 execute_cleanup_eh_1 (void)
4546 /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4547 looking up unreachable landing pads. */
4548 remove_unreachable_handlers ();
4550 /* Watch out for the region tree vanishing because everything was unreachable. */
4551 if (cfun->eh->region_tree)
4553 bool changed = false;
4556 changed |= unsplit_all_eh ();
4557 changed |= cleanup_all_empty_eh ();
4561 free_dominance_info (CDI_DOMINATORS);
4562 free_dominance_info (CDI_POST_DOMINATORS);
4564 /* We delayed all basic block deletion, as we may have performed
4565 cleanups on EH edges while non-EH edges were still present. */
4566 delete_unreachable_blocks ();
4568 /* We manipulated the landing pads. Remove any region that no
4569 longer has a landing pad. */
4570 remove_unreachable_handlers_no_lp ();
4572 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4581 const pass_data pass_data_cleanup_eh =
4583 GIMPLE_PASS, /* type */
4584 "ehcleanup", /* name */
4585 OPTGROUP_NONE, /* optinfo_flags */
4586 TV_TREE_EH, /* tv_id */
4587 PROP_gimple_lcf, /* properties_required */
4588 0, /* properties_provided */
4589 0, /* properties_destroyed */
4590 0, /* todo_flags_start */
4591 0, /* todo_flags_finish */
4594 class pass_cleanup_eh : public gimple_opt_pass
4597 pass_cleanup_eh (gcc::context *ctxt)
4598 : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4601 /* opt_pass methods: */
4602 opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4603 virtual bool gate (function *fun)
4605 return fun->eh != NULL && fun->eh->region_tree != NULL;
4608 virtual unsigned int execute (function *);
4610 }; // class pass_cleanup_eh
4613 pass_cleanup_eh::execute (function *fun)
4615 int ret = execute_cleanup_eh_1 ();
4617 /* If the function no longer needs an EH personality routine
4618 clear it. This exposes cross-language inlining opportunities
4619 and avoids references to a never defined personality routine. */
4620 if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4621 && function_needs_eh_personality (fun) != eh_personality_lang)
4622 DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4630 make_pass_cleanup_eh (gcc::context *ctxt)
4632 return new pass_cleanup_eh (ctxt);
4635 /* Verify that BB, which contains STMT as its last statement, has precisely
4636 the edge that make_eh_edges would create. */
4639 verify_eh_edges (gimple *stmt)
4641 basic_block bb = gimple_bb (stmt);
4642 eh_landing_pad lp = NULL;
4647 lp_nr = lookup_stmt_eh_lp (stmt);
4649 lp = get_eh_landing_pad_from_number (lp_nr);
4652 FOR_EACH_EDGE (e, ei, bb->succs)
4654 if (e->flags & EDGE_EH)
4658 error ("BB %i has multiple EH edges", bb->index);
4670 error ("BB %i can not throw but has an EH edge", bb->index);
4676 if (!stmt_could_throw_p (stmt))
4678 error ("BB %i last statement has incorrectly set lp", bb->index);
4682 if (eh_edge == NULL)
4684 error ("BB %i is missing an EH edge", bb->index);
4688 if (eh_edge->dest != label_to_block (lp->post_landing_pad))
4690 error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4697 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically. */
4700 verify_eh_dispatch_edge (geh_dispatch *stmt)
4704 basic_block src, dst;
4705 bool want_fallthru = true;
4709 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4710 src = gimple_bb (stmt);
4712 FOR_EACH_EDGE (e, ei, src->succs)
4713 gcc_assert (e->aux == NULL);
4718 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4720 dst = label_to_block (c->label);
4721 e = find_edge (src, dst);
4724 error ("BB %i is missing an edge", src->index);
4729 /* A catch-all handler doesn't have a fallthru. */
4730 if (c->type_list == NULL)
4732 want_fallthru = false;
4738 case ERT_ALLOWED_EXCEPTIONS:
4739 dst = label_to_block (r->u.allowed.label);
4740 e = find_edge (src, dst);
4743 error ("BB %i is missing an edge", src->index);
4754 FOR_EACH_EDGE (e, ei, src->succs)
4756 if (e->flags & EDGE_FALLTHRU)
4758 if (fall_edge != NULL)
4760 error ("BB %i too many fallthru edges", src->index);
4769 error ("BB %i has incorrect edge", src->index);
4773 if ((fall_edge != NULL) ^ want_fallthru)
4775 error ("BB %i has incorrect fallthru edge", src->index);