/* Exception handling semantics and decomposition for trees.
- Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
This file is part of GCC.
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
-#include "rtl.h"
-#include "tm_p.h"
#include "flags.h"
#include "function.h"
#include "except.h"
+#include "pointer-set.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "tree-inline.h"
#include "timevar.h"
#include "langhooks.h"
#include "ggc.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "gimple.h"
+#include "target.h"
/* In some instances a tree and a gimple need to be stored in the same table,
i.e. in hash tables. This is a structure to do this. */
/* Misc functions used in this file. */
-/* Compare and hash for any structure which begins with a canonical
- pointer. Assumes all pointers are interchangeable, which is sort
- of already assumed by gcc elsewhere IIRC. */
-
-static int
-struct_ptr_eq (const void *a, const void *b)
-{
- const void * const * x = (const void * const *) a;
- const void * const * y = (const void * const *) b;
- return *x == *y;
-}
-
-static hashval_t
-struct_ptr_hash (const void *a)
-{
- const void * const * x = (const void * const *) a;
- return (size_t)*x >> 4;
-}
-
-
-/* Remember and lookup EH region data for arbitrary statements.
+/* Remember and lookup EH landing pad data for arbitrary statements.
Really this means any statement that could_throw_p. We could
   stuff this information into the stmt_ann data structure, but there
   are many more statements that *cannot* throw as compared to those
   that can. We should be saving some amount
of space by only allocating memory for those that can throw. */
-static void
-record_stmt_eh_region (struct eh_region *region, gimple t)
-{
- if (!region)
- return;
-
- add_stmt_to_eh_region (t, get_eh_region_number (region));
-}
-
-
-/* Add statement T in function IFUN to EH region NUM. */
+/* Add statement T in function IFUN to landing pad NUM. */
void
-add_stmt_to_eh_region_fn (struct function *ifun, gimple t, int num)
+add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
{
struct throw_stmt_node *n;
void **slot;
- gcc_assert (num >= 0);
- gcc_assert (gimple_code (t) != GIMPLE_RESX);
+ gcc_assert (num != 0);
- n = GGC_NEW (struct throw_stmt_node);
+ n = ggc_alloc_throw_stmt_node ();
n->stmt = t;
- n->region_nr = num;
+ n->lp_nr = num;
if (!get_eh_throw_stmt_table (ifun))
set_eh_throw_stmt_table (ifun, htab_create_ggc (31, struct_ptr_hash,
*slot = n;
}
-
-/* Add statement T in the current function (cfun) to EH region number
- NUM. */
+/* Add statement T in the current function (cfun) to EH landing pad NUM. */
void
-add_stmt_to_eh_region (gimple t, int num)
+add_stmt_to_eh_lp (gimple t, int num)
+{
+ add_stmt_to_eh_lp_fn (cfun, t, num);
+}
+
+/* Add statement T to the single EH landing pad in REGION. */
+
+static void
+record_stmt_eh_region (eh_region region, gimple t)
{
- add_stmt_to_eh_region_fn (cfun, t, num);
+ if (region == NULL)
+ return;
+ if (region->type == ERT_MUST_NOT_THROW)
+ add_stmt_to_eh_lp_fn (cfun, t, -region->index);
+ else
+ {
+ eh_landing_pad lp = region->landing_pads;
+ if (lp == NULL)
+ lp = gen_eh_landing_pad (region);
+ else
+ gcc_assert (lp->next_lp == NULL);
+ add_stmt_to_eh_lp_fn (cfun, t, lp->index);
+ }
}
-/* Remove statement T in function IFUN from the EH region holding it. */
+/* Remove statement T in function IFUN from its EH landing pad. */
bool
-remove_stmt_from_eh_region_fn (struct function *ifun, gimple t)
+remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
{
struct throw_stmt_node dummy;
void **slot;
}
-/* Remove statement T in the current function (cfun) from the EH
- region holding it. */
+/* Remove statement T in the current function (cfun) from its
+ EH landing pad. */
bool
-remove_stmt_from_eh_region (gimple t)
+remove_stmt_from_eh_lp (gimple t)
{
- return remove_stmt_from_eh_region_fn (cfun, t);
+ return remove_stmt_from_eh_lp_fn (cfun, t);
}
/* Determine if statement T is inside an EH region in function IFUN.
- Return the EH region number if found, return -2 if IFUN does not
- have an EH table and -1 if T could not be found in IFUN's EH region
- table. */
+ Positive numbers indicate a landing pad index; negative numbers
+ indicate a MUST_NOT_THROW region index; zero indicates that the
+ statement is not recorded in the region table. */
int
-lookup_stmt_eh_region_fn (struct function *ifun, gimple t)
+lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
{
struct throw_stmt_node *p, n;
- if (!get_eh_throw_stmt_table (ifun))
- return -2;
+ if (ifun->eh->throw_stmt_table == NULL)
+ return 0;
n.stmt = t;
- p = (struct throw_stmt_node *) htab_find (get_eh_throw_stmt_table (ifun), &n);
- return (p ? p->region_nr : -1);
-}
-
-
-/* Determine if statement T is inside an EH region in the current
- function (cfun). Return the EH region number if found, return -2
- if cfun does not have an EH table and -1 if T could not be found in
- cfun's EH region table. */
-
-int
-lookup_stmt_eh_region (gimple t)
-{
- /* We can get called from initialized data when -fnon-call-exceptions
- is on; prevent crash. */
- if (!cfun)
- return -1;
-
- return lookup_stmt_eh_region_fn (cfun, t);
+ p = (struct throw_stmt_node *) htab_find (ifun->eh->throw_stmt_table, &n);
+ return p ? p->lp_nr : 0;
}
-
-/* Determine if expression T is inside an EH region in the current
- function (cfun). Return the EH region number if found, return -2
- if IFUN does not have an EH table and -1 if T could not be found in
- IFUN's EH region table. */
+/* Likewise, but always use the current function. */
int
-lookup_expr_eh_region (tree t)
+lookup_stmt_eh_lp (gimple t)
{
/* We can get called from initialized data when -fnon-call-exceptions
is on; prevent crash. */
if (!cfun)
- return -1;
-
- if (!get_eh_throw_stmt_table (cfun))
- return -2;
-
- if (t && EXPR_P (t))
- {
- tree_ann_common_t ann = tree_common_ann (t);
- if (ann)
- return (int) ann->rn;
- }
-
- return -1;
+ return 0;
+ return lookup_stmt_eh_lp_fn (cfun, t);
}
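
/* A minimal illustrative consumer of the convention above; the helper
   name is hypothetical and not part of this file.  Positive values name
   a landing pad, negative values name a MUST_NOT_THROW region, and zero
   means the statement is not recorded in the throw table at all.  */

static const char *
describe_stmt_eh_lp (gimple t)
{
  int lp_nr = lookup_stmt_eh_lp (t);

  if (lp_nr > 0)
    return "throws to a landing pad";
  else if (lp_nr < 0)
    return "contained in a MUST_NOT_THROW region";
  else
    return "not recorded; cannot throw as far as EH is concerned";
}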
-
/* First pass of EH node decomposition. Build up a tree of GIMPLE_TRY_FINALLY
nodes and LABEL_DECL nodes. We will use this during the second phase to
determine if a goto leaves the body of a TRY_FINALLY_EXPR node. */
when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
this field. Thus a treemple is used. */
- treemple child;
+ treemple child;
gimple parent;
};
static void
collect_finally_tree (gimple stmt, gimple region);
-/* Go through the gimple sequence. Works with collect_finally_tree to
+/* Go through the gimple sequence. Works with collect_finally_tree to
record all GIMPLE_LABEL and GIMPLE_TRY statements. */
static void
collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
break;
+ case GIMPLE_EH_ELSE:
+ collect_finally_tree_1 (gimple_eh_else_n_body (stmt), region);
+ collect_finally_tree_1 (gimple_eh_else_e_body (stmt), region);
+ break;
+
default:
/* A type, a decl, or some kind of statement that we're not
interested in. Don't walk them. */
The eh region creation is straight-forward, but frobbing all the gotos
and such into shape isn't. */
+/* The sequence into which we record all EH stuff. This will be
+ placed at the end of the function when we're all done. */
+static gimple_seq eh_seq;
+
+/* Record whether an EH region contains something that can throw,
+ indexed by EH region number. */
+static bitmap eh_region_may_contain_throw_map;
+
+/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
+ statements that are seen to escape this GIMPLE_TRY_FINALLY node.
+ The idea is to record a gimple statement for everything except for
+ the conditionals, which get their labels recorded. Since labels are
+ of type 'tree', we need this node to store both gimple and tree
+ objects. REPL_STMT is the sequence used to replace the goto/return
+ statement. CONT_STMT is used to store the statement that allows
+ the return/goto to jump to the original destination. */
+
+struct goto_queue_node
+{
+ treemple stmt;
+ gimple_seq repl_stmt;
+ gimple cont_stmt;
+ int index;
+ /* This is used when index >= 0 to indicate that stmt is a label (as
+ opposed to a goto stmt). */
+ int is_label;
+};
+
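/* As an illustrative example (not taken from a real testcase), for

     try { ...; return x; } finally { cleanup (); }

   the return is recorded with a negative INDEX; its REPL_STMT becomes
   a goto to the label at the head of the lowered finally block, and
   its CONT_STMT keeps the original return statement so that it can be
   re-emitted once the finally code has run.  */
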
/* State of the world while lowering. */
struct leh_state
/* What's "current" while constructing the eh region tree. These
correspond to variables of the same name in cfun->eh, which we
don't have easy access to. */
- struct eh_region *cur_region;
- struct eh_region *prev_try;
+ eh_region cur_region;
+
+ /* What's "current" for the purposes of __builtin_eh_pointer. For
+ a CATCH, this is the associated TRY. For an EH_FILTER, this is
+ the associated ALLOWED_EXCEPTIONS, etc. */
+ eh_region ehp_region;
/* Processing of TRY_FINALLY requires a bit more state. This is
split out into a separate structure so that we don't have to
in the collect_finally_tree data structures. */
gimple try_finally_expr;
gimple top_p;
+
/* While lowering a top_p usually it is expanded into multiple statements,
thus we need the following field to store them. */
gimple_seq top_p_seq;
struct leh_state *outer;
/* The exception region created for it. */
- struct eh_region *region;
-
- /* The GOTO_QUEUE is is an array of GIMPLE_GOTO and GIMPLE_RETURN statements
- that are seen to escape this GIMPLE_TRY_FINALLY node.
- The idea is to record a gimple statement for everything except for
- the conditionals, which get their labels recorded. Since labels are of
- type 'tree', we need this node to store both gimple and tree objects.
- REPL_STMT is the sequence used to replace the goto/return statement.
- CONT_STMT is used to store the statement that allows the return/goto to
- jump to the original destination. */
- struct goto_queue_node {
- treemple stmt;
- gimple_seq repl_stmt;
- gimple cont_stmt;
- int index;
- /* this is used when index >= 0 to indicate that stmt is a label(as
- opposed to a goto stmt) */
- int is_label;
- } *goto_queue;
+ eh_region region;
+
+ /* The goto queue. */
+ struct goto_queue_node *goto_queue;
size_t goto_queue_size;
size_t goto_queue_active;
though subsequent transformations may have cleared that flag. */
tree fallthru_label;
- /* A label that has been registered with except.c to be the
- landing pad for this try block. */
- tree eh_label;
-
/* True if it is possible to fall out the bottom of the try block.
Cleared if the fallthru is converted to a goto. */
bool may_fallthru;
bool may_throw;
};
-static gimple_seq lower_eh_filter (struct leh_state *, gimple);
+static gimple_seq lower_eh_must_not_throw (struct leh_state *, gimple);
/* Search for STMT in the goto queue. Return the replacement,
or null if the statement isn't in the queue. */
tree label;
gimple_seq new_seq;
treemple temp;
+ location_t loc = gimple_location (gsi_stmt (*gsi));
temp.tp = tp;
new_seq = find_goto_replacement (tf, temp);
return;
}
- label = create_artificial_label ();
+ label = create_artificial_label (loc);
/* Set the new label for the GIMPLE_COND */
*tp = label;
case GIMPLE_EH_FILTER:
replace_goto_queue_stmt_list (gimple_eh_filter_failure (stmt), tf);
break;
+ case GIMPLE_EH_ELSE:
+ replace_goto_queue_stmt_list (gimple_eh_else_n_body (stmt), tf);
+ replace_goto_queue_stmt_list (gimple_eh_else_e_body (stmt), tf);
+ break;
default:
/* These won't have gotos in them. */
if (tf->goto_queue_active == 0)
return;
replace_goto_queue_stmt_list (tf->top_p_seq, tf);
+ replace_goto_queue_stmt_list (eh_seq, tf);
}
/* Add a new record to the goto queue contained in TF. NEW_STMT is the
labels. */
new_stmt = stmt;
record_in_goto_queue (tf, new_stmt, index, true);
-
}
/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
#define verify_norecord_switch_expr(state, switch_expr)
#endif
-/* Redirect a RETURN_EXPR pointed to by STMT_P to FINLAB. Place in CONT_P
- whatever is needed to finish the return. If MOD is non-null, insert it
- before the new branch. RETURN_VALUE_P is a cache containing a temporary
- variable to be used in manipulating the value returned from the function. */
+/* Redirect a RETURN_EXPR pointed to by Q to FINLAB. If MOD is
+ non-null, insert it before the new branch. */
static void
-do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
- tree *return_value_p)
+do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
- tree ret_expr;
gimple x;
- /* In the case of a return, the queue node must be a gimple statement. */
+ /* In the case of a return, the queue node must be a gimple statement. */
gcc_assert (!q->is_label);
- ret_expr = gimple_return_retval (q->stmt.g);
-
- if (ret_expr)
- {
- if (!*return_value_p)
- *return_value_p = ret_expr;
- else
- gcc_assert (*return_value_p == ret_expr);
- q->cont_stmt = q->stmt.g;
- /* The nasty part about redirecting the return value is that the
- return value itself is to be computed before the FINALLY block
- is executed. e.g.
-
- int x;
- int foo (void)
- {
- x = 0;
- try {
- return x;
- } finally {
- x++;
- }
- }
-
- should return 0, not 1. Arrange for this to happen by copying
- computed the return value into a local temporary. This also
- allows us to redirect multiple return statements through the
- same destination block; whether this is a net win or not really
- depends, I guess, but it does make generation of the switch in
- lower_try_finally_switch easier. */
+ /* Note that the return value may have already been computed, e.g.,
- if (TREE_CODE (ret_expr) == RESULT_DECL)
+ int x;
+ int foo (void)
{
- if (!*return_value_p)
- *return_value_p = ret_expr;
- else
- gcc_assert (*return_value_p == ret_expr);
- q->cont_stmt = q->stmt.g;
+ x = 0;
+ try {
+ return x;
+ } finally {
+ x++;
+ }
}
- else
- gcc_unreachable ();
- }
- else
- /* If we don't return a value, all return statements are the same. */
- q->cont_stmt = q->stmt.g;
+
+ should return 0, not 1. We don't have to do anything to make
+   this happen because the return value has been placed in the
+ RESULT_DECL already. */
+
+ q->cont_stmt = q->stmt.g;
if (!q->repl_stmt)
q->repl_stmt = gimple_seq_alloc ();
if (!q->repl_stmt)
q->repl_stmt = gimple_seq_alloc ();
- q->cont_stmt = gimple_build_goto (VEC_index (tree, tf->dest_array,q->index));
+ q->cont_stmt = gimple_build_goto (VEC_index (tree, tf->dest_array, q->index));
if (mod)
gimple_seq_add_seq (&q->repl_stmt, mod);
gimple_seq_add_stmt (&q->repl_stmt, x);
}
+/* Emit a standard landing pad sequence into SEQ for REGION. */
+
+static void
+emit_post_landing_pad (gimple_seq *seq, eh_region region)
+{
+ eh_landing_pad lp = region->landing_pads;
+ gimple x;
+
+ if (lp == NULL)
+ lp = gen_eh_landing_pad (region);
+
+ lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
+ EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;
+
+ x = gimple_build_label (lp->post_landing_pad);
+ gimple_seq_add_stmt (seq, x);
+}
+
+/* Emit a RESX statement into SEQ for REGION. */
+
+static void
+emit_resx (gimple_seq *seq, eh_region region)
+{
+ gimple x = gimple_build_resx (region->index);
+ gimple_seq_add_stmt (seq, x);
+ if (region->outer)
+ record_stmt_eh_region (region->outer, x);
+}
+
+/* Emit an EH_DISPATCH statement into SEQ for REGION. */
+
+static void
+emit_eh_dispatch (gimple_seq *seq, eh_region region)
+{
+ gimple x = gimple_build_eh_dispatch (region->index);
+ gimple_seq_add_stmt (seq, x);
+}
+
+/* Note that the current EH region may contain a throw, or a
+ call to a function which itself may contain a throw. */
+
+static void
+note_eh_region_may_contain_throw (eh_region region)
+{
+ while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
+ {
+ if (region->type == ERT_MUST_NOT_THROW)
+ break;
+ region = region->outer;
+ if (region == NULL)
+ break;
+ }
+}
+
+/* Check if REGION has been marked as containing a throw. If REGION is
+ NULL, this predicate is false. */
+
+static inline bool
+eh_region_may_contain_throw (eh_region r)
+{
+ return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
+}
+
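/* As an illustration of the propagation above (hypothetical nesting,
   not from a real testcase): with a cleanup region C inside a
   MUST_NOT_THROW region M inside a try region T, noting a throwing
   statement within C sets the bits for C and M but stops there; T is
   never marked, because the personality routine will call terminate
   rather than unwind past M.  */
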
/* We want to transform
try { body; } catch { stuff; }
to
- body; goto over; lab: stuff; over:
-
- TP is a GIMPLE_TRY node. LAB is the label that
+   normal_sequence:
+ body;
+ over:
+   eh_sequence:
+ landing_pad:
+ stuff;
+ goto over;
+
+ TP is a GIMPLE_TRY node. REGION is the region whose post_landing_pad
should be placed before the second operand, or NULL. OVER is
an existing label that should be put at the exit, or NULL. */
static gimple_seq
-frob_into_branch_around (gimple tp, tree lab, tree over)
+frob_into_branch_around (gimple tp, eh_region region, tree over)
{
gimple x;
gimple_seq cleanup, result;
+ location_t loc = gimple_location (tp);
cleanup = gimple_try_cleanup (tp);
result = gimple_try_eval (tp);
- if (gimple_seq_may_fallthru (result))
+ if (region)
+ emit_post_landing_pad (&eh_seq, region);
+
+ if (gimple_seq_may_fallthru (cleanup))
{
if (!over)
- over = create_artificial_label ();
+ over = create_artificial_label (loc);
x = gimple_build_goto (over);
- gimple_seq_add_stmt (&result, x);
+ gimple_seq_add_stmt (&cleanup, x);
}
-
- if (lab)
- {
- x = gimple_build_label (lab);
- gimple_seq_add_stmt (&result, x);
- }
-
- gimple_seq_add_seq (&result, cleanup);
+ gimple_seq_add_seq (&eh_seq, cleanup);
if (over)
{
if (!label)
{
- label = create_artificial_label ();
+ label = create_artificial_label (gimple_location (tf->try_finally_expr));
tf->fallthru_label = label;
if (tf->outer->tf)
{
return label;
}
-/* A subroutine of lower_try_finally. If lang_protect_cleanup_actions
- returns non-null, then the language requires that the exception path out
- of a try_finally be treated specially. To wit: the code within the
- finally block may not itself throw an exception. We have two choices here.
- First we can duplicate the finally block and wrap it in a must_not_throw
- region. Second, we can generate code like
+/* A subroutine of lower_try_finally. If FINALLY consists of a
+ GIMPLE_EH_ELSE node, return it. */
+
+static inline gimple
+get_eh_else (gimple_seq finally)
+{
+ gimple x = gimple_seq_first_stmt (finally);
+ if (gimple_code (x) == GIMPLE_EH_ELSE)
+ {
+ gcc_assert (gimple_seq_singleton_p (finally));
+ return x;
+ }
+ return NULL;
+}
+
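/* The shape being matched is a cleanup consisting of nothing but a
   single GIMPLE_EH_ELSE, i.e. (illustrative)

     try { body; } finally { EH_ELSE { normal-path stmts } { exception-path stmts } }

   where the first sequence (gimple_eh_else_n_body) runs whenever the
   finally block is reached by normal control flow, and the second
   (gimple_eh_else_e_body) runs only when it is reached along an
   exception edge.  */
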
+/* A subroutine of lower_try_finally. If the eh_protect_cleanup_actions
+ langhook returns non-null, then the language requires that the exception
+ path out of a try_finally be treated specially. To wit: the code within
+ the finally block may not itself throw an exception. We have two choices
+ here. First we can duplicate the finally block and wrap it in a
+ must_not_throw region. Second, we can generate code like
try {
finally_block;
struct leh_state *this_state,
struct leh_tf_state *tf)
{
- gimple protect_cleanup_actions;
+ tree protect_cleanup_actions;
gimple_stmt_iterator gsi;
bool finally_may_fallthru;
gimple_seq finally;
- gimple x;
+ gimple x, eh_else;
/* First check for nothing to do. */
- if (lang_protect_cleanup_actions)
- protect_cleanup_actions = lang_protect_cleanup_actions ();
- else
- protect_cleanup_actions = NULL;
+ if (lang_hooks.eh_protect_cleanup_actions == NULL)
+ return;
+ protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
+ if (protect_cleanup_actions == NULL)
+ return;
finally = gimple_try_cleanup (tf->top_p);
-
- /* If the EH case of the finally block can fall through, this may be a
- structure of the form
- try {
- try {
- throw ...;
- } cleanup {
- try {
- throw ...;
- } catch (...) {
- }
- }
- } catch (...) {
- yyy;
- }
- E.g. with an inline destructor with an embedded try block. In this
- case we must save the runtime EH data around the nested exception.
-
- This complication means that any time the previous runtime data might
- be used (via fallthru from the finally) we handle the eh case here,
- whether or not protect_cleanup_actions is active. */
-
- finally_may_fallthru = gimple_seq_may_fallthru (finally);
- if (!finally_may_fallthru && !protect_cleanup_actions)
- return;
+ eh_else = get_eh_else (finally);
/* Duplicate the FINALLY block. Only need to do this for try-finally,
- and not for cleanups. */
- if (this_state)
+ and not for cleanups. If we've got an EH_ELSE, extract it now. */
+ if (eh_else)
+ {
+ finally = gimple_eh_else_e_body (eh_else);
+ gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
+ }
+ else if (this_state)
finally = lower_try_finally_dup_block (finally, outer_state);
+ finally_may_fallthru = gimple_seq_may_fallthru (finally);
/* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
MUST_NOT_THROW filter. */
gsi = gsi_start (finally);
x = gsi_stmt (gsi);
- if (protect_cleanup_actions
- && gimple_code (x) == GIMPLE_TRY
+ if (gimple_code (x) == GIMPLE_TRY
&& gimple_try_kind (x) == GIMPLE_TRY_CATCH
&& gimple_try_catch_is_cleanup (x))
{
gsi_remove (&gsi, false);
}
- /* Resume execution after the exception. Adding this now lets
- lower_eh_filter not add unnecessary gotos, as it is clear that
- we never fallthru from this copy of the finally block. */
- if (finally_may_fallthru)
- {
- tree save_eptr, save_filt;
- tree tmp;
-
- save_eptr = create_tmp_var (ptr_type_node, "save_eptr");
- save_filt = create_tmp_var (integer_type_node, "save_filt");
-
- gsi = gsi_start (finally);
- tmp = build0 (EXC_PTR_EXPR, ptr_type_node);
- x = gimple_build_assign (save_eptr, tmp);
- gsi_insert_before (&gsi, x, GSI_CONTINUE_LINKING);
-
- tmp = build0 (FILTER_EXPR, integer_type_node);
- x = gimple_build_assign (save_filt, tmp);
- gsi_insert_before (&gsi, x, GSI_CONTINUE_LINKING);
-
- gsi = gsi_last (finally);
- tmp = build0 (EXC_PTR_EXPR, ptr_type_node);
- x = gimple_build_assign (tmp, save_eptr);
- gsi_insert_after (&gsi, x, GSI_CONTINUE_LINKING);
-
- tmp = build0 (FILTER_EXPR, integer_type_node);
- x = gimple_build_assign (tmp, save_filt);
- gsi_insert_after (&gsi, x, GSI_CONTINUE_LINKING);
-
- x = gimple_build_resx (get_eh_region_number (tf->region));
- gsi_insert_after (&gsi, x, GSI_CONTINUE_LINKING);
- }
-
/* Wrap the block with protect_cleanup_actions as the action. */
- if (protect_cleanup_actions)
- {
- gimple_seq seq = NULL, failure = NULL;
-
- gimple_seq_add_stmt (&failure, protect_cleanup_actions);
- x = gimple_build_eh_filter (NULL, failure);
- gimple_eh_filter_set_must_not_throw (x, 1);
-
- gimple_seq_add_stmt (&seq, x);
- x = gimple_build_try (finally, seq, GIMPLE_TRY_CATCH);
- finally = lower_eh_filter (outer_state, x);
- }
- else
- lower_eh_constructs_1 (outer_state, finally);
-
- /* Hook this up to the end of the existing try block. If we
- previously fell through the end, we'll have to branch around.
- This means adding a new goto, and adding it to the queue. */
-
- gsi = gsi_last (gimple_try_eval (tf->top_p));
-
- if (tf->may_fallthru)
- {
- tree tmp;
- tmp = lower_try_finally_fallthru_label (tf);
- x = gimple_build_goto (tmp);
- gsi_insert_after (&gsi, x, GSI_CONTINUE_LINKING);
-
- if (this_state)
- maybe_record_in_goto_queue (this_state, x);
-
- tf->may_fallthru = false;
- }
-
- x = gimple_build_label (tf->eh_label);
- gsi_insert_after (&gsi, x, GSI_CONTINUE_LINKING);
- gsi_insert_seq_after (&gsi, finally, GSI_CONTINUE_LINKING);
+ x = gimple_build_eh_must_not_throw (protect_cleanup_actions);
+ x = gimple_build_try (finally, gimple_seq_alloc_with_stmt (x),
+ GIMPLE_TRY_CATCH);
+ finally = lower_eh_must_not_throw (outer_state, x);
+
+ /* Drop all of this into the exception sequence. */
+ emit_post_landing_pad (&eh_seq, tf->region);
+ gimple_seq_add_seq (&eh_seq, finally);
+ if (finally_may_fallthru)
+ emit_resx (&eh_seq, tf->region);
/* Having now been handled, EH isn't to be considered with
the rest of the outgoing edges. */
lower_try_finally_nofallthru (struct leh_state *state,
struct leh_tf_state *tf)
{
- tree lab, return_val;
- gimple x;
+ tree lab;
+ gimple x, eh_else;
gimple_seq finally;
struct goto_queue_node *q, *qe;
- if (tf->may_throw)
- lab = tf->eh_label;
- else
- lab = create_artificial_label ();
+ lab = create_artificial_label (gimple_location (tf->try_finally_expr));
/* We expect that tf->top_p is a GIMPLE_TRY. */
finally = gimple_try_cleanup (tf->top_p);
x = gimple_build_label (lab);
gimple_seq_add_stmt (&tf->top_p_seq, x);
- return_val = NULL;
q = tf->goto_queue;
qe = q + tf->goto_queue_active;
for (; q < qe; ++q)
if (q->index < 0)
- do_return_redirection (q, lab, NULL, &return_val);
+ do_return_redirection (q, lab, NULL);
else
do_goto_redirection (q, lab, NULL, tf);
replace_goto_queue (tf);
- lower_eh_constructs_1 (state, finally);
- gimple_seq_add_seq (&tf->top_p_seq, finally);
+ /* Emit the finally block into the stream. Lower EH_ELSE at this time. */
+ eh_else = get_eh_else (finally);
+ if (eh_else)
+ {
+ finally = gimple_eh_else_n_body (eh_else);
+ lower_eh_constructs_1 (state, finally);
+ gimple_seq_add_seq (&tf->top_p_seq, finally);
+
+ if (tf->may_throw)
+ {
+ finally = gimple_eh_else_e_body (eh_else);
+ lower_eh_constructs_1 (state, finally);
+
+ emit_post_landing_pad (&eh_seq, tf->region);
+ gimple_seq_add_seq (&eh_seq, finally);
+ }
+ }
+ else
+ {
+ lower_eh_constructs_1 (state, finally);
+ gimple_seq_add_seq (&tf->top_p_seq, finally);
+
+ if (tf->may_throw)
+ {
+ emit_post_landing_pad (&eh_seq, tf->region);
+
+ x = gimple_build_goto (lab);
+ gimple_seq_add_stmt (&eh_seq, x);
+ }
+ }
}
/* A subroutine of lower_try_finally. We have determined that there is
gimple x;
gimple_seq finally;
tree finally_label;
+ location_t loc = gimple_location (tf->try_finally_expr);
finally = gimple_try_cleanup (tf->top_p);
tf->top_p_seq = gimple_try_eval (tf->top_p);
+ /* Since there's only one destination, and the destination edge can only
+ either be EH or non-EH, that implies that all of our incoming edges
+ are of the same type. Therefore we can lower EH_ELSE immediately. */
+ x = get_eh_else (finally);
+ if (x)
+ {
+ if (tf->may_throw)
+ finally = gimple_eh_else_e_body (x);
+ else
+ finally = gimple_eh_else_n_body (x);
+ }
+
lower_eh_constructs_1 (state, finally);
if (tf->may_throw)
{
/* Only reachable via the exception edge. Add the given label to
the head of the FINALLY block. Append a RESX at the end. */
-
- x = gimple_build_label (tf->eh_label);
- gimple_seq_add_stmt (&tf->top_p_seq, x);
-
- gimple_seq_add_seq (&tf->top_p_seq, finally);
-
- x = gimple_build_resx (get_eh_region_number (tf->region));
-
- gimple_seq_add_stmt (&tf->top_p_seq, x);
-
+ emit_post_landing_pad (&eh_seq, tf->region);
+ gimple_seq_add_seq (&eh_seq, finally);
+ emit_resx (&eh_seq, tf->region);
return;
}
return;
}
- finally_label = create_artificial_label ();
+ finally_label = create_artificial_label (loc);
x = gimple_build_label (finally_label);
gimple_seq_add_stmt (&tf->top_p_seq, x);
if (tf->may_return)
{
/* Reachable by return expressions only. Redirect them. */
- tree return_val = NULL;
for (; q < qe; ++q)
- do_return_redirection (q, finally_label, NULL, &return_val);
+ do_return_redirection (q, finally_label, NULL);
replace_goto_queue (tf);
}
else
gimple_seq finally;
gimple_seq new_stmt;
gimple_seq seq;
- gimple x;
+ gimple x, eh_else;
tree tmp;
+ location_t tf_loc = gimple_location (tf->try_finally_expr);
finally = gimple_try_cleanup (tf->top_p);
+
+ /* Notice EH_ELSE, and simplify some of the remaining code
+ by considering FINALLY to be the normal return path only. */
+ eh_else = get_eh_else (finally);
+ if (eh_else)
+ finally = gimple_eh_else_n_body (eh_else);
+
tf->top_p_seq = gimple_try_eval (tf->top_p);
new_stmt = NULL;
if (tf->may_throw)
{
- x = gimple_build_label (tf->eh_label);
- gimple_seq_add_stmt (&new_stmt, x);
-
- seq = lower_try_finally_dup_block (finally, state);
+ /* We don't need to copy the EH path of EH_ELSE,
+ since it is only emitted once. */
+ if (eh_else)
+ seq = gimple_eh_else_e_body (eh_else);
+ else
+ seq = lower_try_finally_dup_block (finally, state);
lower_eh_constructs_1 (state, seq);
- gimple_seq_add_seq (&new_stmt, seq);
- x = gimple_build_resx (get_eh_region_number (tf->region));
- gimple_seq_add_stmt (&new_stmt, x);
+ emit_post_landing_pad (&eh_seq, tf->region);
+ gimple_seq_add_seq (&eh_seq, seq);
+ emit_resx (&eh_seq, tf->region);
}
if (tf->goto_queue)
{
struct goto_queue_node *q, *qe;
- tree return_val = NULL;
int return_index, index;
struct labels_s
{
if (! q)
continue;
- lab = labels[index].label = create_artificial_label ();
+ lab = labels[index].label
+ = create_artificial_label (tf_loc);
if (index == return_index)
- do_return_redirection (q, lab, NULL, &return_val);
+ do_return_redirection (q, lab, NULL);
else
do_goto_redirection (q, lab, NULL, tf);
lab = labels[index].label;
if (index == return_index)
- do_return_redirection (q, lab, NULL, &return_val);
+ do_return_redirection (q, lab, NULL);
else
do_goto_redirection (q, lab, NULL, tf);
}
-
+
replace_goto_queue (tf);
free (labels);
}
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
struct goto_queue_node *q, *qe;
- tree return_val = NULL;
tree finally_tmp, finally_label;
int return_index, eh_index, fallthru_index;
int nlabels, ndests, j, last_case_index;
tree last_case;
VEC (tree,heap) *case_label_vec;
gimple_seq switch_body;
- gimple x;
+ gimple x, eh_else;
tree tmp;
gimple switch_stmt;
gimple_seq finally;
struct pointer_map_t *cont_map = NULL;
+ /* The location of the TRY_FINALLY stmt. */
+ location_t tf_loc = gimple_location (tf->try_finally_expr);
+ /* The location of the finally block. */
+ location_t finally_loc;
switch_body = gimple_seq_alloc ();
+ finally = gimple_try_cleanup (tf->top_p);
+ eh_else = get_eh_else (finally);
/* Mash the TRY block to the head of the chain. */
- finally = gimple_try_cleanup (tf->top_p);
tf->top_p_seq = gimple_try_eval (tf->top_p);
+ /* The location of the finally is either the last stmt in the finally
+ block or the location of the TRY_FINALLY itself. */
+ finally_loc = gimple_seq_last_stmt (tf->top_p_seq) != NULL ?
+ gimple_location (gimple_seq_last_stmt (tf->top_p_seq))
+ : tf_loc;
+
/* Lower the finally block itself. */
lower_eh_constructs_1 (state, finally);
nlabels = VEC_length (tree, tf->dest_array);
return_index = nlabels;
eh_index = return_index + tf->may_return;
- fallthru_index = eh_index + tf->may_throw;
+ fallthru_index = eh_index + (tf->may_throw && !eh_else);
ndests = fallthru_index + tf->may_fallthru;
finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
- finally_label = create_artificial_label ();
+ finally_label = create_artificial_label (finally_loc);
/* We use VEC_quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as many
     elements as we need. */
if (tf->may_fallthru)
{
- x = gimple_build_assign (finally_tmp, build_int_cst (integer_type_node,
- fallthru_index));
+ x = gimple_build_assign (finally_tmp,
+ build_int_cst (integer_type_node,
+ fallthru_index));
gimple_seq_add_stmt (&tf->top_p_seq, x);
- if (tf->may_throw)
- {
- x = gimple_build_goto (finally_label);
- gimple_seq_add_stmt (&tf->top_p_seq, x);
- }
-
-
- last_case = build3 (CASE_LABEL_EXPR, void_type_node,
- build_int_cst (NULL_TREE, fallthru_index), NULL,
- create_artificial_label ());
+ tmp = build_int_cst (integer_type_node, fallthru_index);
+ last_case = build_case_label (tmp, NULL,
+ create_artificial_label (tf_loc));
VEC_quick_push (tree, case_label_vec, last_case);
last_case_index++;
gimple_seq_add_stmt (&switch_body, x);
}
- if (tf->may_throw)
+ /* For EH_ELSE, emit the exception path (plus resx) now, then
+ subsequently we only need consider the normal path. */
+ if (eh_else)
{
- x = gimple_build_label (tf->eh_label);
- gimple_seq_add_stmt (&tf->top_p_seq, x);
+ if (tf->may_throw)
+ {
+ finally = gimple_eh_else_e_body (eh_else);
+ lower_eh_constructs_1 (state, finally);
- x = gimple_build_assign (finally_tmp, build_int_cst (integer_type_node,
- eh_index));
- gimple_seq_add_stmt (&tf->top_p_seq, x);
+ emit_post_landing_pad (&eh_seq, tf->region);
+ gimple_seq_add_seq (&eh_seq, finally);
+ emit_resx (&eh_seq, tf->region);
+ }
+
+ finally = gimple_eh_else_n_body (eh_else);
+ }
+ else if (tf->may_throw)
+ {
+ emit_post_landing_pad (&eh_seq, tf->region);
- last_case = build3 (CASE_LABEL_EXPR, void_type_node,
- build_int_cst (NULL_TREE, eh_index), NULL,
- create_artificial_label ());
+ x = gimple_build_assign (finally_tmp,
+ build_int_cst (integer_type_node, eh_index));
+ gimple_seq_add_stmt (&eh_seq, x);
+
+ x = gimple_build_goto (finally_label);
+ gimple_seq_add_stmt (&eh_seq, x);
+
+ tmp = build_int_cst (integer_type_node, eh_index);
+ last_case = build_case_label (tmp, NULL,
+ create_artificial_label (tf_loc));
VEC_quick_push (tree, case_label_vec, last_case);
last_case_index++;
x = gimple_build_label (CASE_LABEL (last_case));
- gimple_seq_add_stmt (&switch_body, x);
- x = gimple_build_resx (get_eh_region_number (tf->region));
- gimple_seq_add_stmt (&switch_body, x);
+ gimple_seq_add_stmt (&eh_seq, x);
+ emit_resx (&eh_seq, tf->region);
}
x = gimple_build_label (finally_label);
build_int_cst (integer_type_node,
return_index));
gimple_seq_add_stmt (&mod, x);
- do_return_redirection (q, finally_label, mod, &return_val);
+ do_return_redirection (q, finally_label, mod);
switch_id = return_index;
}
else
{
tree case_lab;
void **slot;
- case_lab = build3 (CASE_LABEL_EXPR, void_type_node,
- build_int_cst (NULL_TREE, switch_id), NULL,
- NULL);
+ tmp = build_int_cst (integer_type_node, switch_id);
+ case_lab = build_case_label (tmp, NULL,
+ create_artificial_label (tf_loc));
/* We store the cont_stmt in the pointer map, so that we can recover
- it in the loop below. We don't create the new label while
- walking the goto_queue because pointers don't offer a stable
- order. */
+ it in the loop below. */
if (!cont_map)
cont_map = pointer_map_create ();
slot = pointer_map_insert (cont_map, case_lab);
}
for (j = last_case_index; j < last_case_index + nlabels; j++)
{
- tree label;
gimple cont_stmt;
void **slot;
gcc_assert (cont_map);
slot = pointer_map_contains (cont_map, last_case);
- /* As the comment above suggests, CASE_LABEL (last_case) was just a
- placeholder, it does not store an actual label, yet. */
gcc_assert (slot);
cont_stmt = *(gimple *) slot;
- label = create_artificial_label ();
- CASE_LABEL (last_case) = label;
-
- x = gimple_build_label (label);
+ x = gimple_build_label (CASE_LABEL (last_case));
gimple_seq_add_stmt (&switch_body, x);
gimple_seq_add_stmt (&switch_body, cont_stmt);
maybe_record_in_goto_queue (state, cont_stmt);
label. */
switch_stmt = gimple_build_switch_vec (finally_tmp, last_case,
case_label_vec);
+ gimple_set_location (switch_stmt, finally_loc);
/* Need to link SWITCH_STMT after running replace_goto_queue
due to not wanting to process the same goto stmts twice. */
the estimate of the size of the switch machinery we'd have to add. */
static bool
-decide_copy_try_finally (int ndests, gimple_seq finally)
+decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
{
int f_estimate, sw_estimate;
+ gimple eh_else;
+
+ /* If there's an EH_ELSE involved, the exception path is separate
+ and really doesn't come into play for this computation. */
+ eh_else = get_eh_else (finally);
+ if (eh_else)
+ {
+ ndests -= may_throw;
+ finally = gimple_eh_else_n_body (eh_else);
+ }
if (!optimize)
- return false;
+ {
+ gimple_stmt_iterator gsi;
+
+ if (ndests == 1)
+ return true;
+
+ for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+ if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
+ return false;
+ }
+ return true;
+ }
/* Finally estimate N times, plus N gotos. */
f_estimate = count_insns_seq (finally, &eni_size_weights);
return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
}
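
/* A worked example of the final test above: once it is reached,
   f_estimate = 30 always chooses copying, since 30 < 40; with
   f_estimate = 60, copying is chosen only when the switch machinery is
   estimated to cost more than 40 units, since 60 * 2 < sw_estimate * 3
   requires sw_estimate > 40.  */
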
+/* REG is the enclosing region for a possible cleanup region, or the region
+ itself. Returns TRUE if such a region would be unreachable.
+
+ Cleanup regions within a must-not-throw region aren't actually reachable
+ even if there are throwing stmts within them, because the personality
+ routine will call terminate before unwinding. */
+
+static bool
+cleanup_is_dead_in (eh_region reg)
+{
+ while (reg && reg->type == ERT_CLEANUP)
+ reg = reg->outer;
+ return (reg && reg->type == ERT_MUST_NOT_THROW);
+}
/* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_FINALLY node
to a sequence of labels and blocks, plus the exception region trees
struct leh_tf_state this_tf;
struct leh_state this_state;
int ndests;
+ gimple_seq old_eh_seq;
/* Process the try block. */
this_tf.try_finally_expr = tp;
this_tf.top_p = tp;
this_tf.outer = state;
- if (using_eh_for_cleanups_p)
- this_tf.region
- = gen_eh_region_cleanup (state->cur_region, state->prev_try);
- else
- this_tf.region = NULL;
+ if (using_eh_for_cleanups_p && !cleanup_is_dead_in (state->cur_region))
+ {
+ this_tf.region = gen_eh_region_cleanup (state->cur_region);
+ this_state.cur_region = this_tf.region;
+ }
+ else
+ {
+ this_tf.region = NULL;
+ this_state.cur_region = state->cur_region;
+ }
- this_state.cur_region = this_tf.region;
- this_state.prev_try = state->prev_try;
+ this_state.ehp_region = state->ehp_region;
this_state.tf = &this_tf;
+ old_eh_seq = eh_seq;
+ eh_seq = NULL;
+
lower_eh_constructs_1 (&this_state, gimple_try_eval(tp));
/* Determine if the try block is escaped through the bottom. */
this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
/* Determine if any exceptions are possible within the try block. */
- if (using_eh_for_cleanups_p)
- this_tf.may_throw = get_eh_region_may_contain_throw (this_tf.region);
+ if (this_tf.region)
+ this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
if (this_tf.may_throw)
- {
- this_tf.eh_label = create_artificial_label ();
- set_eh_region_tree_label (this_tf.region, this_tf.eh_label);
- honor_protect_cleanup_actions (state, &this_state, &this_tf);
- }
+ honor_protect_cleanup_actions (state, &this_state, &this_tf);
/* Determine how many edges (still) reach the finally block. Or rather,
how many destinations are reached by the finally block. Use this to
/* We can easily special-case redirection to a single destination. */
else if (ndests == 1)
lower_try_finally_onedest (state, &this_tf);
- else if (decide_copy_try_finally (ndests, gimple_try_cleanup (tp)))
+ else if (decide_copy_try_finally (ndests, this_tf.may_throw,
+ gimple_try_cleanup (tp)))
lower_try_finally_copy (state, &this_tf);
else
lower_try_finally_switch (state, &this_tf);
}
VEC_free (tree, heap, this_tf.dest_array);
- if (this_tf.goto_queue)
- free (this_tf.goto_queue);
+ free (this_tf.goto_queue);
if (this_tf.goto_queue_map)
pointer_map_destroy (this_tf.goto_queue_map);
+ /* If there was an old (aka outer) eh_seq, append the current eh_seq.
+ If there was no old eh_seq, then the append is trivially already done. */
+ if (old_eh_seq)
+ {
+ if (eh_seq == NULL)
+ eh_seq = old_eh_seq;
+ else
+ {
+ gimple_seq new_eh_seq = eh_seq;
+ eh_seq = old_eh_seq;
+ gimple_seq_add_seq(&eh_seq, new_eh_seq);
+ }
+ }
+
return this_tf.top_p_seq;
}
static gimple_seq
lower_catch (struct leh_state *state, gimple tp)
{
- struct eh_region *try_region;
- struct leh_state this_state;
+ eh_region try_region = NULL;
+ struct leh_state this_state = *state;
gimple_stmt_iterator gsi;
tree out_label;
+ gimple_seq new_seq;
+ gimple x;
+ location_t try_catch_loc = gimple_location (tp);
- try_region = gen_eh_region_try (state->cur_region);
- this_state.cur_region = try_region;
- this_state.prev_try = try_region;
- this_state.tf = state->tf;
+ if (flag_exceptions)
+ {
+ try_region = gen_eh_region_try (state->cur_region);
+ this_state.cur_region = try_region;
+ }
lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
- if (!get_eh_region_may_contain_throw (try_region))
- {
- return gimple_try_eval (tp);
- }
+ if (!eh_region_may_contain_throw (try_region))
+ return gimple_try_eval (tp);
+
+ new_seq = NULL;
+ emit_eh_dispatch (&new_seq, try_region);
+ emit_resx (&new_seq, try_region);
+
+ this_state.cur_region = state->cur_region;
+ this_state.ehp_region = try_region;
out_label = NULL;
- for (gsi = gsi_start (gimple_try_cleanup (tp)); !gsi_end_p (gsi); )
+ for (gsi = gsi_start (gimple_try_cleanup (tp));
+ !gsi_end_p (gsi);
+ gsi_next (&gsi))
{
- struct eh_region *catch_region;
- tree eh_label;
- gimple x, gcatch;
+ eh_catch c;
+ gimple gcatch;
+ gimple_seq handler;
gcatch = gsi_stmt (gsi);
- catch_region = gen_eh_region_catch (try_region,
- gimple_catch_types (gcatch));
+ c = gen_eh_region_catch (try_region, gimple_catch_types (gcatch));
- this_state.cur_region = catch_region;
- this_state.prev_try = state->prev_try;
- lower_eh_constructs_1 (&this_state, gimple_catch_handler (gcatch));
+ handler = gimple_catch_handler (gcatch);
+ lower_eh_constructs_1 (&this_state, handler);
- eh_label = create_artificial_label ();
- set_eh_region_tree_label (catch_region, eh_label);
+ c->label = create_artificial_label (UNKNOWN_LOCATION);
+ x = gimple_build_label (c->label);
+ gimple_seq_add_stmt (&new_seq, x);
- x = gimple_build_label (eh_label);
- gsi_insert_before (&gsi, x, GSI_SAME_STMT);
+ gimple_seq_add_seq (&new_seq, handler);
- if (gimple_seq_may_fallthru (gimple_catch_handler (gcatch)))
+ if (gimple_seq_may_fallthru (new_seq))
{
if (!out_label)
- out_label = create_artificial_label ();
+ out_label = create_artificial_label (try_catch_loc);
x = gimple_build_goto (out_label);
- gimple_seq_add_stmt (gimple_catch_handler_ptr (gcatch), x);
+ gimple_seq_add_stmt (&new_seq, x);
}
-
- gsi_insert_seq_before (&gsi, gimple_catch_handler (gcatch),
- GSI_SAME_STMT);
- gsi_remove (&gsi, false);
+ if (!c->type_list)
+ break;
}
- return frob_into_branch_around (tp, NULL, out_label);
+ gimple_try_set_cleanup (tp, new_seq);
+
+ return frob_into_branch_around (tp, try_region, out_label);
}
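
/* After lowering, the handlers of a try/catch live entirely in the EH
   sequence; roughly (illustrative shape only):

     <post landing pad for TRY_REGION>:
       eh_dispatch TRY_REGION;
       resx TRY_REGION;
     <label for catch 1>: handler-1; goto over;
     <label for catch 2>: handler-2; goto over;

   make_eh_dispatch_edges, below, later expands the eh_dispatch into CFG
   edges to each handler label, with the resx acting as the fallthru
   path when no handler matches.  */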
/* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with a
static gimple_seq
lower_eh_filter (struct leh_state *state, gimple tp)
{
- struct leh_state this_state;
- struct eh_region *this_region;
- gimple inner;
- tree eh_label;
+ struct leh_state this_state = *state;
+ eh_region this_region = NULL;
+ gimple inner, x;
+ gimple_seq new_seq;
inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
- if (gimple_eh_filter_must_not_throw (inner))
- this_region = gen_eh_region_must_not_throw (state->cur_region);
- else
- this_region = gen_eh_region_allowed (state->cur_region,
- gimple_eh_filter_types (inner));
- this_state = *state;
- this_state.cur_region = this_region;
- /* For must not throw regions any cleanup regions inside it
- can't reach outer catch regions. */
- if (gimple_eh_filter_must_not_throw (inner))
- this_state.prev_try = NULL;
+ if (flag_exceptions)
+ {
+ this_region = gen_eh_region_allowed (state->cur_region,
+ gimple_eh_filter_types (inner));
+ this_state.cur_region = this_region;
+ }
lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
- if (!get_eh_region_may_contain_throw (this_region))
+ if (!eh_region_may_contain_throw (this_region))
+ return gimple_try_eval (tp);
+
+ new_seq = NULL;
+ this_state.cur_region = state->cur_region;
+ this_state.ehp_region = this_region;
+
+ emit_eh_dispatch (&new_seq, this_region);
+ emit_resx (&new_seq, this_region);
+
+ this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
+ x = gimple_build_label (this_region->u.allowed.label);
+ gimple_seq_add_stmt (&new_seq, x);
+
+ lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure (inner));
+ gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
+
+ gimple_try_set_cleanup (tp, new_seq);
+
+ return frob_into_branch_around (tp, this_region, NULL);
+}
+
+/* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with
+   a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
+ plus the exception region trees that record all the magic. */
+
+static gimple_seq
+lower_eh_must_not_throw (struct leh_state *state, gimple tp)
+{
+ struct leh_state this_state = *state;
+
+ if (flag_exceptions)
{
- return gimple_try_eval (tp);
- }
+ gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
+ eh_region this_region;
+
+ this_region = gen_eh_region_must_not_throw (state->cur_region);
+ this_region->u.must_not_throw.failure_decl
+ = gimple_eh_must_not_throw_fndecl (inner);
+ this_region->u.must_not_throw.failure_loc = gimple_location (tp);
+
+ /* In order to get mangling applied to this decl, we must mark it
+ used now. Otherwise, pass_ipa_free_lang_data won't think it
+ needs to happen. */
+ TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
- lower_eh_constructs_1 (state, gimple_eh_filter_failure (inner));
- gimple_try_set_cleanup (tp, gimple_eh_filter_failure (inner));
+ this_state.cur_region = this_region;
+ }
- eh_label = create_artificial_label ();
- set_eh_region_tree_label (this_region, eh_label);
+ lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
- return frob_into_branch_around (tp, eh_label, NULL);
+ return gimple_try_eval (tp);
}
/* Implement a cleanup expression. This is similar to try-finally,
static gimple_seq
lower_cleanup (struct leh_state *state, gimple tp)
{
- struct leh_state this_state;
- struct eh_region *this_region;
+ struct leh_state this_state = *state;
+ eh_region this_region = NULL;
struct leh_tf_state fake_tf;
gimple_seq result;
+ bool cleanup_dead = cleanup_is_dead_in (state->cur_region);
- /* If not using eh, then exception-only cleanups are no-ops. */
- if (!flag_exceptions)
+ if (flag_exceptions && !cleanup_dead)
{
- result = gimple_try_eval (tp);
- lower_eh_constructs_1 (state, result);
- return result;
+ this_region = gen_eh_region_cleanup (state->cur_region);
+ this_state.cur_region = this_region;
}
- this_region = gen_eh_region_cleanup (state->cur_region, state->prev_try);
- this_state = *state;
- this_state.cur_region = this_region;
-
lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
- if (!get_eh_region_may_contain_throw (this_region))
- {
- return gimple_try_eval (tp);
- }
+ if (cleanup_dead || !eh_region_may_contain_throw (this_region))
+ return gimple_try_eval (tp);
/* Build enough of a try-finally state so that we can reuse
honor_protect_cleanup_actions. */
memset (&fake_tf, 0, sizeof (fake_tf));
- fake_tf.top_p = tp;
+ fake_tf.top_p = fake_tf.try_finally_expr = tp;
fake_tf.outer = state;
fake_tf.region = this_region;
fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
fake_tf.may_throw = true;
- fake_tf.eh_label = create_artificial_label ();
- set_eh_region_tree_label (this_region, fake_tf.eh_label);
-
honor_protect_cleanup_actions (state, NULL, &fake_tf);
if (fake_tf.may_throw)
/* In this case honor_protect_cleanup_actions had nothing to do,
and we should process this normally. */
lower_eh_constructs_1 (state, gimple_try_cleanup (tp));
- result = frob_into_branch_around (tp, fake_tf.eh_label,
- fake_tf.fallthru_label);
+ result = frob_into_branch_around (tp, this_region,
+ fake_tf.fallthru_label);
}
else
{
return result;
}
-
-
-/* Main loop for lowering eh constructs. Also moves gsi to the next
+/* Main loop for lowering eh constructs. Also moves gsi to the next
statement. */
static void
switch (gimple_code (stmt))
{
case GIMPLE_CALL:
+ {
+ tree fndecl = gimple_call_fndecl (stmt);
+ tree rhs, lhs;
+
+ if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
+ case BUILT_IN_EH_POINTER:
+ /* The front end may have generated a call to
+ __builtin_eh_pointer (0) within a catch region. Replace
+ this zero argument with the current catch region number. */
+ if (state->ehp_region)
+ {
+ tree nr = build_int_cst (integer_type_node,
+ state->ehp_region->index);
+ gimple_call_set_arg (stmt, 0, nr);
+ }
+ else
+ {
+              /* The user has done something silly. Remove it. */
+ rhs = null_pointer_node;
+ goto do_replace;
+ }
+ break;
+
+ case BUILT_IN_EH_FILTER:
+ /* ??? This should never appear, but since it's a builtin it
+ is accessible to abuse by users. Just remove it and
+ replace the use with the arbitrary value zero. */
+ rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
+ do_replace:
+ lhs = gimple_call_lhs (stmt);
+ x = gimple_build_assign (lhs, rhs);
+ gsi_insert_before (gsi, x, GSI_SAME_STMT);
+ /* FALLTHRU */
+
+ case BUILT_IN_EH_COPY_VALUES:
+ /* Likewise this should not appear. Remove it. */
+ gsi_remove (gsi, true);
+ return;
+
+ default:
+ break;
+ }
+ }
+ /* FALLTHRU */
+
case GIMPLE_ASSIGN:
+ /* If the stmt can throw use a new temporary for the assignment
+ to a LHS. This makes sure the old value of the LHS is
+ available on the EH edge. Only do so for statements that
+ potentially fall thru (no noreturn calls e.g.), otherwise
+ this new assignment might create fake fallthru regions. */
+ if (stmt_could_throw_p (stmt)
+ && gimple_has_lhs (stmt)
+ && gimple_stmt_may_fallthru (stmt)
+ && !tree_could_throw_p (gimple_get_lhs (stmt))
+ && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
+ {
+ tree lhs = gimple_get_lhs (stmt);
+ tree tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
+ gimple s = gimple_build_assign (lhs, tmp);
+ gimple_set_location (s, gimple_location (stmt));
+ gimple_set_block (s, gimple_block (stmt));
+ gimple_set_lhs (stmt, tmp);
+ if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
+ || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
+ DECL_GIMPLE_REG_P (tmp) = 1;
+ gsi_insert_after (gsi, s, GSI_SAME_STMT);
+ }
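      /* Illustrative GIMPLE (not from a real dump): a throwing assignment

           x = foo ();

         becomes

           tmp = foo ();
           x = tmp;

         so that the EH edge out of the call still sees the old value of
         X, while the fallthru path picks up the new value via the copy.  */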
/* Look for things that can throw exceptions, and record them. */
if (state->cur_region && stmt_could_throw_p (stmt))
{
else
{
x = gimple_seq_first_stmt (gimple_try_cleanup (stmt));
- switch (gimple_code (x))
+ if (!x)
{
- case GIMPLE_CATCH:
- replace = lower_catch (state, stmt);
- break;
- case GIMPLE_EH_FILTER:
- replace = lower_eh_filter (state, stmt);
- break;
- default:
- replace = lower_cleanup (state, stmt);
- break;
+ replace = gimple_try_eval (stmt);
+ lower_eh_constructs_1 (state, replace);
}
+ else
+ switch (gimple_code (x))
+ {
+ case GIMPLE_CATCH:
+ replace = lower_catch (state, stmt);
+ break;
+ case GIMPLE_EH_FILTER:
+ replace = lower_eh_filter (state, stmt);
+ break;
+ case GIMPLE_EH_MUST_NOT_THROW:
+ replace = lower_eh_must_not_throw (state, stmt);
+ break;
+ case GIMPLE_EH_ELSE:
+ /* This code is only valid with GIMPLE_TRY_FINALLY. */
+ gcc_unreachable ();
+ default:
+ replace = lower_cleanup (state, stmt);
+ break;
+ }
}
/* Remove the old stmt and insert the transformed sequence
/* Return since we don't want gsi_next () */
return;
+ case GIMPLE_EH_ELSE:
+ /* We should be eliminating this in lower_try_finally et al. */
+ gcc_unreachable ();
+
default:
/* A type, a decl, or some kind of statement that we're not
interested in. Don't walk them. */
lower_eh_constructs (void)
{
struct leh_state null_state;
+ gimple_seq bodyp;
- gimple_seq bodyp = gimple_body (current_function_decl);
+ bodyp = gimple_body (current_function_decl);
+ if (bodyp == NULL)
+ return 0;
finally_tree = htab_create (31, struct_ptr_hash, struct_ptr_eq, free);
+ eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
+ memset (&null_state, 0, sizeof (null_state));
collect_finally_tree_1 (bodyp, NULL);
-
- memset (&null_state, 0, sizeof (null_state));
lower_eh_constructs_1 (&null_state, bodyp);
+ /* We assume there's a return statement, or something, at the end of
+   the function, and thus plopping the EH sequence afterward won't
+ change anything. */
+ gcc_assert (!gimple_seq_may_fallthru (bodyp));
+ gimple_seq_add_seq (&bodyp, eh_seq);
+
+ /* We assume that since BODYP already existed, adding EH_SEQ to it
+ didn't change its value, and we don't have to re-set the function. */
+ gcc_assert (bodyp == gimple_body (current_function_decl));
+
htab_delete (finally_tree);
+ BITMAP_FREE (eh_region_may_contain_throw_map);
+ eh_seq = NULL;
+
+ /* If this function needs a language specific EH personality routine
+ and the frontend didn't already set one do so now. */
+ if (function_needs_eh_personality (cfun) == eh_personality_lang
+ && !DECL_FUNCTION_PERSONALITY (current_function_decl))
+ DECL_FUNCTION_PERSONALITY (current_function_decl)
+ = lang_hooks.eh_personality ();
- collect_eh_region_array ();
return 0;
}
PROP_gimple_leh, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func /* todo_flags_finish */
+ 0 /* todo_flags_finish */
}
};
-
\f
-/* Construct EH edges for STMT. */
+/* Create the multiple edges from an EH_DISPATCH statement to all of
+   the possible handlers for its EH region. Return true if there is
+   a fallthru edge out of the dispatch; false if there is not. */
-static void
-make_eh_edge (struct eh_region *region, void *data)
+bool
+make_eh_dispatch_edges (gimple stmt)
{
- gimple stmt;
- tree lab;
+ eh_region r;
+ eh_catch c;
basic_block src, dst;
- stmt = (gimple) data;
- lab = get_eh_region_tree_label (region);
-
+ r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
src = gimple_bb (stmt);
- dst = label_to_block (lab);
- make_edge (src, dst, EDGE_ABNORMAL | EDGE_EH);
-}
+ switch (r->type)
+ {
+ case ERT_TRY:
+ for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
+ {
+ dst = label_to_block (c->label);
+ make_edge (src, dst, 0);
-/* See if STMT is call that might be inlined. */
+ /* A catch-all handler doesn't have a fallthru. */
+ if (c->type_list == NULL)
+ return false;
+ }
+ break;
-static bool
-inlinable_call_p (gimple stmt)
-{
- tree decl;
- if (gimple_code (stmt) != GIMPLE_CALL)
- return false;
- if (cfun->after_inlining)
- return false;
- /* Indirect calls can be propagated to direct call
- and inlined. */
- decl = gimple_call_fndecl (stmt);
- if (!decl)
- return true;
- if (cgraph_function_flags_ready
- && cgraph_function_body_availability (cgraph_node (decl))
- < AVAIL_OVERWRITABLE)
- return false;
- return !DECL_UNINLINABLE (decl);
+ case ERT_ALLOWED_EXCEPTIONS:
+ dst = label_to_block (r->u.allowed.label);
+ make_edge (src, dst, 0);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return true;
}
+/* Create the single EH edge from STMT to its nearest landing pad,
+ if there is such a landing pad within the current function. */
+
void
make_eh_edges (gimple stmt)
{
- int region_nr;
- bool is_resx;
- bool inlinable = false;
+ basic_block src, dst;
+ eh_landing_pad lp;
+ int lp_nr;
- if (gimple_code (stmt) == GIMPLE_RESX)
- {
- region_nr = gimple_resx_region (stmt);
- is_resx = true;
- }
- else
- {
- region_nr = lookup_stmt_eh_region (stmt);
- if (region_nr < 0)
- return;
- is_resx = false;
- inlinable = inlinable_call_p (stmt);
- }
+ lp_nr = lookup_stmt_eh_lp (stmt);
+ if (lp_nr <= 0)
+ return;
- foreach_reachable_handler (region_nr, is_resx, inlinable, make_eh_edge, stmt);
+ lp = get_eh_landing_pad_from_number (lp_nr);
+ gcc_assert (lp != NULL);
+
+ src = gimple_bb (stmt);
+ dst = label_to_block (lp->post_landing_pad);
+ make_edge (src, dst, EDGE_EH);
}
-static bool mark_eh_edge_found_error;
+/* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
+ do not actually perform the final edge redirection.
-/* Mark edge make_eh_edge would create for given region by setting it aux
- field, output error if something goes wrong. */
+ CHANGE_REGION is true when we're being called from cleanup_empty_eh and
+ we intend to change the destination EH region as well; this means
+ EH_LANDING_PAD_NR must already be set on the destination block label.
+ If false, we're being called from generic cfg manipulation code and we
+ should preserve our place within the region tree. */
static void
-mark_eh_edge (struct eh_region *region, void *data)
+redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
{
- gimple stmt;
- tree lab;
- basic_block src, dst;
+ eh_landing_pad old_lp, new_lp;
+ basic_block old_bb;
+ gimple throw_stmt;
+ int old_lp_nr, new_lp_nr;
+ tree old_label, new_label;
+ edge_iterator ei;
edge e;
- stmt = (gimple) data;
- lab = get_eh_region_tree_label (region);
+ old_bb = edge_in->dest;
+ old_label = gimple_block_label (old_bb);
+ old_lp_nr = EH_LANDING_PAD_NR (old_label);
+ gcc_assert (old_lp_nr > 0);
+ old_lp = get_eh_landing_pad_from_number (old_lp_nr);
- src = gimple_bb (stmt);
- dst = label_to_block (lab);
+ throw_stmt = last_stmt (edge_in->src);
+ gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
- e = find_edge (src, dst);
- if (!e)
- {
- error ("EH edge %i->%i is missing", src->index, dst->index);
- mark_eh_edge_found_error = true;
- }
- else if (!(e->flags & EDGE_EH))
+ new_label = gimple_block_label (new_bb);
+
+ /* Look for an existing region that might be using NEW_BB already. */
+ new_lp_nr = EH_LANDING_PAD_NR (new_label);
+ if (new_lp_nr)
{
- error ("EH edge %i->%i miss EH flag", src->index, dst->index);
- mark_eh_edge_found_error = true;
+ new_lp = get_eh_landing_pad_from_number (new_lp_nr);
+ gcc_assert (new_lp);
+
+ /* Unless CHANGE_REGION is true, the new and old landing pad
+ had better be associated with the same EH region. */
+ gcc_assert (change_region || new_lp->region == old_lp->region);
}
- else if (e->aux)
+ else
{
- /* ??? might not be mistake. */
- error ("EH edge %i->%i has duplicated regions", src->index, dst->index);
- mark_eh_edge_found_error = true;
+ new_lp = NULL;
+ gcc_assert (!change_region);
}
- else
- e->aux = (void *)1;
-}
-
-/* Verify that BB containing STMT as the last statement, has precisely the
- edges that make_eh_edges would create. */
-bool
-verify_eh_edges (gimple stmt)
-{
- int region_nr;
- bool is_resx;
- basic_block bb = gimple_bb (stmt);
- edge_iterator ei;
- edge e;
- bool inlinable = false;
+ /* Notice when we redirect the last EH edge away from OLD_BB. */
+ FOR_EACH_EDGE (e, ei, old_bb->preds)
+ if (e != edge_in && (e->flags & EDGE_EH))
+ break;
- FOR_EACH_EDGE (e, ei, bb->succs)
- gcc_assert (!e->aux);
- mark_eh_edge_found_error = false;
- if (gimple_code (stmt) == GIMPLE_RESX)
+ if (new_lp)
{
- region_nr = gimple_resx_region (stmt);
- is_resx = true;
+ /* NEW_LP already exists. If there are still edges into OLD_LP,
+ there's nothing to do with the EH tree. If there are no more
+ edges into OLD_LP, then we want to remove OLD_LP as it is unused.
+ If CHANGE_REGION is true, then our caller is expecting to remove
+ the landing pad. */
+ if (e == NULL && !change_region)
+ remove_eh_landing_pad (old_lp);
}
else
{
- region_nr = lookup_stmt_eh_region (stmt);
- if (region_nr < 0)
- {
- FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->flags & EDGE_EH)
- {
- error ("BB %i can not throw but has EH edges", bb->index);
- return true;
- }
- return false;
- }
- if (!stmt_could_throw_p (stmt))
+ /* No correct landing pad exists. If there are no more edges
+ into OLD_LP, then we can simply re-use the existing landing pad.
+ Otherwise, we have to create a new landing pad. */
+ if (e == NULL)
{
- error ("BB %i last statement has incorrectly set region", bb->index);
- return true;
+ EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
+ new_lp = old_lp;
}
- inlinable = inlinable_call_p (stmt);
- is_resx = false;
+ else
+ new_lp = gen_eh_landing_pad (old_lp->region);
+ new_lp->post_landing_pad = new_label;
+ EH_LANDING_PAD_NR (new_label) = new_lp->index;
}
- foreach_reachable_handler (region_nr, is_resx, inlinable, mark_eh_edge, stmt);
- FOR_EACH_EDGE (e, ei, bb->succs)
+ /* Maybe move the throwing statement to the new region. */
+ if (old_lp != new_lp)
+ {
+ remove_stmt_from_eh_lp (throw_stmt);
+ add_stmt_to_eh_lp (throw_stmt, new_lp->index);
+ }
+}
+
+/* Redirect EH edge E to NEW_BB. */
+
+edge
+redirect_eh_edge (edge edge_in, basic_block new_bb)
+{
+ redirect_eh_edge_1 (edge_in, new_bb, false);
+ return ssa_redirect_edge (edge_in, new_bb);
+}
+
+/* This is a subroutine of gimple_redirect_edge_and_branch. Update the
+ labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
+ The actual edge update will happen in the caller. */
+
+void
+redirect_eh_dispatch_edge (gimple stmt, edge e, basic_block new_bb)
+{
+ tree new_lab = gimple_block_label (new_bb);
+ bool any_changed = false;
+ basic_block old_bb;
+ eh_region r;
+ eh_catch c;
+
+ r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
+ switch (r->type)
{
- if ((e->flags & EDGE_EH) && !e->aux)
+ case ERT_TRY:
+ for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
{
- error ("unnecessary EH edge %i->%i", bb->index, e->dest->index);
- mark_eh_edge_found_error = true;
- return true;
+ old_bb = label_to_block (c->label);
+ if (old_bb == e->dest)
+ {
+ c->label = new_lab;
+ any_changed = true;
+ }
}
- e->aux = NULL;
+ break;
+
+ case ERT_ALLOWED_EXCEPTIONS:
+ old_bb = label_to_block (r->u.allowed.label);
+ gcc_assert (old_bb == e->dest);
+ r->u.allowed.label = new_lab;
+ any_changed = true;
+ break;
+
+ default:
+ gcc_unreachable ();
}
- return mark_eh_edge_found_error;
+ gcc_assert (any_changed);
}
-
\f
/* Helper function for operation_could_trap_p and stmt_could_throw_p. */
return true;
return false;
+ case COMPLEX_EXPR:
+ case CONSTRUCTOR:
+ /* Constructing an object cannot trap. */
+ return false;
+
default:
/* Any floating arithmetic may trap. */
if (fp_operation && flag_trapping_math)
if (!expr)
return false;
-
+
code = TREE_CODE (expr);
t = TREE_TYPE (expr);
switch (code)
{
case TARGET_MEM_REF:
- /* For TARGET_MEM_REFs use the information based on the original
- reference. */
- expr = TMR_ORIGINAL (expr);
- code = TREE_CODE (expr);
- goto restart;
+ if (TREE_CODE (TMR_BASE (expr)) == ADDR_EXPR
+ && !TMR_INDEX (expr) && !TMR_INDEX2 (expr))
+ return false;
+ return !TREE_THIS_NOTRAP (expr);
case COMPONENT_REF:
case REALPART_EXPR:
base = TREE_OPERAND (expr, 0);
if (tree_could_trap_p (base))
return true;
-
if (TREE_THIS_NOTRAP (expr))
return false;
-
return !range_in_array_bounds_p (expr);
case ARRAY_REF:
base = TREE_OPERAND (expr, 0);
if (tree_could_trap_p (base))
return true;
-
if (TREE_THIS_NOTRAP (expr))
return false;
-
return !in_array_bounds_p (expr);
+ case MEM_REF:
+ if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
+ return false;
+ /* Fallthru. */
case INDIRECT_REF:
- case ALIGN_INDIRECT_REF:
- case MISALIGNED_INDIRECT_REF:
return !TREE_THIS_NOTRAP (expr);
case ASM_EXPR:
return TREE_THIS_VOLATILE (expr);
-
case CALL_EXPR:
t = get_callee_fndecl (expr);
/* Assume that calls to weak functions may trap. */
- if (!t || !DECL_P (t) || DECL_WEAK (t))
+ if (!t || !DECL_P (t))
return true;
+ if (DECL_WEAK (t))
+ return tree_could_trap_p (t);
+ return false;
+
+ case FUNCTION_DECL:
+ /* Assume that accesses to weak functions may trap, unless we know
+ they are certainly defined in the current TU or in some other
+ LTO partition. */
+ if (DECL_WEAK (expr))
+ {
+ struct cgraph_node *node;
+ if (!DECL_EXTERNAL (expr))
+ return false;
+ node = cgraph_function_node (cgraph_get_node (expr), NULL);
+ if (node && node->in_other_partition)
+ return false;
+ return true;
+ }
+ return false;
+
+ case VAR_DECL:
+ /* Assume that accesses to weak vars may trap, unless we know
+ they are certainly defined in the current TU or in some other
+ LTO partition. */
+ if (DECL_WEAK (expr))
+ {
+ struct varpool_node *node;
+ if (!DECL_EXTERNAL (expr))
+ return false;
+ node = varpool_variable_node (varpool_get_node (expr), NULL);
+ if (node && node->in_other_partition)
+ return false;
+ return true;
+ }
return false;
default:
|| TREE_CODE_CLASS (code) == tcc_unary
|| TREE_CODE_CLASS (code) == tcc_binary)
{
- t = gimple_expr_type (stmt);
+ if (is_gimple_assign (stmt)
+ && TREE_CODE_CLASS (code) == tcc_comparison)
+ t = TREE_TYPE (gimple_assign_rhs1 (stmt));
+ else if (gimple_code (stmt) == GIMPLE_COND)
+ t = TREE_TYPE (gimple_cond_lhs (stmt));
+ else
+ t = gimple_expr_type (stmt);
fp_operation = FLOAT_TYPE_P (t);
if (fp_operation)
{
bool
stmt_could_throw_p (gimple stmt)
{
- enum gimple_code code;
-
if (!flag_exceptions)
return false;
/* The only statements that can throw an exception are assignments,
- conditionals, calls and asms. */
- code = gimple_code (stmt);
- if (code != GIMPLE_ASSIGN
- && code != GIMPLE_COND
- && code != GIMPLE_CALL
- && code != GIMPLE_ASM)
- return false;
+ conditionals, calls, resx, and asms. */
+ switch (gimple_code (stmt))
+ {
+ case GIMPLE_RESX:
+ return true;
- /* If exceptions can only be thrown by function calls and STMT is not a
- GIMPLE_CALL, the statement cannot throw. */
- if (!flag_non_call_exceptions && code != GIMPLE_CALL)
- return false;
+ case GIMPLE_CALL:
+ return !gimple_call_nothrow_p (stmt);
- if (code == GIMPLE_ASSIGN || code == GIMPLE_COND)
- return stmt_could_throw_1_p (stmt);
- else if (is_gimple_call (stmt))
- return (gimple_call_flags (stmt) & ECF_NOTHROW) == 0;
- else if (gimple_code (stmt) == GIMPLE_ASM)
- return (gimple_asm_volatile_p (stmt));
- else
- gcc_unreachable ();
+ case GIMPLE_ASSIGN:
+ case GIMPLE_COND:
+ if (!cfun->can_throw_non_call_exceptions)
+ return false;
+ return stmt_could_throw_1_p (stmt);
- return false;
+ case GIMPLE_ASM:
+ if (!cfun->can_throw_non_call_exceptions)
+ return false;
+ return gimple_asm_volatile_p (stmt);
+
+ default:
+ return false;
+ }
}
return false;
if (TREE_CODE (t) == MODIFY_EXPR)
{
- if (flag_non_call_exceptions
- && tree_could_trap_p (TREE_OPERAND (t, 0)))
- return true;
+ if (cfun->can_throw_non_call_exceptions
+ && tree_could_trap_p (TREE_OPERAND (t, 0)))
+ return true;
t = TREE_OPERAND (t, 1);
}
t = TREE_OPERAND (t, 0);
if (TREE_CODE (t) == CALL_EXPR)
return (call_expr_flags (t) & ECF_NOTHROW) == 0;
- if (flag_non_call_exceptions)
+ if (cfun->can_throw_non_call_exceptions)
return tree_could_trap_p (t);
return false;
}
bool
stmt_can_throw_external (gimple stmt)
{
- int region_nr;
- bool is_resx = false;
- bool inlinable_call = false;
+ int lp_nr;
if (!stmt_could_throw_p (stmt))
return false;
- if (gimple_code (stmt) == GIMPLE_RESX)
- {
- region_nr = gimple_resx_region (stmt);
- is_resx = true;
- }
- else
- region_nr = lookup_stmt_eh_region (stmt);
-
- if (region_nr < 0)
- return true;
-
- return can_throw_external_1 (region_nr, is_resx, inlinable_call);
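+  /* A statement with no entry in the EH table (lp_nr == 0) propagates
+     its exception out of the function; a recorded landing pad (positive
+     number) or MUST_NOT_THROW region (negative number) handles it
+     within the function.  */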
+ lp_nr = lookup_stmt_eh_lp (stmt);
+ return lp_nr == 0;
}
/* Return true if STMT can throw an exception that is caught within
bool
stmt_can_throw_internal (gimple stmt)
{
- int region_nr;
- bool is_resx = false;
- bool inlinable_call = false;
-
- if (gimple_code (stmt) == GIMPLE_RESX)
- {
- region_nr = gimple_resx_region (stmt);
- is_resx = true;
- }
- else
- {
- region_nr = lookup_stmt_eh_region (stmt);
- inlinable_call = inlinable_call_p (stmt);
- }
+ int lp_nr;
- if (region_nr < 0)
+ if (!stmt_could_throw_p (stmt))
return false;
- return can_throw_internal_1 (region_nr, is_resx, inlinable_call);
+ lp_nr = lookup_stmt_eh_lp (stmt);
+ return lp_nr > 0;
+}
+
+/* Given a statement STMT in IFUN, if STMT can no longer throw, then
+ remove any entry it might have from the EH table. Return true if
+ any change was made. */
+
+bool
+maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
+{
+ if (stmt_could_throw_p (stmt))
+ return false;
+ return remove_stmt_from_eh_lp_fn (ifun, stmt);
}
+/* Likewise, but always use the current function. */
+
+bool
+maybe_clean_eh_stmt (gimple stmt)
+{
+ return maybe_clean_eh_stmt_fn (cfun, stmt);
+}
/* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
in the table if it should be in there. Return TRUE if a replacement was
done that may require an EH edge purge. */
-bool
-maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
+bool
+maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
{
- int region_nr = lookup_stmt_eh_region (old_stmt);
+ int lp_nr = lookup_stmt_eh_lp (old_stmt);
- if (region_nr >= 0)
+ if (lp_nr != 0)
{
bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
if (new_stmt == old_stmt && new_stmt_could_throw)
return false;
- remove_stmt_from_eh_region (old_stmt);
+ remove_stmt_from_eh_lp (old_stmt);
if (new_stmt_could_throw)
{
- add_stmt_to_eh_region (new_stmt, region_nr);
+ add_stmt_to_eh_lp (new_stmt, lp_nr);
return false;
}
else
return false;
}
-\f
-/* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
- GIMPLE_TRY) that are similar enough to be considered the same. Currently
- this only handles handlers consisting of a single call, as that's the
- important case for C++: a destructor call for a particular object showing
- up in multiple handlers. */
-static bool
-same_handler_p (gimple_seq oneh, gimple_seq twoh)
+/* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
+ in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT. The MAP
+ operand is the return value of duplicate_eh_regions. */
+
+bool
+maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
+ struct function *old_fun, gimple old_stmt,
+ struct pointer_map_t *map, int default_lp_nr)
+{
+ int old_lp_nr, new_lp_nr;
+ void **slot;
+
+ if (!stmt_could_throw_p (new_stmt))
+ return false;
+
+ old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
+ if (old_lp_nr == 0)
+ {
+ if (default_lp_nr == 0)
+ return false;
+ new_lp_nr = default_lp_nr;
+ }
+ else if (old_lp_nr > 0)
+ {
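+      /* A positive number names a landing pad; find its duplicate in
+	 NEW_FUN via MAP.  */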
+ eh_landing_pad old_lp, new_lp;
+
+ old_lp = VEC_index (eh_landing_pad, old_fun->eh->lp_array, old_lp_nr);
+ slot = pointer_map_contains (map, old_lp);
+ new_lp = (eh_landing_pad) *slot;
+ new_lp_nr = new_lp->index;
+ }
+ else
+ {
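+      /* A negative number names a MUST_NOT_THROW region; map the region
+	 itself and use the negated index of its duplicate.  */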
+ eh_region old_r, new_r;
+
+ old_r = VEC_index (eh_region, old_fun->eh->region_array, -old_lp_nr);
+ slot = pointer_map_contains (map, old_r);
+ new_r = (eh_region) *slot;
+ new_lp_nr = -new_r->index;
+ }
+
+ add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
+ return true;
+}
+
+/* Similar, but both OLD_STMT and NEW_STMT are within the current function,
+ and thus no remapping is required. */
+
+bool
+maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
+{
+ int lp_nr;
+
+ if (!stmt_could_throw_p (new_stmt))
+ return false;
+
+ lp_nr = lookup_stmt_eh_lp (old_stmt);
+ if (lp_nr == 0)
+ return false;
+
+ add_stmt_to_eh_lp (new_stmt, lp_nr);
+ return true;
+}
+\f
+/* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
+ GIMPLE_TRY) that are similar enough to be considered the same. Currently
+ this only handles handlers consisting of a single call, as that's the
+ important case for C++: a destructor call for a particular object showing
+ up in multiple handlers. */
+
+static bool
+same_handler_p (gimple_seq oneh, gimple_seq twoh)
{
gimple_stmt_iterator gsi;
gimple ones, twos;
|| gimple_call_lhs (twos)
|| gimple_call_chain (ones)
|| gimple_call_chain (twos)
- || !operand_equal_p (gimple_call_fn (ones), gimple_call_fn (twos), 0)
+ || !gimple_call_same_target_p (ones, twos)
|| gimple_call_num_args (ones) != gimple_call_num_args (twos))
return false;
for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
if (!operand_equal_p (gimple_call_arg (ones, ai),
- gimple_call_arg (twos, ai), 0))
+ gimple_call_arg (twos, ai), 0))
return false;
return true;
case GIMPLE_EH_FILTER:
refactor_eh_r (gimple_eh_filter_failure (one));
break;
+ case GIMPLE_EH_ELSE:
+ refactor_eh_r (gimple_eh_else_n_body (one));
+ refactor_eh_r (gimple_eh_else_e_body (one));
+ break;
default:
break;
}
return 0;
}
+static bool
+gate_refactor_eh (void)
+{
+ return flag_exceptions != 0;
+}
+
struct gimple_opt_pass pass_refactor_eh =
{
{
GIMPLE_PASS,
"ehopt", /* name */
- NULL, /* gate */
+ gate_refactor_eh, /* gate */
refactor_eh, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func /* todo_flags_finish */
+ 0 /* todo_flags_finish */
}
};
+\f
+/* At the end of gimple optimization, we can lower RESX. */
-/* Walk statements, see what regions are really references and remove unreachable ones. */
-
-static void
-tree_remove_unreachable_handlers (void)
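+/* A rough sketch of the strategy used below: a RESX with no source
+   region is expanded to a trap; a RESX whose destination region is
+   within the current function copies the exception values across (or
+   calls the MUST_NOT_THROW failure routine) and falls through to the
+   landing pad; otherwise the exception escapes and we emit a call to
+   _Unwind_Resume (or __cxa_end_cleanup on ARM EABI).  */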
+static bool
+lower_resx (basic_block bb, gimple stmt, struct pointer_map_t *mnt_map)
{
- sbitmap reachable, contains_stmt;
- VEC(int,heap) * label_to_region;
- basic_block bb;
+ int lp_nr;
+ eh_region src_r, dst_r;
+ gimple_stmt_iterator gsi;
+ gimple x;
+ tree fn, src_nr;
+ bool ret = false;
- label_to_region = label_to_region_map ();
- reachable = sbitmap_alloc (num_eh_regions ());
- sbitmap_zero (reachable);
- contains_stmt = sbitmap_alloc (num_eh_regions ());
- sbitmap_zero (contains_stmt);
+ lp_nr = lookup_stmt_eh_lp (stmt);
+ if (lp_nr != 0)
+ dst_r = get_eh_region_from_lp_number (lp_nr);
+ else
+ dst_r = NULL;
- FOR_EACH_BB (bb)
- {
- gimple_stmt_iterator gsi;
- int region;
- bool has_eh_preds = false;
- edge e;
- edge_iterator ei;
+ src_r = get_eh_region_from_number (gimple_resx_region (stmt));
+ gsi = gsi_last_bb (bb);
- FOR_EACH_EDGE (e, ei, bb->preds) if (e->flags & EDGE_EH)
- has_eh_preds = true;
+ if (src_r == NULL)
+ {
+ /* We can wind up with no source region when pass_cleanup_eh shows
+ that there are no entries into an eh region and deletes it, but
+ then the block that contains the resx isn't removed. This can
+ happen without optimization when the switch statement created by
+ lower_try_finally_switch isn't simplified to remove the eh case.
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
- {
- gimple stmt = gsi_stmt (gsi);
+ Resolve this by expanding the resx node to an abort. */
- if (gimple_code (stmt) == GIMPLE_LABEL && has_eh_preds)
- {
- int uid = LABEL_DECL_UID (gimple_label_label (stmt));
- if (uid <= cfun->cfg->last_label_uid)
- {
- int region = VEC_index (int, label_to_region, uid);
- SET_BIT (reachable, region);
- }
- }
- if (gimple_code (stmt) == RESX)
- SET_BIT (reachable, gimple_resx_region (stmt));
- if ((region = lookup_stmt_eh_region (stmt)) >= 0)
- SET_BIT (contains_stmt, region);
- }
- }
+ fn = builtin_decl_implicit (BUILT_IN_TRAP);
+ x = gimple_build_call (fn, 0);
+ gsi_insert_before (&gsi, x, GSI_SAME_STMT);
- if (dump_file)
- {
- fprintf (dump_file, "Before removal of unreachable regions:\n");
- dump_eh_tree (dump_file, cfun);
- fprintf (dump_file, "Reachable regions: ");
- dump_sbitmap_file (dump_file, reachable);
- fprintf (dump_file, "Regions containing insns: ");
- dump_sbitmap_file (dump_file, contains_stmt);
+ while (EDGE_COUNT (bb->succs) > 0)
+ remove_edge (EDGE_SUCC (bb, 0));
}
-
- remove_unreachable_regions (reachable, contains_stmt);
- sbitmap_free (reachable);
- sbitmap_free (contains_stmt);
- VEC_free (int, heap, label_to_region);
- if (dump_file)
+ else if (dst_r)
{
- fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
- dump_eh_tree (dump_file, cfun);
- fprintf (dump_file, "\n\n");
- }
-}
+ /* When we have a destination region, we resolve this by copying
+ the excptr and filter values into place, and changing the edge
+ to immediately after the landing pad. */
+ edge e;
-/* Pattern match emtpy EH receiver looking like:
-
- save_filt.6352_662 = [filter_expr] <<<filter object>>>;
- save_eptr.6351_663 = [exc_ptr_expr] <<<exception object>>>;
- <<<exception object>>> = save_eptr.6351_663;
- <<<filter object>>> = save_filt.6352_662;
- resx 1
- */
+ if (lp_nr < 0)
+ {
+ basic_block new_bb;
+ void **slot;
+ tree lab;
-static int
-tree_empty_eh_handler_p (basic_block bb)
-{
- gimple_stmt_iterator gsi;
- int region;
+ /* We are resuming into a MUST_NOT_THROW region. Expand a call to
+ the failure decl into a new block, if needed. */
+ gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
- gsi = gsi_last_bb (bb);
+ slot = pointer_map_contains (mnt_map, dst_r);
+ if (slot == NULL)
+ {
+ gimple_stmt_iterator gsi2;
- /* RESX */
- if (gsi_end_p (gsi))
- return 0;
- if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
- return 0;
- region = gimple_resx_region (gsi_stmt (gsi));
+ new_bb = create_empty_bb (bb);
+ lab = gimple_block_label (new_bb);
+ gsi2 = gsi_start_bb (new_bb);
- /* filter_object set. */
- gsi_prev (&gsi);
- if (gsi_end_p (gsi))
- return 0;
- if (gimple_code (gsi_stmt (gsi)) != GIMPLE_ASSIGN)
- return 0;
- if (TREE_CODE (gimple_assign_lhs (gsi_stmt (gsi))) != FILTER_EXPR)
- return 0;
+ fn = dst_r->u.must_not_throw.failure_decl;
+ x = gimple_build_call (fn, 0);
+ gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
+ gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
- /* filter_object set. */
- gsi_prev (&gsi);
- if (gsi_end_p (gsi))
- return 0;
- if (gimple_code (gsi_stmt (gsi)) != GIMPLE_ASSIGN)
- return 0;
- if (TREE_CODE (gimple_assign_lhs (gsi_stmt (gsi))) != EXC_PTR_EXPR)
- return 0;
+ slot = pointer_map_insert (mnt_map, dst_r);
+ *slot = lab;
+ }
+ else
+ {
+ lab = (tree) *slot;
+ new_bb = label_to_block (lab);
+ }
- /* filter_object get. */
- gsi_prev (&gsi);
- if (gsi_end_p (gsi))
- return 0;
- if (gimple_code (gsi_stmt (gsi)) != GIMPLE_ASSIGN)
- return 0;
- if (TREE_CODE (gimple_assign_rhs1 (gsi_stmt (gsi))) != EXC_PTR_EXPR)
- return 0;
+ gcc_assert (EDGE_COUNT (bb->succs) == 0);
+ e = make_edge (bb, new_bb, EDGE_FALLTHRU);
+ e->count = bb->count;
+ e->probability = REG_BR_PROB_BASE;
+ }
+ else
+ {
+ edge_iterator ei;
+ tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
- /* filter_object get. */
- gsi_prev (&gsi);
- if (gsi_end_p (gsi))
- return 0;
- if (gimple_code (gsi_stmt (gsi)) != GIMPLE_ASSIGN)
- return 0;
- if (TREE_CODE (gimple_assign_rhs1 (gsi_stmt (gsi))) != FILTER_EXPR)
- return 0;
+ fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
+ src_nr = build_int_cst (integer_type_node, src_r->index);
+ x = gimple_build_call (fn, 2, dst_nr, src_nr);
+ gsi_insert_before (&gsi, x, GSI_SAME_STMT);
- /* label. */
- gsi_prev (&gsi);
- if (gsi_end_p (gsi))
- return 0;
- if (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL)
- return region;
+ /* Update the flags for the outgoing edge. */
+ e = single_succ_edge (bb);
+ gcc_assert (e->flags & EDGE_EH);
+ e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
+
+ /* If there are no more EH users of the landing pad, delete it. */
+ FOR_EACH_EDGE (e, ei, e->dest->preds)
+ if (e->flags & EDGE_EH)
+ break;
+ if (e == NULL)
+ {
+ eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
+ remove_eh_landing_pad (lp);
+ }
+ }
+
+ ret = true;
+ }
else
- return 0;
-}
+ {
+ tree var;
-static bool dominance_info_invalidated;
+ /* When we don't have a destination region, this exception escapes
+ up the call chain. We resolve this by generating a call to the
+ _Unwind_Resume library function. */
-/* Look for basic blocks containing empty exception handler and remove them.
- This is similar to jump forwarding, just across EH edges. */
+ /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
+ with no arguments for C++ and Java. Check for that. */
+ if (src_r->use_cxa_end_cleanup)
+ {
+ fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
+ x = gimple_build_call (fn, 0);
+ gsi_insert_before (&gsi, x, GSI_SAME_STMT);
+ }
+ else
+ {
+ fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
+ src_nr = build_int_cst (integer_type_node, src_r->index);
+ x = gimple_build_call (fn, 1, src_nr);
+ var = create_tmp_var (ptr_type_node, NULL);
+ var = make_ssa_name (var, x);
+ gimple_call_set_lhs (x, var);
+ gsi_insert_before (&gsi, x, GSI_SAME_STMT);
+
+ fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
+ x = gimple_build_call (fn, 1, var);
+ gsi_insert_before (&gsi, x, GSI_SAME_STMT);
+ }
-static bool
-cleanup_empty_eh (basic_block bb)
-{
- int region;
+ gcc_assert (EDGE_COUNT (bb->succs) == 0);
+ }
- /* When handler of EH region winds up to be empty, we can safely
- remove it. This leads to inner EH regions to be redirected
- to outer one, if present in function. So we need to rebuild
- EH edges in all sources. */
- if ((region = tree_empty_eh_handler_p (bb)))
- {
- edge_iterator ei;
- edge e;
- gimple_stmt_iterator si;
+ gsi_remove (&gsi, true);
- remove_eh_region (region);
+ return ret;
+}
- /* It is safe to mark symbol for renaming because we have abnormal PHI
- here. Once EH edges are made redirectable we might need to add here
- similar updating as jump threading does. */
+static unsigned
+execute_lower_resx (void)
+{
+ basic_block bb;
+ struct pointer_map_t *mnt_map;
+ bool dominance_invalidated = false;
+ bool any_rewritten = false;
- for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
- mark_sym_for_renaming (SSA_NAME_VAR (PHI_RESULT (gsi_stmt (si))));
+ mnt_map = pointer_map_create ();
- while ((e = ei_safe_edge (ei_start (bb->preds))))
+ FOR_EACH_BB (bb)
+ {
+ gimple last = last_stmt (bb);
+ if (last && is_gimple_resx (last))
{
- basic_block src = e->src;
- gcc_assert (e->flags & EDGE_EH);
- for (ei = ei_start (src->succs); (e = ei_safe_edge (ei));)
- {
- if (e->flags & EDGE_EH)
- {
- remove_edge (e);
- dominance_info_invalidated = true;
- }
- else
- ei_next (&ei);
- }
- if (!stmt_can_throw_internal (last_stmt (src)))
- continue;
- make_eh_edges (last_stmt (src));
- FOR_EACH_EDGE (e, ei, src->succs) if (e->flags & EDGE_EH)
- {
- dominance_info_invalidated = true;
- for (si = gsi_start_phis (e->dest); !gsi_end_p (si);
- gsi_next (&si))
- mark_sym_for_renaming (SSA_NAME_VAR
- (PHI_RESULT (gsi_stmt (si))));
- }
+ dominance_invalidated |= lower_resx (bb, last, mnt_map);
+ any_rewritten = true;
}
- if (dump_file)
- fprintf (dump_file, "Empty EH handler %i removed\n", region);
- delete_basic_block (bb);
- return true;
}
- return false;
+
+ pointer_map_destroy (mnt_map);
+
+ if (dominance_invalidated)
+ {
+ free_dominance_info (CDI_DOMINATORS);
+ free_dominance_info (CDI_POST_DOMINATORS);
+ }
+
+ return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
+}
+
+static bool
+gate_lower_resx (void)
+{
+ return flag_exceptions != 0;
}
+struct gimple_opt_pass pass_lower_resx =
+{
+ {
+ GIMPLE_PASS,
+ "resx", /* name */
+ gate_lower_resx, /* gate */
+ execute_lower_resx, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_TREE_EH, /* tv_id */
+ PROP_gimple_lcf, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_verify_flow /* todo_flags_finish */
+ }
+};
-/* Perform cleanups and lowering of exception handling
- 1) cleanups regions with handlers doing nothing are optimized out
- 2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
- 3) Info about regions that are containing instructions, and regions
- reachable via local EH edges is collected
- 4) Eh tree is pruned for regions no longer neccesary.
- */
+/* Try to optimize var = {v} {CLOBBER} stmts followed only by an
+ external throw. */
-static unsigned int
-cleanup_eh (void)
+static void
+optimize_clobbers (basic_block bb)
{
- bool changed = false;
- basic_block bb;
- int i;
+ gimple_stmt_iterator gsi = gsi_last_bb (bb);
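+  /* Walk backwards from the final RESX, removing the clobber stores
+     that immediately precede it; stop at the first statement that is
+     not a clobber (debug statements are skipped).  */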
+ for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+ if (is_gimple_debug (stmt))
+ continue;
+ if (!gimple_clobber_p (stmt)
+ || TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
+ return;
+ unlink_stmt_vdef (stmt);
+ gsi_remove (&gsi, true);
+ release_defs (stmt);
+ }
+}
+
+/* Try to sink var = {v} {CLOBBER} stmts followed only by an
+ internal throw into the successor BB. */
- if (!cfun->eh)
+static int
+sink_clobbers (basic_block bb)
+{
+ edge e;
+ edge_iterator ei;
+ gimple_stmt_iterator gsi, dgsi;
+ basic_block succbb;
+ bool any_clobbers = false;
+
+ /* Only optimize if BB has a single EH successor and
+ all predecessor edges are EH too. */
+ if (!single_succ_p (bb)
+ || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
return 0;
- if (dump_file)
+
+ FOR_EACH_EDGE (e, ei, bb->preds)
{
- fprintf (dump_file, "Before cleanups:\n");
- dump_eh_tree (dump_file, cfun);
+ if ((e->flags & EDGE_EH) == 0)
+ return 0;
}
- dominance_info_invalidated = false;
- /* We cannot use FOR_EACH_BB, since the basic blocks may get removed. */
- for (i = NUM_FIXED_BLOCKS; i < last_basic_block; i++)
+ /* And BB contains only CLOBBER stmts before the final
+ RESX. */
+ gsi = gsi_last_bb (bb);
+ for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
{
- bb = BASIC_BLOCK (i);
- if (bb)
- changed |= cleanup_empty_eh (bb);
+ gimple stmt = gsi_stmt (gsi);
+ if (is_gimple_debug (stmt))
+ continue;
+ if (gimple_code (stmt) == GIMPLE_LABEL)
+ break;
+ if (!gimple_clobber_p (stmt)
+ || TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
+ return 0;
+ any_clobbers = true;
}
- if (dominance_info_invalidated)
+ if (!any_clobbers)
+ return 0;
+
+ succbb = single_succ (bb);
+ dgsi = gsi_after_labels (succbb);
+ gsi = gsi_last_bb (bb);
+ for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
{
- free_dominance_info (CDI_DOMINATORS);
- free_dominance_info (CDI_POST_DOMINATORS);
+ gimple stmt = gsi_stmt (gsi);
+ tree vdef;
+ if (is_gimple_debug (stmt))
+ continue;
+ if (gimple_code (stmt) == GIMPLE_LABEL)
+ break;
+ unlink_stmt_vdef (stmt);
+ gsi_remove (&gsi, false);
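+      /* Moving the clobber across the RESX changes the virtual SSA web;
+	 rewrite the statement's vdef/vuse back to the bare symbol and
+	 mark it for renaming so the SSA updater repairs the chain.  */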
+ vdef = gimple_vdef (stmt);
+ if (vdef && TREE_CODE (vdef) == SSA_NAME)
+ {
+ vdef = SSA_NAME_VAR (vdef);
+ mark_sym_for_renaming (vdef);
+ gimple_set_vdef (stmt, vdef);
+ gimple_set_vuse (stmt, vdef);
+ }
+ release_defs (stmt);
+ gsi_insert_before (&dgsi, stmt, GSI_SAME_STMT);
}
- /* Removing contained cleanup can render MUST_NOT_THROW regions empty. */
- if (changed)
- delete_unreachable_blocks ();
+ return TODO_update_ssa_only_virtuals;
+}
- tree_remove_unreachable_handlers ();
- if (dump_file)
+/* At the end of inlining, we can lower EH_DISPATCH. Return true when
+ we have found some duplicate labels and removed some edges. */
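+
+/* Roughly, an ERT_TRY dispatch becomes
+     _filter = __builtin_eh_filter (<region-nr>);
+     switch (_filter) { case <filter-value>: goto <catch-label>; ...
+			 default: goto <catch-all or fallthru>; }
+   while an ERT_ALLOWED_EXCEPTIONS dispatch becomes a conditional that
+   compares _filter against the region's allowed filter value.  */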
+
+static bool
+lower_eh_dispatch (basic_block src, gimple stmt)
+{
+ gimple_stmt_iterator gsi;
+ int region_nr;
+ eh_region r;
+ tree filter, fn;
+ gimple x;
+ bool redirected = false;
+
+ region_nr = gimple_eh_dispatch_region (stmt);
+ r = get_eh_region_from_number (region_nr);
+
+ gsi = gsi_last_bb (src);
+
+ switch (r->type)
{
- fprintf (dump_file, "After cleanups:\n");
- dump_eh_tree (dump_file, cfun);
+ case ERT_TRY:
+ {
+ VEC (tree, heap) *labels = NULL;
+ tree default_label = NULL;
+ eh_catch c;
+ edge_iterator ei;
+ edge e;
+ struct pointer_set_t *seen_values = pointer_set_create ();
+
+ /* Collect the labels for a switch. Zero the post_landing_pad
+ field because we'll no longer have anything keeping these labels
+ in existence and the optimizer will be free to merge these
+ blocks at will. */
+ for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
+ {
+ tree tp_node, flt_node, lab = c->label;
+ bool have_label = false;
+
+ c->label = NULL;
+ tp_node = c->type_list;
+ flt_node = c->filter_list;
+
+ if (tp_node == NULL)
+ {
+ default_label = lab;
+ break;
+ }
+ do
+ {
+ /* Filter out duplicate labels that arise when this handler
+ is shadowed by an earlier one. When no labels are
+ attached to the handler anymore, we remove
+ the corresponding edge and then we delete unreachable
+ blocks at the end of this pass. */
+ if (! pointer_set_contains (seen_values, TREE_VALUE (flt_node)))
+ {
+ tree t = build_case_label (TREE_VALUE (flt_node),
+ NULL, lab);
+ VEC_safe_push (tree, heap, labels, t);
+ pointer_set_insert (seen_values, TREE_VALUE (flt_node));
+ have_label = true;
+ }
+
+ tp_node = TREE_CHAIN (tp_node);
+ flt_node = TREE_CHAIN (flt_node);
+ }
+ while (tp_node);
+ if (! have_label)
+ {
+ remove_edge (find_edge (src, label_to_block (lab)));
+ redirected = true;
+ }
+ }
+
+ /* Clean up the edge flags. */
+ FOR_EACH_EDGE (e, ei, src->succs)
+ {
+ if (e->flags & EDGE_FALLTHRU)
+ {
+ /* If there was no catch-all, use the fallthru edge. */
+ if (default_label == NULL)
+ default_label = gimple_block_label (e->dest);
+ e->flags &= ~EDGE_FALLTHRU;
+ }
+ }
+ gcc_assert (default_label != NULL);
+
+ /* Don't generate a switch if there's only a default case.
+ This is common in the form of try { A; } catch (...) { B; }. */
+ if (labels == NULL)
+ {
+ e = single_succ_edge (src);
+ e->flags |= EDGE_FALLTHRU;
+ }
+ else
+ {
+ fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
+ x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
+ region_nr));
+ filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
+ filter = make_ssa_name (filter, x);
+ gimple_call_set_lhs (x, filter);
+ gsi_insert_before (&gsi, x, GSI_SAME_STMT);
+
+ /* Turn the default label into a default case. */
+ default_label = build_case_label (NULL, NULL, default_label);
+ sort_case_labels (labels);
+
+ x = gimple_build_switch_vec (filter, default_label, labels);
+ gsi_insert_before (&gsi, x, GSI_SAME_STMT);
+
+ VEC_free (tree, heap, labels);
+ }
+ pointer_set_destroy (seen_values);
+ }
+ break;
+
+ case ERT_ALLOWED_EXCEPTIONS:
+ {
+ edge b_e = BRANCH_EDGE (src);
+ edge f_e = FALLTHRU_EDGE (src);
+
+ fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
+ x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
+ region_nr));
+ filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
+ filter = make_ssa_name (filter, x);
+ gimple_call_set_lhs (x, filter);
+ gsi_insert_before (&gsi, x, GSI_SAME_STMT);
+
+ r->u.allowed.label = NULL;
+ x = gimple_build_cond (EQ_EXPR, filter,
+ build_int_cst (TREE_TYPE (filter),
+ r->u.allowed.filter),
+ NULL_TREE, NULL_TREE);
+ gsi_insert_before (&gsi, x, GSI_SAME_STMT);
+
+ b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
+ f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
}
- return (changed ? TODO_cleanup_cfg | TODO_update_ssa : 0);
+ /* Replace the EH_DISPATCH with the SWITCH or COND generated above. */
+ gsi_remove (&gsi, true);
+ return redirected;
}
-struct gimple_opt_pass pass_cleanup_eh = {
- {
- GIMPLE_PASS,
+static unsigned
+execute_lower_eh_dispatch (void)
+{
+ basic_block bb;
+ int flags = 0;
+ bool redirected = false;
+
+ assign_filter_values ();
+
+ FOR_EACH_BB (bb)
+ {
+ gimple last = last_stmt (bb);
+ if (last == NULL)
+ continue;
+ if (gimple_code (last) == GIMPLE_EH_DISPATCH)
+ {
+ redirected |= lower_eh_dispatch (bb, last);
+ flags |= TODO_update_ssa_only_virtuals;
+ }
+ else if (gimple_code (last) == GIMPLE_RESX)
+ {
+ if (stmt_can_throw_external (last))
+ optimize_clobbers (bb);
+ else
+ flags |= sink_clobbers (bb);
+ }
+ }
+
+ if (redirected)
+ delete_unreachable_blocks ();
+ return flags;
+}
+
+static bool
+gate_lower_eh_dispatch (void)
+{
+ return cfun->eh->region_tree != NULL;
+}
+
+struct gimple_opt_pass pass_lower_eh_dispatch =
+{
+ {
+ GIMPLE_PASS,
+ "ehdisp", /* name */
+ gate_lower_eh_dispatch, /* gate */
+ execute_lower_eh_dispatch, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_TREE_EH, /* tv_id */
+ PROP_gimple_lcf, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_verify_flow /* todo_flags_finish */
+ }
+};
+\f
+/* Walk statements, see what regions are really referenced and remove
+ those that are unused. */
+
+static void
+remove_unreachable_handlers (void)
+{
+ sbitmap r_reachable, lp_reachable;
+ eh_region region;
+ eh_landing_pad lp;
+ basic_block bb;
+ int lp_nr, r_nr;
+
+ r_reachable = sbitmap_alloc (VEC_length (eh_region, cfun->eh->region_array));
+ lp_reachable
+ = sbitmap_alloc (VEC_length (eh_landing_pad, cfun->eh->lp_array));
+ sbitmap_zero (r_reachable);
+ sbitmap_zero (lp_reachable);
+
+ FOR_EACH_BB (bb)
+ {
+ gimple_stmt_iterator gsi;
+
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+ lp_nr = lookup_stmt_eh_lp (stmt);
+
+ /* Negative LP numbers are MUST_NOT_THROW regions which
+ are not considered BB enders. */
+ if (lp_nr < 0)
+ SET_BIT (r_reachable, -lp_nr);
+
+ /* Positive LP numbers are real landing pads, and are BB enders. */
+ else if (lp_nr > 0)
+ {
+ gcc_assert (gsi_one_before_end_p (gsi));
+ region = get_eh_region_from_lp_number (lp_nr);
+ SET_BIT (r_reachable, region->index);
+ SET_BIT (lp_reachable, lp_nr);
+ }
+
+ /* Avoid removing regions referenced from RESX/EH_DISPATCH. */
+ switch (gimple_code (stmt))
+ {
+ case GIMPLE_RESX:
+ SET_BIT (r_reachable, gimple_resx_region (stmt));
+ break;
+ case GIMPLE_EH_DISPATCH:
+ SET_BIT (r_reachable, gimple_eh_dispatch_region (stmt));
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ if (dump_file)
+ {
+ fprintf (dump_file, "Before removal of unreachable regions:\n");
+ dump_eh_tree (dump_file, cfun);
+ fprintf (dump_file, "Reachable regions: ");
+ dump_sbitmap_file (dump_file, r_reachable);
+ fprintf (dump_file, "Reachable landing pads: ");
+ dump_sbitmap_file (dump_file, lp_reachable);
+ }
+
+ for (r_nr = 1;
+ VEC_iterate (eh_region, cfun->eh->region_array, r_nr, region); ++r_nr)
+ if (region && !TEST_BIT (r_reachable, r_nr))
+ {
+ if (dump_file)
+ fprintf (dump_file, "Removing unreachable region %d\n", r_nr);
+ remove_eh_handler (region);
+ }
+
+ for (lp_nr = 1;
+ VEC_iterate (eh_landing_pad, cfun->eh->lp_array, lp_nr, lp); ++lp_nr)
+ if (lp && !TEST_BIT (lp_reachable, lp_nr))
+ {
+ if (dump_file)
+ fprintf (dump_file, "Removing unreachable landing pad %d\n", lp_nr);
+ remove_eh_landing_pad (lp);
+ }
+
+ if (dump_file)
+ {
+ fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
+ dump_eh_tree (dump_file, cfun);
+ fprintf (dump_file, "\n\n");
+ }
+
+ sbitmap_free (r_reachable);
+ sbitmap_free (lp_reachable);
+
+#ifdef ENABLE_CHECKING
+ verify_eh_tree (cfun);
+#endif
+}
+
+/* Remove unreachable handlers if any landing pads have been removed after
+ the last ehcleanup pass (due to gimple_purge_dead_eh_edges). */
+
+void
+maybe_remove_unreachable_handlers (void)
+{
+ eh_landing_pad lp;
+ int i;
+
+ if (cfun->eh == NULL)
+ return;
+
+ for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
+ if (lp && lp->post_landing_pad)
+ {
+ if (label_to_block (lp->post_landing_pad) == NULL)
+ {
+ remove_unreachable_handlers ();
+ return;
+ }
+ }
+}
+
+/* Remove regions that do not have landing pads. This assumes
+ that remove_unreachable_handlers has already been run, and
+ that we've just manipulated the landing pads since then. */
+
+static void
+remove_unreachable_handlers_no_lp (void)
+{
+ eh_region r;
+ int i;
+ sbitmap r_reachable;
+ basic_block bb;
+
+ r_reachable = sbitmap_alloc (VEC_length (eh_region, cfun->eh->region_array));
+ sbitmap_zero (r_reachable);
+
+ FOR_EACH_BB (bb)
+ {
+ gimple stmt = last_stmt (bb);
+ if (stmt)
+ /* Avoid removing regions referenced from RESX/EH_DISPATCH. */
+ switch (gimple_code (stmt))
+ {
+ case GIMPLE_RESX:
+ SET_BIT (r_reachable, gimple_resx_region (stmt));
+ break;
+ case GIMPLE_EH_DISPATCH:
+ SET_BIT (r_reachable, gimple_eh_dispatch_region (stmt));
+ break;
+ default:
+ break;
+ }
+ }
+
+ for (i = 1; VEC_iterate (eh_region, cfun->eh->region_array, i, r); ++i)
+ if (r && r->landing_pads == NULL && r->type != ERT_MUST_NOT_THROW
+ && !TEST_BIT (r_reachable, i))
+ {
+ if (dump_file)
+ fprintf (dump_file, "Removing unreachable region %d\n", i);
+ remove_eh_handler (r);
+ }
+
+ sbitmap_free (r_reachable);
+}
+
+/* Undo critical edge splitting on an EH landing pad. Earlier, we
+ optimistically split all sorts of edges, including EH edges. The
+ optimization passes in between may not have needed them; if not,
+ we should undo the split.
+
+ Recognize this case by having one EH edge incoming to the BB and
+ one normal edge outgoing; BB should be empty apart from the
+ post_landing_pad label.
+
+ Note that this is slightly different from the empty handler case
+ handled by cleanup_empty_eh, in that the handler itself may still
+ contain code but the landing pad has been separated from the
+ handler. As such, cleanup_empty_eh relies on this transformation
+ having been done first. */
+
+static bool
+unsplit_eh (eh_landing_pad lp)
+{
+ basic_block bb = label_to_block (lp->post_landing_pad);
+ gimple_stmt_iterator gsi;
+ edge e_in, e_out;
+
+ /* Quickly check the edge counts on BB for singularity. */
+ if (EDGE_COUNT (bb->preds) != 1 || EDGE_COUNT (bb->succs) != 1)
+ return false;
+ e_in = EDGE_PRED (bb, 0);
+ e_out = EDGE_SUCC (bb, 0);
+
+ /* Input edge must be EH and output edge must be normal. */
+ if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
+ return false;
+
+ /* The block must be empty except for the labels and debug insns. */
+ gsi = gsi_after_labels (bb);
+ if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
+ gsi_next_nondebug (&gsi);
+ if (!gsi_end_p (gsi))
+ return false;
+
+ /* The destination block must not already have a landing pad
+ for a different region. */
+ for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+ tree lab;
+ int lp_nr;
+
+ if (gimple_code (stmt) != GIMPLE_LABEL)
+ break;
+ lab = gimple_label_label (stmt);
+ lp_nr = EH_LANDING_PAD_NR (lab);
+ if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
+ return false;
+ }
+
+ /* The new destination block must not already be a destination of
+ the source block, lest we merge fallthru and eh edges and cause
+ all sorts of confusion. */
+ if (find_edge (e_in->src, e_out->dest))
+ return false;
+
+ /* ??? We can get degenerate phis due to cfg cleanups. I would have
+ thought this should have been cleaned up by a phicprop pass, but
+ that doesn't appear to handle virtuals. Propagate by hand. */
+ if (!gimple_seq_empty_p (phi_nodes (bb)))
+ {
+ for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
+ {
+ gimple use_stmt, phi = gsi_stmt (gsi);
+ tree lhs = gimple_phi_result (phi);
+ tree rhs = gimple_phi_arg_def (phi, 0);
+ use_operand_p use_p;
+ imm_use_iterator iter;
+
+ FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
+ {
+ FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
+ SET_USE (use_p, rhs);
+ }
+
+ if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
+ SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
+
+ remove_phi_node (&gsi, true);
+ }
+ }
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
+ lp->index, e_out->dest->index);
+
+ /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving
+ a successor edge, humor it. But do the real CFG change with the
+ predecessor of E_OUT in order to preserve the ordering of arguments
+ to the PHI nodes in E_OUT->DEST. */
+ redirect_eh_edge_1 (e_in, e_out->dest, false);
+ redirect_edge_pred (e_out, e_in->src);
+ e_out->flags = e_in->flags;
+ e_out->probability = e_in->probability;
+ e_out->count = e_in->count;
+ remove_edge (e_in);
+
+ return true;
+}
+
+/* Examine each landing pad block and see if it matches unsplit_eh. */
+
+static bool
+unsplit_all_eh (void)
+{
+ bool changed = false;
+ eh_landing_pad lp;
+ int i;
+
+ for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
+ if (lp)
+ changed |= unsplit_eh (lp);
+
+ return changed;
+}
+
+/* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming
+ to OLD_BB to NEW_BB; return true on success, false on failure.
+
+ OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
+ PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
+ Virtual PHIs may be deleted and marked for renaming. */
+
+static bool
+cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
+ edge old_bb_out, bool change_region)
+{
+ gimple_stmt_iterator ngsi, ogsi;
+ edge_iterator ei;
+ edge e;
+ bitmap rename_virts;
+ bitmap ophi_handled;
+
+ /* The destination block must not be a regular successor for any
+ of the preds of the landing pad. Thus, avoid turning
+ <..>
+ | \ EH
+ | <..>
+ | /
+ <..>
+ into
+ <..>
+ | | EH
+ <..>
+ which CFG verification would choke on. See PR45172 and PR51089. */
+ FOR_EACH_EDGE (e, ei, old_bb->preds)
+ if (find_edge (e->src, new_bb))
+ return false;
+
+ FOR_EACH_EDGE (e, ei, old_bb->preds)
+ redirect_edge_var_map_clear (e);
+
+ ophi_handled = BITMAP_ALLOC (NULL);
+ rename_virts = BITMAP_ALLOC (NULL);
+
+ /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
+ for the edges we're going to move. */
+ for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
+ {
+ gimple ophi, nphi = gsi_stmt (ngsi);
+ tree nresult, nop;
+
+ nresult = gimple_phi_result (nphi);
+ nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
+
+ /* Find the corresponding PHI in OLD_BB so we can forward-propagate
+ the source ssa_name. */
+ ophi = NULL;
+ for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
+ {
+ ophi = gsi_stmt (ogsi);
+ if (gimple_phi_result (ophi) == nop)
+ break;
+ ophi = NULL;
+ }
+
+ /* If we did find the corresponding PHI, copy those inputs. */
+ if (ophi)
+ {
+ /* If NOP is used somewhere else beyond phis in new_bb, give up. */
+ if (!has_single_use (nop))
+ {
+ imm_use_iterator imm_iter;
+ use_operand_p use_p;
+
+ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
+ {
+ if (!gimple_debug_bind_p (USE_STMT (use_p))
+ && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
+ || gimple_bb (USE_STMT (use_p)) != new_bb))
+ goto fail;
+ }
+ }
+ bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
+ FOR_EACH_EDGE (e, ei, old_bb->preds)
+ {
+ location_t oloc;
+ tree oop;
+
+ if ((e->flags & EDGE_EH) == 0)
+ continue;
+ oop = gimple_phi_arg_def (ophi, e->dest_idx);
+ oloc = gimple_phi_arg_location (ophi, e->dest_idx);
+ redirect_edge_var_map_add (e, nresult, oop, oloc);
+ }
+ }
+ /* If we didn't find the PHI, but it's a VOP, remember to rename
+ it later, assuming all other tests succeed. */
+ else if (!is_gimple_reg (nresult))
+ bitmap_set_bit (rename_virts, SSA_NAME_VERSION (nresult));
+ /* If we didn't find the PHI, and it's a real variable, we know
+ from the fact that OLD_BB is tree_empty_eh_handler_p that the
+ variable is unchanged from input to the block and we can simply
+ re-use the input to NEW_BB from the OLD_BB_OUT edge. */
+ else
+ {
+ location_t nloc
+ = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
+ FOR_EACH_EDGE (e, ei, old_bb->preds)
+ redirect_edge_var_map_add (e, nresult, nop, nloc);
+ }
+ }
+
+ /* Second, verify that all PHIs from OLD_BB have been handled. If not,
+ we don't know what values from the other edges into NEW_BB to use. */
+ for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
+ {
+ gimple ophi = gsi_stmt (ogsi);
+ tree oresult = gimple_phi_result (ophi);
+ if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
+ goto fail;
+ }
+
+ /* At this point we know that the merge will succeed. Remove the PHI
+ nodes for the virtuals that we want to rename. */
+ if (!bitmap_empty_p (rename_virts))
+ {
+ for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); )
+ {
+ gimple nphi = gsi_stmt (ngsi);
+ tree nresult = gimple_phi_result (nphi);
+ if (bitmap_bit_p (rename_virts, SSA_NAME_VERSION (nresult)))
+ {
+ mark_virtual_phi_result_for_renaming (nphi);
+ remove_phi_node (&ngsi, true);
+ }
+ else
+ gsi_next (&ngsi);
+ }
+ }
+
+ /* Finally, move the edges and update the PHIs. */
+ for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
+ if (e->flags & EDGE_EH)
+ {
+ redirect_eh_edge_1 (e, new_bb, change_region);
+ redirect_edge_succ (e, new_bb);
+ flush_pending_stmts (e);
+ }
+ else
+ ei_next (&ei);
+
+ BITMAP_FREE (ophi_handled);
+ BITMAP_FREE (rename_virts);
+ return true;
+
+ fail:
+ FOR_EACH_EDGE (e, ei, old_bb->preds)
+ redirect_edge_var_map_clear (e);
+ BITMAP_FREE (ophi_handled);
+ BITMAP_FREE (rename_virts);
+ return false;
+}
+
+/* A subroutine of cleanup_empty_eh. Move a landing pad LP from its
+ old region to NEW_REGION at BB. */
+
+static void
+cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
+ eh_landing_pad lp, eh_region new_region)
+{
+ gimple_stmt_iterator gsi;
+ eh_landing_pad *pp;
+
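+  /* Unlink LP from its current region's list of landing pads, then
+     chain it onto NEW_REGION's list.  */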
+ for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
+ continue;
+ *pp = lp->next_lp;
+
+ lp->region = new_region;
+ lp->next_lp = new_region->landing_pads;
+ new_region->landing_pads = lp;
+
+ /* Delete the RESX that was matched within the empty handler block. */
+ gsi = gsi_last_bb (bb);
+ mark_virtual_ops_for_renaming (gsi_stmt (gsi));
+ gsi_remove (&gsi, true);
+
+ /* Clean up E_OUT for the fallthru. */
+ e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
+ e_out->probability = REG_BR_PROB_BASE;
+}
+
+/* A subroutine of cleanup_empty_eh. Handle more complex cases of
+ unsplitting than unsplit_eh was prepared to handle, e.g. when
+ multiple incoming edges and phis are involved. */
+
+static bool
+cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
+{
+ gimple_stmt_iterator gsi;
+ tree lab;
+
+ /* We really ought not have totally lost everything following
+ a landing pad label. Given that BB is empty, there had better
+ be a successor. */
+ gcc_assert (e_out != NULL);
+
+ /* The destination block must not already have a landing pad
+ for a different region. */
+ lab = NULL;
+ for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+ int lp_nr;
+
+ if (gimple_code (stmt) != GIMPLE_LABEL)
+ break;
+ lab = gimple_label_label (stmt);
+ lp_nr = EH_LANDING_PAD_NR (lab);
+ if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
+ return false;
+ }
+
+ /* Attempt to move the PHIs into the successor block. */
+ if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file,
+ "Unsplit EH landing pad %d to block %i "
+ "(via cleanup_empty_eh).\n",
+ lp->index, e_out->dest->index);
+ return true;
+ }
+
+ return false;
+}
+
+/* Return true if edge E_FIRST is part of an empty infinite loop
+ or leads to such a loop through a series of single successor
+ empty bbs. */
+
+static bool
+infinite_empty_loop_p (edge e_first)
+{
+ bool inf_loop = false;
+ edge e;
+
+ if (e_first->dest == e_first->src)
+ return true;
+
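+  /* Follow the chain of single-successor empty blocks, marking each
+     visited block via its aux field; reaching a marked block means the
+     chain cycles back on itself.  The marks are cleared below.  */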
+ e_first->src->aux = (void *) 1;
+ for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
+ {
+ gimple_stmt_iterator gsi;
+ if (e->dest->aux)
+ {
+ inf_loop = true;
+ break;
+ }
+ e->dest->aux = (void *) 1;
+ gsi = gsi_after_labels (e->dest);
+ if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
+ gsi_next_nondebug (&gsi);
+ if (!gsi_end_p (gsi))
+ break;
+ }
+ e_first->src->aux = NULL;
+ for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
+ e->dest->aux = NULL;
+
+ return inf_loop;
+}
+
+/* Examine the block associated with LP to determine if it's an empty
+ handler for its EH region. If so, attempt to redirect EH edges to
+ an outer region. Return true if the CFG was updated in any way. This
+ is similar to jump forwarding, just across EH edges. */
+
+static bool
+cleanup_empty_eh (eh_landing_pad lp)
+{
+ basic_block bb = label_to_block (lp->post_landing_pad);
+ gimple_stmt_iterator gsi;
+ gimple resx;
+ eh_region new_region;
+ edge_iterator ei;
+ edge e, e_out;
+ bool has_non_eh_pred;
+ bool ret = false;
+ int new_lp_nr;
+
+ /* There can be zero or one edges out of BB. This is the quickest test. */
+ switch (EDGE_COUNT (bb->succs))
+ {
+ case 0:
+ e_out = NULL;
+ break;
+ case 1:
+ e_out = EDGE_SUCC (bb, 0);
+ break;
+ default:
+ return false;
+ }
+
+ resx = last_stmt (bb);
+ if (resx && is_gimple_resx (resx))
+ {
+ if (stmt_can_throw_external (resx))
+ optimize_clobbers (bb);
+ else if (sink_clobbers (bb))
+ ret = true;
+ }
+
+ gsi = gsi_after_labels (bb);
+
+ /* Make sure to skip debug statements. */
+ if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
+ gsi_next_nondebug (&gsi);
+
+ /* If the block is totally empty, look for more unsplitting cases. */
+ if (gsi_end_p (gsi))
+ {
+ /* For the degenerate case of an infinite loop bail out. */
+ if (infinite_empty_loop_p (e_out))
+ return ret;
+
+ return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
+ }
+
+ /* The block should consist only of a single RESX statement, modulo a
+ preceding call to __builtin_stack_restore if there is no outgoing
+ edge, since the call can be eliminated in this case. */
+ resx = gsi_stmt (gsi);
+ if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
+ {
+ gsi_next (&gsi);
+ resx = gsi_stmt (gsi);
+ }
+ if (!is_gimple_resx (resx))
+ return ret;
+ gcc_assert (gsi_one_before_end_p (gsi));
+
+ /* Determine if there are non-EH edges, or resx edges into the handler. */
+ has_non_eh_pred = false;
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ if (!(e->flags & EDGE_EH))
+ has_non_eh_pred = true;
+
+ /* Find the handler enclosing the empty handler by looking at
+ where the RESX instruction was vectored. */
+ new_lp_nr = lookup_stmt_eh_lp (resx);
+ new_region = get_eh_region_from_lp_number (new_lp_nr);
+
+ /* If there's no destination region within the current function,
+ redirection is trivial via removing the throwing statements from
+ the EH region, removing the EH edges, and allowing the block
+ to go unreachable. */
+ if (new_region == NULL)
+ {
+ gcc_assert (e_out == NULL);
+ for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
+ if (e->flags & EDGE_EH)
+ {
+ gimple stmt = last_stmt (e->src);
+ remove_stmt_from_eh_lp (stmt);
+ remove_edge (e);
+ }
+ else
+ ei_next (&ei);
+ goto succeed;
+ }
+
+ /* If the destination region is a MUST_NOT_THROW, allow the runtime
+ to handle the abort and allow the blocks to go unreachable. */
+ if (new_region->type == ERT_MUST_NOT_THROW)
+ {
+ for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
+ if (e->flags & EDGE_EH)
+ {
+ gimple stmt = last_stmt (e->src);
+ remove_stmt_from_eh_lp (stmt);
+ add_stmt_to_eh_lp (stmt, new_lp_nr);
+ remove_edge (e);
+ }
+ else
+ ei_next (&ei);
+ goto succeed;
+ }
+
+ /* Try to redirect the EH edges and merge the PHIs into the destination
+ landing pad block. If the merge succeeds, we'll already have redirected
+ all the EH edges. The handler itself will go unreachable if there were
+ no normal edges. */
+ if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
+ goto succeed;
+
+ /* Finally, if all input edges are EH edges, then we can (potentially)
+ reduce the number of transfers from the runtime by moving the landing
+ pad from the original region to the new region. This is a win when
+ we remove the last CLEANUP region along a particular exception
+ propagation path. Since nothing changes except for the region with
+ which the landing pad is associated, the PHI nodes do not need to be
+ adjusted at all. */
+ if (!has_non_eh_pred)
+ {
+ cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
+ lp->index, new_region->index);
+
+ /* ??? The CFG didn't change, but we may have rendered the
+ old EH region unreachable. Trigger a cleanup there. */
+ return true;
+ }
+
+ return ret;
+
+ succeed:
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
+ remove_eh_landing_pad (lp);
+ return true;
+}
+
+/* Do a post-order traversal of the EH region tree. Examine each
+ post_landing_pad block and see if we can eliminate it as empty. */
+
+static bool
+cleanup_all_empty_eh (void)
+{
+ bool changed = false;
+ eh_landing_pad lp;
+ int i;
+
+ for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
+ if (lp)
+ changed |= cleanup_empty_eh (lp);
+
+ return changed;
+}
+
+/* Perform cleanups and lowering of exception handling
+ 1) cleanup regions with handlers doing nothing are optimized out
+ 2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
+ 3) Info about regions containing instructions, and regions
+ reachable via local EH edges, is collected
+ 4) The EH tree is pruned for regions no longer necessary.
+
+ TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
+ Unify those that have the same failure decl and locus.
+*/
+
+static unsigned int
+execute_cleanup_eh_1 (void)
+{
+ /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
+ looking up unreachable landing pads. */
+ remove_unreachable_handlers ();
+
+ /* Watch out for the region tree vanishing due to all regions being
+ unreachable. */
+ if (cfun->eh->region_tree && optimize)
+ {
+ bool changed = false;
+
+ changed |= unsplit_all_eh ();
+ changed |= cleanup_all_empty_eh ();
+
+ if (changed)
+ {
+ free_dominance_info (CDI_DOMINATORS);
+ free_dominance_info (CDI_POST_DOMINATORS);
+
+ /* We delayed all basic block deletion, as we may have performed
+ cleanups on EH edges while non-EH edges were still present. */
+ delete_unreachable_blocks ();
+
+ /* We manipulated the landing pads. Remove any region that no
+ longer has a landing pad. */
+ remove_unreachable_handlers_no_lp ();
+
+ return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
+ }
+ }
+
+ return 0;
+}
+
+static unsigned int
+execute_cleanup_eh (void)
+{
+ int ret = execute_cleanup_eh_1 ();
+
+ /* If the function no longer needs an EH personality routine
+ clear it. This exposes cross-language inlining opportunities
+ and avoids references to a never defined personality routine. */
+ if (DECL_FUNCTION_PERSONALITY (current_function_decl)
+ && function_needs_eh_personality (cfun) != eh_personality_lang)
+ DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
+
+ return ret;
+}
+
+static bool
+gate_cleanup_eh (void)
+{
+ return cfun->eh != NULL && cfun->eh->region_tree != NULL;
+}
+
+struct gimple_opt_pass pass_cleanup_eh = {
+ {
+ GIMPLE_PASS,
"ehcleanup", /* name */
- NULL, /* gate */
- cleanup_eh, /* execute */
+ gate_cleanup_eh, /* gate */
+ execute_cleanup_eh, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func /* todo_flags_finish */
+ 0 /* todo_flags_finish */
}
};
+\f
+/* Verify that the BB containing STMT as its last statement has precisely
+ the edge that make_eh_edges would create. */
+
+DEBUG_FUNCTION bool
+verify_eh_edges (gimple stmt)
+{
+ basic_block bb = gimple_bb (stmt);
+ eh_landing_pad lp = NULL;
+ int lp_nr;
+ edge_iterator ei;
+ edge e, eh_edge;
+
+ lp_nr = lookup_stmt_eh_lp (stmt);
+ if (lp_nr > 0)
+ lp = get_eh_landing_pad_from_number (lp_nr);
+
+ eh_edge = NULL;
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ {
+ if (e->flags & EDGE_EH)
+ {
+ if (eh_edge)
+ {
+ error ("BB %i has multiple EH edges", bb->index);
+ return true;
+ }
+ else
+ eh_edge = e;
+ }
+ }
+
+ if (lp == NULL)
+ {
+ if (eh_edge)
+ {
+ error ("BB %i can not throw but has an EH edge", bb->index);
+ return true;
+ }
+ return false;
+ }
+
+ if (!stmt_could_throw_p (stmt))
+ {
+ error ("BB %i last statement has incorrectly set lp", bb->index);
+ return true;
+ }
+
+ if (eh_edge == NULL)
+ {
+ error ("BB %i is missing an EH edge", bb->index);
+ return true;
+ }
+
+ if (eh_edge->dest != label_to_block (lp->post_landing_pad))
+ {
+ error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
+ return true;
+ }
+
+ return false;
+}
+
+/* Similarly, but handle GIMPLE_EH_DISPATCH specifically. */
+
+DEBUG_FUNCTION bool
+verify_eh_dispatch_edge (gimple stmt)
+{
+ eh_region r;
+ eh_catch c;
+ basic_block src, dst;
+ bool want_fallthru = true;
+ edge_iterator ei;
+ edge e, fall_edge;
+
+ r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
+ src = gimple_bb (stmt);
+
+ FOR_EACH_EDGE (e, ei, src->succs)
+ gcc_assert (e->aux == NULL);
+
+ switch (r->type)
+ {
+ case ERT_TRY:
+ for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
+ {
+ dst = label_to_block (c->label);
+ e = find_edge (src, dst);
+ if (e == NULL)
+ {
+ error ("BB %i is missing an edge", src->index);
+ return true;
+ }
+ e->aux = (void *)e;
+
+ /* A catch-all handler doesn't have a fallthru. */
+ if (c->type_list == NULL)
+ {
+ want_fallthru = false;
+ break;
+ }
+ }
+ break;
+
+ case ERT_ALLOWED_EXCEPTIONS:
+ dst = label_to_block (r->u.allowed.label);
+ e = find_edge (src, dst);
+ if (e == NULL)
+ {
+ error ("BB %i is missing an edge", src->index);
+ return true;
+ }
+ e->aux = (void *)e;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
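+  /* Every remaining successor edge must either be the single fallthru
+     edge or one of the handler edges marked above via aux.  */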
+ fall_edge = NULL;
+ FOR_EACH_EDGE (e, ei, src->succs)
+ {
+ if (e->flags & EDGE_FALLTHRU)
+ {
+ if (fall_edge != NULL)
+ {
+ error ("BB %i too many fallthru edges", src->index);
+ return true;
+ }
+ fall_edge = e;
+ }
+ else if (e->aux)
+ e->aux = NULL;
+ else
+ {
+ error ("BB %i has incorrect edge", src->index);
+ return true;
+ }
+ }
+ if ((fall_edge != NULL) ^ want_fallthru)
+ {
+ error ("BB %i has incorrect fallthru edge", src->index);
+ return true;
+ }
+
+ return false;
+}