+
+/* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
+   in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
+   operand is the return value of duplicate_eh_regions.  DEFAULT_LP_NR, if
+   nonzero, is used when OLD_STMT had no landing pad of its own.  Returns
+   true if NEW_STMT was entered into NEW_FUN's EH table.  */
+
+bool
+maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
+			    struct function *old_fun, gimple old_stmt,
+			    struct pointer_map_t *map, int default_lp_nr)
+{
+  int old_lp_nr, new_lp_nr;
+  void **slot;
+
+  /* Statements that cannot throw need no EH table entry.  */
+  if (!stmt_could_throw_p (new_stmt))
+    return false;
+
+  old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
+  if (old_lp_nr == 0)
+    {
+      if (default_lp_nr == 0)
+	return false;
+      new_lp_nr = default_lp_nr;
+    }
+  else if (old_lp_nr > 0)
+    {
+      /* Positive numbers name landing pads; map the old pad to the
+	 duplicated one and use its index.  */
+      eh_landing_pad old_lp, new_lp;
+
+      old_lp = VEC_index (eh_landing_pad, old_fun->eh->lp_array, old_lp_nr);
+      slot = pointer_map_contains (map, old_lp);
+      new_lp = (eh_landing_pad) *slot;
+      new_lp_nr = new_lp->index;
+    }
+  else
+    {
+      /* Negative numbers name MUST_NOT_THROW regions directly; map the
+	 old region and negate the duplicate's index.  */
+      eh_region old_r, new_r;
+
+      old_r = VEC_index (eh_region, old_fun->eh->region_array, -old_lp_nr);
+      slot = pointer_map_contains (map, old_r);
+      new_r = (eh_region) *slot;
+      new_lp_nr = -new_r->index;
+    }
+
+  add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
+  return true;
+}
+
+/* Similar, but both OLD_STMT and NEW_STMT are within the current function,
+   and thus no remapping is required.  Returns true if NEW_STMT was added
+   to the EH table.  */
+
+bool
+maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
+{
+  int lp_nr;
+
+  if (stmt_could_throw_p (new_stmt))
+    {
+      lp_nr = lookup_stmt_eh_lp (old_stmt);
+      if (lp_nr != 0)
+	{
+	  add_stmt_to_eh_lp (new_stmt, lp_nr);
+	  return true;
+	}
+    }
+
+  return false;
+}
+\f
+/* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
+   GIMPLE_TRY) that are similar enough to be considered the same.  Currently
+   this only handles handlers consisting of a single call, as that's the
+   important case for C++: a destructor call for a particular object showing
+   up in multiple handlers.  */
+
+static bool
+same_handler_p (gimple_seq oneh, gimple_seq twoh)
+{
+  gimple_stmt_iterator it;
+  gimple call1, call2;
+  unsigned int nargs, argno;
+
+  /* Each handler must consist of exactly one statement.  */
+  it = gsi_start (oneh);
+  if (!gsi_one_before_end_p (it))
+    return false;
+  call1 = gsi_stmt (it);
+
+  it = gsi_start (twoh);
+  if (!gsi_one_before_end_p (it))
+    return false;
+  call2 = gsi_stmt (it);
+
+  /* Both statements must be calls with no LHS and no static chain,
+     to equal callees, taking the same number of arguments.  */
+  if (!is_gimple_call (call1)
+      || !is_gimple_call (call2)
+      || gimple_call_lhs (call1)
+      || gimple_call_lhs (call2)
+      || gimple_call_chain (call1)
+      || gimple_call_chain (call2)
+      || !operand_equal_p (gimple_call_fn (call1), gimple_call_fn (call2), 0)
+      || gimple_call_num_args (call1) != gimple_call_num_args (call2))
+    return false;
+
+  /* ... and every corresponding argument must match.  */
+  nargs = gimple_call_num_args (call1);
+  for (argno = 0; argno < nargs; ++argno)
+    if (!operand_equal_p (gimple_call_arg (call1, argno),
+			  gimple_call_arg (call2, argno), 0))
+      return false;
+
+  return true;
+}
+
+/* Optimize
+    try { A() } finally { try { ~B() } catch { ~A() } }
+    try { ... } finally { ~A() }
+   into
+    try { A() } catch { ~B() }
+    try { ~B() ... } finally { ~A() }
+
+   This occurs frequently in C++, where A is a local variable and B is a
+   temporary used in the initializer for A.  ONE and TWO are the adjacent
+   GIMPLE_TRY_FINALLY statements; both are modified in place.  */
+
+static void
+optimize_double_finally (gimple one, gimple two)
+{
+  gimple oneh;
+  gimple_stmt_iterator gsi;
+
+  /* The cleanup of ONE must consist of a single statement...  */
+  gsi = gsi_start (gimple_try_cleanup (one));
+  if (!gsi_one_before_end_p (gsi))
+    return;
+
+  /* ... which must itself be a try/catch.  */
+  oneh = gsi_stmt (gsi);
+  if (gimple_code (oneh) != GIMPLE_TRY
+      || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
+    return;
+
+  /* The catch of that inner try (~A above) must match TWO's finally.  */
+  if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
+    {
+      gimple_seq seq = gimple_try_eval (oneh);
+
+      /* Turn ONE into try/catch with the inner eval (~B) as handler,
+	 and prepend a copy of that eval to TWO's body.  */
+      gimple_try_set_cleanup (one, seq);
+      gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
+      seq = copy_gimple_seq_and_replace_locals (seq);
+      gimple_seq_add_seq (&seq, gimple_try_eval (two));
+      gimple_try_set_eval (two, seq);
+    }
+}
+
+/* Perform EH refactoring optimizations that are simpler to do when code
+   flow has been lowered but EH structures haven't.  Recursively walks the
+   statement sequence SEQ.  */
+
+static void
+refactor_eh_r (gimple_seq seq)
+{
+  gimple_stmt_iterator gsi;
+  gimple one, two;
+
+  /* Iterate with a one-statement look-behind: ONE is the previous
+     statement, TWO the current one; TWO becomes NULL past the end so
+     the final ONE is still processed below.  */
+  one = NULL;
+  two = NULL;
+  gsi = gsi_start (seq);
+  while (1)
+    {
+      one = two;
+      if (gsi_end_p (gsi))
+	two = NULL;
+      else
+	two = gsi_stmt (gsi);
+      /* Try to combine two adjacent try/finally statements.  */
+      if (one
+	  && two
+	  && gimple_code (one) == GIMPLE_TRY
+	  && gimple_code (two) == GIMPLE_TRY
+	  && gimple_try_kind (one) == GIMPLE_TRY_FINALLY
+	  && gimple_try_kind (two) == GIMPLE_TRY_FINALLY)
+	optimize_double_finally (one, two);
+      /* Recurse into the nested statement sequences of ONE.  */
+      if (one)
+	switch (gimple_code (one))
+	  {
+	  case GIMPLE_TRY:
+	    refactor_eh_r (gimple_try_eval (one));
+	    refactor_eh_r (gimple_try_cleanup (one));
+	    break;
+	  case GIMPLE_CATCH:
+	    refactor_eh_r (gimple_catch_handler (one));
+	    break;
+	  case GIMPLE_EH_FILTER:
+	    refactor_eh_r (gimple_eh_filter_failure (one));
+	    break;
+	  default:
+	    break;
+	  }
+      if (two)
+	gsi_next (&gsi);
+      else
+	break;
+    }
+}
+
+/* Main entry point of the ehopt pass: refactor the EH constructs in the
+   body of the current function.  Always returns 0 (no extra TODOs).  */
+
+static unsigned
+refactor_eh (void)
+{
+  refactor_eh_r (gimple_body (current_function_decl));
+  return 0;
+}
+
+/* Gate function: run the ehopt pass only when exceptions are enabled.  */
+
+static bool
+gate_refactor_eh (void)
+{
+  return flag_exceptions != 0;
+}
+
+/* Pass descriptor for the EH refactoring ("ehopt") pass, which runs once
+   control flow has been lowered (PROP_gimple_lcf).  */
+
+struct gimple_opt_pass pass_refactor_eh =
+{
+ {
+  GIMPLE_PASS,
+  "ehopt",				/* name */
+  gate_refactor_eh,			/* gate */
+  refactor_eh,				/* execute */
+  NULL,					/* sub */
+  NULL,					/* next */
+  0,					/* static_pass_number */
+  TV_TREE_EH,				/* tv_id */
+  PROP_gimple_lcf,			/* properties_required */
+  0,					/* properties_provided */
+  0,					/* properties_destroyed */
+  0,					/* todo_flags_start */
+  TODO_dump_func			/* todo_flags_finish */
+ }
+};
+\f
+/* At the end of gimple optimization, we can lower RESX.  STMT is the
+   GIMPLE_RESX ending BB.  MNT_MAP caches, per MUST_NOT_THROW region, the
+   label of the block built to call its failure routine, so only one such
+   block is created per region.  Returns true when edges were redirected,
+   which tells the caller that dominance info may be invalid.  */
+
+static bool
+lower_resx (basic_block bb, gimple stmt, struct pointer_map_t *mnt_map)
+{
+  int lp_nr;
+  eh_region src_r, dst_r;
+  gimple_stmt_iterator gsi;
+  gimple x;
+  tree fn, src_nr;
+  bool ret = false;
+
+  /* DST_R is the region the exception resumes into, if any.  */
+  lp_nr = lookup_stmt_eh_lp (stmt);
+  if (lp_nr != 0)
+    dst_r = get_eh_region_from_lp_number (lp_nr);
+  else
+    dst_r = NULL;
+
+  src_r = get_eh_region_from_number (gimple_resx_region (stmt));
+  gsi = gsi_last_bb (bb);
+
+  if (src_r == NULL)
+    {
+      /* We can wind up with no source region when pass_cleanup_eh shows
+	 that there are no entries into an eh region and deletes it, but
+	 then the block that contains the resx isn't removed.  This can
+	 happen without optimization when the switch statement created by
+	 lower_try_finally_switch isn't simplified to remove the eh case.
+
+	 Resolve this by expanding the resx node to an abort.  */
+
+      fn = implicit_built_in_decls[BUILT_IN_TRAP];
+      x = gimple_build_call (fn, 0);
+      gsi_insert_before (&gsi, x, GSI_SAME_STMT);
+
+      /* The trap does not return; all successor edges are dead.  */
+      while (EDGE_COUNT (bb->succs) > 0)
+	remove_edge (EDGE_SUCC (bb, 0));
+    }
+  else if (dst_r)
+    {
+      /* When we have a destination region, we resolve this by copying
+	 the excptr and filter values into place, and changing the edge
+	 to immediately after the landing pad.  */
+      edge e;
+
+      if (lp_nr < 0)
+	{
+	  basic_block new_bb;
+	  void **slot;
+	  tree lab;
+
+	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
+	     the failure decl into a new block, if needed.  */
+	  gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
+
+	  slot = pointer_map_contains (mnt_map, dst_r);
+	  if (slot == NULL)
+	    {
+	      gimple_stmt_iterator gsi2;
+
+	      new_bb = create_empty_bb (bb);
+	      lab = gimple_block_label (new_bb);
+	      gsi2 = gsi_start_bb (new_bb);
+
+	      fn = dst_r->u.must_not_throw.failure_decl;
+	      x = gimple_build_call (fn, 0);
+	      gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
+	      gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
+
+	      /* Remember the block so later RESX into the same region
+		 can share it.  */
+	      slot = pointer_map_insert (mnt_map, dst_r);
+	      *slot = lab;
+	    }
+	  else
+	    {
+	      lab = (tree) *slot;
+	      new_bb = label_to_block (lab);
+	    }
+
+	  gcc_assert (EDGE_COUNT (bb->succs) == 0);
+	  e = make_edge (bb, new_bb, EDGE_FALLTHRU);
+	  e->count = bb->count;
+	  e->probability = REG_BR_PROB_BASE;
+	}
+      else
+	{
+	  edge_iterator ei;
+	  tree dst_nr = build_int_cst (NULL, dst_r->index);
+
+	  fn = implicit_built_in_decls[BUILT_IN_EH_COPY_VALUES];
+	  src_nr = build_int_cst (NULL, src_r->index);
+	  x = gimple_build_call (fn, 2, dst_nr, src_nr);
+	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
+
+	  /* Update the flags for the outgoing edge.  */
+	  e = single_succ_edge (bb);
+	  gcc_assert (e->flags & EDGE_EH);
+	  e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
+
+	  /* If there are no more EH users of the landing pad, delete it.
+	     Note E is NULL after the loop iff no EH edge was found.  */
+	  FOR_EACH_EDGE (e, ei, e->dest->preds)
+	    if (e->flags & EDGE_EH)
+	      break;
+	  if (e == NULL)
+	    {
+	      eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
+	      remove_eh_landing_pad (lp);
+	    }
+	}
+
+      ret = true;
+    }
+  else
+    {
+      tree var;
+
+      /* When we don't have a destination region, this exception escapes
+	 up the call chain.  We resolve this by generating a call to the
+	 _Unwind_Resume library function.  */
+
+      /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
+	 with no arguments for C++ and Java.  Check for that.  */
+      if (src_r->use_cxa_end_cleanup)
+	{
+	  fn = implicit_built_in_decls[BUILT_IN_CXA_END_CLEANUP];
+	  x = gimple_build_call (fn, 0);
+	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
+	}
+      else
+	{
+	  /* Fetch the exception pointer for SRC_R, then pass it to
+	     _Unwind_Resume.  */
+	  fn = implicit_built_in_decls[BUILT_IN_EH_POINTER];
+	  src_nr = build_int_cst (NULL, src_r->index);
+	  x = gimple_build_call (fn, 1, src_nr);
+	  var = create_tmp_var (ptr_type_node, NULL);
+	  var = make_ssa_name (var, x);
+	  gimple_call_set_lhs (x, var);
+	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
+
+	  fn = implicit_built_in_decls[BUILT_IN_UNWIND_RESUME];
+	  x = gimple_build_call (fn, 1, var);
+	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
+	}
+
+      gcc_assert (EDGE_COUNT (bb->succs) == 0);
+    }
+
+  /* Finally, delete the RESX itself.  */
+  gsi_remove (&gsi, true);
+
+  return ret;
+}
+
+/* Lower every GIMPLE_RESX ending a basic block of the current function.
+   Returns TODO flags for the pass manager.  */
+
+static unsigned
+execute_lower_resx (void)
+{
+  basic_block bb;
+  struct pointer_map_t *mnt_map = pointer_map_create ();
+  bool dominance_invalidated = false;
+  bool any_rewritten = false;
+
+  FOR_EACH_BB (bb)
+    {
+      gimple last = last_stmt (bb);
+
+      if (last == NULL || !is_gimple_resx (last))
+	continue;
+
+      if (lower_resx (bb, last, mnt_map))
+	dominance_invalidated = true;
+      any_rewritten = true;
+    }
+
+  pointer_map_destroy (mnt_map);
+
+  /* If any edges were redirected, the dominator trees are stale.  */
+  if (dominance_invalidated)
+    {
+      free_dominance_info (CDI_DOMINATORS);
+      free_dominance_info (CDI_POST_DOMINATORS);
+    }
+
+  return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
+}
+
+/* Gate function: RESX lowering is only needed when exceptions are on.  */
+
+static bool
+gate_lower_resx (void)
+{
+  return flag_exceptions != 0;
+}
+
+/* Pass descriptor for RESX lowering ("resx"), run at the end of gimple
+   optimization.  */
+
+struct gimple_opt_pass pass_lower_resx =
+{
+ {
+  GIMPLE_PASS,
+  "resx",				/* name */
+  gate_lower_resx,			/* gate */
+  execute_lower_resx,			/* execute */
+  NULL,					/* sub */
+  NULL,					/* next */
+  0,					/* static_pass_number */
+  TV_TREE_EH,				/* tv_id */
+  PROP_gimple_lcf,			/* properties_required */
+  0,					/* properties_provided */
+  0,					/* properties_destroyed */
+  0,					/* todo_flags_start */
+  TODO_dump_func | TODO_verify_flow	/* todo_flags_finish */
+ }
+};
+
+
+/* At the end of inlining, we can lower EH_DISPATCH.  SRC is the block
+   ended by the GIMPLE_EH_DISPATCH statement STMT; the dispatch is
+   replaced by a SWITCH (for ERT_TRY) or COND (for ERT_ALLOWED_EXCEPTIONS)
+   on the region's filter value.  Return true when we have found some
+   duplicate labels and removed some edges.  */
+
+static bool
+lower_eh_dispatch (basic_block src, gimple stmt)
+{
+  gimple_stmt_iterator gsi;
+  int region_nr;
+  eh_region r;
+  tree filter, fn;
+  gimple x;
+  bool redirected = false;
+
+  region_nr = gimple_eh_dispatch_region (stmt);
+  r = get_eh_region_from_number (region_nr);
+
+  gsi = gsi_last_bb (src);
+
+  switch (r->type)
+    {
+    case ERT_TRY:
+      {
+	VEC (tree, heap) *labels = NULL;
+	tree default_label = NULL;
+	eh_catch c;
+	edge_iterator ei;
+	edge e;
+	struct pointer_set_t *seen_values = pointer_set_create ();
+
+	/* Collect the labels for a switch.  Zero the post_landing_pad
+	   field because we'll no longer have anything keeping these labels
+	   in existence and the optimizer will be free to merge these
+	   blocks at will.  */
+	for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
+	  {
+	    tree tp_node, flt_node, lab = c->label;
+	    bool have_label = false;
+
+	    c->label = NULL;
+	    tp_node = c->type_list;
+	    flt_node = c->filter_list;
+
+	    /* A NULL type list is a catch-all handler; it becomes the
+	       switch default and shadows everything after it.  */
+	    if (tp_node == NULL)
+	      {
+		default_label = lab;
+		break;
+	      }
+	    do
+	      {
+		/* Filter out duplicate labels that arise when this handler
+		   is shadowed by an earlier one.  When no labels are
+		   attached to the handler anymore, we remove
+		   the corresponding edge and then we delete unreachable
+		   blocks at the end of this pass.  */
+		if (! pointer_set_contains (seen_values, TREE_VALUE (flt_node)))
+		  {
+		    tree t = build3 (CASE_LABEL_EXPR, void_type_node,
+				     TREE_VALUE (flt_node), NULL, lab);
+		    VEC_safe_push (tree, heap, labels, t);
+		    pointer_set_insert (seen_values, TREE_VALUE (flt_node));
+		    have_label = true;
+		  }
+
+		tp_node = TREE_CHAIN (tp_node);
+		flt_node = TREE_CHAIN (flt_node);
+	      }
+	    while (tp_node);
+	    if (! have_label)
+	      {
+		remove_edge (find_edge (src, label_to_block (lab)));
+		redirected = true;
+	      }
+	  }
+
+	/* Clean up the edge flags.  */
+	FOR_EACH_EDGE (e, ei, src->succs)
+	  {
+	    if (e->flags & EDGE_FALLTHRU)
+	      {
+		/* If there was no catch-all, use the fallthru edge.  */
+		if (default_label == NULL)
+		  default_label = gimple_block_label (e->dest);
+		e->flags &= ~EDGE_FALLTHRU;
+	      }
+	  }
+	gcc_assert (default_label != NULL);
+
+	/* Don't generate a switch if there's only a default case.
+	   This is common in the form of try { A; } catch (...) { B; }.  */
+	if (labels == NULL)
+	  {
+	    e = single_succ_edge (src);
+	    e->flags |= EDGE_FALLTHRU;
+	  }
+	else
+	  {
+	    /* Fetch the region's filter value into an SSA name...  */
+	    fn = implicit_built_in_decls[BUILT_IN_EH_FILTER];
+	    x = gimple_build_call (fn, 1, build_int_cst (NULL, region_nr));
+	    filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
+	    filter = make_ssa_name (filter, x);
+	    gimple_call_set_lhs (x, filter);
+	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
+
+	    /* Turn the default label into a default case.  */
+	    default_label = build3 (CASE_LABEL_EXPR, void_type_node,
+				    NULL, NULL, default_label);
+	    sort_case_labels (labels);
+
+	    /* ... and switch on it.  */
+	    x = gimple_build_switch_vec (filter, default_label, labels);
+	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
+
+	    VEC_free (tree, heap, labels);
+	  }
+	pointer_set_destroy (seen_values);
+      }
+      break;
+
+    case ERT_ALLOWED_EXCEPTIONS:
+      {
+	edge b_e = BRANCH_EDGE (src);
+	edge f_e = FALLTHRU_EDGE (src);
+
+	/* Compare the region's filter value against the allowed filter
+	   and branch accordingly.  */
+	fn = implicit_built_in_decls[BUILT_IN_EH_FILTER];
+	x = gimple_build_call (fn, 1, build_int_cst (NULL, region_nr));
+	filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
+	filter = make_ssa_name (filter, x);
+	gimple_call_set_lhs (x, filter);
+	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
+
+	r->u.allowed.label = NULL;
+	x = gimple_build_cond (EQ_EXPR, filter,
+			       build_int_cst (TREE_TYPE (filter),
+					      r->u.allowed.filter),
+			       NULL_TREE, NULL_TREE);
+	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
+
+	b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
+	f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
+      }
+      break;
+
+    default:
+      gcc_unreachable ();
+    }
+
+  /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
+  gsi_remove (&gsi, true);
+  return redirected;
+}
+
+/* Lower every GIMPLE_EH_DISPATCH ending a basic block of the current
+   function.  Returns TODO flags for the pass manager.  */
+
+static unsigned
+execute_lower_eh_dispatch (void)
+{
+  basic_block bb;
+  bool any_rewritten = false;
+  bool redirected = false;
+
+  assign_filter_values ();
+
+  FOR_EACH_BB (bb)
+    {
+      gimple last = last_stmt (bb);
+
+      if (last == NULL || gimple_code (last) != GIMPLE_EH_DISPATCH)
+	continue;
+
+      if (lower_eh_dispatch (bb, last))
+	redirected = true;
+      any_rewritten = true;
+    }
+
+  /* Removing duplicate-label edges may have disconnected blocks.  */
+  if (redirected)
+    delete_unreachable_blocks ();
+
+  return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
+}
+
+/* Gate function: only run when the function still has an EH region tree.  */
+
+static bool
+gate_lower_eh_dispatch (void)
+{
+  return cfun->eh->region_tree != NULL;
+}
+
+/* Pass descriptor for EH_DISPATCH lowering ("ehdisp"), run at the end
+   of inlining.  */
+
+struct gimple_opt_pass pass_lower_eh_dispatch =
+{
+ {
+  GIMPLE_PASS,
+  "ehdisp",				/* name */
+  gate_lower_eh_dispatch,		/* gate */
+  execute_lower_eh_dispatch,		/* execute */
+  NULL,					/* sub */
+  NULL,					/* next */
+  0,					/* static_pass_number */
+  TV_TREE_EH,				/* tv_id */
+  PROP_gimple_lcf,			/* properties_required */
+  0,					/* properties_provided */
+  0,					/* properties_destroyed */
+  0,					/* todo_flags_start */
+  TODO_dump_func | TODO_verify_flow	/* todo_flags_finish */
+ }
+};
+\f
+/* Walk statements, see what regions are really referenced and remove
+   those that are unused.  */
+
+static void
+remove_unreachable_handlers (void)
+{
+  sbitmap r_reachable, lp_reachable;
+  eh_region region;
+  eh_landing_pad lp;
+  basic_block bb;
+  int lp_nr, r_nr;
+
+  /* Bitmaps of region and landing pad indices referenced by at least
+     one statement of the function.  */
+  r_reachable = sbitmap_alloc (VEC_length (eh_region, cfun->eh->region_array));
+  lp_reachable
+    = sbitmap_alloc (VEC_length (eh_landing_pad, cfun->eh->lp_array));
+  sbitmap_zero (r_reachable);
+  sbitmap_zero (lp_reachable);
+
+  FOR_EACH_BB (bb)
+    {
+      /* The iterator is initialized by the for loop; the old redundant
+	 initializer at the declaration has been dropped.  */
+      gimple_stmt_iterator gsi;
+
+      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+	{
+	  gimple stmt = gsi_stmt (gsi);
+	  lp_nr = lookup_stmt_eh_lp (stmt);
+
+	  /* Negative LP numbers are MUST_NOT_THROW regions which
+	     are not considered BB enders.  */
+	  if (lp_nr < 0)
+	    SET_BIT (r_reachable, -lp_nr);
+
+	  /* Positive LP numbers are real landing pads, and are BB enders.  */
+	  else if (lp_nr > 0)
+	    {
+	      gcc_assert (gsi_one_before_end_p (gsi));
+	      region = get_eh_region_from_lp_number (lp_nr);
+	      SET_BIT (r_reachable, region->index);
+	      SET_BIT (lp_reachable, lp_nr);
+	    }
+	}
+    }
+
+  if (dump_file)
+    {
+      fprintf (dump_file, "Before removal of unreachable regions:\n");
+      dump_eh_tree (dump_file, cfun);
+      fprintf (dump_file, "Reachable regions: ");
+      dump_sbitmap_file (dump_file, r_reachable);
+      fprintf (dump_file, "Reachable landing pads: ");
+      dump_sbitmap_file (dump_file, lp_reachable);
+    }
+
+  /* Remove every region and landing pad not marked above.  Index 0 is
+     skipped; it is not a valid region or landing pad number.  */
+  for (r_nr = 1;
+       VEC_iterate (eh_region, cfun->eh->region_array, r_nr, region); ++r_nr)
+    if (region && !TEST_BIT (r_reachable, r_nr))
+      {
+	if (dump_file)
+	  fprintf (dump_file, "Removing unreachable region %d\n", r_nr);
+	remove_eh_handler (region);
+      }
+
+  for (lp_nr = 1;
+       VEC_iterate (eh_landing_pad, cfun->eh->lp_array, lp_nr, lp); ++lp_nr)
+    if (lp && !TEST_BIT (lp_reachable, lp_nr))
+      {
+	if (dump_file)
+	  fprintf (dump_file, "Removing unreachable landing pad %d\n", lp_nr);
+	remove_eh_landing_pad (lp);
+      }
+
+  if (dump_file)
+    {
+      fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
+      dump_eh_tree (dump_file, cfun);
+      fprintf (dump_file, "\n\n");
+    }
+
+  sbitmap_free (r_reachable);
+  sbitmap_free (lp_reachable);
+
+#ifdef ENABLE_CHECKING
+  verify_eh_tree (cfun);
+#endif
+}
+
+/* Remove regions that do not have landing pads.  This assumes
+   that remove_unreachable_handlers has already been run, and
+   that we've just manipulated the landing pads since then.  */
+
+static void
+remove_unreachable_handlers_no_lp (void)
+{
+  eh_region region;
+  int ix;
+
+  for (ix = 1; VEC_iterate (eh_region, cfun->eh->region_array, ix, region);
+       ++ix)
+    {
+      /* MUST_NOT_THROW regions legitimately have no landing pads.  */
+      if (region == NULL
+	  || region->landing_pads != NULL
+	  || region->type == ERT_MUST_NOT_THROW)
+	continue;
+
+      if (dump_file)
+	fprintf (dump_file, "Removing unreachable region %d\n", ix);
+      remove_eh_handler (region);
+    }
+}
+
+/* Undo critical edge splitting on an EH landing pad.  Earlier, we
+   optimistically split all sorts of edges, including EH edges.  The
+   optimization passes in between may not have needed them; if not,
+   we should undo the split.
+
+   Recognize this case by having one EH edge incoming to the BB and
+   one normal edge outgoing; BB should be empty apart from the
+   post_landing_pad label.
+
+   Note that this is slightly different from the empty handler case
+   handled by cleanup_empty_eh, in that the actual handler may yet
+   have actual code but the landing pad has been separated from the
+   handler.  As such, cleanup_empty_eh relies on this transformation
+   having been done first.  Returns true if the CFG was changed.  */
+
+static bool
+unsplit_eh (eh_landing_pad lp)
+{
+  basic_block bb = label_to_block (lp->post_landing_pad);
+  gimple_stmt_iterator gsi;
+  edge e_in, e_out;
+
+  /* Quickly check the edge counts on BB for singularity.  */
+  if (EDGE_COUNT (bb->preds) != 1 || EDGE_COUNT (bb->succs) != 1)
+    return false;
+  e_in = EDGE_PRED (bb, 0);
+  e_out = EDGE_SUCC (bb, 0);
+
+  /* Input edge must be EH and output edge must be normal.  */
+  if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
+    return false;
+
+  /* The block must be empty except for the labels and debug insns.  */
+  gsi = gsi_after_labels (bb);
+  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
+    gsi_next_nondebug (&gsi);
+  if (!gsi_end_p (gsi))
+    return false;
+
+  /* The destination block must not already have a landing pad
+     for a different region.  */
+  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
+    {
+      gimple stmt = gsi_stmt (gsi);
+      tree lab;
+      int lp_nr;
+
+      if (gimple_code (stmt) != GIMPLE_LABEL)
+	break;
+      lab = gimple_label_label (stmt);
+      lp_nr = EH_LANDING_PAD_NR (lab);
+      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
+	return false;
+    }
+
+  /* The new destination block must not already be a destination of
+     the source block, lest we merge fallthru and eh edges and get
+     all sorts of confused.  */
+  if (find_edge (e_in->src, e_out->dest))
+    return false;
+
+  /* ??? We can get degenerate phis due to cfg cleanups.  I would have
+     thought this should have been cleaned up by a phicprop pass, but
+     that doesn't appear to handle virtuals.  Propagate by hand.  */
+  if (!gimple_seq_empty_p (phi_nodes (bb)))
+    {
+      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
+	{
+	  gimple use_stmt, phi = gsi_stmt (gsi);
+	  tree lhs = gimple_phi_result (phi);
+	  /* With a single predecessor the PHI has exactly one argument;
+	     replace all uses of the result with it.  */
+	  tree rhs = gimple_phi_arg_def (phi, 0);
+	  use_operand_p use_p;
+	  imm_use_iterator iter;
+
+	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
+	    {
+	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
+		SET_USE (use_p, rhs);
+	    }
+
+	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
+	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
+
+	  remove_phi_node (&gsi, true);
+	}
+    }
+
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
+	     lp->index, e_out->dest->index);
+
+  /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
+     a successor edge, humor it.  But do the real CFG change with the
+     predecessor of E_OUT in order to preserve the ordering of arguments
+     to the PHI nodes in E_OUT->DEST.  */
+  redirect_eh_edge_1 (e_in, e_out->dest, false);
+  redirect_edge_pred (e_out, e_in->src);
+  e_out->flags = e_in->flags;
+  e_out->probability = e_in->probability;
+  e_out->count = e_in->count;
+  remove_edge (e_in);
+
+  return true;
+}
+
+/* Examine each landing pad block and see if it matches unsplit_eh.
+   Returns true if any landing pad was unsplit.  */
+
+static bool
+unsplit_all_eh (void)
+{
+  eh_landing_pad lp;
+  bool any_changed = false;
+  int ix;
+
+  for (ix = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, ix, lp); ++ix)
+    if (lp != NULL && unsplit_eh (lp))
+      any_changed = true;
+
+  return any_changed;
+}
+
+/* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
+   to OLD_BB to NEW_BB; return true on success, false on failure.
+
+   OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
+   PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
+   Virtual PHIs may be deleted and marked for renaming.  */
+
+static bool
+cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
+			     edge old_bb_out, bool change_region)
+{
+  gimple_stmt_iterator ngsi, ogsi;
+  edge_iterator ei;
+  edge e;
+  bitmap rename_virts;
+  bitmap ophi_handled;
+
+  /* Start fresh: discard any var map entries left on OLD_BB's preds.  */
+  FOR_EACH_EDGE (e, ei, old_bb->preds)
+    redirect_edge_var_map_clear (e);
+
+  ophi_handled = BITMAP_ALLOC (NULL);
+  rename_virts = BITMAP_ALLOC (NULL);
+
+  /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
+     for the edges we're going to move.  */
+  for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
+    {
+      gimple ophi, nphi = gsi_stmt (ngsi);
+      tree nresult, nop;
+
+      nresult = gimple_phi_result (nphi);
+      nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
+
+      /* Find the corresponding PHI in OLD_BB so we can forward-propagate
+	 the source ssa_name.  */
+      ophi = NULL;
+      for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
+	{
+	  ophi = gsi_stmt (ogsi);
+	  if (gimple_phi_result (ophi) == nop)
+	    break;
+	  ophi = NULL;
+	}
+
+      /* If we did find the corresponding PHI, copy those inputs.  */
+      if (ophi)
+	{
+	  bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
+	  FOR_EACH_EDGE (e, ei, old_bb->preds)
+	    {
+	      location_t oloc;
+	      tree oop;
+
+	      /* Only EH edges are being moved; skip the others.  */
+	      if ((e->flags & EDGE_EH) == 0)
+		continue;
+	      oop = gimple_phi_arg_def (ophi, e->dest_idx);
+	      oloc = gimple_phi_arg_location (ophi, e->dest_idx);
+	      redirect_edge_var_map_add (e, nresult, oop, oloc);
+	    }
+	}
+      /* If we didn't find the PHI, but it's a VOP, remember to rename
+	 it later, assuming all other tests succeed.  */
+      else if (!is_gimple_reg (nresult))
+	bitmap_set_bit (rename_virts, SSA_NAME_VERSION (nresult));
+      /* If we didn't find the PHI, and it's a real variable, we know
+	 from the fact that OLD_BB is tree_empty_eh_handler_p that the
+	 variable is unchanged from input to the block and we can simply
+	 re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
+      else
+	{
+	  location_t nloc
+	    = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
+	  FOR_EACH_EDGE (e, ei, old_bb->preds)
+	    redirect_edge_var_map_add (e, nresult, nop, nloc);
+	}
+    }
+
+  /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
+     we don't know what values from the other edges into NEW_BB to use.  */
+  for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
+    {
+      gimple ophi = gsi_stmt (ogsi);
+      tree oresult = gimple_phi_result (ophi);
+      if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
+	goto fail;
+    }
+
+  /* At this point we know that the merge will succeed.  Remove the PHI
+     nodes for the virtuals that we want to rename.  */
+  if (!bitmap_empty_p (rename_virts))
+    {
+      for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); )
+	{
+	  gimple nphi = gsi_stmt (ngsi);
+	  tree nresult = gimple_phi_result (nphi);
+	  if (bitmap_bit_p (rename_virts, SSA_NAME_VERSION (nresult)))
+	    {
+	      mark_virtual_phi_result_for_renaming (nphi);
+	      remove_phi_node (&ngsi, true);
+	    }
+	  else
+	    gsi_next (&ngsi);
+	}
+    }
+
+  /* Finally, move the edges and update the PHIs.  */
+  for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
+    if (e->flags & EDGE_EH)
+      {
+	redirect_eh_edge_1 (e, new_bb, change_region);
+	redirect_edge_succ (e, new_bb);
+	flush_pending_stmts (e);
+      }
+    else
+      ei_next (&ei);
+
+  BITMAP_FREE (ophi_handled);
+  BITMAP_FREE (rename_virts);
+  return true;
+
+ fail:
+  /* Undo the var map additions made above; nothing was redirected.  */
+  FOR_EACH_EDGE (e, ei, old_bb->preds)
+    redirect_edge_var_map_clear (e);
+  BITMAP_FREE (ophi_handled);
+  BITMAP_FREE (rename_virts);
+  return false;
+}
+
+/* A subroutine of cleanup_empty_eh.  Move a landing pad LP from its
+   old region to NEW_REGION at BB.  E_OUT is the single edge leaving
+   BB, which becomes a fallthru edge.  */
+
+static void
+cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
+			  eh_landing_pad lp, eh_region new_region)
+{
+  gimple_stmt_iterator gsi;
+  eh_landing_pad *pp;
+
+  /* Unlink LP from its current region's singly-linked landing pad
+     list...  */
+  for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
+    continue;
+  *pp = lp->next_lp;
+
+  /* ... and push it onto NEW_REGION's list.  */
+  lp->region = new_region;
+  lp->next_lp = new_region->landing_pads;
+  new_region->landing_pads = lp;
+
+  /* Delete the RESX that was matched within the empty handler block.  */
+  gsi = gsi_last_bb (bb);
+  mark_virtual_ops_for_renaming (gsi_stmt (gsi));
+  gsi_remove (&gsi, true);
+
+  /* Clean up E_OUT for the fallthru.  */
+  e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
+  e_out->probability = REG_BR_PROB_BASE;
+}
+
+/* A subroutine of cleanup_empty_eh.  Handle more complex cases of
+   unsplitting than unsplit_eh was prepared to handle, e.g. when
+   multiple incoming edges and phis are involved.  BB is the totally
+   empty post-landing-pad block of LP, E_OUT its sole outgoing edge.
+   Returns true if the unsplit succeeded.  */
+
+static bool
+cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
+{
+  gimple_stmt_iterator gsi;
+  tree lab;
+
+  /* We really ought not have totally lost everything following
+     a landing pad label.  Given that BB is empty, there had better
+     be a successor.  */
+  gcc_assert (e_out != NULL);
+
+  /* The destination block must not already have a landing pad
+     for a different region.  */
+  lab = NULL;
+  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
+    {
+      gimple stmt = gsi_stmt (gsi);
+      int lp_nr;
+
+      /* Labels come first in a block; stop at the first non-label.  */
+      if (gimple_code (stmt) != GIMPLE_LABEL)
+	break;
+      lab = gimple_label_label (stmt);
+      lp_nr = EH_LANDING_PAD_NR (lab);
+      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
+	return false;
+    }
+
+  /* Attempt to move the PHIs into the successor block.  */
+  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
+    {
+      if (dump_file && (dump_flags & TDF_DETAILS))
+	fprintf (dump_file,
+		 "Unsplit EH landing pad %d to block %i "
+		 "(via cleanup_empty_eh).\n",
+		 lp->index, e_out->dest->index);
+      return true;
+    }
+
+  return false;
+}
+
+/* Examine the block associated with LP to determine if it's an empty
+   handler for its EH region.  If so, attempt to redirect EH edges to
+   an outer region.  Return true if the CFG was updated in any way.  This
+   is similar to jump forwarding, just across EH edges.  */
+
+static bool
+cleanup_empty_eh (eh_landing_pad lp)
+{
+  basic_block bb = label_to_block (lp->post_landing_pad);
+  gimple_stmt_iterator gsi;
+  gimple resx;
+  eh_region new_region;
+  edge_iterator ei;
+  edge e, e_out;
+  bool has_non_eh_pred;
+  int new_lp_nr;
+
+  /* There can be zero or one edges out of BB.  This is the quickest test.  */
+  switch (EDGE_COUNT (bb->succs))
+    {
+    case 0:
+      e_out = NULL;
+      break;
+    case 1:
+      e_out = EDGE_SUCC (bb, 0);
+      break;
+    default:
+      return false;
+    }
+  gsi = gsi_after_labels (bb);
+
+  /* Make sure to skip debug statements.  */
+  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
+    gsi_next_nondebug (&gsi);
+
+  /* If the block is totally empty, look for more unsplitting cases.  */
+  if (gsi_end_p (gsi))
+    return cleanup_empty_eh_unsplit (bb, e_out, lp);
+
+  /* The block should consist only of a single RESX statement.  */
+  resx = gsi_stmt (gsi);
+  if (!is_gimple_resx (resx))
+    return false;
+  gcc_assert (gsi_one_before_end_p (gsi));
+
+  /* Determine if there are non-EH edges, or resx edges into the handler.  */
+  has_non_eh_pred = false;
+  FOR_EACH_EDGE (e, ei, bb->preds)
+    if (!(e->flags & EDGE_EH))
+      has_non_eh_pred = true;
+
+  /* Find the handler that's outer of the empty handler by looking at
+     where the RESX instruction was vectored.  */
+  new_lp_nr = lookup_stmt_eh_lp (resx);
+  new_region = get_eh_region_from_lp_number (new_lp_nr);
+
+  /* If there's no destination region within the current function,
+     redirection is trivial via removing the throwing statements from
+     the EH region, removing the EH edges, and allowing the block
+     to go unreachable.  */
+  if (new_region == NULL)
+    {
+      gcc_assert (e_out == NULL);
+      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
+	if (e->flags & EDGE_EH)
+	  {
+	    gimple stmt = last_stmt (e->src);
+	    remove_stmt_from_eh_lp (stmt);
+	    remove_edge (e);
+	  }
+	else
+	  ei_next (&ei);
+      goto succeed;
+    }
+
+  /* If the destination region is a MUST_NOT_THROW, allow the runtime
+     to handle the abort and allow the blocks to go unreachable.  */
+  if (new_region->type == ERT_MUST_NOT_THROW)
+    {
+      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
+	if (e->flags & EDGE_EH)
+	  {
+	    gimple stmt = last_stmt (e->src);
+	    remove_stmt_from_eh_lp (stmt);
+	    /* Re-enter the throwing statement under the outer
+	       MUST_NOT_THROW region directly.  */
+	    add_stmt_to_eh_lp (stmt, new_lp_nr);
+	    remove_edge (e);
+	  }
+	else
+	  ei_next (&ei);
+      goto succeed;
+    }
+
+  /* Try to redirect the EH edges and merge the PHIs into the destination
+     landing pad block.  If the merge succeeds, we'll already have redirected
+     all the EH edges.  The handler itself will go unreachable if there were
+     no normal edges.  */
+  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
+    goto succeed;
+
+  /* Finally, if all input edges are EH edges, then we can (potentially)
+     reduce the number of transfers from the runtime by moving the landing
+     pad from the original region to the new region.  This is a win when
+     we remove the last CLEANUP region along a particular exception
+     propagation path.  Since nothing changes except for the region with
+     which the landing pad is associated, the PHI nodes do not need to be
+     adjusted at all.  */
+  if (!has_non_eh_pred)
+    {
+      cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
+      if (dump_file && (dump_flags & TDF_DETAILS))
+	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
+		 lp->index, new_region->index);
+
+      /* ??? The CFG didn't change, but we may have rendered the
+	 old EH region unreachable.  Trigger a cleanup there.  */
+      return true;
+    }
+
+  return false;
+
+ succeed:
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
+  remove_eh_landing_pad (lp);
+  return true;
+}
+
+/* Walk every landing pad recorded for the current function and try to
+   eliminate each one whose post_landing_pad block is empty.  Returns
+   true if any landing pad was removed or otherwise changed.  */
+
+static bool
+cleanup_all_empty_eh (void)
+{
+  eh_landing_pad lp;
+  bool any_changed = false;
+  int ix;
+
+  /* Index 0 of lp_array is unused; start at 1.  Deleted pads leave
+     NULL holes in the array, so skip those.  */
+  for (ix = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, ix, lp); ++ix)
+    {
+      if (lp != NULL)
+        any_changed |= cleanup_empty_eh (lp);
+    }
+
+  return any_changed;
+}
+
+/* Perform cleanups and lowering of exception handling
+ 1) CLEANUP regions whose handlers do nothing are optimized out
+ 2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
+ 3) Info about regions containing instructions, and regions
+ reachable via local EH edges, is collected
+ 4) The EH tree is pruned of regions that are no longer necessary.
+
+ TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
+ Unify those that have the same failure decl and locus.
+*/
+
+/* Main entry point of the EH cleanup pass.  Removes unreachable
+   handlers, then (when optimizing) unsplits and empties EH regions.
+   Returns the TODO flags for any follow-up work required.  */
+
+static unsigned int
+execute_cleanup_eh (void)
+{
+  bool any_change;
+
+  /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
+     looking up unreachable landing pads.  */
+  remove_unreachable_handlers ();
+
+  /* Watch out for the region tree vanishing due to all unreachable;
+     the remaining transformations are also optimizations only.  */
+  if (cfun->eh->region_tree == NULL || !optimize)
+    return 0;
+
+  any_change = unsplit_all_eh ();
+  any_change |= cleanup_all_empty_eh ();
+
+  if (!any_change)
+    return 0;
+
+  free_dominance_info (CDI_DOMINATORS);
+  free_dominance_info (CDI_POST_DOMINATORS);
+
+  /* We delayed all basic block deletion, as we may have performed
+     cleanups on EH edges while non-EH edges were still present.  */
+  delete_unreachable_blocks ();
+
+  /* We manipulated the landing pads.  Remove any region that no
+     longer has a landing pad.  */
+  remove_unreachable_handlers_no_lp ();
+
+  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
+}
+
+/* Gate for the EH cleanup pass: nothing to do unless the function
+   has an EH region tree at all.  */
+
+static bool
+gate_cleanup_eh (void)
+{
+  if (cfun->eh == NULL)
+    return false;
+  return cfun->eh->region_tree != NULL;
+}
+
+/* Descriptor for the "ehcleanup" GIMPLE pass.  Gated by
+ gate_cleanup_eh (skips functions with no EH region tree), executed by
+ execute_cleanup_eh.  Requires lowered control flow (PROP_gimple_lcf)
+ and requests a function dump on finish.  The fields are positional
+ initializers for struct opt_pass; do not reorder them.  */
+struct gimple_opt_pass pass_cleanup_eh = {
+ {
+ GIMPLE_PASS,
+ "ehcleanup", /* name */
+ gate_cleanup_eh, /* gate */
+ execute_cleanup_eh, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_TREE_EH, /* tv_id */
+ PROP_gimple_lcf, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_func /* todo_flags_finish */
+ }
+};
+\f
+/* Verify that the block containing STMT as its last statement has
+   precisely the edges that make_eh_edges would create for it: at most
+   one EH edge, present exactly when STMT has a landing pad, and
+   targeting that pad's post-landing-pad block.  Returns true if an
+   inconsistency was found, false if everything checks out.  */
+
+bool
+verify_eh_edges (gimple stmt)
+{
+  basic_block bb = gimple_bb (stmt);
+  eh_landing_pad lp = NULL;
+  int lp_nr = lookup_stmt_eh_lp (stmt);
+  edge found_eh = NULL;
+  edge_iterator ei;
+  edge e;
+
+  /* A positive number is a landing-pad index; zero or negative means
+     no landing pad for this statement.  */
+  if (lp_nr > 0)
+    lp = get_eh_landing_pad_from_number (lp_nr);
+
+  /* There must be at most one EH successor.  */
+  FOR_EACH_EDGE (e, ei, bb->succs)
+    {
+      if ((e->flags & EDGE_EH) == 0)
+        continue;
+      if (found_eh != NULL)
+        {
+          error ("BB %i has multiple EH edges", bb->index);
+          return true;
+        }
+      found_eh = e;
+    }
+
+  /* With no landing pad, there must be no EH edge either.  */
+  if (lp == NULL)
+    {
+      if (found_eh != NULL)
+        {
+          error ("BB %i can not throw but has an EH edge", bb->index);
+          return true;
+        }
+      return false;
+    }
+
+  /* A landing pad is only meaningful on a statement that can throw.  */
+  if (!stmt_could_throw_p (stmt))
+    {
+      error ("BB %i last statement has incorrectly set lp", bb->index);
+      return true;
+    }
+
+  /* Conversely, a landing pad requires an EH edge...  */
+  if (found_eh == NULL)
+    {
+      error ("BB %i is missing an EH edge", bb->index);
+      return true;
+    }
+
+  /* ...and that edge must lead to the pad's post-landing-pad block.  */
+  if (found_eh->dest != label_to_block (lp->post_landing_pad))
+    {
+      error ("Incorrect EH edge %i->%i", bb->index, found_eh->dest->index);
+      return true;
+    }
+
+  return false;
+}
+
+/* Similarly, but handle GIMPLE_EH_DISPATCH specifically: verify that
+ the block containing STMT has exactly one outgoing edge per handler
+ label of the dispatch region, plus a fallthru edge unless a catch-all
+ handler makes the fallthru unreachable. Returns true on error. */
+
+bool
+verify_eh_dispatch_edge (gimple stmt)
+{
+ eh_region r;
+ eh_catch c;
+ basic_block src, dst;
+ bool want_fallthru = true;
+ edge_iterator ei;
+ edge e, fall_edge;
+
+ r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
+ src = gimple_bb (stmt);
+
+ /* We use e->aux below as a "this edge was expected" mark; it must
+ start out clear on every successor edge. */
+ FOR_EACH_EDGE (e, ei, src->succs)
+ gcc_assert (e->aux == NULL);
+
+ /* First pass: mark the edge to each reachable handler label. */
+ switch (r->type)
+ {
+ case ERT_TRY:
+ for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
+ {
+ dst = label_to_block (c->label);
+ e = find_edge (src, dst);
+ if (e == NULL)
+ {
+ error ("BB %i is missing an edge", src->index);
+ return true;
+ }
+ e->aux = (void *)e;
+
+ /* A catch-all handler doesn't have a fallthru. Handlers after
+ a catch-all are unreachable, so stop marking here. */
+ if (c->type_list == NULL)
+ {
+ want_fallthru = false;
+ break;
+ }
+ }
+ break;
+
+ case ERT_ALLOWED_EXCEPTIONS:
+ /* An allowed-exceptions region has a single failure label. */
+ dst = label_to_block (r->u.allowed.label);
+ e = find_edge (src, dst);
+ if (e == NULL)
+ {
+ error ("BB %i is missing an edge", src->index);
+ return true;
+ }
+ e->aux = (void *)e;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* Second pass: every successor edge must be either the (single)
+ fallthru or one of the edges marked above; clear the marks as we
+ go so the aux fields are NULL again on exit. */
+ fall_edge = NULL;
+ FOR_EACH_EDGE (e, ei, src->succs)
+ {
+ if (e->flags & EDGE_FALLTHRU)
+ {
+ if (fall_edge != NULL)
+ {
+ error ("BB %i too many fallthru edges", src->index);
+ return true;
+ }
+ fall_edge = e;
+ }
+ else if (e->aux)
+ e->aux = NULL;
+ else
+ {
+ error ("BB %i has incorrect edge", src->index);
+ return true;
+ }
+ }
+ /* Finally, the fallthru edge must exist exactly when no catch-all
+ handler swallowed it. */
+ if ((fall_edge != NULL) ^ want_fallthru)
+ {
+ error ("BB %i has incorrect fallthru edge", src->index);
+ return true;
+ }
+
+ return false;
+}