/* Control flow functions for trees.
- Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
- Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
+ 2010 Free Software Foundation, Inc.
Contributed by Diego Novillo <dnovillo@redhat.com>
This file is part of GCC.
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
-#include "rtl.h"
#include "tm_p.h"
-#include "hard-reg-set.h"
#include "basic-block.h"
#include "output.h"
#include "flags.h"
#include "function.h"
-#include "expr.h"
#include "ggc.h"
#include "langhooks.h"
-#include "diagnostic.h"
+#include "tree-pretty-print.h"
+#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-pass.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "except.h"
#include "cfgloop.h"
#include "cfglayout.h"
static struct pointer_map_t *edge_to_cases;
+/* If we record edge_to_cases, this bitmap will hold indexes
+ of basic blocks that end in a GIMPLE_SWITCH which we touched
+ due to edge manipulations. */
+
+static bitmap touched_switch_bbs;
+
/* CFG statistics. */
struct cfg_stats_d
{
static edge find_taken_edge_cond_expr (basic_block, tree);
static edge find_taken_edge_switch_expr (basic_block, tree);
static tree find_case_label_for_value (gimple, tree);
+static void group_case_labels_stmt (gimple);
void
init_empty_tree_cfg_for_function (struct function *fn)
dump_end (TDI_vcg, vcg_file);
}
}
-
-#ifdef ENABLE_CHECKING
- verify_stmts ();
-#endif
}
static unsigned int
gcc_assert (!e);
/* Create and initialize a new basic block. Since alloc_block uses
- ggc_alloc_cleared to allocate a basic block, we do not have to
- clear the newly allocated basic block here. */
+ GC allocation that clears memory to allocate a basic block, we do
+ not have to clear the newly allocated basic block here. */
bb = alloc_block ();
bb->index = last_basic_block;
bb->flags = BB_NEW;
- bb->il.gimple = GGC_CNEW (struct gimple_bb_info);
+ bb->il.gimple = ggc_alloc_cleared_gimple_bb_info ();
set_bb_seq (bb, h ? (gimple_seq) h : gimple_seq_alloc ());
/* Add the new block to the linked list of blocks. */
create abnormal edges to them. */
make_eh_edges (last);
+ /* BUILTIN_RETURN is really a return statement. */
+ if (gimple_call_builtin_p (last, BUILT_IN_RETURN))
+ make_edge (bb, EXIT_BLOCK_PTR, 0), fallthru = false;
/* Some calls are known not to return. */
- fallthru = !(gimple_call_flags (last) & ECF_NORETURN);
+ else
+ fallthru = !(gimple_call_flags (last) & ECF_NORETURN);
break;
case GIMPLE_ASSIGN:
{
gcc_assert (edge_to_cases == NULL);
edge_to_cases = pointer_map_create ();
+ touched_switch_bbs = BITMAP_ALLOC (NULL);
}
/* Return nonzero if we are recording information for case labels. */
void
end_recording_case_labels (void)
{
+ bitmap_iterator bi;
+ unsigned i;
pointer_map_traverse (edge_to_cases, edge_to_cases_cleanup, NULL);
pointer_map_destroy (edge_to_cases);
edge_to_cases = NULL;
+ EXECUTE_IF_SET_IN_BITMAP (touched_switch_bbs, 0, i, bi)
+ {
+ basic_block bb = BASIC_BLOCK (i);
+ if (bb)
+ {
+ gimple stmt = last_stmt (bb);
+ if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
+ group_case_labels_stmt (stmt);
+ }
+ }
+ BITMAP_FREE (touched_switch_bbs);
}
/* If we are inside a {start,end}_recording_cases block, then return
/* We would die hard when faced by an undefined label. Emit a label to
the very first basic block. This will hopefully make even the dataflow
and undefined variable warnings quite right. */
- if ((errorcount || sorrycount) && uid < 0)
+ if (seen_error () && uid < 0)
{
gimple_stmt_iterator gsi = gsi_start_bb (BASIC_BLOCK (NUM_FIXED_BLOCKS));
gimple stmt;
free (label_for_bb);
}
-/* Look for blocks ending in a multiway branch (a SWITCH_EXPR in GIMPLE),
- and scan the sorted vector of cases. Combine the ones jumping to the
- same label.
+/* Scan the sorted vector of cases in STMT (a GIMPLE_SWITCH) and combine
+ the ones jumping to the same label.
Eg. three separate entries 1: 2: 3: become one entry 1..3: */
-void
-group_case_labels (void)
+static void
+group_case_labels_stmt (gimple stmt)
{
- basic_block bb;
+ int old_size = gimple_switch_num_labels (stmt);
+ int i, j, new_size = old_size;
+ tree default_case = NULL_TREE;
+ tree default_label = NULL_TREE;
+ bool has_default;
- FOR_EACH_BB (bb)
+  /* The default label is always the first case in a switch
+     statement after gimplification if it was not optimized
+     away.  */
+ if (!CASE_LOW (gimple_switch_default_label (stmt))
+ && !CASE_HIGH (gimple_switch_default_label (stmt)))
{
- gimple stmt = last_stmt (bb);
- if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
+ default_case = gimple_switch_default_label (stmt);
+ default_label = CASE_LABEL (default_case);
+ has_default = true;
+ }
+ else
+ has_default = false;
+
+ /* Look for possible opportunities to merge cases. */
+ if (has_default)
+ i = 1;
+ else
+ i = 0;
+ while (i < old_size)
+ {
+ tree base_case, base_label, base_high;
+ base_case = gimple_switch_label (stmt, i);
+
+ gcc_assert (base_case);
+ base_label = CASE_LABEL (base_case);
+
+ /* Discard cases that have the same destination as the
+ default case. */
+ if (base_label == default_label)
+ {
+ gimple_switch_set_label (stmt, i, NULL_TREE);
+ i++;
+ new_size--;
+ continue;
+ }
+
+ base_high = CASE_HIGH (base_case)
+ ? CASE_HIGH (base_case)
+ : CASE_LOW (base_case);
+ i++;
+
+ /* Try to merge case labels. Break out when we reach the end
+ of the label vector or when we cannot merge the next case
+ label with the current one. */
+ while (i < old_size)
{
- int old_size = gimple_switch_num_labels (stmt);
- int i, j, new_size = old_size;
- tree default_case = NULL_TREE;
- tree default_label = NULL_TREE;
- bool has_default;
-
- /* The default label is always the first case in a switch
- statement after gimplification if it was not optimized
- away */
- if (!CASE_LOW (gimple_switch_default_label (stmt))
- && !CASE_HIGH (gimple_switch_default_label (stmt)))
+ tree merge_case = gimple_switch_label (stmt, i);
+ tree merge_label = CASE_LABEL (merge_case);
+ tree t = int_const_binop (PLUS_EXPR, base_high,
+ integer_one_node, 1);
+
+ /* Merge the cases if they jump to the same place,
+ and their ranges are consecutive. */
+ if (merge_label == base_label
+ && tree_int_cst_equal (CASE_LOW (merge_case), t))
{
- default_case = gimple_switch_default_label (stmt);
- default_label = CASE_LABEL (default_case);
- has_default = true;
+ base_high = CASE_HIGH (merge_case) ?
+ CASE_HIGH (merge_case) : CASE_LOW (merge_case);
+ CASE_HIGH (base_case) = base_high;
+ gimple_switch_set_label (stmt, i, NULL_TREE);
+ new_size--;
+ i++;
}
else
- has_default = false;
-
- /* Look for possible opportunities to merge cases. */
- if (has_default)
- i = 1;
- else
- i = 0;
- while (i < old_size)
- {
- tree base_case, base_label, base_high;
- base_case = gimple_switch_label (stmt, i);
-
- gcc_assert (base_case);
- base_label = CASE_LABEL (base_case);
+ break;
+ }
+ }
- /* Discard cases that have the same destination as the
- default case. */
- if (base_label == default_label)
- {
- gimple_switch_set_label (stmt, i, NULL_TREE);
- i++;
- new_size--;
- continue;
- }
+ /* Compress the case labels in the label vector, and adjust the
+ length of the vector. */
+ for (i = 0, j = 0; i < new_size; i++)
+ {
+ while (! gimple_switch_label (stmt, j))
+ j++;
+ gimple_switch_set_label (stmt, i,
+ gimple_switch_label (stmt, j++));
+ }
- base_high = CASE_HIGH (base_case)
- ? CASE_HIGH (base_case)
- : CASE_LOW (base_case);
- i++;
+ gcc_assert (new_size <= old_size);
+ gimple_switch_set_num_labels (stmt, new_size);
+}
- /* Try to merge case labels. Break out when we reach the end
- of the label vector or when we cannot merge the next case
- label with the current one. */
- while (i < old_size)
- {
- tree merge_case = gimple_switch_label (stmt, i);
- tree merge_label = CASE_LABEL (merge_case);
- tree t = int_const_binop (PLUS_EXPR, base_high,
- integer_one_node, 1);
-
- /* Merge the cases if they jump to the same place,
- and their ranges are consecutive. */
- if (merge_label == base_label
- && tree_int_cst_equal (CASE_LOW (merge_case), t))
- {
- base_high = CASE_HIGH (merge_case) ?
- CASE_HIGH (merge_case) : CASE_LOW (merge_case);
- CASE_HIGH (base_case) = base_high;
- gimple_switch_set_label (stmt, i, NULL_TREE);
- new_size--;
- i++;
- }
- else
- break;
- }
- }
+/* Look for blocks ending in a multiway branch (a GIMPLE_SWITCH),
+ and scan the sorted vector of cases. Combine the ones jumping to the
+ same label. */
- /* Compress the case labels in the label vector, and adjust the
- length of the vector. */
- for (i = 0, j = 0; i < new_size; i++)
- {
- while (! gimple_switch_label (stmt, j))
- j++;
- gimple_switch_set_label (stmt, i,
- gimple_switch_label (stmt, j++));
- }
+void
+group_case_labels (void)
+{
+ basic_block bb;
- gcc_assert (new_size <= old_size);
- gimple_switch_set_num_labels (stmt, new_size);
- }
+ FOR_EACH_BB (bb)
+ {
+ gimple stmt = last_stmt (bb);
+ if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
+ group_case_labels_stmt (stmt);
}
}
return false;
/* It must be possible to eliminate all phi nodes in B. If ssa form
- is not up-to-date, we cannot eliminate any phis; however, if only
- some symbols as whole are marked for renaming, this is not a problem,
- as phi nodes for those symbols are irrelevant in updating anyway. */
+ is not up-to-date and a name-mapping is registered, we cannot eliminate
+ any phis. Symbols marked for renaming are never a problem though. */
phis = phi_nodes (b);
- if (!gimple_seq_empty_p (phis))
- {
- gimple_stmt_iterator i;
+ if (!gimple_seq_empty_p (phis)
+ && name_mappings_registered_p ())
+ return false;
- if (name_mappings_registered_p ())
+ /* When not optimizing, don't merge if we'd lose goto_locus. */
+ if (!optimize
+ && single_succ_edge (a)->goto_locus != UNKNOWN_LOCATION)
+ {
+ location_t goto_locus = single_succ_edge (a)->goto_locus;
+ gimple_stmt_iterator prev, next;
+ prev = gsi_last_nondebug_bb (a);
+ next = gsi_after_labels (b);
+ if (!gsi_end_p (next) && is_gimple_debug (gsi_stmt (next)))
+ gsi_next_nondebug (&next);
+ if ((gsi_end_p (prev)
+ || gimple_location (gsi_stmt (prev)) != goto_locus)
+ && (gsi_end_p (next)
+ || gimple_location (gsi_stmt (next)) != goto_locus))
return false;
-
- for (i = gsi_start (phis); !gsi_end_p (i); gsi_next (&i))
- {
- gimple phi = gsi_stmt (i);
-
- if (!is_gimple_reg (gimple_phi_result (phi))
- && !may_propagate_copy (gimple_phi_result (phi),
- gimple_phi_arg_def (phi, 0)))
- return false;
- }
}
return true;
size_t i;
fold_stmt_inplace (stmt);
- if (cfgcleanup_altered_bbs)
+ if (cfgcleanup_altered_bbs && !is_gimple_debug (stmt))
bitmap_set_bit (cfgcleanup_altered_bbs, gimple_bb (stmt)->index);
/* FIXME. This should go in update_stmt. */
FOR_EACH_IMM_USE_STMT (stmt, iter, def)
FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
SET_USE (use_p, use);
+
+ if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def))
+ SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use) = 1;
}
else
replace_uses_by (def, use);
/* Dump CFG statistics on stderr. Keep extern so that it's always
linked in the final executable. */
-void
+DEBUG_FUNCTION void
debug_cfg_stats (void)
{
dump_cfg_stats (stderr);
/* A non-pure/const call alters flow control if the current
function has nonlocal labels. */
- if (!(flags & (ECF_CONST | ECF_PURE)) && cfun->has_nonlocal_label)
+ if (!(flags & (ECF_CONST | ECF_PURE | ECF_LEAF))
+ && cfun->has_nonlocal_label)
return true;
/* A call also alters control flow if it does not return. */
- if (gimple_call_flags (t) & ECF_NORETURN)
+ if (flags & ECF_NORETURN)
+ return true;
+
+ /* BUILT_IN_RETURN call is same as return statement. */
+ if (gimple_call_builtin_p (t, BUILT_IN_RETURN))
return true;
}
break;
if (computed_goto_p (t))
return true;
if (is_gimple_call (t))
- return gimple_has_side_effects (t) && cfun->has_nonlocal_label;
+ return (gimple_has_side_effects (t) && cfun->has_nonlocal_label
+ && !(gimple_call_flags (t) & ECF_LEAF));
return false;
}
return new_bb;
}
+
+/* Verify properties of the address expression T with base object BASE. */
+
+static tree
+verify_address (tree t, tree base)
+{
+ bool old_constant;
+ bool old_side_effects;
+ bool new_constant;
+ bool new_side_effects;
+
+ old_constant = TREE_CONSTANT (t);
+ old_side_effects = TREE_SIDE_EFFECTS (t);
+
+ recompute_tree_invariant_for_addr_expr (t);
+ new_side_effects = TREE_SIDE_EFFECTS (t);
+ new_constant = TREE_CONSTANT (t);
+
+ if (old_constant != new_constant)
+ {
+ error ("constant not recomputed when ADDR_EXPR changed");
+ return t;
+ }
+ if (old_side_effects != new_side_effects)
+ {
+ error ("side effects not recomputed when ADDR_EXPR changed");
+ return t;
+ }
+
+ if (!(TREE_CODE (base) == VAR_DECL
+ || TREE_CODE (base) == PARM_DECL
+ || TREE_CODE (base) == RESULT_DECL))
+ return NULL_TREE;
+
+ if (DECL_GIMPLE_REG_P (base))
+ {
+ error ("DECL_GIMPLE_REG_P set on a variable with address taken");
+ return base;
+ }
+
+ return NULL_TREE;
+}
+
/* Callback for walk_tree, check that all elements with address taken are
properly noticed as such. The DATA is an int* that is 1 if TP was seen
inside a PHI node. */
break;
case INDIRECT_REF:
+ error ("INDIRECT_REF in gimple IL");
+ return t;
+
+ case MEM_REF:
x = TREE_OPERAND (t, 0);
- if (!is_gimple_reg (x) && !is_gimple_min_invariant (x))
+ if (!POINTER_TYPE_P (TREE_TYPE (x))
+ || !is_gimple_mem_ref_addr (x))
{
- error ("Indirect reference's operand is not a register or a constant.");
+ error ("invalid first operand of MEM_REF");
return x;
}
+ if (TREE_CODE (TREE_OPERAND (t, 1)) != INTEGER_CST
+ || !POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 1))))
+ {
+ error ("invalid offset operand of MEM_REF");
+ return TREE_OPERAND (t, 1);
+ }
+ if (TREE_CODE (x) == ADDR_EXPR
+ && (x = verify_address (x, TREE_OPERAND (x, 0))))
+ return x;
+ *walk_subtrees = 0;
break;
case ASSERT_EXPR:
break;
case MODIFY_EXPR:
- error ("MODIFY_EXPR not expected while having tuples.");
+ error ("MODIFY_EXPR not expected while having tuples");
return *tp;
case ADDR_EXPR:
{
- bool old_constant;
- bool old_side_effects;
- bool new_constant;
- bool new_side_effects;
+ tree tem;
gcc_assert (is_gimple_address (t));
- old_constant = TREE_CONSTANT (t);
- old_side_effects = TREE_SIDE_EFFECTS (t);
-
- recompute_tree_invariant_for_addr_expr (t);
- new_side_effects = TREE_SIDE_EFFECTS (t);
- new_constant = TREE_CONSTANT (t);
-
- if (old_constant != new_constant)
- {
- error ("constant not recomputed when ADDR_EXPR changed");
- return t;
- }
- if (old_side_effects != new_side_effects)
- {
- error ("side effects not recomputed when ADDR_EXPR changed");
- return t;
- }
-
/* Skip any references (they will be checked when we recurse down the
tree) and ensure that any variable used as a prefix is marked
addressable. */
x = TREE_OPERAND (x, 0))
;
+ if ((tem = verify_address (t, x)))
+ return tem;
+
if (!(TREE_CODE (x) == VAR_DECL
|| TREE_CODE (x) == PARM_DECL
|| TREE_CODE (x) == RESULT_DECL))
return NULL;
+
if (!TREE_ADDRESSABLE (x))
{
error ("address taken, but ADDRESSABLE bit not set");
return x;
}
- if (DECL_GIMPLE_REG_P (x))
- {
- error ("DECL_GIMPLE_REG_P set on a variable with address taken");
- return x;
- }
break;
}
TREE_TYPE (TREE_OPERAND (t, 1))))
{
error ("invalid operand to pointer plus, second operand is not an "
- "integer with type of sizetype.");
+ "integer with type of sizetype");
return t;
}
/* FALLTHROUGH */
if (is_gimple_id (expr))
return false;
- if (!INDIRECT_REF_P (expr)
- && TREE_CODE (expr) != TARGET_MEM_REF)
+ if (TREE_CODE (expr) != TARGET_MEM_REF
+ && TREE_CODE (expr) != MEM_REF)
{
error ("invalid expression for min lvalue");
return true;
debug_generic_stmt (op);
return true;
}
- if (!useless_type_conversion_p (TREE_TYPE (expr),
- TREE_TYPE (TREE_TYPE (op))))
- {
- error ("type mismatch in indirect reference");
- debug_generic_stmt (TREE_TYPE (expr));
- debug_generic_stmt (TREE_TYPE (TREE_TYPE (op)));
- return true;
- }
+ /* Memory references now generally can involve a value conversion. */
return false;
}
&& (TREE_CODE (op) == SSA_NAME
|| is_gimple_min_invariant (op)))
{
- error ("Conversion of an SSA_NAME on the left hand side.");
+ error ("conversion of an SSA_NAME on the left hand side");
+ debug_generic_stmt (expr);
+ return true;
+ }
+ else if (TREE_CODE (op) == SSA_NAME
+ && TYPE_SIZE (TREE_TYPE (expr)) != TYPE_SIZE (TREE_TYPE (op)))
+ {
+ error ("conversion of register to a different size");
debug_generic_stmt (expr);
return true;
}
expr = op;
}
+ if (TREE_CODE (expr) == MEM_REF)
+ {
+ if (!is_gimple_mem_ref_addr (TREE_OPERAND (expr, 0)))
+ {
+ error ("invalid address operand in MEM_REF");
+ debug_generic_stmt (expr);
+ return true;
+ }
+ if (TREE_CODE (TREE_OPERAND (expr, 1)) != INTEGER_CST
+ || !POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 1))))
+ {
+ error ("invalid offset operand in MEM_REF");
+ debug_generic_stmt (expr);
+ return true;
+ }
+ }
+ else if (TREE_CODE (expr) == TARGET_MEM_REF)
+ {
+ if (!TMR_BASE (expr)
+ || !is_gimple_mem_ref_addr (TMR_BASE (expr)))
+ {
+	  error ("invalid address operand in TARGET_MEM_REF");
+ return true;
+ }
+ if (!TMR_OFFSET (expr)
+ || TREE_CODE (TMR_OFFSET (expr)) != INTEGER_CST
+ || !POINTER_TYPE_P (TREE_TYPE (TMR_OFFSET (expr))))
+ {
+ error ("invalid offset operand in TARGET_MEM_REF");
+ debug_generic_stmt (expr);
+ return true;
+ }
+ }
+
return ((require_lvalue || !is_gimple_min_invariant (expr))
&& verify_types_in_gimple_min_lval (expr));
}
{
tree fn = gimple_call_fn (stmt);
tree fntype;
+ unsigned i;
+
+ if (TREE_CODE (fn) != OBJ_TYPE_REF
+ && !is_gimple_val (fn))
+ {
+ error ("invalid function in gimple call");
+ debug_generic_stmt (fn);
+ return true;
+ }
if (!POINTER_TYPE_P (TREE_TYPE (fn))
|| (TREE_CODE (TREE_TYPE (TREE_TYPE (fn))) != FUNCTION_TYPE
return true;
}
+ if (gimple_call_lhs (stmt) && gimple_call_noreturn_p (stmt))
+ {
+ error ("LHS in noreturn call");
+ return true;
+ }
+
fntype = TREE_TYPE (TREE_TYPE (fn));
if (gimple_call_lhs (stmt)
&& !useless_type_conversion_p (TREE_TYPE (gimple_call_lhs (stmt)),
return true;
}
+ if (gimple_call_chain (stmt)
+ && !is_gimple_val (gimple_call_chain (stmt)))
+ {
+ error ("invalid static chain in gimple call");
+ debug_generic_stmt (gimple_call_chain (stmt));
+ return true;
+ }
+
/* If there is a static chain argument, this should not be an indirect
call, and the decl should have DECL_STATIC_CHAIN set. */
if (gimple_call_chain (stmt))
{
- if (TREE_CODE (fn) != ADDR_EXPR
- || TREE_CODE (TREE_OPERAND (fn, 0)) != FUNCTION_DECL)
+ if (!gimple_call_fndecl (stmt))
{
error ("static chain in indirect gimple call");
return true;
if (!DECL_STATIC_CHAIN (fn))
{
- error ("static chain with function that doesn't use one");
+ error ("static chain with function that doesn%'t use one");
return true;
}
}
/* ??? The C frontend passes unpromoted arguments in case it
didn't see a function declaration before the call. So for now
- leave the call arguments unverified. Once we gimplify
+ leave the call arguments mostly unverified. Once we gimplify
unit-at-a-time we have a chance to fix this. */
+ for (i = 0; i < gimple_call_num_args (stmt); ++i)
+ {
+ tree arg = gimple_call_arg (stmt, i);
+ if ((is_gimple_reg_type (TREE_TYPE (arg))
+ && !is_gimple_val (arg))
+ || (!is_gimple_reg_type (TREE_TYPE (arg))
+ && !is_gimple_lvalue (arg)))
+ {
+ error ("invalid argument to gimple call");
+ debug_generic_expr (arg);
+ }
+ }
+
return false;
}
if ((!INTEGRAL_TYPE_P (rhs1_type)
&& !FIXED_POINT_TYPE_P (rhs1_type)
&& !(TREE_CODE (rhs1_type) == VECTOR_TYPE
- && TREE_CODE (TREE_TYPE (rhs1_type)) == INTEGER_TYPE))
+ && INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))))
|| (!INTEGRAL_TYPE_P (rhs2_type)
/* Vector shifts of vectors are also ok. */
&& !(TREE_CODE (rhs1_type) == VECTOR_TYPE
- && TREE_CODE (TREE_TYPE (rhs1_type)) == INTEGER_TYPE
+ && INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
&& TREE_CODE (rhs2_type) == VECTOR_TYPE
- && TREE_CODE (TREE_TYPE (rhs2_type)) == INTEGER_TYPE))
+ && INTEGRAL_TYPE_P (TREE_TYPE (rhs2_type))))
|| !useless_type_conversion_p (lhs_type, rhs1_type))
{
error ("type mismatch in shift expression");
{
if (TREE_CODE (rhs1_type) != VECTOR_TYPE
|| !(INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
+ || POINTER_TYPE_P (TREE_TYPE (rhs1_type))
|| FIXED_POINT_TYPE_P (TREE_TYPE (rhs1_type))
|| SCALAR_FLOAT_TYPE_P (TREE_TYPE (rhs1_type)))
|| (!INTEGRAL_TYPE_P (rhs2_type)
debug_generic_expr (rhs2_type);
return true;
}
- /* For shifting a vector of floating point components we
+ /* For shifting a vector of non-integral components we
only allow shifting by a constant multiple of the element size. */
- if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rhs1_type))
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
&& (TREE_CODE (rhs2) != INTEGER_CST
|| !div_if_zero_remainder (EXACT_DIV_EXPR, rhs2,
TYPE_SIZE (TREE_TYPE (rhs1_type)))))
}
case PLUS_EXPR:
+ case MINUS_EXPR:
{
- /* We use regular PLUS_EXPR for vectors.
+ /* We use regular PLUS_EXPR and MINUS_EXPR for vectors.
??? This just makes the checker happy and may not be what is
intended. */
if (TREE_CODE (lhs_type) == VECTOR_TYPE
}
goto do_pointer_plus_expr_check;
}
- }
- /* Fallthru. */
- case MINUS_EXPR:
- {
if (POINTER_TYPE_P (lhs_type)
|| POINTER_TYPE_P (rhs1_type)
|| POINTER_TYPE_P (rhs2_type))
connected to the operand types. */
return verify_gimple_comparison (lhs_type, rhs1, rhs2);
- case WIDEN_SUM_EXPR:
case WIDEN_MULT_EXPR:
+ if (TREE_CODE (lhs_type) != INTEGER_TYPE)
+ return true;
+ return ((2 * TYPE_PRECISION (rhs1_type) != TYPE_PRECISION (lhs_type))
+ || (TYPE_PRECISION (rhs1_type) != TYPE_PRECISION (rhs2_type)));
+
+ case WIDEN_SUM_EXPR:
case VEC_WIDEN_MULT_HI_EXPR:
case VEC_WIDEN_MULT_LO_EXPR:
case VEC_PACK_TRUNC_EXPR:
return false;
}
+/* Verify a gimple assignment statement STMT with a ternary rhs.
+ Returns true if anything is wrong. */
+
+static bool
+verify_gimple_assign_ternary (gimple stmt)
+{
+ enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
+ tree lhs = gimple_assign_lhs (stmt);
+ tree lhs_type = TREE_TYPE (lhs);
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ tree rhs1_type = TREE_TYPE (rhs1);
+ tree rhs2 = gimple_assign_rhs2 (stmt);
+ tree rhs2_type = TREE_TYPE (rhs2);
+ tree rhs3 = gimple_assign_rhs3 (stmt);
+ tree rhs3_type = TREE_TYPE (rhs3);
+
+ if (!is_gimple_reg (lhs)
+ && !(optimize == 0
+ && TREE_CODE (lhs_type) == COMPLEX_TYPE))
+ {
+ error ("non-register as LHS of ternary operation");
+ return true;
+ }
+
+ if (!is_gimple_val (rhs1)
+ || !is_gimple_val (rhs2)
+ || !is_gimple_val (rhs3))
+ {
+ error ("invalid operands in ternary operation");
+ return true;
+ }
+
+ /* First handle operations that involve different types. */
+ switch (rhs_code)
+ {
+ case WIDEN_MULT_PLUS_EXPR:
+ case WIDEN_MULT_MINUS_EXPR:
+ if ((!INTEGRAL_TYPE_P (rhs1_type)
+ && !FIXED_POINT_TYPE_P (rhs1_type))
+ || !useless_type_conversion_p (rhs1_type, rhs2_type)
+ || !useless_type_conversion_p (lhs_type, rhs3_type)
+ || 2 * TYPE_PRECISION (rhs1_type) != TYPE_PRECISION (lhs_type)
+ || TYPE_PRECISION (rhs1_type) != TYPE_PRECISION (rhs2_type))
+ {
+ error ("type mismatch in widening multiply-accumulate expression");
+ debug_generic_expr (lhs_type);
+ debug_generic_expr (rhs1_type);
+ debug_generic_expr (rhs2_type);
+ debug_generic_expr (rhs3_type);
+ return true;
+ }
+ break;
+
+ case FMA_EXPR:
+ if (!useless_type_conversion_p (lhs_type, rhs1_type)
+ || !useless_type_conversion_p (lhs_type, rhs2_type)
+ || !useless_type_conversion_p (lhs_type, rhs3_type))
+ {
+ error ("type mismatch in fused multiply-add expression");
+ debug_generic_expr (lhs_type);
+ debug_generic_expr (rhs1_type);
+ debug_generic_expr (rhs2_type);
+ debug_generic_expr (rhs3_type);
+ return true;
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ return false;
+}
+
/* Verify a gimple assignment statement STMT with a single rhs.
Returns true if anything is wrong. */
return true;
}
- if (!types_compatible_p (TREE_TYPE (op), TREE_TYPE (TREE_TYPE (rhs1)))
+ /* Technically there is no longer a need for matching types, but
+ gimple hygiene asks for this check. In LTO we can end up
+ combining incompatible units and thus end up with addresses
+ of globals that change their type to a common one. */
+ if (!in_lto_p
+ && !types_compatible_p (TREE_TYPE (op),
+ TREE_TYPE (TREE_TYPE (rhs1)))
&& !one_pointer_to_useless_type_conversion_p (TREE_TYPE (rhs1),
TREE_TYPE (op)))
{
}
/* tcc_reference */
+ case INDIRECT_REF:
+ error ("INDIRECT_REF in gimple IL");
+ return true;
+
case COMPONENT_REF:
case BIT_FIELD_REF:
- case INDIRECT_REF:
- case ALIGN_INDIRECT_REF:
- case MISALIGNED_INDIRECT_REF:
case ARRAY_REF:
case ARRAY_RANGE_REF:
case VIEW_CONVERT_EXPR:
case REALPART_EXPR:
case IMAGPART_EXPR:
case TARGET_MEM_REF:
+ case MEM_REF:
if (!is_gimple_reg (lhs)
&& is_gimple_reg_type (TREE_TYPE (lhs)))
{
return res;
case COND_EXPR:
+ if (!is_gimple_reg (lhs)
+ || (!is_gimple_reg (TREE_OPERAND (rhs1, 0))
+ && !COMPARISON_CLASS_P (TREE_OPERAND (rhs1, 0)))
+ || (!is_gimple_reg (TREE_OPERAND (rhs1, 1))
+ && !is_gimple_min_invariant (TREE_OPERAND (rhs1, 1)))
+ || (!is_gimple_reg (TREE_OPERAND (rhs1, 2))
+ && !is_gimple_min_invariant (TREE_OPERAND (rhs1, 2))))
+ {
+ error ("invalid COND_EXPR in gimple assignment");
+ debug_generic_stmt (rhs1);
+ return true;
+ }
+ return res;
+
case CONSTRUCTOR:
case OBJ_TYPE_REF:
case ASSERT_EXPR:
case GIMPLE_BINARY_RHS:
return verify_gimple_assign_binary (stmt);
+ case GIMPLE_TERNARY_RHS:
+ return verify_gimple_assign_ternary (stmt);
+
default:
gcc_unreachable ();
}
return true;
}
- if (!useless_type_conversion_p (restype, TREE_TYPE (op))
- /* ??? With C++ we can have the situation that the result
- decl is a reference type while the return type is an aggregate. */
- && !(TREE_CODE (op) == RESULT_DECL
- && TREE_CODE (TREE_TYPE (op)) == REFERENCE_TYPE
- && useless_type_conversion_p (restype, TREE_TYPE (TREE_TYPE (op)))))
+ if ((TREE_CODE (op) == RESULT_DECL
+ && DECL_BY_REFERENCE (op))
+ || (TREE_CODE (op) == SSA_NAME
+ && TREE_CODE (SSA_NAME_VAR (op)) == RESULT_DECL
+ && DECL_BY_REFERENCE (SSA_NAME_VAR (op))))
+ op = TREE_TYPE (op);
+
+ if (!useless_type_conversion_p (restype, TREE_TYPE (op)))
{
error ("invalid conversion in return statement");
debug_generic_stmt (restype);
if (TREE_CODE (gimple_phi_result (stmt)) != SSA_NAME)
{
- error ("Invalid PHI result");
+ error ("invalid PHI result");
return true;
}
|| (!is_gimple_reg (gimple_phi_result (stmt))
&& !is_gimple_addressable (arg)))
{
- error ("Invalid PHI argument");
+ error ("invalid PHI argument");
debug_generic_stmt (arg);
return true;
}
if (!useless_type_conversion_p (type, TREE_TYPE (arg)))
{
- error ("Incompatible types in PHI argument %u", i);
+ error ("incompatible types in PHI argument %u", i);
debug_generic_stmt (type);
debug_generic_stmt (TREE_TYPE (arg));
return true;
return verify_gimple_call (stmt);
case GIMPLE_COND:
+ if (TREE_CODE_CLASS (gimple_cond_code (stmt)) != tcc_comparison)
+ {
+ error ("invalid comparison code in gimple cond");
+ return true;
+ }
+ if (!(!gimple_cond_true_label (stmt)
+ || TREE_CODE (gimple_cond_true_label (stmt)) == LABEL_DECL)
+ || !(!gimple_cond_false_label (stmt)
+ || TREE_CODE (gimple_cond_false_label (stmt)) == LABEL_DECL))
+ {
+ error ("invalid labels in gimple cond");
+ return true;
+ }
+
return verify_gimple_comparison (boolean_type_node,
gimple_cond_lhs (stmt),
gimple_cond_rhs (stmt));
{
if (!stmt_could_throw_p (stmt))
{
- /* During IPA passes, ipa-pure-const sets nothrow flags on calls
- and they are updated on statements only after fixup_cfg
- is executed at beggining of expansion stage. */
- if (cgraph_state != CGRAPH_STATE_IPA_SSA)
- {
- error ("statement marked for throw, but doesn%'t");
- goto fail;
- }
+ error ("statement marked for throw, but doesn%'t");
+ goto fail;
}
else if (lp_nr > 0 && !last_in_block && stmt_can_throw_internal (stmt))
{
if (!pointer_set_contains (visited, node->stmt))
{
- error ("Dead STMT in EH table");
+ error ("dead STMT in EH table");
debug_gimple_stmt (node->stmt);
eh_error_found = true;
}
/* Verify the GIMPLE statements in every basic block. */
-void
+DEBUG_FUNCTION void
verify_stmts (void)
{
basic_block bb;
err = 1;
}
+ if (prev_stmt && EH_LANDING_PAD_NR (label) != 0)
+ {
+ error ("EH landing pad label ");
+ print_generic_expr (stderr, label, 0);
+ fprintf (stderr, " is not first in a sequence of labels in bb %d",
+ bb->index);
+ err = 1;
+ }
+
if (label_to_block (label) != bb)
{
error ("label ");
}
break;
+ case GIMPLE_CALL:
+ if (!gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
+ break;
+ /* ... fallthru ... */
case GIMPLE_RETURN:
if (!single_succ_p (bb)
|| (single_succ_edge (bb)->flags
TREE_CHAIN (last) = TREE_CHAIN (cases2);
TREE_CHAIN (cases2) = first;
}
+ bitmap_set_bit (touched_switch_bbs, gimple_bb (stmt)->index);
}
else
{
return new_bb;
}
-/* Add phi arguments to the phi nodes in E_COPY->dest according to
- the phi arguments coming from the equivalent edge at
- the phi nodes of DEST. */
-
-static void
-add_phi_args_after_redirect (edge e_copy, edge orig_e)
-{
- gimple_stmt_iterator psi, psi_copy;
- gimple phi, phi_copy;
- tree def;
-
- for (psi = gsi_start_phis (orig_e->dest),
- psi_copy = gsi_start_phis (e_copy->dest);
- !gsi_end_p (psi);
- gsi_next (&psi), gsi_next (&psi_copy))
- {
-
- phi = gsi_stmt (psi);
- phi_copy = gsi_stmt (psi_copy);
- def = PHI_ARG_DEF_FROM_EDGE (phi, orig_e);
- add_phi_arg (phi_copy, def, e_copy,
- gimple_phi_arg_location_from_edge (phi, orig_e));
- }
-}
-
/* Adds phi node arguments for edge E_COPY after basic block duplication. */
static void
edge exits[2], nexits[2], e;
gimple_stmt_iterator gsi,gsi1;
gimple cond_stmt;
- edge sorig, snew, orig_e;
+ edge sorig, snew;
basic_block exit_bb;
- edge_iterator ei;
- VEC (edge, heap) *redirect_edges;
- basic_block iters_bb, orig_src;
+ basic_block iters_bb;
tree new_rhs;
+ gimple_stmt_iterator psi;
+ gimple phi;
+ tree def;
gcc_assert (EDGE_COUNT (exit->src->succs) == 2);
exits[0] = exit;
if (!can_copy_bbs_p (region, n_region))
return false;
- /* Some sanity checking. Note that we do not check for all possible
- missuses of the functions. I.e. if you ask to copy something weird
- (e.g., in the example, if there is a jump from inside to the middle
- of some_code, or come_code defines some of the values used in cond)
- it will work, but the resulting code will not be correct. */
- for (i = 0; i < n_region; i++)
- {
- if (region[i] == orig_loop->latch)
- return false;
- }
-
initialize_original_copy_tables ();
set_loop_copy (orig_loop, loop);
duplicate_subloops (orig_loop, loop);
e = redirect_edge_and_branch (exits[0], exits[1]->dest);
PENDING_STMT (e) = NULL;
- /* If the block consisting of the exit condition has the latch as
- successor, then the body of the loop is executed before
- the exit condition is tested.
-
- { body }
- { cond } (exit[0]) -> { latch }
- |
- V (exit[1])
-
- { exit_bb }
-
-
- In such case, the equivalent copied edge nexits[1]
- (for the peeled iteration) needs to be redirected to exit_bb.
-
- Otherwise,
-
- { cond } (exit[0]) -> { body }
- |
- V (exit[1])
-
- { exit_bb }
-
-
- exit[0] is pointing to the body of the loop,
- and the equivalent nexits[0] needs to be redirected to
- the copied body (of the peeled iteration). */
-
- if (exits[1]->dest == orig_loop->latch)
- e = redirect_edge_and_branch (nexits[1], nexits[0]->dest);
- else
- e = redirect_edge_and_branch (nexits[0], nexits[1]->dest);
- PENDING_STMT (e) = NULL;
-
- redirect_edges = VEC_alloc (edge, heap, 10);
-
- for (i = 0; i < n_region; i++)
- region_copy[i]->flags |= BB_DUPLICATED;
-
- /* Iterate all incoming edges to latch. All those coming from
- copied bbs will be redirected to exit_bb. */
- FOR_EACH_EDGE (e, ei, orig_loop->latch->preds)
- {
- if (e->src->flags & BB_DUPLICATED)
- VEC_safe_push (edge, heap, redirect_edges, e);
- }
-
+ /* The latch of ORIG_LOOP was copied, and so was the backedge
+ to the original header. We redirect this backedge to EXIT_BB. */
for (i = 0; i < n_region; i++)
- region_copy[i]->flags &= ~BB_DUPLICATED;
-
- for (i = 0; VEC_iterate (edge, redirect_edges, i, e); ++i)
- {
- e = redirect_edge_and_branch (e, exit_bb);
- PENDING_STMT (e) = NULL;
- orig_src = get_bb_original (e->src);
- orig_e = find_edge (orig_src, orig_loop->latch);
- add_phi_args_after_redirect (e, orig_e);
- }
-
- VEC_free (edge, heap, redirect_edges);
-
+ if (get_bb_original (region_copy[i]) == orig_loop->latch)
+ {
+ gcc_assert (single_succ_edge (region_copy[i]));
+ e = redirect_edge_and_branch (single_succ_edge (region_copy[i]), exit_bb);
+ PENDING_STMT (e) = NULL;
+ for (psi = gsi_start_phis (exit_bb);
+ !gsi_end_p (psi);
+ gsi_next (&psi))
+ {
+ phi = gsi_stmt (psi);
+ def = PHI_ARG_DEF (phi, nexits[0]->dest_idx);
+ add_phi_arg (phi, def, e, gimple_phi_arg_location_from_edge (phi, e));
+ }
+ }
+ e = redirect_edge_and_branch (nexits[0], nexits[1]->dest);
+ PENDING_STMT (e) = NULL;
+
/* Anything that is outside of the region, but was dominated by something
inside needs to update dominance info. */
iterate_fix_dominators (CDI_DOMINATORS, doms, false);
VEC_free (basic_block, heap, doms);
-
/* Update the SSA web. */
update_ssa (TODO_update_ssa);
if (SSA_VAR_P (t))
{
new_t = copy_var_decl (t, DECL_NAME (t), TREE_TYPE (t));
- f->local_decls = tree_cons (NULL_TREE, new_t, f->local_decls);
+ add_local_decl (f, new_t);
}
else
{
return NULL_TREE;
}
-/* Marks virtual operands of all statements in basic blocks BBS for
- renaming. */
-
-void
-mark_virtual_ops_in_bb (basic_block bb)
-{
- gimple_stmt_iterator gsi;
-
- for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
- mark_virtual_ops_for_renaming (gsi_stmt (gsi));
-
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
- mark_virtual_ops_for_renaming (gsi_stmt (gsi));
-}
-
/* Move basic block BB from function CFUN to function DEST_FN. The
block is moved out of the original linked list and placed after
block AFTER in the new list. Also, the block is removed from the
{
tree *tp, t;
- for (tp = &BLOCK_VARS (block); *tp; tp = &TREE_CHAIN (*tp))
+ for (tp = &BLOCK_VARS (block); *tp; tp = &DECL_CHAIN (*tp))
{
t = *tp;
if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != CONST_DECL)
SET_DECL_VALUE_EXPR (t, DECL_VALUE_EXPR (*tp));
DECL_HAS_VALUE_EXPR_P (t) = 1;
}
- TREE_CHAIN (t) = TREE_CHAIN (*tp);
+ DECL_CHAIN (t) = DECL_CHAIN (*tp);
*tp = t;
}
}
{
eh_region region = NULL;
- for (i = 0; VEC_iterate (basic_block, bbs, i, bb); i++)
+ FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
region = find_outermost_region_in_block (saved_cfun, bb, region);
init_eh_for_function ();
d.eh_map = eh_map;
d.remap_decls_p = true;
- for (i = 0; VEC_iterate (basic_block, bbs, i, bb); i++)
+ FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
{
/* No need to update edge counts on the last block. It has
already been updated earlier when we detached the region from
}
set_immediate_dominator (CDI_DOMINATORS, bb, dom_entry);
- for (i = 0; VEC_iterate (basic_block, dom_bbs, i, abb); i++)
+ FOR_EACH_VEC_ELT (basic_block, dom_bbs, i, abb)
set_immediate_dominator (CDI_DOMINATORS, abb, bb);
VEC_free (basic_block, heap, dom_bbs);
void
dump_function_to_file (tree fn, FILE *file, int flags)
{
- tree arg, vars, var;
+ tree arg, var;
struct function *dsf;
bool ignore_topmost_bind = false, any_var = false;
basic_block bb;
print_generic_expr (file, arg, dump_flags);
if (flags & TDF_VERBOSE)
print_node (file, "", arg, 4);
- if (TREE_CHAIN (arg))
+ if (DECL_CHAIN (arg))
fprintf (file, ", ");
- arg = TREE_CHAIN (arg);
+ arg = DECL_CHAIN (arg);
}
fprintf (file, ")\n");
/* When GIMPLE is lowered, the variables are no longer available in
BIND_EXPRs, so display them separately. */
- if (cfun && cfun->decl == fn && cfun->local_decls)
+ if (cfun && cfun->decl == fn && !VEC_empty (tree, cfun->local_decls))
{
+ unsigned ix;
ignore_topmost_bind = true;
fprintf (file, "{\n");
- for (vars = cfun->local_decls; vars; vars = TREE_CHAIN (vars))
+ FOR_EACH_LOCAL_DECL (cfun, ix, var)
{
- var = TREE_VALUE (vars);
-
print_generic_decl (file, var, flags);
if (flags & TDF_VERBOSE)
print_node (file, "", var, 4);
fprintf (file, "}\n");
}
+ if (flags & TDF_ENUMERATE_LOCALS)
+ dump_enumerated_decls (file, flags);
fprintf (file, "\n\n");
/* Restore CFUN. */
/* Dump FUNCTION_DECL FN to stderr using FLAGS (see TDF_* in tree.h) */
-void
+DEBUG_FUNCTION void
debug_function (tree fn, int flags)
{
dump_function_to_file (fn, stderr, flags);
/* Debugging loops structure at tree level, at some VERBOSITY level. */
-void
+DEBUG_FUNCTION void
debug_loops (int verbosity)
{
print_loops (stderr, verbosity);
/* Print on stderr the code of LOOP, at some VERBOSITY level. */
-void
+DEBUG_FUNCTION void
debug_loop (struct loop *loop, int verbosity)
{
print_loop (stderr, loop, 0, verbosity);
/* Print on stderr the code of loop number NUM, at some VERBOSITY
level. */
-void
+DEBUG_FUNCTION void
debug_loop_num (unsigned num, int verbosity)
{
debug_loop (get_loop (num), verbosity);
gimple_block_ends_with_call_p (basic_block bb)
{
gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
- return is_gimple_call (gsi_stmt (gsi));
+ return !gsi_end_p (gsi) && is_gimple_call (gsi_stmt (gsi));
}
if (check_last_block)
{
basic_block bb = EXIT_BLOCK_PTR->prev_bb;
- gimple_stmt_iterator gsi = gsi_last_bb (bb);
+ gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
gimple t = NULL;
if (!gsi_end_p (gsi))
if (blocks && !TEST_BIT (blocks, i))
continue;
- gsi = gsi_last_bb (bb);
+ gsi = gsi_last_nondebug_bb (bb);
if (!gsi_end_p (gsi))
{
last_stmt = gsi_stmt (gsi);
return blocks_split;
}
-/* Purge dead abnormal call edges from basic block BB. */
-
-bool
-gimple_purge_dead_abnormal_call_edges (basic_block bb)
-{
- bool changed = gimple_purge_dead_eh_edges (bb);
-
- if (cfun->has_nonlocal_label)
- {
- gimple stmt = last_stmt (bb);
- edge_iterator ei;
- edge e;
-
- if (!(stmt && stmt_can_make_abnormal_goto (stmt)))
- for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
- {
- if (e->flags & EDGE_ABNORMAL)
- {
- remove_edge (e);
- changed = true;
- }
- else
- ei_next (&ei);
- }
-
- /* See gimple_purge_dead_eh_edges below. */
- if (changed)
- free_dominance_info (CDI_DOMINATORS);
- }
-
- return changed;
-}
-
/* Removes edge E and all the blocks dominated by it, and updates dominance
information. The IL in E->src needs to be updated separately.
If dominance info is not available, only the edge E is removed.*/
else
{
bbs_to_remove = get_all_dominated_blocks (CDI_DOMINATORS, e->dest);
- for (i = 0; VEC_iterate (basic_block, bbs_to_remove, i, bb); i++)
+ FOR_EACH_VEC_ELT (basic_block, bbs_to_remove, i, bb)
{
FOR_EACH_EDGE (f, ei, bb->succs)
{
bitmap_set_bit (df, f->dest->index);
}
}
- for (i = 0; VEC_iterate (basic_block, bbs_to_remove, i, bb); i++)
+ FOR_EACH_VEC_ELT (basic_block, bbs_to_remove, i, bb)
bitmap_clear_bit (df, bb->index);
EXECUTE_IF_SET_IN_BITMAP (df, 0, i, bi)
return changed;
}
+/* Purge dead EH edges from the basic blocks listed in BLOCKS.  */
+
bool
gimple_purge_all_dead_eh_edges (const_bitmap blocks)
{
return changed;
}
+/* Purge dead abnormal call edges from basic block BB. */
+
+bool
+gimple_purge_dead_abnormal_call_edges (basic_block bb)
+{
+ bool changed = false;
+ edge e;
+ edge_iterator ei;
+ gimple stmt = last_stmt (bb);
+
+ if (!cfun->has_nonlocal_label)
+ return false;
+
+ if (stmt && stmt_can_make_abnormal_goto (stmt))
+ return false;
+
+ for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
+ {
+ if (e->flags & EDGE_ABNORMAL)
+ {
+ remove_edge_and_dominated_blocks (e);
+ changed = true;
+ }
+ else
+ ei_next (&ei);
+ }
+
+ return changed;
+}
+
+/* Purge dead abnormal call edges from the basic blocks listed in BLOCKS.  */
+
+bool
+gimple_purge_all_dead_abnormal_call_edges (const_bitmap blocks)
+{
+ bool changed = false;
+ unsigned i;
+ bitmap_iterator bi;
+
+ EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i, bi)
+ {
+ basic_block bb = BASIC_BLOCK (i);
+
+ /* Earlier gimple_purge_dead_abnormal_call_edges could have removed
+ this basic block already. */
+ gcc_assert (bb || changed);
+ if (bb != NULL)
+ changed |= gimple_purge_dead_abnormal_call_edges (bb);
+ }
+
+ return changed;
+}
+
/* This function is called whenever a new edge is created or
redirected. */
{
basic_block bb = e->dest;
- if (phi_nodes (bb))
+ if (!gimple_seq_empty_p (phi_nodes (bb)))
reserve_phi_args_for_new_edge (bb);
}
static void
gimple_execute_on_shrinking_pred (edge e)
{
- if (phi_nodes (e->dest))
+ if (!gimple_seq_empty_p (phi_nodes (e->dest)))
remove_phi_args (e);
}
gsi = gsi_last_bb (e->src);
if (!gsi_end_p (gsi)
&& stmt_ends_bb_p (gsi_stmt (gsi))
- && gimple_code (gsi_stmt (gsi)) != GIMPLE_RETURN)
+ && (gimple_code (gsi_stmt (gsi)) != GIMPLE_RETURN
+ && !gimple_call_builtin_p (gsi_stmt (gsi),
+ BUILT_IN_RETURN)))
split_edge (e);
}
}
FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
{
last = last_stmt (e->src);
- if (gimple_code (last) == GIMPLE_RETURN
+ if ((gimple_code (last) == GIMPLE_RETURN
+ || gimple_call_builtin_p (last, BUILT_IN_RETURN))
&& (location = gimple_location (last)) != UNKNOWN_LOCATION)
break;
}
static unsigned int
execute_warn_function_noreturn (void)
{
- if (warn_missing_noreturn
- && !TREE_THIS_VOLATILE (cfun->decl)
- && EDGE_COUNT (EXIT_BLOCK_PTR->preds) == 0
- && !lang_hooks.missing_noreturn_ok_p (cfun->decl))
- warning_at (DECL_SOURCE_LOCATION (cfun->decl), OPT_Wmissing_noreturn,
- "function might be possible candidate "
- "for attribute %<noreturn%>");
+ if (!TREE_THIS_VOLATILE (current_function_decl)
+ && EDGE_COUNT (EXIT_BLOCK_PTR->preds) == 0)
+ warn_function_noreturn (current_function_decl);
return 0;
}
+static bool
+gate_warn_function_noreturn (void)
+{
+ return warn_suggest_attribute_noreturn;
+}
+
struct gimple_opt_pass pass_warn_function_noreturn =
{
{
GIMPLE_PASS,
"*warn_function_noreturn", /* name */
- NULL, /* gate */
+ gate_warn_function_noreturn, /* gate */
execute_warn_function_noreturn, /* execute */
NULL, /* sub */
NULL, /* next */