/* Control flow functions for trees.
Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
- 2010 Free Software Foundation, Inc.
+ 2010, 2011 Free Software Foundation, Inc.
Contributed by Diego Novillo <dnovillo@redhat.com>
This file is part of GCC.
PROP_cfg, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_verify_stmts | TODO_cleanup_cfg
- | TODO_dump_func /* todo_flags_finish */
+ TODO_verify_stmts | TODO_cleanup_cfg /* todo_flags_finish */
}
};
}
*value = NULL;
- return false;
+ return true;
}
/* Start recording information mapping edges to case labels. */
{
tree merge_case = gimple_switch_label (stmt, i);
tree merge_label = CASE_LABEL (merge_case);
- tree t = int_const_binop (PLUS_EXPR, base_high,
- integer_one_node, 1);
+ double_int bhp1 = double_int_add (tree_to_double_int (base_high),
+ double_int_one);
/* Merge the cases if they jump to the same place,
and their ranges are consecutive. */
if (merge_label == base_label
- && tree_int_cst_equal (CASE_LOW (merge_case), t))
+ && double_int_equal_p (tree_to_double_int (CASE_LOW (merge_case)),
+ bhp1))
{
base_high = CASE_HIGH (merge_case) ?
CASE_HIGH (merge_case) : CASE_LOW (merge_case);
if (!single_succ_p (a))
return false;
- if (single_succ_edge (a)->flags & (EDGE_ABNORMAL | EDGE_EH))
+ if (single_succ_edge (a)->flags & (EDGE_ABNORMAL | EDGE_EH | EDGE_PRESERVE))
return false;
if (single_succ (a) != b)
if (gimple_code (stmt) != GIMPLE_PHI)
{
+ gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
size_t i;
- fold_stmt_inplace (stmt);
+ fold_stmt (&gsi);
+ stmt = gsi_stmt (gsi);
if (cfgcleanup_altered_bbs && !is_gimple_debug (stmt))
bitmap_set_bit (cfgcleanup_altered_bbs, gimple_bb (stmt)->index);
{
if (flags & TDF_DETAILS)
{
- const char *funcname
- = lang_hooks.decl_printable_name (current_function_decl, 2);
-
- fputc ('\n', file);
- fprintf (file, ";; Function %s\n\n", funcname);
+ dump_function_header (file, current_function_decl, flags);
fprintf (file, ";; \n%d basic blocks, %d edges, last basic block %d.\n\n",
n_basic_blocks, n_edges, last_basic_block);
break;
case NON_LVALUE_EXPR:
- gcc_unreachable ();
+ case TRUTH_NOT_EXPR:
+ gcc_unreachable ();
CASE_CONVERT:
case FIX_TRUNC_EXPR:
case NEGATE_EXPR:
case ABS_EXPR:
case BIT_NOT_EXPR:
- case TRUTH_NOT_EXPR:
CHECK_OP (0, "invalid operand to unary operator");
break;
error ("invalid operand to pointer plus, first operand is not a pointer");
return t;
}
- /* Check to make sure the second operand is an integer with type of
- sizetype. */
- if (!useless_type_conversion_p (sizetype,
- TREE_TYPE (TREE_OPERAND (t, 1))))
+ /* Check to make sure the second operand is a ptrofftype. */
+ if (!ptrofftype_p (TREE_TYPE (TREE_OPERAND (t, 1))))
{
error ("invalid operand to pointer plus, second operand is not an "
- "integer with type of sizetype");
+ "integer type of appropriate width");
return t;
}
/* FALLTHROUGH */
*walk_subtrees = 0;
break;
+ case CASE_LABEL_EXPR:
+ if (CASE_CHAIN (t))
+ {
+ error ("invalid CASE_CHAIN");
+ return t;
+ }
+ break;
+
default:
break;
}
if (!TMR_BASE (expr)
|| !is_gimple_mem_ref_addr (TMR_BASE (expr)))
{
- error ("invalid address operand in in TARGET_MEM_REF");
+ error ("invalid address operand in TARGET_MEM_REF");
return true;
}
if (!TMR_OFFSET (expr)
tree fntype, fndecl;
unsigned i;
- if (!is_gimple_call_addr (fn))
+ if (gimple_call_internal_p (stmt))
+ {
+ if (fn)
+ {
+ error ("gimple call has two targets");
+ debug_generic_stmt (fn);
+ return true;
+ }
+ }
+ else
+ {
+ if (!fn)
+ {
+ error ("gimple call has no target");
+ return true;
+ }
+ }
+
+ if (fn && !is_gimple_call_addr (fn))
{
error ("invalid function in gimple call");
debug_generic_stmt (fn);
return true;
}
- if (!POINTER_TYPE_P (TREE_TYPE (fn))
- || (TREE_CODE (TREE_TYPE (TREE_TYPE (fn))) != FUNCTION_TYPE
- && TREE_CODE (TREE_TYPE (TREE_TYPE (fn))) != METHOD_TYPE))
+ if (fn
+ && (!POINTER_TYPE_P (TREE_TYPE (fn))
+ || (TREE_CODE (TREE_TYPE (TREE_TYPE (fn))) != FUNCTION_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (fn))) != METHOD_TYPE)))
{
error ("non-function in gimple call");
return true;
}
fntype = gimple_call_fntype (stmt);
- if (gimple_call_lhs (stmt)
+ if (fntype
+ && gimple_call_lhs (stmt)
&& !useless_type_conversion_p (TREE_TYPE (gimple_call_lhs (stmt)),
TREE_TYPE (fntype))
/* ??? At least C++ misses conversions at assignments from
effective type the comparison is carried out in. Instead
we require that either the first operand is trivially
convertible into the second, or the other way around.
- The resulting type of a comparison may be any integral type.
Because we special-case pointers to void we allow
comparisons of pointers with the same mode as well. */
- if ((!useless_type_conversion_p (op0_type, op1_type)
- && !useless_type_conversion_p (op1_type, op0_type)
- && (!POINTER_TYPE_P (op0_type)
- || !POINTER_TYPE_P (op1_type)
- || TYPE_MODE (op0_type) != TYPE_MODE (op1_type)))
- || !INTEGRAL_TYPE_P (type))
- {
- error ("type mismatch in comparison expression");
- debug_generic_expr (type);
+ if (!useless_type_conversion_p (op0_type, op1_type)
+ && !useless_type_conversion_p (op1_type, op0_type)
+ && (!POINTER_TYPE_P (op0_type)
+ || !POINTER_TYPE_P (op1_type)
+ || TYPE_MODE (op0_type) != TYPE_MODE (op1_type)))
+ {
+ error ("mismatching comparison operand types");
debug_generic_expr (op0_type);
debug_generic_expr (op1_type);
return true;
}
+ /* The resulting type of a comparison may be an effective boolean type. */
+ if (INTEGRAL_TYPE_P (type)
+ && (TREE_CODE (type) == BOOLEAN_TYPE
+ || TYPE_PRECISION (type) == 1))
+ ;
+ /* Or an integer vector type with the same size and element count
+ as the comparison operand types. */
+ else if (TREE_CODE (type) == VECTOR_TYPE
+ && TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE)
+ {
+ if (TREE_CODE (op0_type) != VECTOR_TYPE
+ || TREE_CODE (op1_type) != VECTOR_TYPE)
+ {
+ error ("non-vector operands in vector comparison");
+ debug_generic_expr (op0_type);
+ debug_generic_expr (op1_type);
+ return true;
+ }
+
+ if (TYPE_VECTOR_SUBPARTS (type) != TYPE_VECTOR_SUBPARTS (op0_type)
+ || (GET_MODE_SIZE (TYPE_MODE (type))
+ != GET_MODE_SIZE (TYPE_MODE (op0_type))))
+ {
+ error ("invalid vector comparison resulting type");
+ debug_generic_expr (type);
+ return true;
+ }
+ }
+ else
+ {
+ error ("bogus comparison result type");
+ debug_generic_expr (type);
+ return true;
+ }
+
return false;
}
{
/* Allow conversions between integral types and pointers only if
there is no sign or zero extension involved.
- For targets were the precision of sizetype doesn't match that
+ For targets where the precision of ptrofftype doesn't match that
of pointers we need to allow arbitrary conversions from and
- to sizetype. */
+ to ptrofftype. */
if ((POINTER_TYPE_P (lhs_type)
&& INTEGRAL_TYPE_P (rhs1_type)
&& (TYPE_PRECISION (lhs_type) >= TYPE_PRECISION (rhs1_type)
- || rhs1_type == sizetype))
+ || ptrofftype_p (rhs1_type)))
|| (POINTER_TYPE_P (rhs1_type)
&& INTEGRAL_TYPE_P (lhs_type)
&& (TYPE_PRECISION (rhs1_type) >= TYPE_PRECISION (lhs_type)
- || lhs_type == sizetype)))
+ || ptrofftype_p (lhs_type))))
return false;
/* Allow conversion from integer to offset type and vice versa. */
/* FIXME. */
return false;
- case TRUTH_NOT_EXPR:
case NEGATE_EXPR:
case ABS_EXPR:
case BIT_NOT_EXPR:
do_pointer_plus_expr_check:
if (!POINTER_TYPE_P (rhs1_type)
|| !useless_type_conversion_p (lhs_type, rhs1_type)
- || !useless_type_conversion_p (sizetype, rhs2_type))
+ || !ptrofftype_p (rhs2_type))
{
error ("type mismatch in pointer plus expression");
debug_generic_stmt (lhs_type);
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
- gcc_unreachable ();
-
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_XOR_EXPR:
- {
- /* We allow any kind of integral typed argument and result. */
- if (!INTEGRAL_TYPE_P (rhs1_type)
- || !INTEGRAL_TYPE_P (rhs2_type)
- || !INTEGRAL_TYPE_P (lhs_type))
- {
- error ("type mismatch in binary truth expression");
- debug_generic_expr (lhs_type);
- debug_generic_expr (rhs1_type);
- debug_generic_expr (rhs2_type);
- return true;
- }
- return false;
- }
+ gcc_unreachable ();
case LT_EXPR:
case LE_EXPR:
case WIDEN_MULT_EXPR:
if (TREE_CODE (lhs_type) != INTEGER_TYPE)
return true;
- return ((2 * TYPE_PRECISION (rhs1_type) != TYPE_PRECISION (lhs_type))
+ return ((2 * TYPE_PRECISION (rhs1_type) > TYPE_PRECISION (lhs_type))
|| (TYPE_PRECISION (rhs1_type) != TYPE_PRECISION (rhs2_type)));
case WIDEN_SUM_EXPR:
return true;
}
- if (!is_gimple_val (rhs1)
+ if (((rhs_code == VEC_COND_EXPR || rhs_code == COND_EXPR)
+ ? !is_gimple_condexpr (rhs1) : !is_gimple_val (rhs1))
|| !is_gimple_val (rhs2)
|| !is_gimple_val (rhs3))
{
&& !FIXED_POINT_TYPE_P (rhs1_type))
|| !useless_type_conversion_p (rhs1_type, rhs2_type)
|| !useless_type_conversion_p (lhs_type, rhs3_type)
- || 2 * TYPE_PRECISION (rhs1_type) != TYPE_PRECISION (lhs_type)
+ || 2 * TYPE_PRECISION (rhs1_type) > TYPE_PRECISION (lhs_type)
|| TYPE_PRECISION (rhs1_type) != TYPE_PRECISION (rhs2_type))
{
error ("type mismatch in widening multiply-accumulate expression");
}
break;
+ case COND_EXPR:
+ case VEC_COND_EXPR:
+ if (!useless_type_conversion_p (lhs_type, rhs2_type)
+ || !useless_type_conversion_p (lhs_type, rhs3_type))
+ {
+ error ("type mismatch in conditional expression");
+ debug_generic_expr (lhs_type);
+ debug_generic_expr (rhs2_type);
+ debug_generic_expr (rhs3_type);
+ return true;
+ }
+ break;
+
case DOT_PROD_EXPR:
case REALIGN_LOAD_EXPR:
/* FIXME. */
}
return res;
- case COND_EXPR:
- if (!is_gimple_reg (lhs)
- || (!is_gimple_reg (TREE_OPERAND (rhs1, 0))
- && !COMPARISON_CLASS_P (TREE_OPERAND (rhs1, 0)))
- || (!is_gimple_reg (TREE_OPERAND (rhs1, 1))
- && !is_gimple_min_invariant (TREE_OPERAND (rhs1, 1)))
- || (!is_gimple_reg (TREE_OPERAND (rhs1, 2))
- && !is_gimple_min_invariant (TREE_OPERAND (rhs1, 2))))
- {
- error ("invalid COND_EXPR in gimple assignment");
- debug_generic_stmt (rhs1);
- return true;
- }
- return res;
-
case CONSTRUCTOR:
case OBJ_TYPE_REF:
case ASSERT_EXPR:
case WITH_SIZE_EXPR:
- case VEC_COND_EXPR:
/* FIXME. */
return res;
{
def_operand_p def_p;
ssa_op_iter op_iter;
+ tree lhs;
stmt = gsi_stmt (gsi);
if (gimple_code (stmt) == GIMPLE_LABEL)
maybe_duplicate_eh_stmt (copy, stmt);
gimple_duplicate_stmt_histograms (cfun, copy, cfun, stmt);
+ /* When copying around a stmt writing into a local non-user
+ aggregate, make sure it won't share stack slot with other
+ vars. */
+ lhs = gimple_get_lhs (stmt);
+ if (lhs && TREE_CODE (lhs) != SSA_NAME)
+ {
+ tree base = get_base_address (lhs);
+ if (base
+ && (TREE_CODE (base) == VAR_DECL
+ || TREE_CODE (base) == RESULT_DECL)
+ && DECL_IGNORED_P (base)
+ && !TREE_STATIC (base)
+ && !DECL_EXTERNAL (base)
+ && (TREE_CODE (base) != VAR_DECL
+ || !DECL_HAS_VALUE_EXPR_P (base)))
+ DECL_NONSHAREABLE (base) = 1;
+ }
+
/* Create new names for all the definitions created by COPY and
add replacement mappings for each new name. */
FOR_EACH_SSA_DEF_OPERAND (def_p, copy, op_iter, SSA_OP_ALL_DEFS)
int total_freq = 0, exit_freq = 0;
gcov_type total_count = 0, exit_count = 0;
edge exits[2], nexits[2], e;
- gimple_stmt_iterator gsi,gsi1;
+ gimple_stmt_iterator gsi;
gimple cond_stmt;
edge sorig, snew;
basic_block exit_bb;
- basic_block iters_bb;
- tree new_rhs;
gimple_stmt_iterator psi;
gimple phi;
tree def;
gcc_assert (gimple_code (cond_stmt) == GIMPLE_COND);
cond_stmt = gimple_copy (cond_stmt);
- /* If the block consisting of the exit condition has the latch as
- successor, then the body of the loop is executed before
- the exit condition is tested. In such case, moving the
- condition to the entry, causes that the loop will iterate
- one less iteration (which is the wanted outcome, since we
- peel out the last iteration). If the body is executed after
- the condition, moving the condition to the entry requires
- decrementing one iteration. */
- if (exits[1]->dest == orig_loop->latch)
- new_rhs = gimple_cond_rhs (cond_stmt);
- else
- {
- new_rhs = fold_build2 (MINUS_EXPR, TREE_TYPE (gimple_cond_rhs (cond_stmt)),
- gimple_cond_rhs (cond_stmt),
- build_int_cst (TREE_TYPE (gimple_cond_rhs (cond_stmt)), 1));
-
- if (TREE_CODE (gimple_cond_rhs (cond_stmt)) == SSA_NAME)
- {
- iters_bb = gimple_bb (SSA_NAME_DEF_STMT (gimple_cond_rhs (cond_stmt)));
- for (gsi1 = gsi_start_bb (iters_bb); !gsi_end_p (gsi1); gsi_next (&gsi1))
- if (gsi_stmt (gsi1) == SSA_NAME_DEF_STMT (gimple_cond_rhs (cond_stmt)))
- break;
-
- new_rhs = force_gimple_operand_gsi (&gsi1, new_rhs, true,
- NULL_TREE,false,GSI_CONTINUE_LINKING);
- }
- }
- gimple_cond_set_rhs (cond_stmt, unshare_expr (new_rhs));
- gimple_cond_set_lhs (cond_stmt, unshare_expr (gimple_cond_lhs (cond_stmt)));
gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
sorig = single_succ_edge (switch_bb);
old_nr = tree_low_cst (old_t_nr, 0);
new_nr = move_stmt_eh_region_nr (old_nr, p);
- return build_int_cst (NULL, new_nr);
+ return build_int_cst (integer_type_node, new_nr);
}
/* Like move_stmt_op, but for gimple statements.
PROP_no_crit_edges, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func | TODO_verify_flow /* todo_flags_finish */
+ TODO_verify_flow /* todo_flags_finish */
}
};
case GIMPLE_CALL:
if (gimple_call_lhs (g))
break;
+ if (gimple_call_internal_p (g))
+ break;
/* This is a naked call, as opposed to a GIMPLE_CALL with an
LHS. All calls whose value is ignored should be
0, /* todo_flags_finish */
}
};
-