/* Miscellaneous SSA utility functions.
- Copyright (C) 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009
+ Copyright (C) 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
This file is part of GCC.
#include "tm.h"
#include "tree.h"
#include "flags.h"
-#include "rtl.h"
#include "tm_p.h"
+#include "target.h"
#include "ggc.h"
#include "langhooks.h"
-#include "hard-reg-set.h"
#include "basic-block.h"
#include "output.h"
#include "expr.h"
#include "function.h"
#include "diagnostic.h"
+#include "tree-pretty-print.h"
+#include "gimple-pretty-print.h"
#include "bitmap.h"
#include "pointer-set.h"
#include "tree-flow.h"
#include "gimple.h"
#include "tree-inline.h"
-#include "varray.h"
#include "timevar.h"
#include "hashtab.h"
#include "tree-dump.h"
/* Add a mapping with PHI RESULT and PHI DEF associated with edge E,
recording the PHI argument location LOCUS. */
void
-redirect_edge_var_map_add (edge e, tree result, tree def)
+redirect_edge_var_map_add (edge e, tree result, tree def, source_location locus)
{
void **slot;
edge_var_map_vector old_head, head;
}
new_node.def = def;
new_node.result = result;
+ new_node.locus = locus;
VEC_safe_push (edge_var_map, heap, head, &new_node);
if (old_head != head)
for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
{
tree def;
+ source_location locus;
phi = gsi_stmt (gsi);
def = gimple_phi_arg_def (phi, e->dest_idx);
+ locus = gimple_phi_arg_location (phi, e->dest_idx);
if (def == NULL_TREE)
continue;
- redirect_edge_var_map_add (e, gimple_phi_result (phi), def);
+ redirect_edge_var_map_add (e, gimple_phi_result (phi), def, locus);
}
e = redirect_edge_succ_nodup (e, dest);
phi = gsi_stmt (gsi);
def = redirect_edge_var_map_def (vm);
- add_phi_arg (phi, def, e);
+ add_phi_arg (phi, def, e, redirect_edge_var_map_location (vm));
}
redirect_edge_var_map_clear (e);
}
+/* Given a tree for an expression for which we might want to emit
+ locations or values in debug information (generally a variable, but
+ we might deal with other kinds of trees in the future), return the
+ tree that should be used as the variable of a DEBUG_BIND STMT or
+ VAR_LOCATION INSN or NOTE. Return NULL if VAR is not to be tracked. */
+
+tree
+target_for_debug_bind (tree var)
+{
+ if (!MAY_HAVE_DEBUG_STMTS)
+ return NULL_TREE;
+
+ if (TREE_CODE (var) != VAR_DECL
+ && TREE_CODE (var) != PARM_DECL)
+ return NULL_TREE;
+
+ if (DECL_HAS_VALUE_EXPR_P (var))
+ return target_for_debug_bind (DECL_VALUE_EXPR (var));
+
+ if (DECL_IGNORED_P (var))
+ return NULL_TREE;
+
+ if (!is_gimple_reg (var))
+ return NULL_TREE;
+
+ return var;
+}
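+
+/* For illustration, a caller would typically guard the creation of a
+ debug bind on the result. A sketch only; "var", "value", "stmt" and
+ "gsi" are placeholders, not from this patch:
+
+ tree tracked = target_for_debug_bind (var);
+ if (tracked)
+ gsi_insert_before (gsi,
+ gimple_build_debug_bind (tracked, value, stmt),
+ GSI_SAME_STMT); */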
+
+/* Called via walk_tree, look for SSA_NAMEs that have already been
+ released. */
+
+static tree
+find_released_ssa_name (tree *tp, int *walk_subtrees, void *data_)
+{
+ struct walk_stmt_info *wi = (struct walk_stmt_info *) data_;
+
+ if (wi && wi->is_lhs)
+ return NULL_TREE;
+
+ if (TREE_CODE (*tp) == SSA_NAME)
+ {
+ if (SSA_NAME_IN_FREE_LIST (*tp))
+ return *tp;
+
+ *walk_subtrees = 0;
+ }
+ else if (IS_TYPE_OR_DECL_P (*tp))
+ *walk_subtrees = 0;
+
+ return NULL_TREE;
+}
+
+/* Insert a DEBUG BIND stmt before the DEF of VAR if VAR is referenced
+ by other DEBUG stmts, and replace uses of the DEF with the
+ newly-created debug temp. */
+
+void
+insert_debug_temp_for_var_def (gimple_stmt_iterator *gsi, tree var)
+{
+ imm_use_iterator imm_iter;
+ use_operand_p use_p;
+ gimple stmt;
+ gimple def_stmt = NULL;
+ int usecount = 0;
+ tree value = NULL;
+
+ if (!MAY_HAVE_DEBUG_STMTS)
+ return;
+
+ /* If this name has already been registered for replacement, do nothing
+ as anything that uses this name isn't in SSA form. */
+ if (name_registered_for_update_p (var))
+ return;
+
+ /* Check whether there are debug stmts that reference this variable and,
+ if there are, decide whether we should use a debug temp. */
+ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
+ {
+ stmt = USE_STMT (use_p);
+
+ if (!gimple_debug_bind_p (stmt))
+ continue;
+
+ if (usecount++)
+ break;
+
+ if (gimple_debug_bind_get_value (stmt) != var)
+ {
+ /* Count this as an additional use, so as to make sure we
+ use a temp unless VAR's definition has a SINGLE_RHS that
+ can be shared. */
+ usecount++;
+ break;
+ }
+ }
+
+ if (!usecount)
+ return;
+
+ if (gsi)
+ def_stmt = gsi_stmt (*gsi);
+ else
+ def_stmt = SSA_NAME_DEF_STMT (var);
+
+ /* If we didn't get an insertion point, and the stmt has already
+ been removed, we won't be able to insert the debug bind stmt, so
+ we'll have to drop debug information. */
+ if (gimple_code (def_stmt) == GIMPLE_PHI)
+ {
+ value = degenerate_phi_result (def_stmt);
+ if (value && walk_tree (&value, find_released_ssa_name, NULL, NULL))
+ value = NULL;
+ }
+ else if (is_gimple_assign (def_stmt))
+ {
+ bool no_value = false;
+
+ if (!dom_info_available_p (CDI_DOMINATORS))
+ {
+ struct walk_stmt_info wi;
+
+ memset (&wi, 0, sizeof (wi));
+
+ /* When removing blocks without following reverse dominance
+ order, we may sometimes encounter SSA_NAMEs that have
+ already been released, referenced in other SSA_DEFs that
+ we're about to release. Consider:
+
+ <bb X>:
+ v_1 = foo;
+
+ <bb Y>:
+ w_2 = v_1 + bar;
+ # DEBUG w => w_2
+
+ If we deleted BB X first, propagating the value of w_2
+ won't do us any good. It's too late to recover the
+ original definition of v_1: when it was deleted, it was
+ only referenced in other DEFs; it couldn't possibly know
+ it should have been retained, and propagating every
+ single DEF just in case it might have to be propagated
+ into a DEBUG STMT would probably be too wasteful.
+
+ When dominator information is not readily available, we
+ check for and accept some loss of debug information. But
+ if it is available, there's no excuse for us to remove
+ blocks in the wrong order, so we don't even check for
+ dead SSA NAMEs. SSA verification shall catch any
+ errors. */
+ if ((!gsi && !gimple_bb (def_stmt))
+ || walk_gimple_op (def_stmt, find_released_ssa_name, &wi))
+ no_value = true;
+ }
+
+ if (!no_value)
+ value = gimple_assign_rhs_to_tree (def_stmt);
+ }
+
+ if (value)
+ {
+ /* If there's a single use of VAR, and VAR is the entire debug
+ expression (usecount would have been incremented again
+ otherwise), and the definition involves only constants and
+ SSA names, then we can propagate VALUE into this single use,
+ avoiding the temp.
+
+ We can also avoid using a temp if VALUE can be shared and
+ propagated into all uses, without generating expressions that
+ wouldn't be valid gimple RHSs.
+
+ Other cases that would require unsharing or non-gimple RHSs
+ are deferred to a debug temp, although we could avoid temps
+ at the expense of duplication of expressions. */
+
+ if (CONSTANT_CLASS_P (value)
+ || gimple_code (def_stmt) == GIMPLE_PHI
+ || (usecount == 1
+ && (!gimple_assign_single_p (def_stmt)
+ || is_gimple_min_invariant (value)))
+ || is_gimple_reg (value))
+ value = unshare_expr (value);
+ else
+ {
+ gimple def_temp;
+ tree vexpr = make_node (DEBUG_EXPR_DECL);
+
+ def_temp = gimple_build_debug_bind (vexpr,
+ unshare_expr (value),
+ def_stmt);
+
+ DECL_ARTIFICIAL (vexpr) = 1;
+ TREE_TYPE (vexpr) = TREE_TYPE (value);
+ if (DECL_P (value))
+ DECL_MODE (vexpr) = DECL_MODE (value);
+ else
+ DECL_MODE (vexpr) = TYPE_MODE (TREE_TYPE (value));
+
+ if (gsi)
+ gsi_insert_before (gsi, def_temp, GSI_SAME_STMT);
+ else
+ {
+ gimple_stmt_iterator ngsi = gsi_for_stmt (def_stmt);
+ gsi_insert_before (&ngsi, def_temp, GSI_SAME_STMT);
+ }
+
+ value = vexpr;
+ }
+ }
+
+ FOR_EACH_IMM_USE_STMT (stmt, imm_iter, var)
+ {
+ if (!gimple_debug_bind_p (stmt))
+ continue;
+
+ if (value)
+ FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
+ /* unshare_expr is not needed here. vexpr is either a
+ SINGLE_RHS, that can be safely shared, some other RHS
+ that was unshared when we found it had a single debug
+ use, or a DEBUG_EXPR_DECL, that can be safely
+ shared. */
+ SET_USE (use_p, value);
+ else
+ gimple_debug_bind_reset_value (stmt);
+
+ update_stmt (stmt);
+ }
+}
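+
+/* A sketch of the effect (illustrative SSA names, not from this
+ patch): if x_1 below is about to be removed,
+
+ x_1 = a_2 + b_3;
+ # DEBUG x => x_1
+ # DEBUG y => x_1 + 1
+
+ the RHS cannot be propagated into both debug uses without
+ unsharing, so a debug temp D#1 is introduced and the debug uses
+ are rewritten to refer to it:
+
+ # DEBUG D#1 => a_2 + b_3
+ # DEBUG x => D#1
+ # DEBUG y => D#1 + 1 */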
+
+
+/* Insert a DEBUG BIND stmt before STMT for each DEF referenced by
+ other DEBUG stmts, and replace uses of the DEF with the
+ newly-created debug temp. */
+
+void
+insert_debug_temps_for_defs (gimple_stmt_iterator *gsi)
+{
+ gimple stmt;
+ ssa_op_iter op_iter;
+ def_operand_p def_p;
+
+ if (!MAY_HAVE_DEBUG_STMTS)
+ return;
+
+ stmt = gsi_stmt (*gsi);
+
+ FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
+ {
+ tree var = DEF_FROM_PTR (def_p);
+
+ if (TREE_CODE (var) != SSA_NAME)
+ continue;
+
+ insert_debug_temp_for_var_def (gsi, var);
+ }
+}
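+
+/* Typical use, sketched (hypothetical caller code): a pass about to
+ delete a statement first lets its DEFs be captured in debug temps:
+
+ gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
+ insert_debug_temps_for_defs (&gsi);
+ gsi_remove (&gsi, true);
+ release_defs (stmt); */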
+
+/* Delete SSA DEFs for SSA versions in the TOREMOVE bitmap, removing
+ dominated stmts before their dominators, so that release_ssa_defs
+ stands a chance of propagating DEFs into debug bind stmts. */
+
+void
+release_defs_bitset (bitmap toremove)
+{
+ unsigned j;
+ bitmap_iterator bi;
+
+ /* Performing a topological sort is probably overkill; this will
+ most likely run in slightly superlinear time rather than in the
+ pathological quadratic worst case. */
+ while (!bitmap_empty_p (toremove))
+ EXECUTE_IF_SET_IN_BITMAP (toremove, 0, j, bi)
+ {
+ bool remove_now = true;
+ tree var = ssa_name (j);
+ gimple stmt;
+ imm_use_iterator uit;
+
+ FOR_EACH_IMM_USE_STMT (stmt, uit, var)
+ {
+ ssa_op_iter dit;
+ def_operand_p def_p;
+
+ /* We can't propagate PHI nodes into debug stmts. */
+ if (gimple_code (stmt) == GIMPLE_PHI
+ || is_gimple_debug (stmt))
+ continue;
+
+ /* If we find another definition to remove that uses
+ the one we're looking at, defer the removal of this
+ one, so that it can be propagated into debug stmts
+ after the other is. */
+ FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, dit, SSA_OP_DEF)
+ {
+ tree odef = DEF_FROM_PTR (def_p);
+
+ if (bitmap_bit_p (toremove, SSA_NAME_VERSION (odef)))
+ {
+ remove_now = false;
+ break;
+ }
+ }
+
+ if (!remove_now)
+ BREAK_FROM_IMM_USE_STMT (uit);
+ }
+
+ if (remove_now)
+ {
+ gimple def = SSA_NAME_DEF_STMT (var);
+ gimple_stmt_iterator gsi = gsi_for_stmt (def);
+
+ if (gimple_code (def) == GIMPLE_PHI)
+ remove_phi_node (&gsi, true);
+ else
+ {
+ gsi_remove (&gsi, true);
+ release_defs (def);
+ }
+
+ bitmap_clear_bit (toremove, j);
+ }
+ }
+}
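+
+/* For example (illustrative versions): with both v_1 and w_2 in
+ TOREMOVE and
+
+ v_1 = foo;
+ w_2 = v_1 + bar;
+ # DEBUG w => w_2
+
+ the first sweep defers v_1, whose only non-debug use is in the
+ to-be-removed definition of w_2, and removes w_2, giving "v_1 + bar"
+ a chance to be propagated into the debug stmt; the next sweep then
+ removes v_1. */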
+
/* Return true if SSA_NAME is malformed and mark it visited.
IS_VIRTUAL is true if this SSA_NAME was found inside a virtual
err = true;
}
- /* Make sure the use is in an appropriate list by checking the previous
+ /* Make sure the use is in an appropriate list by checking the previous
element to make sure it's the same. */
if (use_p->prev == NULL)
{
goto err;
}
}
+ else if (gimple_debug_bind_p (stmt)
+ && !gimple_debug_bind_has_value_p (stmt))
+ continue;
/* Verify the single virtual operand and its constraints. */
has_err = false;
free_dominance_info (CDI_DOMINATORS);
else
set_dom_info_availability (CDI_DOMINATORS, orig_dom_state);
-
+
BITMAP_FREE (names_defined_in_bb);
timevar_pop (TV_TREE_SSA_VERIFY);
return;
init_tree_ssa (struct function *fn)
{
fn->gimple_df = GGC_CNEW (struct gimple_df);
- fn->gimple_df->referenced_vars = htab_create_ggc (20, uid_decl_map_hash,
+ fn->gimple_df->referenced_vars = htab_create_ggc (20, uid_decl_map_hash,
uid_decl_map_eq, NULL);
- fn->gimple_df->default_defs = htab_create_ggc (20, uid_ssaname_map_hash,
+ fn->gimple_df->default_defs = htab_create_ggc (20, uid_ssaname_map_hash,
uid_ssaname_map_eq, NULL);
pt_solution_reset (&fn->gimple_df->escaped);
- pt_solution_reset (&fn->gimple_df->callused);
init_ssanames (fn, 0);
init_phinodes ();
}
void
delete_tree_ssa (void)
{
- size_t i;
- basic_block bb;
- gimple_stmt_iterator gsi;
referenced_var_iterator rvi;
tree var;
- /* Release any ssa_names still in use. */
- for (i = 0; i < num_ssa_names; i++)
- {
- tree var = ssa_name (i);
- if (var && TREE_CODE (var) == SSA_NAME)
- {
- SSA_NAME_IMM_USE_NODE (var).prev = &(SSA_NAME_IMM_USE_NODE (var));
- SSA_NAME_IMM_USE_NODE (var).next = &(SSA_NAME_IMM_USE_NODE (var));
- }
- release_ssa_name (var);
- }
-
- /* FIXME. This may not be necessary. We will release all this
- memory en masse in free_ssa_operands. This clearing used to be
- necessary to avoid problems with the inliner, but it may not be
- needed anymore. */
- FOR_EACH_BB (bb)
- {
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
- {
- gimple stmt = gsi_stmt (gsi);
-
- if (gimple_has_ops (stmt))
- {
- gimple_set_def_ops (stmt, NULL);
- gimple_set_use_ops (stmt, NULL);
- }
-
- if (gimple_has_mem_ops (stmt))
- {
- gimple_set_vdef (stmt, NULL_TREE);
- gimple_set_vuse (stmt, NULL_TREE);
- }
-
- gimple_set_modified (stmt, true);
- }
- set_phi_nodes (bb, NULL);
- }
-
/* Remove annotations from every referenced local variable. */
FOR_EACH_REFERENCED_VAR (var, rvi)
{
if (is_global_var (var))
continue;
- if (var->base.ann)
- ggc_free (var->base.ann);
- var->base.ann = NULL;
+ if (var_ann (var))
+ {
+ ggc_free (var_ann (var));
+ *DECL_VAR_ANN_PTR (var) = NULL;
+ }
}
htab_delete (gimple_referenced_vars (cfun));
cfun->gimple_df->referenced_vars = NULL;
htab_delete (cfun->gimple_df->default_defs);
cfun->gimple_df->default_defs = NULL;
pt_solution_reset (&cfun->gimple_df->escaped);
- pt_solution_reset (&cfun->gimple_df->callused);
+ if (cfun->gimple_df->decls_to_pointers != NULL)
+ pointer_map_destroy (cfun->gimple_df->decls_to_pointers);
+ cfun->gimple_df->decls_to_pointers = NULL;
cfun->gimple_df->modified_noreturn_calls = NULL;
cfun->gimple_df = NULL;
redirect_edge_var_map_destroy ();
}
-/* Helper function for useless_type_conversion_p. */
+/* Return true if the conversion from INNER_TYPE to OUTER_TYPE is a
+ useless type conversion, otherwise return false.
-static bool
-useless_type_conversion_p_1 (tree outer_type, tree inner_type)
+ This function implicitly defines the middle-end type system. With
+ the notion of 'a < b' meaning that useless_type_conversion_p (a, b)
+ holds and 'a > b' meaning that useless_type_conversion_p (b, a) holds,
+ the following invariants shall be fulfilled:
+
+ 1) useless_type_conversion_p is transitive.
+ If a < b and b < c then a < c.
+
+ 2) useless_type_conversion_p is not symmetric.
+ a < b does not imply a > b.
+
+ 3) Types define the available set of operations applicable to values.
+ A type conversion is useless if the operations for the target type
+ are a subset of the operations for the source type. For example,
+ casts to void* are useless, casts from void* are not (void* can't
+ be dereferenced or offset, only copied, hence its set of operations
+ is a strict subset of that of all other data pointer types). Casts
+ to const T* are useless (can't be written to), casts from const T*
+ to T* are not. */
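+
+/* Some concrete instances of the invariants above (illustrative,
+ using C type notation loosely; pointers assumed to share an
+ address space):
+
+ useless_type_conversion_p (void *, int *) -> true
+ useless_type_conversion_p (int *, void *) -> false
+ useless_type_conversion_p (int[], int[4]) -> true
+ useless_type_conversion_p (int[4], int[]) -> false */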
+
+bool
+useless_type_conversion_p (tree outer_type, tree inner_type)
{
/* Do the following before stripping toplevel qualifiers. */
if (POINTER_TYPE_P (inner_type)
&& POINTER_TYPE_P (outer_type))
{
+ /* Do not lose casts between pointers to different address spaces. */
+ if (TYPE_ADDR_SPACE (TREE_TYPE (outer_type))
+ != TYPE_ADDR_SPACE (TREE_TYPE (inner_type)))
+ return false;
+
+ /* If the outer type is (void *) or a pointer to an incomplete
+ record type or a pointer to an unprototyped function,
+ then the conversion is not necessary. */
+ if (VOID_TYPE_P (TREE_TYPE (outer_type))
+ || ((TREE_CODE (TREE_TYPE (outer_type)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (outer_type)) == METHOD_TYPE)
+ && (TREE_CODE (TREE_TYPE (outer_type))
+ == TREE_CODE (TREE_TYPE (inner_type)))
+ && !TYPE_ARG_TYPES (TREE_TYPE (outer_type))
+ && useless_type_conversion_p (TREE_TYPE (TREE_TYPE (outer_type)),
+ TREE_TYPE (TREE_TYPE (inner_type)))))
+ return true;
+
/* Do not lose casts to restrict qualified pointers. */
if ((TYPE_RESTRICT (outer_type)
!= TYPE_RESTRICT (inner_type))
&& TYPE_CANONICAL (inner_type) == TYPE_CANONICAL (outer_type))
return true;
- /* Changes in machine mode are never useless conversions. */
- if (TYPE_MODE (inner_type) != TYPE_MODE (outer_type))
+ /* Changes in machine mode are never useless conversions unless we
+ deal with aggregate types, in which case we defer to later checks. */
+ if (TYPE_MODE (inner_type) != TYPE_MODE (outer_type)
+ && !AGGREGATE_TYPE_P (inner_type))
return false;
/* If both the inner and outer types are integral types, then the
|| TYPE_PRECISION (inner_type) != TYPE_PRECISION (outer_type))
return false;
- /* Conversions from a non-base to a base type are not useless.
- This way we preserve the invariant to do arithmetic in
- base types only. */
- if (TREE_TYPE (inner_type)
- && TREE_TYPE (inner_type) != inner_type
- && (TREE_TYPE (outer_type) == outer_type
- || TREE_TYPE (outer_type) == NULL_TREE))
- return false;
-
/* We don't need to preserve changes in the types' minimum or
maximum value in general, as these do not generate code
unless the types' precisions are different. */
-
return true;
}
&& SCALAR_FLOAT_TYPE_P (outer_type))
return true;
+ /* Fixed point types with the same mode are compatible. */
+ else if (FIXED_POINT_TYPE_P (inner_type)
+ && FIXED_POINT_TYPE_P (outer_type))
+ return true;
+
/* We need to take special care recursing to pointed-to types. */
else if (POINTER_TYPE_P (inner_type)
&& POINTER_TYPE_P (outer_type))
&& TYPE_VOLATILE (TREE_TYPE (outer_type)))
return false;
- /* Do not lose casts between pointers with different
- TYPE_REF_CAN_ALIAS_ALL setting or alias sets. */
- if ((TYPE_REF_CAN_ALIAS_ALL (inner_type)
- != TYPE_REF_CAN_ALIAS_ALL (outer_type))
- || (get_alias_set (TREE_TYPE (inner_type))
- != get_alias_set (TREE_TYPE (outer_type))))
+ /* We require explicit conversions from incomplete target types. */
+ if (!COMPLETE_TYPE_P (TREE_TYPE (inner_type))
+ && COMPLETE_TYPE_P (TREE_TYPE (outer_type)))
+ return false;
+
+ /* Do not lose casts between pointers that, when dereferenced, access
+ memory with different alias sets. */
+ if (get_deref_alias_set (inner_type) != get_deref_alias_set (outer_type))
return false;
/* We do not care for const qualification of the pointed-to types
/* Otherwise pointers/references are equivalent if their pointed
to types are effectively the same. We can strip qualifiers
on pointed-to types for further comparison, which is done in
- the callee. */
- return useless_type_conversion_p_1 (TREE_TYPE (outer_type),
- TREE_TYPE (inner_type));
+ the callee. Note we have to use true compatibility here
+ because addresses are subject to propagation into dereferences
+ and thus might get the original type exposed which is equivalent
+ to a reverse conversion. */
+ return types_compatible_p (TREE_TYPE (outer_type),
+ TREE_TYPE (inner_type));
}
/* Recurse for complex types. */
return useless_type_conversion_p (TREE_TYPE (outer_type),
TREE_TYPE (inner_type));
- /* For aggregates we may need to fall back to structural equality
- checks. */
- else if (AGGREGATE_TYPE_P (inner_type)
- && AGGREGATE_TYPE_P (outer_type))
+ else if (TREE_CODE (inner_type) == ARRAY_TYPE
+ && TREE_CODE (outer_type) == ARRAY_TYPE)
{
- /* Different types of aggregates are incompatible. */
- if (TREE_CODE (inner_type) != TREE_CODE (outer_type))
+ /* Preserve string attributes. */
+ if (TYPE_STRING_FLAG (inner_type) != TYPE_STRING_FLAG (outer_type))
return false;
- /* ??? This seems to be necessary even for aggregates that don't
- have TYPE_STRUCTURAL_EQUALITY_P set. */
+ /* Conversions from array types with unknown extent to
+ array types with known extent are not useless. */
+ if (!TYPE_DOMAIN (inner_type)
+ && TYPE_DOMAIN (outer_type))
+ return false;
+
+ /* Nor are conversions from array types with non-constant size to
+ array types with constant size or with a different size. */
+ if (TYPE_SIZE (outer_type)
+ && TREE_CODE (TYPE_SIZE (outer_type)) == INTEGER_CST
+ && (!TYPE_SIZE (inner_type)
+ || TREE_CODE (TYPE_SIZE (inner_type)) != INTEGER_CST
+ || !tree_int_cst_equal (TYPE_SIZE (outer_type),
+ TYPE_SIZE (inner_type))))
+ return false;
- /* ??? This should eventually just return false. */
- return lang_hooks.types_compatible_p (inner_type, outer_type);
+ /* Check conversions between arrays with partially known extents.
+ If the array min/max values are constant, they have to match.
+ Otherwise allow conversions to unknown and variable extents.
+ In particular, this declares conversions that may change the
+ mode to BLKmode as useless. */
+ if (TYPE_DOMAIN (inner_type)
+ && TYPE_DOMAIN (outer_type)
+ && TYPE_DOMAIN (inner_type) != TYPE_DOMAIN (outer_type))
+ {
+ tree inner_min = TYPE_MIN_VALUE (TYPE_DOMAIN (inner_type));
+ tree outer_min = TYPE_MIN_VALUE (TYPE_DOMAIN (outer_type));
+ tree inner_max = TYPE_MAX_VALUE (TYPE_DOMAIN (inner_type));
+ tree outer_max = TYPE_MAX_VALUE (TYPE_DOMAIN (outer_type));
+
+ /* After gimplification a variable min/max value carries no
+ additional information compared to a NULL value. All that
+ matters has been lowered to be part of the IL. */
+ if (inner_min && TREE_CODE (inner_min) != INTEGER_CST)
+ inner_min = NULL_TREE;
+ if (outer_min && TREE_CODE (outer_min) != INTEGER_CST)
+ outer_min = NULL_TREE;
+ if (inner_max && TREE_CODE (inner_max) != INTEGER_CST)
+ inner_max = NULL_TREE;
+ if (outer_max && TREE_CODE (outer_max) != INTEGER_CST)
+ outer_max = NULL_TREE;
+
+ /* Conversions NULL / variable <- cst are useless, but not
+ the other way around. */
+ if (outer_min
+ && (!inner_min
+ || !tree_int_cst_equal (inner_min, outer_min)))
+ return false;
+ if (outer_max
+ && (!inner_max
+ || !tree_int_cst_equal (inner_max, outer_max)))
+ return false;
+ }
+
+ /* Recurse on the element check. */
+ return useless_type_conversion_p (TREE_TYPE (outer_type),
+ TREE_TYPE (inner_type));
}
- /* Also for functions and possibly other types with
- TYPE_STRUCTURAL_EQUALITY_P set. */
- else if (TYPE_STRUCTURAL_EQUALITY_P (inner_type)
- && TYPE_STRUCTURAL_EQUALITY_P (outer_type))
- return lang_hooks.types_compatible_p (inner_type, outer_type);
-
- return false;
-}
-/* Return true if the conversion from INNER_TYPE to OUTER_TYPE is a
- useless type conversion, otherwise return false.
+ else if ((TREE_CODE (inner_type) == FUNCTION_TYPE
+ || TREE_CODE (inner_type) == METHOD_TYPE)
+ && TREE_CODE (inner_type) == TREE_CODE (outer_type))
+ {
+ tree outer_parm, inner_parm;
- This function implicitly defines the middle-end type system. With
- the notion of 'a < b' meaning that useless_type_conversion_p (a, b)
- holds and 'a > b' meaning that useless_type_conversion_p (b, a) holds,
- the following invariants shall be fulfilled:
+ /* If the return types are not compatible, bail out. */
+ if (!useless_type_conversion_p (TREE_TYPE (outer_type),
+ TREE_TYPE (inner_type)))
+ return false;
- 1) useless_type_conversion_p is transitive.
- If a < b and b < c then a < c.
+ /* Method types should belong to a compatible base class. */
+ if (TREE_CODE (inner_type) == METHOD_TYPE
+ && !useless_type_conversion_p (TYPE_METHOD_BASETYPE (outer_type),
+ TYPE_METHOD_BASETYPE (inner_type)))
+ return false;
- 2) useless_type_conversion_p is not symmetric.
- From a < b does not follow a > b.
+ /* A conversion to an unprototyped argument list is ok. */
+ if (!TYPE_ARG_TYPES (outer_type))
+ return true;
+
+ /* If the unqualified argument types are compatible the conversion
+ is useless. */
+ if (TYPE_ARG_TYPES (outer_type) == TYPE_ARG_TYPES (inner_type))
+ return true;
+
+ for (outer_parm = TYPE_ARG_TYPES (outer_type),
+ inner_parm = TYPE_ARG_TYPES (inner_type);
+ outer_parm && inner_parm;
+ outer_parm = TREE_CHAIN (outer_parm),
+ inner_parm = TREE_CHAIN (inner_parm))
+ if (!useless_type_conversion_p
+ (TYPE_MAIN_VARIANT (TREE_VALUE (outer_parm)),
+ TYPE_MAIN_VARIANT (TREE_VALUE (inner_parm))))
+ return false;
+
+ /* If there is a mismatch in the number of arguments the functions
+ are not compatible. */
+ if (outer_parm || inner_parm)
+ return false;
- 3) Types define the available set of operations applicable to values.
- A type conversion is useless if the operations for the target type
- is a subset of the operations for the source type. For example
- casts to void* are useless, casts from void* are not (void* can't
- be dereferenced or offsetted, but copied, hence its set of operations
- is a strict subset of that of all other data pointer types). Casts
- to const T* are useless (can't be written to), casts from const T*
- to T* are not. */
+ /* Defer to the target if necessary. */
+ if (TYPE_ATTRIBUTES (inner_type) || TYPE_ATTRIBUTES (outer_type))
+ return targetm.comp_type_attributes (outer_type, inner_type) != 0;
-bool
-useless_type_conversion_p (tree outer_type, tree inner_type)
-{
- /* If the outer type is (void *), then the conversion is not
- necessary. We have to make sure to not apply this while
- recursing though. */
- if (POINTER_TYPE_P (inner_type)
- && POINTER_TYPE_P (outer_type)
- && TREE_CODE (TREE_TYPE (outer_type)) == VOID_TYPE)
- return true;
+ return true;
+ }
+
+ /* For aggregates we rely on TYPE_CANONICAL exclusively and require
+ explicit conversions for types that need to be compared
+ structurally. */
+ else if (AGGREGATE_TYPE_P (inner_type)
+ && TREE_CODE (inner_type) == TREE_CODE (outer_type))
+ return false;
- return useless_type_conversion_p_1 (outer_type, inner_type);
+ return false;
}
/* Return true if a conversion from either type of TYPE1 and TYPE2
return false;
}
+/* Strip conversions from EXP according to
+ tree_ssa_useless_type_conversion and return the resulting
+ expression. */
+
+tree
+tree_ssa_strip_useless_type_conversions (tree exp)
+{
+ while (tree_ssa_useless_type_conversion (exp))
+ exp = TREE_OPERAND (exp, 0);
+ return exp;
+}
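+
+/* E.g. (a sketch): for an expression (T) x where the conversion to
+ T is useless, the conversion is peeled off repeatedly and x itself
+ is returned. */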
+
/* Internal helper for walk_use_def_chains. VAR, FN and DATA are as
described in walk_use_def_chains.
-
+
VISITED is a pointer set used to mark visited SSA_NAMEs to avoid
infinite loops. We used to have a bitmap for this to just mark
SSA versions we had visited. But non-sparse bitmaps are way too
if (fn (gimple_phi_arg_def (def_stmt, i), def_stmt, data))
return true;
}
-
+
return false;
}
-
+
/* Walk use-def chains starting at the SSA variable VAR. Call
arguments: VAR, its defining statement (DEF_STMT) and a generic
pointer to whatever state information that FN may want to maintain
(DATA). FN is able to stop the walk by returning true, otherwise
- in order to continue the walk, FN should return false.
+ in order to continue the walk, FN should return false.
Note that if DEF_STMT is a PHI node, the semantics are slightly
different. The first argument to FN is no longer the original
}
\f
-/* Return true if T, an SSA_NAME, has an undefined value. */
-
-bool
-ssa_undefined_value_p (tree t)
-{
- tree var = SSA_NAME_VAR (t);
-
- /* Parameters get their initial value from the function entry. */
- if (TREE_CODE (var) == PARM_DECL)
- return false;
-
- /* Hard register variables get their initial value from the ether. */
- if (TREE_CODE (var) == VAR_DECL && DECL_HARD_REGISTER (var))
- return false;
-
- /* The value is undefined iff its definition statement is empty. */
- return gimple_nop_p (SSA_NAME_DEF_STMT (t));
-}
-
/* Emit warnings for uninitialized variables. This is done in two passes.
The first pass notices real uses of SSA names with undefined values.
/* Emit a warning for T, an SSA_NAME, being uninitialized. The exact
warning text is in GMSGID; the location is taken from the statement
context in DATA, or from T's declaration if there is none. */
-static void
+void
warn_uninit (tree t, const char *gmsgid, void *data)
{
tree var = SSA_NAME_VAR (t);
/* Do not warn if it can be initialized outside this module. */
if (is_global_var (var))
return;
-
+
location = (context != NULL && gimple_has_location (context))
? gimple_location (context)
: DECL_SOURCE_LOCATION (var);
if (xloc.file != floc.file
|| xloc.line < floc.line
|| xloc.line > LOCATION_LINE (cfun->function_end_locus))
- inform (input_location, "%J%qD was declared here", var, var);
+ inform (DECL_SOURCE_LOCATION (var), "%qD was declared here", var);
}
}
/* We do not care about LHS. */
if (wi->is_lhs)
- return NULL_TREE;
+ {
+ /* Except for operands of INDIRECT_REF. */
+ if (!INDIRECT_REF_P (t))
+ return NULL_TREE;
+ t = TREE_OPERAND (t, 0);
+ }
switch (TREE_CODE (t))
{
use_operand_p vuse;
tree op;
- /* If there is not gimple stmt,
+ /* If there is no gimple stmt,
or alias information has not been computed,
then we cannot check VUSE ops. */
if (data->stmt == NULL)
return NULL_TREE;
op = USE_FROM_PTR (vuse);
- if (t != SSA_NAME_VAR (op)
+ if (t != SSA_NAME_VAR (op)
|| !SSA_NAME_IS_DEFAULT_DEF (op))
return NULL_TREE;
/* If this is a VUSE of t and it is the default definition,
return NULL_TREE;
}
-/* Look for inputs to PHI that are SSA_NAMEs that have empty definitions
- and warn about them. */
-
-static void
-warn_uninitialized_phi (gimple phi)
-{
- size_t i, n = gimple_phi_num_args (phi);
-
- /* Don't look at memory tags. */
- if (!is_gimple_reg (gimple_phi_result (phi)))
- return;
-
- for (i = 0; i < n; ++i)
- {
- tree op = gimple_phi_arg_def (phi, i);
- if (TREE_CODE (op) == SSA_NAME)
- warn_uninit (op, "%qD may be used uninitialized in this function",
- NULL);
- }
-}
-
-static unsigned int
+unsigned int
warn_uninitialized_vars (bool warn_possibly_uninitialized)
{
gimple_stmt_iterator gsi;
data.warn_possibly_uninitialized = warn_possibly_uninitialized;
- calculate_dominance_info (CDI_POST_DOMINATORS);
FOR_EACH_BB (bb)
{
{
struct walk_stmt_info wi;
data.stmt = gsi_stmt (gsi);
+ if (is_gimple_debug (data.stmt))
+ continue;
memset (&wi, 0, sizeof (wi));
wi.info = &data;
walk_gimple_op (gsi_stmt (gsi), warn_uninitialized_var, &wi);
}
}
- /* Post-dominator information can not be reliably updated. Free it
- after the use. */
-
- free_dominance_info (CDI_POST_DOMINATORS);
return 0;
}
as possible, thus don't do it here. However, without
optimization we need to warn here about "may be uninitialized".
*/
- warn_uninitialized_vars (/*warn_possibly_uninitialized=*/!optimize);
- return 0;
-}
-
-static unsigned int
-execute_late_warn_uninitialized (void)
-{
- basic_block bb;
- gimple_stmt_iterator gsi;
+ calculate_dominance_info (CDI_POST_DOMINATORS);
- /* Re-do the plain uninitialized variable check, as optimization may have
- straightened control flow. Do this first so that we don't accidentally
- get a "may be" warning when we'd have seen an "is" warning later. */
- warn_uninitialized_vars (/*warn_possibly_uninitialized=*/1);
+ warn_uninitialized_vars (/*warn_possibly_uninitialized=*/!optimize);
- FOR_EACH_BB (bb)
- for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
- warn_uninitialized_phi (gsi_stmt (gsi));
+ /* Post-dominator information can not be reliably updated. Free it
+ after the use. */
+ free_dominance_info (CDI_POST_DOMINATORS);
return 0;
}
{
{
GIMPLE_PASS,
- NULL, /* name */
+ "*early_warn_uninitialized", /* name */
gate_warn_uninitialized, /* gate */
execute_early_warn_uninitialized, /* execute */
NULL, /* sub */
}
};
-struct gimple_opt_pass pass_late_warn_uninitialized =
-{
- {
- GIMPLE_PASS,
- NULL, /* name */
- gate_warn_uninitialized, /* gate */
- execute_late_warn_uninitialized, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- PROP_ssa, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
- }
-};
-
/* Compute TREE_ADDRESSABLE and DECL_GIMPLE_REG_P for local variables. */
void
a local decl that requires not to be a gimple register. */
if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL)
{
- tree lhs = gimple_get_lhs (stmt);
- /* A plain decl does not need it set. */
- if (lhs && handled_component_p (lhs))
- {
- var = get_base_address (lhs);
- if (DECL_P (var))
- bitmap_set_bit (not_reg_needs, DECL_UID (var));
- }
+ tree lhs = gimple_get_lhs (stmt);
+
+ /* We may not rewrite TMR_SYMBOL to SSA. */
+ if (lhs && TREE_CODE (lhs) == TARGET_MEM_REF
+ && TMR_SYMBOL (lhs))
+ bitmap_set_bit (not_reg_needs, DECL_UID (TMR_SYMBOL (lhs)));
+
+ /* A plain decl does not need it set. */
+ else if (lhs && handled_component_p (lhs))
+ {
+ var = get_base_address (lhs);
+ if (DECL_P (var))
+ bitmap_set_bit (not_reg_needs, DECL_UID (var));
+ }
}
}
if (!DECL_GIMPLE_REG_P (var)
&& !bitmap_bit_p (not_reg_needs, DECL_UID (var))
&& (TREE_CODE (TREE_TYPE (var)) == COMPLEX_TYPE
- || TREE_CODE (TREE_TYPE (var)) == VECTOR_TYPE))
+ || TREE_CODE (TREE_TYPE (var)) == VECTOR_TYPE)
+ && !TREE_THIS_VOLATILE (var)
+ && (TREE_CODE (var) != VAR_DECL || !DECL_HARD_REGISTER (var)))
{
DECL_GIMPLE_REG_P (var) = 1;
mark_sym_for_renaming (var);
{
gimple stmt = gsi_stmt (gsi);
- if (gimple_references_memory_p (stmt))
+ if (gimple_references_memory_p (stmt)
+ || is_gimple_debug (stmt))
update_stmt (stmt);
}