/* Alias analysis for GNU C
Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
- 2007, 2008, 2009 Free Software Foundation, Inc.
+ 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
Contributed by John Carr (jfc@mit.edu).
This file is part of GCC.
#include "basic-block.h"
#include "flags.h"
#include "output.h"
+#include "diagnostic-core.h"
#include "toplev.h"
#include "cselib.h"
#include "splay-tree.h"
#include "timevar.h"
#include "target.h"
#include "cgraph.h"
-#include "varray.h"
#include "tree-pass.h"
#include "ipa-type-escape.h"
#include "df.h"
array. */
static GTY((deletable)) VEC(rtx,gc) *old_reg_base_value;
-/* Static hunks of RTL used by the aliasing code; these are initialized
- once per function to avoid unnecessary RTL allocations. */
-static GTY (()) rtx static_reg_base_value[FIRST_PSEUDO_REGISTER];
+#define static_reg_base_value \
+ (this_target_rtl->x_static_reg_base_value)
#define REG_BASE_VALUE(X) \
(REGNO (X) < VEC_length (rtx, reg_base_value) \
if (base == NULL_TREE)
return false;
+ /* The tree oracle doesn't like to have these. */
+ if (TREE_CODE (base) == FUNCTION_DECL
+ || TREE_CODE (base) == LABEL_DECL)
+ return false;
+
/* If this is a pointer dereference of a non-SSA_NAME punt.
??? We could replace it with a pointer to anything. */
- if (INDIRECT_REF_P (base)
+ if ((INDIRECT_REF_P (base)
+ || TREE_CODE (base) == MEM_REF)
&& TREE_CODE (TREE_OPERAND (base, 0)) != SSA_NAME)
return false;
+ if (TREE_CODE (base) == TARGET_MEM_REF
+ && TMR_BASE (base)
+ && TREE_CODE (TMR_BASE (base)) != SSA_NAME)
+ return false;
/* If this is a reference based on a partitioned decl replace the
base with an INDIRECT_REF of the pointer representative we
void *namep;
namep = pointer_map_contains (cfun->gimple_df->decls_to_pointers, base);
if (namep)
- {
- ref->base_alias_set = get_alias_set (base);
- ref->base = build1 (INDIRECT_REF, TREE_TYPE (base), *(tree *)namep);
- }
+ ref->base = build_simple_mem_ref (*(tree *)namep);
+ }
+ else if (TREE_CODE (base) == TARGET_MEM_REF
+ && TREE_CODE (TMR_BASE (base)) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (TMR_BASE (base), 0)) == VAR_DECL
+ && ! TREE_STATIC (TREE_OPERAND (TMR_BASE (base), 0))
+ && cfun->gimple_df->decls_to_pointers != NULL)
+ {
+ void *namep;
+ namep = pointer_map_contains (cfun->gimple_df->decls_to_pointers,
+ TREE_OPERAND (TMR_BASE (base), 0));
+ if (namep)
+ ref->base = build_simple_mem_ref (*(tree *)namep);
}
ref->ref_alias_set = MEM_ALIAS_SET (mem);
- /* For NULL MEM_OFFSET the MEM_EXPR may have been stripped arbitrarily
- without recording offset or extent adjustments properly. */
- if (MEM_OFFSET (mem) == NULL_RTX)
+ /* If MEM_OFFSET or MEM_SIZE are NULL we have to punt.
+ Keep points-to related information though. */
+ if (!MEM_OFFSET (mem)
+ || !MEM_SIZE (mem))
{
+ ref->ref = NULL_TREE;
ref->offset = 0;
- ref->max_size = -1;
- }
- else
- {
- ref->offset += INTVAL (MEM_OFFSET (mem)) * BITS_PER_UNIT;
- }
-
- /* NULL MEM_SIZE should not really happen with a non-NULL MEM_EXPR,
- but just play safe here. The size may have been adjusted together
- with the offset, so we need to take it if it is set and not rely
- on MEM_EXPR here (which has the size determining parts potentially
- stripped anyway). We lose precision for max_size which is only
- available from the remaining MEM_EXPR. */
- if (MEM_SIZE (mem) == NULL_RTX)
- {
ref->size = -1;
ref->max_size = -1;
+ return true;
}
- else
- {
- ref->size = INTVAL (MEM_SIZE (mem)) * BITS_PER_UNIT;
- }
+
+ /* If the base decl is a parameter we can have negative MEM_OFFSET in
+ case of promoted subregs on bigendian targets. Trust the MEM_EXPR
+ here. */
+ if (INTVAL (MEM_OFFSET (mem)) < 0
+ && ((INTVAL (MEM_SIZE (mem)) + INTVAL (MEM_OFFSET (mem)))
+ * BITS_PER_UNIT) == ref->size)
+ return true;
+
+ ref->offset += INTVAL (MEM_OFFSET (mem)) * BITS_PER_UNIT;
+ ref->size = INTVAL (MEM_SIZE (mem)) * BITS_PER_UNIT;
+
+ /* The MEM may extend into adjacent fields, so adjust max_size if
+ necessary. */
+ if (ref->max_size != -1
+ && ref->size > ref->max_size)
+ ref->max_size = ref->size;
+
+ /* If MEM_OFFSET and MEM_SIZE get us outside of the base object of
+ the MEM_EXPR punt. This happens for STRICT_ALIGNMENT targets a lot. */
+ if (MEM_EXPR (mem) != get_spill_slot_decl (false)
+ && (ref->offset < 0
+ || (DECL_P (ref->base)
+ && (!host_integerp (DECL_SIZE (ref->base), 1)
+ || (TREE_INT_CST_LOW (DECL_SIZE ((ref->base)))
+ < (unsigned HOST_WIDE_INT)(ref->offset + ref->size))))))
+ return false;
return true;
}
|| !ao_ref_from_mem (&ref2, mem))
return true;
- return refs_may_alias_p_1 (&ref1, &ref2, tbaa_p);
+ return refs_may_alias_p_1 (&ref1, &ref2,
+ tbaa_p
+ && MEM_ALIAS_SET (x) != 0
+ && MEM_ALIAS_SET (mem) != 0);
}
/* Returns a pointer to the alias set entry for ALIAS_SET, if there is
/* Otherwise, check if set1 is a subset of set2. */
ase = get_alias_set_entry (set2);
if (ase != 0
- && ((ase->has_zero_child && set1 == 0)
+ && (ase->has_zero_child
|| splay_tree_lookup (ase->children,
(splay_tree_key) set1)))
return true;
return 0;
}
-static int
-walk_mems_2 (rtx *x, rtx mem)
-{
- if (MEM_P (*x))
- {
- if (alias_sets_conflict_p (MEM_ALIAS_SET(*x), MEM_ALIAS_SET(mem)))
- return 1;
-
- return -1;
- }
- return 0;
-}
-
-static int
-walk_mems_1 (rtx *x, rtx *pat)
-{
- if (MEM_P (*x))
- {
- /* Visit all MEMs in *PAT and check indepedence. */
- if (for_each_rtx (pat, (rtx_function) walk_mems_2, *x))
- /* Indicate that dependence was determined and stop traversal. */
- return 1;
-
- return -1;
- }
- return 0;
-}
-
-/* Return 1 if two specified instructions have mem expr with conflict alias sets*/
-bool
-insn_alias_sets_conflict_p (rtx insn1, rtx insn2)
-{
- /* For each pair of MEMs in INSN1 and INSN2 check their independence. */
- return for_each_rtx (&PATTERN (insn1), (rtx_function) walk_mems_1,
- &PATTERN (insn2));
-}
-
/* Return 1 if the two specified alias sets will always conflict. */
int
aren't types. */
if (! TYPE_P (t))
{
- tree inner = t;
+ tree inner;
- /* Remove any nops, then give the language a chance to do
- something with this tree before we look at it. */
+ /* Give the language a chance to do something with this tree
+ before we look at it. */
STRIP_NOPS (t);
set = lang_hooks.get_alias_set (t);
if (set != -1)
return set;
- /* First see if the actual object referenced is an INDIRECT_REF from a
- restrict-qualified pointer or a "void *". */
+ /* Get the base object of the reference. */
+ inner = t;
while (handled_component_p (inner))
{
+ /* If there is a VIEW_CONVERT_EXPR in the chain we cannot use
+ the type of any component references that wrap it to
+ determine the alias-set. */
+ if (TREE_CODE (inner) == VIEW_CONVERT_EXPR)
+ t = TREE_OPERAND (inner, 0);
inner = TREE_OPERAND (inner, 0);
- STRIP_NOPS (inner);
}
+ /* Handle pointer dereferences here, they can override the
+ alias-set. */
if (INDIRECT_REF_P (inner))
{
set = get_deref_alias_set_1 (TREE_OPERAND (inner, 0));
if (set != -1)
return set;
}
+ else if (TREE_CODE (inner) == TARGET_MEM_REF)
+ return get_deref_alias_set (TMR_OFFSET (inner));
+ else if (TREE_CODE (inner) == MEM_REF)
+ {
+ set = get_deref_alias_set_1 (TREE_OPERAND (inner, 1));
+ if (set != -1)
+ return set;
+ }
+
+ /* If the innermost reference is a MEM_REF that has a
+ conversion embedded treat it like a VIEW_CONVERT_EXPR above,
+ using the memory access type for determining the alias-set. */
+ if (TREE_CODE (inner) == MEM_REF
+ && TYPE_MAIN_VARIANT (TREE_TYPE (inner))
+ != TYPE_MAIN_VARIANT
+ (TREE_TYPE (TREE_TYPE (TREE_OPERAND (inner, 1)))))
+ return get_deref_alias_set (TREE_OPERAND (inner, 1));
/* Otherwise, pick up the outermost object that we could have a pointer
to, processing conversions as above. */
}
/* Variant qualifiers don't affect the alias set, so get the main
- variant. Always use the canonical type as well.
- If this is a type with a known alias set, return it. */
+ variant. */
t = TYPE_MAIN_VARIANT (t);
- if (TYPE_CANONICAL (t))
- t = TYPE_CANONICAL (t);
+
+ /* Always use the canonical type as well. If this is a type that
+ requires structural comparisons to identify compatible types
+ use alias set zero. */
+ if (TYPE_STRUCTURAL_EQUALITY_P (t))
+ {
+ /* Allow the language to specify another alias set for this
+ type. */
+ set = lang_hooks.get_alias_set (t);
+ if (set != -1)
+ return set;
+ return 0;
+ }
+
+ t = TYPE_CANONICAL (t);
+
+ /* Canonical types shouldn't form a tree nor should the canonical
+ type require structural equality checks. */
+ gcc_checking_assert (TYPE_CANONICAL (t) == t
+ && !TYPE_STRUCTURAL_EQUALITY_P (t));
+
+ /* If this is a type with a known alias set, return it. */
if (TYPE_ALIAS_SET_KNOWN_P (t))
return TYPE_ALIAS_SET (t);
/* There are no objects of FUNCTION_TYPE, so there's no point in
using up an alias set for them. (There are, of course, pointers
and references to functions, but that's different.) */
- else if (TREE_CODE (t) == FUNCTION_TYPE
- || TREE_CODE (t) == METHOD_TYPE)
+ else if (TREE_CODE (t) == FUNCTION_TYPE || TREE_CODE (t) == METHOD_TYPE)
set = 0;
/* Unless the language specifies otherwise, let vector types alias
integer(kind=4)[4] the same alias set or not.
Just be pragmatic here and make sure the array and its element
type get the same alias set assigned. */
- else if (TREE_CODE (t) == ARRAY_TYPE
- && !TYPE_NONALIASED_COMPONENT (t))
+ else if (TREE_CODE (t) == ARRAY_TYPE && !TYPE_NONALIASED_COMPONENT (t))
set = get_alias_set (TREE_TYPE (t));
+ /* From the former common C and C++ langhook implementation:
+
+ Unfortunately, there is no canonical form of a pointer type.
+ In particular, if we have `typedef int I', then `int *', and
+ `I *' are different types. So, we have to pick a canonical
+ representative. We do this below.
+
+ Technically, this approach is actually more conservative than
+ it needs to be. In particular, `const int *' and `int *'
+ should be in different alias sets, according to the C and C++
+ standard, since their types are not the same, and so,
+ technically, an `int **' and `const int **' cannot point at
+ the same thing.
+
+ But, the standard is wrong. In particular, this code is
+ legal C++:
+
+ int *ip;
+ int **ipp = &ip;
+ const int* const* cipp = ipp;
+ And, it doesn't make sense for that to be legal unless you
+ can dereference IPP and CIPP. So, we ignore cv-qualifiers on
+ the pointed-to types. This issue has been reported to the
+ C++ committee.
+
+ In addition to the above canonicalization issue, with LTO
+ we should also canonicalize `T (*)[]' to `T *' avoiding
+ alias issues with pointer-to element types and pointer-to
+ array types.
+
+ Likewise we need to deal with the situation of incomplete
+ pointed-to types and make `*(struct X **)&a' and
+ `*(struct X {} **)&a' alias. Otherwise we will have to
+ guarantee that all pointer-to incomplete type variants
+ will be replaced by pointer-to complete type variants if
+ they are available.
+
+ With LTO the convenient situation of using `void *' to
+ access and store any pointer type will also become
+ more apparent (and `void *' is just another pointer-to
+ incomplete type). Assigning alias-set zero to `void *'
+ and all pointer-to incomplete types is not an appealing
+ solution. Assigning an effective alias-set zero only
+ affecting pointers might be - by recording proper subset
+ relationships of all pointer alias-sets.
+
+ Pointer-to function types are another grey area which
+ needs caution. Globbing them all into one alias-set
+ or the above effective zero set would work.
+
+ For now just assign the same alias-set to all pointers.
+ That's simple and avoids all the above problems. */
+ else if (POINTER_TYPE_P (t)
+ && t != ptr_type_node)
+ return get_alias_set (ptr_type_node);
+
+ /* Otherwise make a new alias set for this type. */
else
- /* Otherwise make a new alias set for this type. */
set = new_alias_set ();
TYPE_ALIAS_SET (t) = set;
- /* If this is an aggregate type, we must record any component aliasing
- information. */
+ /* If this is an aggregate type or a complex type, we must record any
+ component aliasing information. */
if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
record_component_aliases (t);
{
/* Create an entry for the SUPERSET, so that we have a place to
attach the SUBSET. */
- superset_entry = GGC_NEW (struct alias_set_entry_d);
+ superset_entry = ggc_alloc_cleared_alias_set_entry_d ();
superset_entry->alias_set = superset;
superset_entry->children
- = splay_tree_new_ggc (splay_tree_compare_ints);
+ = splay_tree_new_ggc (splay_tree_compare_ints,
+ ggc_alloc_splay_tree_scalar_scalar_splay_tree_s,
+ ggc_alloc_splay_tree_scalar_scalar_splay_tree_node_s);
superset_entry->has_zero_child = 0;
VEC_replace (alias_set_entry, alias_sets, superset, superset_entry);
}
record_alias_subset (superset,
get_alias_set (BINFO_TYPE (base_binfo)));
}
- for (field = TYPE_FIELDS (type); field != 0; field = TREE_CHAIN (field))
+ for (field = TYPE_FIELDS (type); field != 0; field = DECL_CHAIN (field))
if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
record_alias_subset (superset, get_alias_set (TREE_TYPE (field)));
break;
return 0;
case TRUNCATE:
+ /* As we do not know which address space the pointer is referring to, we can
+ handle this only if the target does not support different pointer or
+ address modes depending on the address space. */
+ if (!target_default_pointer_address_modes_p ())
+ break;
if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode))
break;
/* Fall through. */
case ZERO_EXTEND:
case SIGN_EXTEND: /* used for NT/Alpha pointers */
+ /* As we do not know which address space the pointer is referring to, we can
+ handle this only if the target does not support different pointer or
+ address modes depending on the address space. */
+ if (!target_default_pointer_address_modes_p ())
+ break;
+
{
rtx temp = find_base_value (XEXP (src, 0));
regno = REGNO (dest);
- gcc_assert (regno < VEC_length (rtx, reg_base_value));
+ gcc_checking_assert (regno < VEC_length (rtx, reg_base_value));
/* If this spans multiple hard registers, then we must indicate that every
register has an unusable value. */
return REG_BASE_VALUE (x);
case TRUNCATE:
+ /* As we do not know which address space the pointer is referring to, we can
+ handle this only if the target does not support different pointer or
+ address modes depending on the address space. */
+ if (!target_default_pointer_address_modes_p ())
+ return 0;
if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode))
return 0;
/* Fall through. */
case ZERO_EXTEND:
case SIGN_EXTEND: /* Used for Alpha/NT pointers */
+ /* As we do not know which address space the pointer is referring to, we can
+ handle this only if the target does not support different pointer or
+ address modes depending on the address space. */
+ if (!target_default_pointer_address_modes_p ())
+ return 0;
+
{
rtx temp = find_base_term (XEXP (x, 0));
|| (GET_CODE (y_base) == ADDRESS && GET_MODE (y_base) == Pmode))
return 0;
- if (! flag_argument_noalias)
- return 1;
-
- if (flag_argument_noalias > 1)
- return 0;
-
- /* Weak noalias assertion (arguments are distinct, but may match globals). */
- return ! (GET_MODE (x_base) == VOIDmode && GET_MODE (y_base) == VOIDmode);
+ return 1;
}
/* Convert the address X into something we can use. This is done by returning
return addr;
}
-/* Return nonzero if X and Y (memory addresses) could reference the
- same location in memory. C is an offset accumulator. When
+/* Return one if X and Y (memory addresses) reference the
+ same location in memory or if the references overlap.
+ Return zero if they do not overlap, else return
+ minus one in which case they still might reference the same location.
+
+ C is an offset accumulator. When
C is nonzero, we are testing aliases between X and Y + C.
XSIZE is the size in bytes of the X reference,
similarly YSIZE is the size in bytes for Y.
align memory references, as is done on the Alpha.
Nice to notice that varying addresses cannot conflict with fp if no
- local variables had their addresses taken, but that's too hard now. */
+ local variables had their addresses taken, but that's too hard now.
+
+ ??? Contrary to the tree alias oracle this does not return
+ one for X + non-constant and Y + non-constant when X and Y are equal.
+ If that is fixed the TBAA hack for union type-punning can be removed. */
static int
memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
{
if (GET_CODE (x) == VALUE)
- x = get_addr (x);
+ {
+ if (REG_P (y))
+ {
+ struct elt_loc_list *l = NULL;
+ if (CSELIB_VAL_PTR (x))
+ for (l = CSELIB_VAL_PTR (x)->locs; l; l = l->next)
+ if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, y))
+ break;
+ if (l)
+ x = y;
+ else
+ x = get_addr (x);
+ }
+ /* Don't call get_addr if y is the same VALUE. */
+ else if (x != y)
+ x = get_addr (x);
+ }
if (GET_CODE (y) == VALUE)
- y = get_addr (y);
+ {
+ if (REG_P (x))
+ {
+ struct elt_loc_list *l = NULL;
+ if (CSELIB_VAL_PTR (y))
+ for (l = CSELIB_VAL_PTR (y)->locs; l; l = l->next)
+ if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, x))
+ break;
+ if (l)
+ y = x;
+ else
+ y = get_addr (y);
+ }
+ /* Don't call get_addr if x is the same VALUE. */
+ else if (y != x)
+ y = get_addr (y);
+ }
if (GET_CODE (x) == HIGH)
x = XEXP (x, 0);
else if (GET_CODE (x) == LO_SUM)
else if (CONST_INT_P (y1))
return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
- return 1;
+ return -1;
}
else if (CONST_INT_P (x1))
return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
if (CONST_INT_P (y1))
return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
else
- return 1;
+ return -1;
}
if (GET_CODE (x) == GET_CODE (y))
rtx x1 = canon_rtx (XEXP (x, 1));
rtx y1 = canon_rtx (XEXP (y, 1));
if (! rtx_equal_for_memref_p (x1, y1))
- return 1;
+ return -1;
x0 = canon_rtx (XEXP (x, 0));
y0 = canon_rtx (XEXP (y, 0));
if (rtx_equal_for_memref_p (x0, y0))
/* Can't properly adjust our sizes. */
if (!CONST_INT_P (x1))
- return 1;
+ return -1;
xsize /= INTVAL (x1);
ysize /= INTVAL (x1);
c /= INTVAL (x1);
|| (rtx_equal_for_memref_p (x, y)
&& ((c >= 0 && xsize > c) || (c < 0 && ysize+c > 0))));
- return 1;
+ return -1;
}
- return 1;
+
+ return -1;
}
/* Functions to compute memory dependencies.
{
const_tree fieldx, fieldy, typex, typey, orig_y;
+ if (!flag_strict_aliasing)
+ return false;
+
do
{
/* The comparison has to be done at a common type, since we don't
}
/* Return nonzero if we can determine the exprs corresponding to memrefs
- X and Y and they do not overlap. */
+ X and Y and they do not overlap.
+ If LOOP_INVARIANT is set, skip offset-based disambiguation.  */
int
-nonoverlapping_memrefs_p (const_rtx x, const_rtx y)
+nonoverlapping_memrefs_p (const_rtx x, const_rtx y, bool loop_invariant)
{
tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
rtx rtlx, rtly;
if (exprx == 0 || expry == 0)
return 0;
+ /* For spill-slot accesses make sure we have valid offsets. */
+ if ((exprx == get_spill_slot_decl (false)
+ && ! MEM_OFFSET (x))
+ || (expry == get_spill_slot_decl (false)
+ && ! MEM_OFFSET (y)))
+ return 0;
+
/* If both are field references, we may be able to determine something. */
if (TREE_CODE (exprx) == COMPONENT_REF
&& TREE_CODE (expry) == COMPONENT_REF
moffsetx = MEM_OFFSET (x);
if (TREE_CODE (exprx) == COMPONENT_REF)
{
- if (TREE_CODE (expry) == VAR_DECL
- && POINTER_TYPE_P (TREE_TYPE (expry)))
- {
- tree field = TREE_OPERAND (exprx, 1);
- tree fieldcontext = DECL_FIELD_CONTEXT (field);
- if (ipa_type_escape_field_does_not_clobber_p (fieldcontext,
- TREE_TYPE (field)))
- return 1;
- }
- {
- tree t = decl_for_component_ref (exprx);
- if (! t)
- return 0;
- moffsetx = adjust_offset_for_component_ref (exprx, moffsetx);
- exprx = t;
- }
- }
- else if (INDIRECT_REF_P (exprx))
- {
- exprx = TREE_OPERAND (exprx, 0);
- if (flag_argument_noalias < 2
- || TREE_CODE (exprx) != PARM_DECL)
+ tree t = decl_for_component_ref (exprx);
+ if (! t)
return 0;
+ moffsetx = adjust_offset_for_component_ref (exprx, moffsetx);
+ exprx = t;
}
moffsety = MEM_OFFSET (y);
if (TREE_CODE (expry) == COMPONENT_REF)
{
- if (TREE_CODE (exprx) == VAR_DECL
- && POINTER_TYPE_P (TREE_TYPE (exprx)))
- {
- tree field = TREE_OPERAND (expry, 1);
- tree fieldcontext = DECL_FIELD_CONTEXT (field);
- if (ipa_type_escape_field_does_not_clobber_p (fieldcontext,
- TREE_TYPE (field)))
- return 1;
- }
- {
- tree t = decl_for_component_ref (expry);
- if (! t)
- return 0;
- moffsety = adjust_offset_for_component_ref (expry, moffsety);
- expry = t;
- }
- }
- else if (INDIRECT_REF_P (expry))
- {
- expry = TREE_OPERAND (expry, 0);
- if (flag_argument_noalias < 2
- || TREE_CODE (expry) != PARM_DECL)
+ tree t = decl_for_component_ref (expry);
+ if (! t)
return 0;
+ moffsety = adjust_offset_for_component_ref (expry, moffsety);
+ expry = t;
}
if (! DECL_P (exprx) || ! DECL_P (expry))
return 0;
+ /* With invalid code we can end up storing into the constant pool.
+ Bail out to avoid ICEing when creating RTL for this.
+ See gfortran.dg/lto/20091028-2_0.f90. */
+ if (TREE_CODE (exprx) == CONST_DECL
+ || TREE_CODE (expry) == CONST_DECL)
+ return 1;
+
rtlx = DECL_RTL (exprx);
rtly = DECL_RTL (expry);
&& ! rtx_equal_p (rtlx, rtly))
return 1;
+ /* If we have MEMs referring to different address spaces (which can
+ potentially overlap), we cannot easily tell from the addresses
+ whether the references overlap. */
+ if (MEM_P (rtlx) && MEM_P (rtly)
+ && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly))
+ return 0;
+
/* Get the base and offsets of both decls. If either is a register, we
know both are and are the same, so use that as the base. The only
we can avoid overlap is if we can deduce that they are nonoverlapping
|| (CONSTANT_P (basey) && REG_P (basex)
&& REGNO_PTR_FRAME_P (REGNO (basex))));
+ /* Offset-based disambiguation is not appropriate for loop-invariant code.  */
+ if (loop_invariant)
+ return 0;
+
sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
: MEM_SIZE (rtlx) ? INTVAL (MEM_SIZE (rtlx))
: -1);
return sizex >= 0 && offsety >= offsetx + sizex;
}
-/* True dependence: X is read after store in MEM takes place. */
+/* Helper for true_dependence and canon_true_dependence.
+ Checks for true dependence: X is read after store in MEM takes place.
-int
-true_dependence (const_rtx mem, enum machine_mode mem_mode, const_rtx x,
- bool (*varies) (const_rtx, bool))
+ VARIES is the function that should be used as rtx_varies function.
+
+ If MEM_CANONICALIZED is FALSE, then X_ADDR and MEM_ADDR should be
+ NULL_RTX, and the canonical addresses of MEM and X are both computed
+ here. If MEM_CANONICALIZED, then MEM must be already canonicalized.
+
+ If X_ADDR is non-NULL, it is used in preference of XEXP (x, 0).
+
+ Returns 1 if there is a true dependence, 0 otherwise. */
+
+static int
+true_dependence_1 (const_rtx mem, enum machine_mode mem_mode, rtx mem_addr,
+ const_rtx x, rtx x_addr, bool (*varies) (const_rtx, bool),
+ bool mem_canonicalized)
{
- rtx x_addr, mem_addr;
rtx base;
+ int ret;
+
+ gcc_checking_assert (mem_canonicalized ? (mem_addr != NULL_RTX)
+ : (mem_addr == NULL_RTX && x_addr == NULL_RTX));
if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
return 1;
|| MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
return 1;
- if (DIFFERENT_ALIAS_SETS_P (x, mem))
- return 0;
-
/* Read-only memory is by definition never modified, and therefore can't
conflict with anything. We don't expect to find read-only set on MEM,
but stupid user tricks can produce them, so don't die. */
if (MEM_READONLY_P (x))
return 0;
- if (nonoverlapping_memrefs_p (mem, x))
- return 0;
+ /* If we have MEMs referring to different address spaces (which can
+ potentially overlap), we cannot easily tell from the addresses
+ whether the references overlap. */
+ if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
+ return 1;
- if (mem_mode == VOIDmode)
- mem_mode = GET_MODE (mem);
+ if (! mem_addr)
+ {
+ mem_addr = XEXP (mem, 0);
+ if (mem_mode == VOIDmode)
+ mem_mode = GET_MODE (mem);
+ }
- x_addr = get_addr (XEXP (x, 0));
- mem_addr = get_addr (XEXP (mem, 0));
+ if (! x_addr)
+ {
+ x_addr = XEXP (x, 0);
+ if (!((GET_CODE (x_addr) == VALUE
+ && GET_CODE (mem_addr) != VALUE
+ && reg_mentioned_p (x_addr, mem_addr))
+ || (GET_CODE (x_addr) != VALUE
+ && GET_CODE (mem_addr) == VALUE
+ && reg_mentioned_p (mem_addr, x_addr))))
+ {
+ x_addr = get_addr (x_addr);
+ if (! mem_canonicalized)
+ mem_addr = get_addr (mem_addr);
+ }
+ }
base = find_base_term (x_addr);
if (base && (GET_CODE (base) == LABEL_REF
return 0;
x_addr = canon_rtx (x_addr);
- mem_addr = canon_rtx (mem_addr);
+ if (!mem_canonicalized)
+ mem_addr = canon_rtx (mem_addr);
+
+ if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
+ SIZE_FOR_MODE (x), x_addr, 0)) != -1)
+ return ret;
+
+ if (DIFFERENT_ALIAS_SETS_P (x, mem))
+ return 0;
- if (! memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
- SIZE_FOR_MODE (x), x_addr, 0))
+ if (nonoverlapping_memrefs_p (mem, x, false))
return 0;
if (aliases_everything_p (x))
return 1;
/* We cannot use aliases_everything_p to test MEM, since we must look
- at MEM_MODE, rather than GET_MODE (MEM). */
+ at MEM_ADDR, rather than XEXP (mem, 0). */
if (mem_mode == QImode || GET_CODE (mem_addr) == AND)
return 1;
- /* In true_dependence we also allow BLKmode to alias anything. Why
+ /* ??? In true_dependence we also allow BLKmode to alias anything. Why
don't we do this in anti_dependence and output_dependence? */
if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
return 1;
return rtx_refs_may_alias_p (x, mem, true);
}
+/* True dependence: X is read after store in MEM takes place.
+   Thin wrapper around true_dependence_1 with no pre-canonicalized
+   MEM address (mem_addr and x_addr both NULL_RTX, so both are
+   computed there).  Returns 1 if there is a true dependence,
+   0 otherwise.  */
+
+int
+true_dependence (const_rtx mem, enum machine_mode mem_mode, const_rtx x,
+ bool (*varies) (const_rtx, bool))
+{
+ return true_dependence_1 (mem, mem_mode, NULL_RTX,
+ x, NULL_RTX, varies,
+ /*mem_canonicalized=*/false);
+}
+
/* Canonical true dependence: X is read after store in MEM takes place.
Variant of true_dependence which assumes MEM has already been
canonicalized (hence we no longer do that here).
- The mem_addr argument has been added, since true_dependence computed
- this value prior to canonicalizing.
- If x_addr is non-NULL, it is used in preference of XEXP (x, 0). */
+ The mem_addr argument has been added, since true_dependence_1 computed
+ this value prior to canonicalizing. */
int
canon_true_dependence (const_rtx mem, enum machine_mode mem_mode, rtx mem_addr,
const_rtx x, rtx x_addr, bool (*varies) (const_rtx, bool))
{
- if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
- return 1;
-
- /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
- This is used in epilogue deallocation functions. */
- if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
- return 1;
- if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
- return 1;
- if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
- || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
- return 1;
-
- if (DIFFERENT_ALIAS_SETS_P (x, mem))
- return 0;
-
- /* Read-only memory is by definition never modified, and therefore can't
- conflict with anything. We don't expect to find read-only set on MEM,
- but stupid user tricks can produce them, so don't die. */
- if (MEM_READONLY_P (x))
- return 0;
-
- if (nonoverlapping_memrefs_p (x, mem))
- return 0;
-
- if (! x_addr)
- x_addr = get_addr (XEXP (x, 0));
-
- if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), mem_mode))
- return 0;
-
- x_addr = canon_rtx (x_addr);
- if (! memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
- SIZE_FOR_MODE (x), x_addr, 0))
- return 0;
-
- if (aliases_everything_p (x))
- return 1;
-
- /* We cannot use aliases_everything_p to test MEM, since we must look
- at MEM_MODE, rather than GET_MODE (MEM). */
- if (mem_mode == QImode || GET_CODE (mem_addr) == AND)
- return 1;
-
- /* In true_dependence we also allow BLKmode to alias anything. Why
- don't we do this in anti_dependence and output_dependence? */
- if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
- return 1;
-
- if (fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr, varies))
- return 0;
-
- return rtx_refs_may_alias_p (x, mem, true);
+ return true_dependence_1 (mem, mem_mode, mem_addr,
+ x, x_addr, varies,
+ /*mem_canonicalized=*/true);
}
/* Returns nonzero if a write to X might alias a previous read from
rtx x_addr, mem_addr;
const_rtx fixed_scalar;
rtx base;
+ int ret;
if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
return 1;
if (!writep && MEM_READONLY_P (mem))
return 0;
- if (nonoverlapping_memrefs_p (x, mem))
- return 0;
+ /* If we have MEMs referring to different address spaces (which can
+ potentially overlap), we cannot easily tell from the addresses
+ whether the references overlap. */
+ if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
+ return 1;
- x_addr = get_addr (XEXP (x, 0));
- mem_addr = get_addr (XEXP (mem, 0));
+ x_addr = XEXP (x, 0);
+ mem_addr = XEXP (mem, 0);
+ if (!((GET_CODE (x_addr) == VALUE
+ && GET_CODE (mem_addr) != VALUE
+ && reg_mentioned_p (x_addr, mem_addr))
+ || (GET_CODE (x_addr) != VALUE
+ && GET_CODE (mem_addr) == VALUE
+ && reg_mentioned_p (mem_addr, x_addr))))
+ {
+ x_addr = get_addr (x_addr);
+ mem_addr = get_addr (mem_addr);
+ }
if (! writep)
{
x_addr = canon_rtx (x_addr);
mem_addr = canon_rtx (mem_addr);
- if (!memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
- SIZE_FOR_MODE (x), x_addr, 0))
+ if ((ret = memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
+ SIZE_FOR_MODE (x), x_addr, 0)) != -1)
+ return ret;
+
+ if (nonoverlapping_memrefs_p (x, mem, false))
return 0;
fixed_scalar
}
\f
+
+/* Check whether X may be aliased with MEM.  Don't do offset-based
+   memory disambiguation & TBAA.  This conservative variant is meant
+   for contexts such as loop-invariant motion where offset-based
+   disambiguation and type-based alias analysis are not valid.
+   Returns 1 if X may alias MEM, 0 if they provably do not alias.  */
+int
+may_alias_p (const_rtx mem, const_rtx x)
+{
+ rtx x_addr, mem_addr;
+
+ if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
+ return 1;
+
+ /* ??? In true_dependence we also allow BLKmode to alias anything. */
+ if (GET_MODE (mem) == BLKmode || GET_MODE (x) == BLKmode)
+ return 1;
+
+ if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
+ || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
+ return 1;
+
+ /* Read-only memory is by definition never modified, and therefore can't
+ conflict with anything. We don't expect to find read-only set on MEM,
+ but stupid user tricks can produce them, so don't die. */
+ if (MEM_READONLY_P (x))
+ return 0;
+
+ /* If we have MEMs referring to different address spaces (which can
+ potentially overlap), we cannot easily tell from the addresses
+ whether the references overlap. */
+ if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
+ return 1;
+
+ /* Avoid calling get_addr when one address is a VALUE mentioned in
+ the other, to not prematurely expand it. */
+ x_addr = XEXP (x, 0);
+ mem_addr = XEXP (mem, 0);
+ if (!((GET_CODE (x_addr) == VALUE
+ && GET_CODE (mem_addr) != VALUE
+ && reg_mentioned_p (x_addr, mem_addr))
+ || (GET_CODE (x_addr) != VALUE
+ && GET_CODE (mem_addr) == VALUE
+ && reg_mentioned_p (mem_addr, x_addr))))
+ {
+ x_addr = get_addr (x_addr);
+ mem_addr = get_addr (mem_addr);
+ }
+
+ if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), GET_MODE (mem_addr)))
+ return 0;
+
+ x_addr = canon_rtx (x_addr);
+ mem_addr = canon_rtx (mem_addr);
+
+ /* loop_invariant = true: skip offset-based disambiguation.  */
+ if (nonoverlapping_memrefs_p (mem, x, true))
+ return 0;
+
+ if (aliases_everything_p (x))
+ return 1;
+
+ /* We cannot use aliases_everything_p to test MEM, since we must look
+ at MEM_ADDR, rather than XEXP (mem, 0). */
+ if (GET_MODE (mem) == QImode || GET_CODE (mem_addr) == AND)
+ return 1;
+
+ if (fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr,
+ rtx_addr_varies_p))
+ return 0;
+
+ /* TBAA is not valid for loop invariants, so pass tbaa_p = false.  */
+ return rtx_refs_may_alias_p (x, mem, false);
+}
+
+
void
init_alias_target (void)
{
= gen_rtx_ADDRESS (Pmode, arg_pointer_rtx);
static_reg_base_value[FRAME_POINTER_REGNUM]
= gen_rtx_ADDRESS (Pmode, frame_pointer_rtx);
-#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+#if !HARD_FRAME_POINTER_IS_FRAME_POINTER
static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
= gen_rtx_ADDRESS (Pmode, hard_frame_pointer_rtx);
#endif
timevar_push (TV_ALIAS_ANALYSIS);
reg_known_value_size = maxreg - FIRST_PSEUDO_REGISTER;
- reg_known_value = GGC_CNEWVEC (rtx, reg_known_value_size);
+ reg_known_value = ggc_alloc_cleared_vec_rtx (reg_known_value_size);
reg_known_equiv_p = XCNEWVEC (bool, reg_known_value_size);
/* If we have memory allocated from the previous run, use it. */