/* Alias analysis for GNU C
Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
- 2007, 2008, 2009 Free Software Foundation, Inc.
+ 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
Contributed by John Carr (jfc@mit.edu).
This file is part of GCC.
#include "timevar.h"
#include "target.h"
#include "cgraph.h"
-#include "varray.h"
#include "tree-pass.h"
#include "ipa-type-escape.h"
#include "df.h"
+#include "tree-ssa-alias.h"
+#include "pointer-set.h"
+#include "tree-flow.h"
/* The aliasing API provided here solves related but different problems:
static rtx find_base_value (rtx);
static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx);
static int insert_subset_children (splay_tree_node, void*);
-static tree find_base_decl (tree);
static alias_set_entry get_alias_set_entry (alias_set_type);
static const_rtx fixed_scalar_and_varying_struct_p (const_rtx, const_rtx, rtx, rtx,
bool (*) (const_rtx, bool));
/* The splay-tree used to store the various alias set entries. */
static GTY (()) VEC(alias_set_entry,gc) *alias_sets;
\f
+/* Build a decomposed reference object for querying the alias-oracle
+ from the MEM rtx and store it in *REF.
+ Returns false if MEM is not suitable for the alias-oracle. */
+
+static bool
+ao_ref_from_mem (ao_ref *ref, const_rtx mem)
+{
+ tree expr = MEM_EXPR (mem);
+ tree base;
+
+ /* Without a MEM_EXPR there is no tree-level reference to decompose. */
+ if (!expr)
+ return false;
+
+ ao_ref_init (ref, expr);
+
+ /* Get the base of the reference and see if we have to reject or
+ adjust it. */
+ base = ao_ref_base (ref);
+ if (base == NULL_TREE)
+ return false;
+
+ /* The tree oracle doesn't like to have these. */
+ if (TREE_CODE (base) == FUNCTION_DECL
+ || TREE_CODE (base) == LABEL_DECL)
+ return false;
+
+ /* If this is a pointer dereference of a non-SSA_NAME punt.
+ ??? We could replace it with a pointer to anything. */
+ if (INDIRECT_REF_P (base)
+ && TREE_CODE (TREE_OPERAND (base, 0)) != SSA_NAME)
+ return false;
+
+ /* If this is a reference based on a partitioned decl replace the
+ base with an INDIRECT_REF of the pointer representative we
+ created during stack slot partitioning. */
+ if (TREE_CODE (base) == VAR_DECL
+ && ! TREE_STATIC (base)
+ && cfun->gimple_df->decls_to_pointers != NULL)
+ {
+ void *namep;
+ namep = pointer_map_contains (cfun->gimple_df->decls_to_pointers, base);
+ if (namep)
+ {
+ /* Remember the original decl's alias set before rewriting the
+ base, since the INDIRECT_REF base no longer carries it. */
+ ref->base_alias_set = get_alias_set (base);
+ ref->base = build1 (INDIRECT_REF, TREE_TYPE (base), *(tree *)namep);
+ }
+ }
+
+ ref->ref_alias_set = MEM_ALIAS_SET (mem);
+
+ /* If MEM_OFFSET or MEM_SIZE are NULL we have to punt.
+ Keep points-to related information though. */
+ if (!MEM_OFFSET (mem)
+ || !MEM_SIZE (mem))
+ {
+ /* Degrade to a base-only reference of unknown extent; the oracle
+ can still use the base and alias-set information recorded above. */
+ ref->ref = NULL_TREE;
+ ref->offset = 0;
+ ref->size = -1;
+ ref->max_size = -1;
+ return true;
+ }
+
+ /* If the base decl is a parameter we can have negative MEM_OFFSET in
+ case of promoted subregs on bigendian targets. Trust the MEM_EXPR
+ here. */
+ if (INTVAL (MEM_OFFSET (mem)) < 0
+ && ((INTVAL (MEM_SIZE (mem)) + INTVAL (MEM_OFFSET (mem)))
+ * BITS_PER_UNIT) == ref->size)
+ return true;
+
+ /* MEM_OFFSET and MEM_SIZE are in bytes; ao_ref offset/size are in
+ bits, hence the BITS_PER_UNIT scaling. */
+ ref->offset += INTVAL (MEM_OFFSET (mem)) * BITS_PER_UNIT;
+ ref->size = INTVAL (MEM_SIZE (mem)) * BITS_PER_UNIT;
+
+ /* The MEM may extend into adjacent fields, so adjust max_size if
+ necessary. */
+ if (ref->max_size != -1
+ && ref->size > ref->max_size)
+ ref->max_size = ref->size;
+
+ /* If MEM_OFFSET and MEM_SIZE get us outside of the base object of
+ the MEM_EXPR punt. This happens for STRICT_ALIGNMENT targets a lot. */
+ if (MEM_EXPR (mem) != get_spill_slot_decl (false)
+ && (ref->offset < 0
+ || (DECL_P (ref->base)
+ && (!host_integerp (DECL_SIZE (ref->base), 1)
+ || (TREE_INT_CST_LOW (DECL_SIZE ((ref->base)))
+ < (unsigned HOST_WIDE_INT)(ref->offset + ref->size))))))
+ return false;
+
+ return true;
+}
+
+/* Query the alias-oracle on whether the two memory rtx X and MEM may
+ alias. If TBAA_P is set also apply TBAA. Returns true if the
+ two rtxen may alias, false otherwise. */
+
+static bool
+rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p)
+{
+ ao_ref ref1, ref2;
+
+ /* If either MEM cannot be decomposed into an ao_ref we must
+ conservatively assume the two references may alias. */
+ if (!ao_ref_from_mem (&ref1, x)
+ || !ao_ref_from_mem (&ref2, mem))
+ return true;
+
+ /* Defer the actual disambiguation to the tree-level oracle. */
+ return refs_may_alias_p_1 (&ref1, &ref2, tbaa_p);
+}
+
/* Returns a pointer to the alias set entry for ALIAS_SET, if there is
such an entry, or NULL otherwise. */
/* Otherwise, check if set1 is a subset of set2. */
ase = get_alias_set_entry (set2);
if (ase != 0
- && ((ase->has_zero_child && set1 == 0)
+ && (ase->has_zero_child
|| splay_tree_lookup (ase->children,
(splay_tree_key) set1)))
return true;
{
if (alias_sets_conflict_p (MEM_ALIAS_SET(*x), MEM_ALIAS_SET(mem)))
return 1;
-
- return -1;
+
+ return -1;
}
return 0;
}
if (for_each_rtx (pat, (rtx_function) walk_mems_2, *x))
/* Indicate that dependence was determined and stop traversal. */
return 1;
-
+
return -1;
}
return 0;
return alias_sets_must_conflict_p (set1, set2);
}
\f
-/* T is an expression with pointer type. Find the DECL on which this
- expression is based. (For example, in `a[i]' this would be `a'.)
- If there is no such DECL, or a unique decl cannot be determined,
- NULL_TREE is returned. */
-
-static tree
-find_base_decl (tree t)
-{
- tree d0, d1;
-
- if (t == 0 || t == error_mark_node || ! POINTER_TYPE_P (TREE_TYPE (t)))
- return 0;
-
- if (TREE_CODE (t) == SSA_NAME)
- t = SSA_NAME_VAR (t);
-
- /* If this is a declaration, return it. If T is based on a restrict
- qualified decl, return that decl. */
- if (DECL_P (t))
- {
- if (TREE_CODE (t) == VAR_DECL && DECL_BASED_ON_RESTRICT_P (t))
- t = DECL_GET_RESTRICT_BASE (t);
- return t;
- }
-
- /* Handle general expressions. It would be nice to deal with
- COMPONENT_REFs here. If we could tell that `a' and `b' were the
- same, then `a->f' and `b->f' are also the same. */
- switch (TREE_CODE_CLASS (TREE_CODE (t)))
- {
- case tcc_unary:
- return find_base_decl (TREE_OPERAND (t, 0));
-
- case tcc_binary:
- /* Return 0 if found in neither or both are the same. */
- d0 = find_base_decl (TREE_OPERAND (t, 0));
- d1 = find_base_decl (TREE_OPERAND (t, 1));
- if (d0 == d1)
- return d0;
- else if (d0 == 0)
- return d1;
- else if (d1 == 0)
- return d0;
- else
- return 0;
-
- default:
- return 0;
- }
-}
-
/* Return true if all nested component references handled by
get_inner_reference in T are such that we should use the alias set
provided by the object at the heart of T.
if (!flag_strict_aliasing)
return 0;
+ /* All we care about is the type. */
if (! TYPE_P (t))
- {
- tree decl = find_base_decl (t);
-
- if (decl && DECL_POINTER_ALIAS_SET_KNOWN_P (decl))
- {
- /* If we haven't computed the actual alias set, do it now. */
- if (DECL_POINTER_ALIAS_SET (decl) == -2)
- {
- tree pointed_to_type = TREE_TYPE (TREE_TYPE (decl));
-
- /* No two restricted pointers can point at the same thing.
- However, a restricted pointer can point at the same thing
- as an unrestricted pointer, if that unrestricted pointer
- is based on the restricted pointer. So, we make the
- alias set for the restricted pointer a subset of the
- alias set for the type pointed to by the type of the
- decl. */
- alias_set_type pointed_to_alias_set
- = get_alias_set (pointed_to_type);
-
- if (pointed_to_alias_set == 0)
- /* It's not legal to make a subset of alias set zero. */
- DECL_POINTER_ALIAS_SET (decl) = 0;
- else if (AGGREGATE_TYPE_P (pointed_to_type))
- /* For an aggregate, we must treat the restricted
- pointer the same as an ordinary pointer. If we
- were to make the type pointed to by the
- restricted pointer a subset of the pointed-to
- type, then we would believe that other subsets
- of the pointed-to type (such as fields of that
- type) do not conflict with the type pointed to
- by the restricted pointer. */
- DECL_POINTER_ALIAS_SET (decl)
- = pointed_to_alias_set;
- else
- {
- DECL_POINTER_ALIAS_SET (decl) = new_alias_set ();
- record_alias_subset (pointed_to_alias_set,
- DECL_POINTER_ALIAS_SET (decl));
- }
- }
-
- /* We use the alias set indicated in the declaration. */
- return DECL_POINTER_ALIAS_SET (decl);
- }
-
- /* Now all we care about is the type. */
- t = TREE_TYPE (t);
- }
+ t = TREE_TYPE (t);
/* If we have an INDIRECT_REF via a void pointer, we don't
know anything about what that might alias. Likewise if the
aren't types. */
if (! TYPE_P (t))
{
- tree inner = t;
+ tree inner;
/* Remove any nops, then give the language a chance to do
something with this tree before we look at it. */
if (set != -1)
return set;
+ /* Retrieve the original memory reference if needed. */
+ if (TREE_CODE (t) == TARGET_MEM_REF)
+ t = TMR_ORIGINAL (t);
+
/* First see if the actual object referenced is an INDIRECT_REF from a
restrict-qualified pointer or a "void *". */
+ inner = t;
while (handled_component_p (inner))
{
inner = TREE_OPERAND (inner, 0);
}
/* Variant qualifiers don't affect the alias set, so get the main
- variant. Always use the canonical type as well.
- If this is a type with a known alias set, return it. */
+ variant. */
t = TYPE_MAIN_VARIANT (t);
- if (TYPE_CANONICAL (t))
- t = TYPE_CANONICAL (t);
+
+ /* Always use the canonical type as well. If this is a type that
+ requires structural comparisons to identify compatible types
+ use alias set zero. */
+ if (TYPE_STRUCTURAL_EQUALITY_P (t))
+ {
+ /* Allow the language to specify another alias set for this
+ type. */
+ set = lang_hooks.get_alias_set (t);
+ if (set != -1)
+ return set;
+ return 0;
+ }
+ t = TYPE_CANONICAL (t);
+ /* Canonical types shouldn't form a tree nor should the canonical
+ type require structural equality checks. */
+ gcc_assert (!TYPE_STRUCTURAL_EQUALITY_P (t) && TYPE_CANONICAL (t) == t);
+
+ /* If this is a type with a known alias set, return it. */
if (TYPE_ALIAS_SET_KNOWN_P (t))
return TYPE_ALIAS_SET (t);
return 0;
case TRUNCATE:
+ /* As we do not know which address space the pointer is refering to, we can
+ handle this only if the target does not support different pointer or
+ address modes depending on the address space. */
+ if (!target_default_pointer_address_modes_p ())
+ break;
if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode))
break;
/* Fall through. */
case ZERO_EXTEND:
case SIGN_EXTEND: /* used for NT/Alpha pointers */
+ /* As we do not know which address space the pointer is refering to, we can
+ handle this only if the target does not support different pointer or
+ address modes depending on the address space. */
+ if (!target_default_pointer_address_modes_p ())
+ break;
+
{
rtx temp = find_base_value (XEXP (src, 0));
return REG_BASE_VALUE (x);
case TRUNCATE:
+ /* As we do not know which address space the pointer is refering to, we can
+ handle this only if the target does not support different pointer or
+ address modes depending on the address space. */
+ if (!target_default_pointer_address_modes_p ())
+ return 0;
if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode))
return 0;
/* Fall through. */
case ZERO_EXTEND:
case SIGN_EXTEND: /* Used for Alpha/NT pointers */
+ /* As we do not know which address space the pointer is refering to, we can
+ handle this only if the target does not support different pointer or
+ address modes depending on the address space. */
+ if (!target_default_pointer_address_modes_p ())
+ return 0;
+
{
rtx temp = find_base_term (XEXP (x, 0));
|| (GET_CODE (y_base) == ADDRESS && GET_MODE (y_base) == Pmode))
return 0;
- if (! flag_argument_noalias)
- return 1;
-
- if (flag_argument_noalias > 1)
- return 0;
-
- /* Weak noalias assertion (arguments are distinct, but may match globals). */
- return ! (GET_MODE (x_base) == VOIDmode && GET_MODE (y_base) == VOIDmode);
+ return 1;
}
/* Convert the address X into something we can use. This is done by returning
return addr;
}
-/* Return nonzero if X and Y (memory addresses) could reference the
- same location in memory. C is an offset accumulator. When
+/* Return one if X and Y (memory addresses) reference the
+ same location in memory or if the references overlap.
+ Return zero if they do not overlap, else return
+ minus one in which case they still might reference the same location.
+
+ C is an offset accumulator. When
C is nonzero, we are testing aliases between X and Y + C.
XSIZE is the size in bytes of the X reference,
similarly YSIZE is the size in bytes for Y.
align memory references, as is done on the Alpha.
Nice to notice that varying addresses cannot conflict with fp if no
- local variables had their addresses taken, but that's too hard now. */
+ local variables had their addresses taken, but that's too hard now.
+
+ ??? Contrary to the tree alias oracle this does not return
+ one for X + non-constant and Y + non-constant when X and Y are equal.
+ If that is fixed the TBAA hack for union type-punning can be removed. */
static int
memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
{
if (GET_CODE (x) == VALUE)
- x = get_addr (x);
+ {
+ if (REG_P (y))
+ {
+ struct elt_loc_list *l = NULL;
+ if (CSELIB_VAL_PTR (x))
+ for (l = CSELIB_VAL_PTR (x)->locs; l; l = l->next)
+ if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, y))
+ break;
+ if (l)
+ x = y;
+ else
+ x = get_addr (x);
+ }
+ /* Don't call get_addr if y is the same VALUE. */
+ else if (x != y)
+ x = get_addr (x);
+ }
if (GET_CODE (y) == VALUE)
- y = get_addr (y);
+ {
+ if (REG_P (x))
+ {
+ struct elt_loc_list *l = NULL;
+ if (CSELIB_VAL_PTR (y))
+ for (l = CSELIB_VAL_PTR (y)->locs; l; l = l->next)
+ if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, x))
+ break;
+ if (l)
+ y = x;
+ else
+ y = get_addr (y);
+ }
+ /* Don't call get_addr if x is the same VALUE. */
+ else if (y != x)
+ y = get_addr (y);
+ }
if (GET_CODE (x) == HIGH)
x = XEXP (x, 0);
else if (GET_CODE (x) == LO_SUM)
else if (CONST_INT_P (y1))
return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
- return 1;
+ return -1;
}
else if (CONST_INT_P (x1))
return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
if (CONST_INT_P (y1))
return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
else
- return 1;
+ return -1;
}
if (GET_CODE (x) == GET_CODE (y))
rtx x1 = canon_rtx (XEXP (x, 1));
rtx y1 = canon_rtx (XEXP (y, 1));
if (! rtx_equal_for_memref_p (x1, y1))
- return 1;
+ return -1;
x0 = canon_rtx (XEXP (x, 0));
y0 = canon_rtx (XEXP (y, 0));
if (rtx_equal_for_memref_p (x0, y0))
/* Can't properly adjust our sizes. */
if (!CONST_INT_P (x1))
- return 1;
+ return -1;
xsize /= INTVAL (x1);
ysize /= INTVAL (x1);
c /= INTVAL (x1);
|| (rtx_equal_for_memref_p (x, y)
&& ((c >= 0 && xsize > c) || (c < 0 && ysize+c > 0))));
- return 1;
+ return -1;
}
- return 1;
+
+ return -1;
}
/* Functions to compute memory dependencies.
{
const_tree fieldx, fieldy, typex, typey, orig_y;
+ if (!flag_strict_aliasing)
+ return false;
+
do
{
/* The comparison has to be done at a common type, since we don't
if (exprx == 0 || expry == 0)
return 0;
+ /* For spill-slot accesses make sure we have valid offsets. */
+ if ((exprx == get_spill_slot_decl (false)
+ && ! MEM_OFFSET (x))
+ || (expry == get_spill_slot_decl (false)
+ && ! MEM_OFFSET (y)))
+ return 0;
+
/* If both are field references, we may be able to determine something. */
if (TREE_CODE (exprx) == COMPONENT_REF
&& TREE_CODE (expry) == COMPONENT_REF
exprx = t;
}
}
- else if (INDIRECT_REF_P (exprx))
- {
- exprx = TREE_OPERAND (exprx, 0);
- if (flag_argument_noalias < 2
- || TREE_CODE (exprx) != PARM_DECL)
- return 0;
- }
moffsety = MEM_OFFSET (y);
if (TREE_CODE (expry) == COMPONENT_REF)
expry = t;
}
}
- else if (INDIRECT_REF_P (expry))
- {
- expry = TREE_OPERAND (expry, 0);
- if (flag_argument_noalias < 2
- || TREE_CODE (expry) != PARM_DECL)
- return 0;
- }
if (! DECL_P (exprx) || ! DECL_P (expry))
return 0;
+ /* With invalid code we can end up storing into the constant pool.
+ Bail out to avoid ICEing when creating RTL for this.
+ See gfortran.dg/lto/20091028-2_0.f90. */
+ if (TREE_CODE (exprx) == CONST_DECL
+ || TREE_CODE (expry) == CONST_DECL)
+ return 1;
+
rtlx = DECL_RTL (exprx);
rtly = DECL_RTL (expry);
&& ! rtx_equal_p (rtlx, rtly))
return 1;
+ /* If we have MEMs refering to different address spaces (which can
+ potentially overlap), we cannot easily tell from the addresses
+ whether the references overlap. */
+ if (MEM_P (rtlx) && MEM_P (rtly)
+ && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly))
+ return 0;
+
/* Get the base and offsets of both decls. If either is a register, we
know both are and are the same, so use that as the base. The only
we can avoid overlap is if we can deduce that they are nonoverlapping
{
rtx x_addr, mem_addr;
rtx base;
+ int ret;
if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
return 1;
|| MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
return 1;
- if (DIFFERENT_ALIAS_SETS_P (x, mem))
- return 0;
-
/* Read-only memory is by definition never modified, and therefore can't
conflict with anything. We don't expect to find read-only set on MEM,
but stupid user tricks can produce them, so don't die. */
if (MEM_READONLY_P (x))
return 0;
- if (nonoverlapping_memrefs_p (mem, x))
- return 0;
+ /* If we have MEMs refering to different address spaces (which can
+ potentially overlap), we cannot easily tell from the addresses
+ whether the references overlap. */
+ if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
+ return 1;
if (mem_mode == VOIDmode)
mem_mode = GET_MODE (mem);
- x_addr = get_addr (XEXP (x, 0));
- mem_addr = get_addr (XEXP (mem, 0));
+ x_addr = XEXP (x, 0);
+ mem_addr = XEXP (mem, 0);
+ if (!((GET_CODE (x_addr) == VALUE
+ && GET_CODE (mem_addr) != VALUE
+ && reg_mentioned_p (x_addr, mem_addr))
+ || (GET_CODE (x_addr) != VALUE
+ && GET_CODE (mem_addr) == VALUE
+ && reg_mentioned_p (mem_addr, x_addr))))
+ {
+ x_addr = get_addr (x_addr);
+ mem_addr = get_addr (mem_addr);
+ }
base = find_base_term (x_addr);
if (base && (GET_CODE (base) == LABEL_REF
x_addr = canon_rtx (x_addr);
mem_addr = canon_rtx (mem_addr);
- if (! memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
- SIZE_FOR_MODE (x), x_addr, 0))
+ if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
+ SIZE_FOR_MODE (x), x_addr, 0)) != -1)
+ return ret;
+
+ if (DIFFERENT_ALIAS_SETS_P (x, mem))
+ return 0;
+
+ if (nonoverlapping_memrefs_p (mem, x))
return 0;
if (aliases_everything_p (x))
if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
return 1;
- return ! fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr,
- varies);
+ if (fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr, varies))
+ return 0;
+
+ return rtx_refs_may_alias_p (x, mem, true);
}
/* Canonical true dependence: X is read after store in MEM takes place.
canon_true_dependence (const_rtx mem, enum machine_mode mem_mode, rtx mem_addr,
const_rtx x, rtx x_addr, bool (*varies) (const_rtx, bool))
{
+ int ret;
+
if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
return 1;
|| MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
return 1;
- if (DIFFERENT_ALIAS_SETS_P (x, mem))
- return 0;
-
/* Read-only memory is by definition never modified, and therefore can't
conflict with anything. We don't expect to find read-only set on MEM,
but stupid user tricks can produce them, so don't die. */
if (MEM_READONLY_P (x))
return 0;
- if (nonoverlapping_memrefs_p (x, mem))
- return 0;
+ /* If we have MEMs refering to different address spaces (which can
+ potentially overlap), we cannot easily tell from the addresses
+ whether the references overlap. */
+ if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
+ return 1;
if (! x_addr)
- x_addr = get_addr (XEXP (x, 0));
+ {
+ x_addr = XEXP (x, 0);
+ if (!((GET_CODE (x_addr) == VALUE
+ && GET_CODE (mem_addr) != VALUE
+ && reg_mentioned_p (x_addr, mem_addr))
+ || (GET_CODE (x_addr) != VALUE
+ && GET_CODE (mem_addr) == VALUE
+ && reg_mentioned_p (mem_addr, x_addr))))
+ x_addr = get_addr (x_addr);
+ }
if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), mem_mode))
return 0;
x_addr = canon_rtx (x_addr);
- if (! memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
- SIZE_FOR_MODE (x), x_addr, 0))
+ if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
+ SIZE_FOR_MODE (x), x_addr, 0)) != -1)
+ return ret;
+
+ if (DIFFERENT_ALIAS_SETS_P (x, mem))
+ return 0;
+
+ if (nonoverlapping_memrefs_p (x, mem))
return 0;
if (aliases_everything_p (x))
if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
return 1;
- return ! fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr,
- varies);
+ if (fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr, varies))
+ return 0;
+
+ return rtx_refs_may_alias_p (x, mem, true);
}
/* Returns nonzero if a write to X might alias a previous read from
rtx x_addr, mem_addr;
const_rtx fixed_scalar;
rtx base;
+ int ret;
if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
return 1;
if (!writep && MEM_READONLY_P (mem))
return 0;
- if (nonoverlapping_memrefs_p (x, mem))
- return 0;
+ /* If we have MEMs refering to different address spaces (which can
+ potentially overlap), we cannot easily tell from the addresses
+ whether the references overlap. */
+ if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
+ return 1;
- x_addr = get_addr (XEXP (x, 0));
- mem_addr = get_addr (XEXP (mem, 0));
+ x_addr = XEXP (x, 0);
+ mem_addr = XEXP (mem, 0);
+ if (!((GET_CODE (x_addr) == VALUE
+ && GET_CODE (mem_addr) != VALUE
+ && reg_mentioned_p (x_addr, mem_addr))
+ || (GET_CODE (x_addr) != VALUE
+ && GET_CODE (mem_addr) == VALUE
+ && reg_mentioned_p (mem_addr, x_addr))))
+ {
+ x_addr = get_addr (x_addr);
+ mem_addr = get_addr (mem_addr);
+ }
if (! writep)
{
x_addr = canon_rtx (x_addr);
mem_addr = canon_rtx (mem_addr);
- if (!memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
- SIZE_FOR_MODE (x), x_addr, 0))
+ if ((ret = memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
+ SIZE_FOR_MODE (x), x_addr, 0)) != -1)
+ return ret;
+
+ if (nonoverlapping_memrefs_p (x, mem))
return 0;
fixed_scalar
= fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr,
rtx_addr_varies_p);
- return (!(fixed_scalar == mem && !aliases_everything_p (x))
- && !(fixed_scalar == x && !aliases_everything_p (mem)));
+ if ((fixed_scalar == mem && !aliases_everything_p (x))
+ || (fixed_scalar == x && !aliases_everything_p (mem)))
+ return 0;
+
+ return rtx_refs_may_alias_p (x, mem, false);
}
/* Anti dependence: X is written after read in MEM takes place. */