/* Alias analysis for trees.
- Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
Contributed by Diego Novillo <dnovillo@redhat.com>
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
+#include "target.h"
#include "basic-block.h"
#include "timevar.h"
#include "ggc.h"
#include "langhooks.h"
#include "flags.h"
-#include "toplev.h"
#include "function.h"
#include "tree-pretty-print.h"
#include "tree-dump.h"
#include "tree-pass.h"
#include "convert.h"
#include "params.h"
-#include "ipa-type-escape.h"
#include "vec.h"
#include "bitmap.h"
#include "vecprim.h"
{
struct ptr_info_def *pi;
- gcc_assert ((TREE_CODE (ptr) == SSA_NAME
- || TREE_CODE (ptr) == ADDR_EXPR
- || TREE_CODE (ptr) == INTEGER_CST)
- && (TREE_CODE (decl) == VAR_DECL
- || TREE_CODE (decl) == PARM_DECL
- || TREE_CODE (decl) == RESULT_DECL));
+ /* Conversions are irrelevant for points-to information, and
+ data-dependence analysis can feed us those. */
+ STRIP_NOPS (ptr);
+
+ /* Anything we do not explicitly handle aliases. */
+ if ((TREE_CODE (ptr) != SSA_NAME
+ && TREE_CODE (ptr) != ADDR_EXPR
+ && TREE_CODE (ptr) != POINTER_PLUS_EXPR)
+ || !POINTER_TYPE_P (TREE_TYPE (ptr))
+ || (TREE_CODE (decl) != VAR_DECL
+ && TREE_CODE (decl) != PARM_DECL
+ && TREE_CODE (decl) != RESULT_DECL))
+ return true;
- /* Non-aliased variables can not be pointed to. */
- if (!may_be_aliased (decl))
- return false;
+ /* Disregard pointer offsetting. */
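+ /* The points-to set of a POINTER_PLUS_EXPR is that of its pointer
+ operand, so we can simply strip the offsets here. */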
+ if (TREE_CODE (ptr) == POINTER_PLUS_EXPR)
+ {
+ do
+ {
+ ptr = TREE_OPERAND (ptr, 0);
+ }
+ while (TREE_CODE (ptr) == POINTER_PLUS_EXPR);
+ return ptr_deref_may_alias_decl_p (ptr, decl);
+ }
/* ADDR_EXPR pointers either just offset another pointer or directly
specify the pointed-to set. */
{
tree base = get_base_address (TREE_OPERAND (ptr, 0));
if (base
- && (INDIRECT_REF_P (base)
- || TREE_CODE (base) == MEM_REF))
+ && (TREE_CODE (base) == MEM_REF
+ || TREE_CODE (base) == TARGET_MEM_REF))
ptr = TREE_OPERAND (base, 0);
else if (base
- && SSA_VAR_P (base))
+ && DECL_P (base))
return base == decl;
else if (base
&& CONSTANT_CLASS_P (base))
return true;
}
- /* We can end up with dereferencing constant pointers.
- Just bail out in this case. */
- if (TREE_CODE (ptr) == INTEGER_CST)
- return true;
+ /* Non-aliased variables cannot be pointed to. */
+ if (!may_be_aliased (decl))
+ return false;
/* If we do not have useful points-to information for this pointer
we cannot disambiguate anything else. */
if (!pi)
return true;
- /* If the decl can be used as a restrict tag and we have a restrict
- pointer and that pointers points-to set doesn't contain this decl
- then they can't alias. */
- if (DECL_RESTRICTED_P (decl)
- && TYPE_RESTRICT (TREE_TYPE (ptr))
- && pi->pt.vars_contains_restrict)
- return bitmap_bit_p (pi->pt.vars, DECL_PT_UID (decl));
-
return pt_solution_includes (&pi->pt, decl);
}
The caller is responsible for applying TBAA to see if accesses
through PTR1 and PTR2 may conflict at all. */
-static bool
+bool
ptr_derefs_may_alias_p (tree ptr1, tree ptr2)
{
struct ptr_info_def *pi1, *pi2;
- gcc_assert ((TREE_CODE (ptr1) == SSA_NAME
- || TREE_CODE (ptr1) == ADDR_EXPR
- || TREE_CODE (ptr1) == INTEGER_CST)
- && (TREE_CODE (ptr2) == SSA_NAME
- || TREE_CODE (ptr2) == ADDR_EXPR
- || TREE_CODE (ptr2) == INTEGER_CST));
+ /* Conversions are irrelevant for points-to information, and
+ data-dependence analysis can feed us those. */
+ STRIP_NOPS (ptr1);
+ STRIP_NOPS (ptr2);
+
+ /* Anything we do not explicitly handle aliases. */
+ if ((TREE_CODE (ptr1) != SSA_NAME
+ && TREE_CODE (ptr1) != ADDR_EXPR
+ && TREE_CODE (ptr1) != POINTER_PLUS_EXPR)
+ || (TREE_CODE (ptr2) != SSA_NAME
+ && TREE_CODE (ptr2) != ADDR_EXPR
+ && TREE_CODE (ptr2) != POINTER_PLUS_EXPR)
+ || !POINTER_TYPE_P (TREE_TYPE (ptr1))
+ || !POINTER_TYPE_P (TREE_TYPE (ptr2)))
+ return true;
+
+ /* Disregard pointer offsetting. */
+ if (TREE_CODE (ptr1) == POINTER_PLUS_EXPR)
+ {
+ do
+ {
+ ptr1 = TREE_OPERAND (ptr1, 0);
+ }
+ while (TREE_CODE (ptr1) == POINTER_PLUS_EXPR);
+ return ptr_derefs_may_alias_p (ptr1, ptr2);
+ }
+ if (TREE_CODE (ptr2) == POINTER_PLUS_EXPR)
+ {
+ do
+ {
+ ptr2 = TREE_OPERAND (ptr2, 0);
+ }
+ while (TREE_CODE (ptr2) == POINTER_PLUS_EXPR);
+ return ptr_derefs_may_alias_p (ptr1, ptr2);
+ }
/* ADDR_EXPR pointers either just offset another pointer or directly
specify the pointed-to set. */
{
tree base = get_base_address (TREE_OPERAND (ptr1, 0));
if (base
- && (INDIRECT_REF_P (base)
- || TREE_CODE (base) == MEM_REF))
+ && (TREE_CODE (base) == MEM_REF
+ || TREE_CODE (base) == TARGET_MEM_REF))
ptr1 = TREE_OPERAND (base, 0);
else if (base
- && SSA_VAR_P (base))
+ && DECL_P (base))
return ptr_deref_may_alias_decl_p (ptr2, base);
else
return true;
{
tree base = get_base_address (TREE_OPERAND (ptr2, 0));
if (base
- && (INDIRECT_REF_P (base)
- || TREE_CODE (base) == MEM_REF))
+ && (TREE_CODE (base) == MEM_REF
+ || TREE_CODE (base) == TARGET_MEM_REF))
ptr2 = TREE_OPERAND (base, 0);
else if (base
- && SSA_VAR_P (base))
+ && DECL_P (base))
return ptr_deref_may_alias_decl_p (ptr1, base);
else
return true;
}
- /* We can end up with dereferencing constant pointers.
- Just bail out in this case. */
- if (TREE_CODE (ptr1) == INTEGER_CST
- || TREE_CODE (ptr2) == INTEGER_CST)
- return true;
-
/* We may end up with two empty points-to solutions for two identical pointers.
 In this case we still want to say both pointers alias, so shortcut
 that here. */
if (!pi1 || !pi2)
return true;
- /* If both pointers are restrict-qualified try to disambiguate
- with restrict information. */
- if (TYPE_RESTRICT (TREE_TYPE (ptr1))
- && TYPE_RESTRICT (TREE_TYPE (ptr2))
- && !pt_solutions_same_restrict_base (&pi1->pt, &pi2->pt))
- return false;
-
/* ??? This does not use TBAA to prune decls from the intersection
that not both pointers may access. */
return pt_solutions_intersect (&pi1->pt, &pi2->pt);
{
tree base = ao_ref_base (ref);
- if (INDIRECT_REF_P (base)
- || TREE_CODE (base) == MEM_REF)
+ if (TREE_CODE (base) == MEM_REF
+ || TREE_CODE (base) == TARGET_MEM_REF)
return ptr_derefs_may_alias_p (ptr, TREE_OPERAND (base, 0));
- else if (SSA_VAR_P (base))
+ else if (DECL_P (base))
return ptr_deref_may_alias_decl_p (ptr, base);
return true;
fprintf (file, "Aliased symbols\n\n");
- FOR_EACH_REFERENCED_VAR (var, rvi)
+ FOR_EACH_REFERENCED_VAR (cfun, var, rvi)
{
if (may_be_aliased (var))
dump_variable (file, var);
dump_decl_set (file, pt->vars);
if (pt->vars_contains_global)
fprintf (file, " (includes global vars)");
- if (pt->vars_contains_restrict)
- fprintf (file, " (includes restrict tags)");
}
}
r->max_size = -1;
r->ref_alias_set = -1;
r->base_alias_set = -1;
+ r->volatile_p = ref ? TREE_THIS_VOLATILE (ref) : false;
}
/* Returns the base object of the memory reference *REF. */
ref->max_size = ref->size = -1;
ref->ref_alias_set = 0;
ref->base_alias_set = 0;
+ ref->volatile_p = false;
}
/* Return 1 if TYPE1 and TYPE2 are to be considered equivalent for the
are the respective alias sets. */
static bool
-aliasing_component_refs_p (tree ref1, tree type1,
+aliasing_component_refs_p (tree ref1,
alias_set_type ref1_alias_set,
alias_set_type base1_alias_set,
HOST_WIDE_INT offset1, HOST_WIDE_INT max_size1,
- tree ref2, tree type2,
+ tree ref2,
alias_set_type ref2_alias_set,
alias_set_type base2_alias_set,
HOST_WIDE_INT offset2, HOST_WIDE_INT max_size2,
struct A { int i; int j; } *q;
struct B { struct A a; int k; } *p;
disambiguating q->i and p->a.j. */
+ tree base1, base2;
+ tree type1, type2;
tree *refp;
int same_p;
+ /* Choose bases and base types to search for. */
+ base1 = ref1;
+ while (handled_component_p (base1))
+ base1 = TREE_OPERAND (base1, 0);
+ type1 = TREE_TYPE (base1);
+ base2 = ref2;
+ while (handled_component_p (base2))
+ base2 = TREE_OPERAND (base2, 0);
+ type2 = TREE_TYPE (base2);
+
/* Now search for type1 in the access path of ref2. This
would be a common base for doing offset based disambiguation on. */
refp = &ref2;
HOST_WIDE_INT offadj, sztmp, msztmp;
get_ref_base_and_extent (*refp, &offadj, &sztmp, &msztmp);
offset2 -= offadj;
+ get_ref_base_and_extent (base1, &offadj, &sztmp, &msztmp);
+ offset1 -= offadj;
return ranges_overlap_p (offset1, max_size1, offset2, max_size2);
}
/* If we didn't find a common base, try the other way around. */
HOST_WIDE_INT offadj, sztmp, msztmp;
get_ref_base_and_extent (*refp, &offadj, &sztmp, &msztmp);
offset1 -= offadj;
+ get_ref_base_and_extent (base2, &offadj, &sztmp, &msztmp);
+ offset2 -= offadj;
return ranges_overlap_p (offset1, max_size1, offset2, max_size2);
}
tree base2,
HOST_WIDE_INT offset2, HOST_WIDE_INT max_size2)
{
- gcc_assert (SSA_VAR_P (base1) && SSA_VAR_P (base2));
+ gcc_checking_assert (DECL_P (base1) && DECL_P (base2));
/* If both references are based on different variables, they cannot alias. */
if (base1 != base2)
alias_set_type base2_alias_set, bool tbaa_p)
{
tree ptr1;
- tree ptrtype1;
+ tree ptrtype1, dbase2;
HOST_WIDE_INT offset1p = offset1, offset2p = offset2;
+ HOST_WIDE_INT doffset1, doffset2;
+ double_int moff;
- if (TREE_CODE (base1) == TARGET_MEM_REF)
- {
- if (TMR_SYMBOL (base1))
- ptr1 = TMR_SYMBOL (base1);
- else if (TMR_BASE (base1))
- {
- if (!POINTER_TYPE_P (TREE_TYPE (TMR_BASE (base1))))
- return true;
- ptr1 = TMR_BASE (base1);
- }
- else
- return true;
- }
- else
- ptr1 = TREE_OPERAND (base1, 0);
+ gcc_checking_assert ((TREE_CODE (base1) == MEM_REF
+ || TREE_CODE (base1) == TARGET_MEM_REF)
+ && DECL_P (base2));
+
+ ptr1 = TREE_OPERAND (base1, 0);
/* The offset embedded in MEM_REFs can be negative. Bias them
so that the resulting offset adjustment is positive. */
- if (TREE_CODE (base1) == MEM_REF
- || TREE_CODE (base1) == TARGET_MEM_REF)
- {
- double_int moff = mem_ref_offset (base1);
- moff = double_int_lshift (moff,
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true);
- if (double_int_negative_p (moff))
- offset2p += double_int_neg (moff).low;
- else
- offset1p += moff.low;
- }
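+ /* mem_ref_offset gives the offset in bytes; shift it left by
+ log2 (BITS_PER_UNIT) to convert it to bits, matching offset1p
+ and offset2p which are bit offsets. */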
+ moff = mem_ref_offset (base1);
+ moff = double_int_lshift (moff,
+ BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT, true);
+ if (double_int_negative_p (moff))
+ offset2p += double_int_neg (moff).low;
+ else
+ offset1p += moff.low;
/* If only one reference is based on a variable, they cannot alias if
the pointer access is beyond the extent of the variable access.
(the pointer base cannot validly point to an offset less than zero
of the variable).
- They also cannot alias if the pointer may not point to the decl. */
- if ((TREE_CODE (base1) != TARGET_MEM_REF || !TMR_INDEX (base1))
+ ??? IVOPTs creates bases that do not honor this restriction,
+ so do not apply this optimization for TARGET_MEM_REFs. */
+ if (TREE_CODE (base1) != TARGET_MEM_REF
&& !ranges_overlap_p (MAX (0, offset1p), -1, offset2p, max_size2))
return false;
+ /* They also cannot alias if the pointer may not point to the decl. */
if (!ptr_deref_may_alias_decl_p (ptr1, base2))
return false;
if (!flag_strict_aliasing || !tbaa_p)
return true;
- if (TREE_CODE (base1) == MEM_REF)
- ptrtype1 = TREE_TYPE (TREE_OPERAND (base1, 1));
- else if (TREE_CODE (base1) == TARGET_MEM_REF)
- ptrtype1 = TREE_TYPE (TMR_OFFSET (base1));
- else
- ptrtype1 = TREE_TYPE (ptr1);
+ ptrtype1 = TREE_TYPE (TREE_OPERAND (base1, 1));
/* If the alias set for a pointer access is zero all bets are off. */
if (base1_alias_set == -1)
if (base2_alias_set == -1)
base2_alias_set = get_alias_set (base2);
- /* If both references are through the same type, they do not alias
- if the accesses do not overlap. This does extra disambiguation
- for mixed/pointer accesses but requires strict aliasing.
- For MEM_REFs we require that the component-ref offset we computed
- is relative to the start of the type which we ensure by
- comparing rvalue and access type and disregarding the constant
- pointer offset. */
- if ((TREE_CODE (base1) != TARGET_MEM_REF || !TMR_INDEX (base1))
- && (TREE_CODE (base1) != MEM_REF
- || same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1)
- && same_type_for_tbaa (TREE_TYPE (ptrtype1), TREE_TYPE (base2)) == 1)
- return ranges_overlap_p (offset1, max_size1, offset2, max_size2);
-
/* When we are trying to disambiguate an access with a pointer dereference
as base versus one with a decl as base we can use both the size
of the decl and its dynamic type for extra disambiguation.
&& tree_int_cst_lt (DECL_SIZE (base2), TYPE_SIZE (TREE_TYPE (ptrtype1))))
return false;
+ if (!ref2)
+ return true;
+
+ /* If the decl is accessed via a MEM_REF, reconstruct the base
+ we can use for TBAA and an appropriately adjusted offset. */
+ dbase2 = ref2;
+ while (handled_component_p (dbase2))
+ dbase2 = TREE_OPERAND (dbase2, 0);
+ doffset1 = offset1;
+ doffset2 = offset2;
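+ /* mem_ref_offset is in bytes; convert it to bits and subtract it so
+ that the overlap check below is relative to DBASE2. Negative
+ offsets are biased into the other offset, as above. */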
+ if (TREE_CODE (dbase2) == MEM_REF
+ || TREE_CODE (dbase2) == TARGET_MEM_REF)
+ {
+ double_int moff = mem_ref_offset (dbase2);
+ moff = double_int_lshift (moff,
+ BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT, true);
+ if (double_int_negative_p (moff))
+ doffset1 -= double_int_neg (moff).low;
+ else
+ doffset2 -= moff.low;
+ }
+
+ /* If either reference is view-converted, give up now. */
+ if (same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) != 1
+ || same_type_for_tbaa (TREE_TYPE (dbase2),
+ TREE_TYPE (reference_alias_ptr_type (dbase2))) != 1)
+ return true;
+
+ /* If both references are through the same type, they do not alias
+ if the accesses do not overlap. This does extra disambiguation
+ for mixed/pointer accesses but requires strict aliasing.
+ For MEM_REFs we require that the component-ref offset we computed
+ is relative to the start of the type which we ensure by
+ comparing rvalue and access type and disregarding the constant
+ pointer offset. */
+ if ((TREE_CODE (base1) != TARGET_MEM_REF
+ || (!TMR_INDEX (base1) && !TMR_INDEX2 (base1)))
+ && same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (dbase2)) == 1)
+ return ranges_overlap_p (doffset1, max_size1, doffset2, max_size2);
+
/* Do access-path based disambiguation. */
if (ref1 && ref2
- && handled_component_p (ref1)
- && handled_component_p (ref2)
- && TREE_CODE (base1) != TARGET_MEM_REF
- && (TREE_CODE (base1) != MEM_REF
- || same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1))
- return aliasing_component_refs_p (ref1, TREE_TYPE (ptrtype1),
+ && (handled_component_p (ref1) || handled_component_p (ref2)))
+ return aliasing_component_refs_p (ref1,
ref1_alias_set, base1_alias_set,
offset1, max_size1,
- ref2, TREE_TYPE (base2),
+ ref2,
ref2_alias_set, base2_alias_set,
offset2, max_size2, true);
tree ptr2;
tree ptrtype1, ptrtype2;
- if (TREE_CODE (base1) == TARGET_MEM_REF)
- {
- if (TMR_SYMBOL (base1))
- ptr1 = TMR_SYMBOL (base1);
- else if (TMR_BASE (base1))
- {
- if (!POINTER_TYPE_P (TREE_TYPE (TMR_BASE (base1))))
- return true;
- ptr1 = TMR_BASE (base1);
- }
- else
- return true;
- }
- else
- ptr1 = TREE_OPERAND (base1, 0);
+ gcc_checking_assert ((TREE_CODE (base1) == MEM_REF
+ || TREE_CODE (base1) == TARGET_MEM_REF)
+ && (TREE_CODE (base2) == MEM_REF
+ || TREE_CODE (base2) == TARGET_MEM_REF));
- if (TREE_CODE (base2) == TARGET_MEM_REF)
- {
- if (TMR_SYMBOL (base2))
- ptr2 = TMR_SYMBOL (base2);
- else if (TMR_BASE (base2))
- {
- if (!POINTER_TYPE_P (TREE_TYPE (TMR_BASE (base2))))
- return true;
- ptr2 = TMR_BASE (base2);
- }
- else
- return true;
- }
- else
- ptr2 = TREE_OPERAND (base2, 0);
+ ptr1 = TREE_OPERAND (base1, 0);
+ ptr2 = TREE_OPERAND (base2, 0);
/* If both bases are based on pointers they cannot alias if they may not
point to the same memory object or if they point to the same object
if ((!cfun || gimple_in_ssa_p (cfun))
&& operand_equal_p (ptr1, ptr2, 0)
&& (((TREE_CODE (base1) != TARGET_MEM_REF
- || !TMR_INDEX (base1))
+ || (!TMR_INDEX (base1) && !TMR_INDEX2 (base1)))
&& (TREE_CODE (base2) != TARGET_MEM_REF
- || !TMR_INDEX (base2)))
+ || (!TMR_INDEX (base2) && !TMR_INDEX2 (base2))))
|| (TREE_CODE (base1) == TARGET_MEM_REF
&& TREE_CODE (base2) == TARGET_MEM_REF
&& (TMR_STEP (base1) == TMR_STEP (base2)
|| (TMR_STEP (base1) && TMR_STEP (base2)
&& operand_equal_p (TMR_STEP (base1),
TMR_STEP (base2), 0)))
- && operand_equal_p (TMR_INDEX (base1), TMR_INDEX (base2), 0))))
+ && (TMR_INDEX (base1) == TMR_INDEX (base2)
+ || (TMR_INDEX (base1) && TMR_INDEX (base2)
+ && operand_equal_p (TMR_INDEX (base1),
+ TMR_INDEX (base2), 0)))
+ && (TMR_INDEX2 (base1) == TMR_INDEX2 (base2)
+ || (TMR_INDEX2 (base1) && TMR_INDEX2 (base2)
+ && operand_equal_p (TMR_INDEX2 (base1),
+ TMR_INDEX2 (base2), 0))))))
{
+ double_int moff;
/* The offset embedded in MEM_REFs can be negative. Bias them
so that the resulting offset adjustment is positive. */
- if (TREE_CODE (base1) == MEM_REF
- || TREE_CODE (base1) == TARGET_MEM_REF)
- {
- double_int moff = mem_ref_offset (base1);
- moff = double_int_lshift (moff,
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true);
- if (double_int_negative_p (moff))
- offset2 += double_int_neg (moff).low;
- else
- offset1 += moff.low;
- }
- if (TREE_CODE (base2) == MEM_REF
- || TREE_CODE (base2) == TARGET_MEM_REF)
- {
- double_int moff = mem_ref_offset (base2);
- moff = double_int_lshift (moff,
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true);
- if (double_int_negative_p (moff))
- offset1 += double_int_neg (moff).low;
- else
- offset2 += moff.low;
- }
+ moff = mem_ref_offset (base1);
+ moff = double_int_lshift (moff,
+ BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT, true);
+ if (double_int_negative_p (moff))
+ offset2 += double_int_neg (moff).low;
+ else
+ offset1 += moff.low;
+ moff = mem_ref_offset (base2);
+ moff = double_int_lshift (moff,
+ BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT, true);
+ if (double_int_negative_p (moff))
+ offset1 += double_int_neg (moff).low;
+ else
+ offset2 += moff.low;
return ranges_overlap_p (offset1, max_size1, offset2, max_size2);
}
if (!ptr_derefs_may_alias_p (ptr1, ptr2))
if (!flag_strict_aliasing || !tbaa_p)
return true;
- if (TREE_CODE (base1) == MEM_REF)
- ptrtype1 = TREE_TYPE (TREE_OPERAND (base1, 1));
- else if (TREE_CODE (base1) == TARGET_MEM_REF)
- ptrtype1 = TREE_TYPE (TMR_OFFSET (base1));
- else
- ptrtype1 = TREE_TYPE (ptr1);
- if (TREE_CODE (base2) == MEM_REF)
- ptrtype2 = TREE_TYPE (TREE_OPERAND (base2, 1));
- else if (TREE_CODE (base2) == TARGET_MEM_REF)
- ptrtype2 = TREE_TYPE (TMR_OFFSET (base2));
- else
- ptrtype2 = TREE_TYPE (ptr2);
+ ptrtype1 = TREE_TYPE (TREE_OPERAND (base1, 1));
+ ptrtype2 = TREE_TYPE (TREE_OPERAND (base2, 1));
/* If the alias set for a pointer access is zero all bets are off. */
if (base1_alias_set == -1)
/* If both references are through the same type, they do not alias
if the accesses do not overlap. This does extra disambiguation
for mixed/pointer accesses but requires strict aliasing. */
- if ((TREE_CODE (base1) != TARGET_MEM_REF || !TMR_INDEX (base1))
- && (TREE_CODE (base2) != TARGET_MEM_REF || !TMR_INDEX (base2))
- && (TREE_CODE (base1) != MEM_REF
- || same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1)
- && (TREE_CODE (base2) != MEM_REF
- || same_type_for_tbaa (TREE_TYPE (base2), TREE_TYPE (ptrtype2)) == 1)
+ if ((TREE_CODE (base1) != TARGET_MEM_REF
+ || (!TMR_INDEX (base1) && !TMR_INDEX2 (base1)))
+ && (TREE_CODE (base2) != TARGET_MEM_REF
+ || (!TMR_INDEX (base2) && !TMR_INDEX2 (base2)))
+ && same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1
+ && same_type_for_tbaa (TREE_TYPE (base2), TREE_TYPE (ptrtype2)) == 1
&& same_type_for_tbaa (TREE_TYPE (ptrtype1),
TREE_TYPE (ptrtype2)) == 1)
return ranges_overlap_p (offset1, max_size1, offset2, max_size2);
/* Do access-path based disambiguation. */
if (ref1 && ref2
- && handled_component_p (ref1)
- && handled_component_p (ref2)
- && TREE_CODE (base1) != TARGET_MEM_REF
- && TREE_CODE (base2) != TARGET_MEM_REF
- && (TREE_CODE (base1) != MEM_REF
- || same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1)
- && (TREE_CODE (base2) != MEM_REF
- || same_type_for_tbaa (TREE_TYPE (base2), TREE_TYPE (ptrtype2)) == 1))
- return aliasing_component_refs_p (ref1, TREE_TYPE (ptrtype1),
+ && (handled_component_p (ref1) || handled_component_p (ref2))
+ && same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1
+ && same_type_for_tbaa (TREE_TYPE (base2), TREE_TYPE (ptrtype2)) == 1)
+ return aliasing_component_refs_p (ref1,
ref1_alias_set, base1_alias_set,
offset1, max_size1,
- ref2, TREE_TYPE (ptrtype2),
+ ref2,
ref2_alias_set, base2_alias_set,
offset2, max_size2, false);
gcc_checking_assert ((!ref1->ref
|| TREE_CODE (ref1->ref) == SSA_NAME
|| DECL_P (ref1->ref)
+ || TREE_CODE (ref1->ref) == STRING_CST
|| handled_component_p (ref1->ref)
- || INDIRECT_REF_P (ref1->ref)
|| TREE_CODE (ref1->ref) == MEM_REF
|| TREE_CODE (ref1->ref) == TARGET_MEM_REF)
&& (!ref2->ref
|| TREE_CODE (ref2->ref) == SSA_NAME
|| DECL_P (ref2->ref)
+ || TREE_CODE (ref2->ref) == STRING_CST
|| handled_component_p (ref2->ref)
- || INDIRECT_REF_P (ref2->ref)
|| TREE_CODE (ref2->ref) == MEM_REF
|| TREE_CODE (ref2->ref) == TARGET_MEM_REF));
*D.1663_44 = VIEW_CONVERT_EXPR<struct DB_LSN>(__tmp$B0F64_59);
which is seen as a struct copy. */
if (TREE_CODE (base1) == SSA_NAME
- || TREE_CODE (base2) == SSA_NAME
|| TREE_CODE (base1) == CONST_DECL
+ || TREE_CODE (base1) == CONSTRUCTOR
+ || TREE_CODE (base1) == ADDR_EXPR
+ || CONSTANT_CLASS_P (base1)
+ || TREE_CODE (base2) == SSA_NAME
|| TREE_CODE (base2) == CONST_DECL
- || is_gimple_min_invariant (base1)
- || is_gimple_min_invariant (base2))
+ || TREE_CODE (base2) == CONSTRUCTOR
+ || TREE_CODE (base2) == ADDR_EXPR
+ || CONSTANT_CLASS_P (base2))
return false;
/* We can end up referring to code via function and label decls.
 As we likely do not properly track code aliases, conservatively
 bail out. */
if (TREE_CODE (base1) == FUNCTION_DECL
- || TREE_CODE (base2) == FUNCTION_DECL
|| TREE_CODE (base1) == LABEL_DECL
+ || TREE_CODE (base2) == FUNCTION_DECL
|| TREE_CODE (base2) == LABEL_DECL)
return true;
+ /* Two volatile accesses always conflict. */
+ if (ref1->volatile_p
+ && ref2->volatile_p)
+ return true;
+
/* Defer to simple offset based disambiguation if we have
references based on two decls. Do this before deferring to
TBAA to handle must-alias cases in conformance with the
GCC extension of allowing type-punning through unions. */
- var1_p = SSA_VAR_P (base1);
- var2_p = SSA_VAR_P (base2);
+ var1_p = DECL_P (base1);
+ var2_p = DECL_P (base2);
if (var1_p && var2_p)
return decl_refs_may_alias_p (base1, offset1, max_size1,
base2, offset2, max_size2);
- ind1_p = (INDIRECT_REF_P (base1)
- || (TREE_CODE (base1) == MEM_REF)
- || (TREE_CODE (base1) == TARGET_MEM_REF));
- ind2_p = (INDIRECT_REF_P (base2)
- || (TREE_CODE (base2) == MEM_REF)
- || (TREE_CODE (base2) == TARGET_MEM_REF));
+ ind1_p = (TREE_CODE (base1) == MEM_REF
+ || TREE_CODE (base1) == TARGET_MEM_REF);
+ ind2_p = (TREE_CODE (base2) == MEM_REF
+ || TREE_CODE (base2) == TARGET_MEM_REF);
/* Canonicalize the pointer-vs-decl case. */
if (ind1_p && var2_p)
ao_ref_alias_set (ref2), -1,
tbaa_p);
+ /* We really do not want to end up here, but returning true is safe. */
+#ifdef ENABLE_CHECKING
gcc_unreachable ();
+#else
+ return true;
+#endif
}
bool
if (!base)
return true;
+ /* A call that is not free of side-effects might involve volatile
+ accesses and thus conflicts with all other volatile accesses. */
+ if (ref->volatile_p)
+ return true;
+
/* If the reference is based on a decl that is not aliased the call
cannot possibly use it. */
if (DECL_P (base)
&& DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL)
switch (DECL_FUNCTION_CODE (callee))
{
- /* All the following functions clobber memory pointed to by
- their first argument. */
+ /* All the following functions read memory pointed to by
+ their second argument. strcat/strncat additionally
+ read memory pointed to by the first argument. */
+ case BUILT_IN_STRCAT:
+ case BUILT_IN_STRNCAT:
+ {
+ ao_ref dref;
+ ao_ref_init_from_ptr_and_size (&dref,
+ gimple_call_arg (call, 0),
+ NULL_TREE);
+ if (refs_may_alias_p_1 (&dref, ref, false))
+ return true;
+ }
+ /* FALLTHRU */
case BUILT_IN_STRCPY:
case BUILT_IN_STRNCPY:
case BUILT_IN_MEMCPY:
case BUILT_IN_MEMPCPY:
case BUILT_IN_STPCPY:
case BUILT_IN_STPNCPY:
- case BUILT_IN_STRCAT:
- case BUILT_IN_STRNCAT:
+ case BUILT_IN_TM_MEMCPY:
+ case BUILT_IN_TM_MEMMOVE:
{
ao_ref dref;
tree size = NULL_TREE;
size);
return refs_may_alias_p_1 (&dref, ref, false);
}
+ case BUILT_IN_STRCAT_CHK:
+ case BUILT_IN_STRNCAT_CHK:
+ {
+ ao_ref dref;
+ ao_ref_init_from_ptr_and_size (&dref,
+ gimple_call_arg (call, 0),
+ NULL_TREE);
+ if (refs_may_alias_p_1 (&dref, ref, false))
+ return true;
+ }
+ /* FALLTHRU */
+ case BUILT_IN_STRCPY_CHK:
+ case BUILT_IN_STRNCPY_CHK:
+ case BUILT_IN_MEMCPY_CHK:
+ case BUILT_IN_MEMMOVE_CHK:
+ case BUILT_IN_MEMPCPY_CHK:
+ case BUILT_IN_STPCPY_CHK:
+ case BUILT_IN_STPNCPY_CHK:
+ {
+ ao_ref dref;
+ tree size = NULL_TREE;
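+ /* When the call has four arguments, argument 2 is the length
+ and the trailing argument is the object size added by the
+ fortification machinery. */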
+ if (gimple_call_num_args (call) == 4)
+ size = gimple_call_arg (call, 2);
+ ao_ref_init_from_ptr_and_size (&dref,
+ gimple_call_arg (call, 1),
+ size);
+ return refs_may_alias_p_1 (&dref, ref, false);
+ }
case BUILT_IN_BCOPY:
{
ao_ref dref;
size);
return refs_may_alias_p_1 (&dref, ref, false);
}
+
+ /* The following functions read memory pointed to by their
+ first argument. */
+ CASE_BUILT_IN_TM_LOAD (1):
+ CASE_BUILT_IN_TM_LOAD (2):
+ CASE_BUILT_IN_TM_LOAD (4):
+ CASE_BUILT_IN_TM_LOAD (8):
+ CASE_BUILT_IN_TM_LOAD (FLOAT):
+ CASE_BUILT_IN_TM_LOAD (DOUBLE):
+ CASE_BUILT_IN_TM_LOAD (LDOUBLE):
+ CASE_BUILT_IN_TM_LOAD (M64):
+ CASE_BUILT_IN_TM_LOAD (M128):
+ CASE_BUILT_IN_TM_LOAD (M256):
+ case BUILT_IN_TM_LOG:
+ case BUILT_IN_TM_LOG_1:
+ case BUILT_IN_TM_LOG_2:
+ case BUILT_IN_TM_LOG_4:
+ case BUILT_IN_TM_LOG_8:
+ case BUILT_IN_TM_LOG_FLOAT:
+ case BUILT_IN_TM_LOG_DOUBLE:
+ case BUILT_IN_TM_LOG_LDOUBLE:
+ case BUILT_IN_TM_LOG_M64:
+ case BUILT_IN_TM_LOG_M128:
+ case BUILT_IN_TM_LOG_M256:
+ return ptr_deref_may_alias_ref_p_1 (gimple_call_arg (call, 0), ref);
+
+ /* These read memory pointed to by the first argument. */
+ case BUILT_IN_STRDUP:
+ case BUILT_IN_STRNDUP:
+ {
+ ao_ref dref;
+ tree size = NULL_TREE;
+ if (gimple_call_num_args (call) == 2)
+ size = gimple_call_arg (call, 1);
+ ao_ref_init_from_ptr_and_size (&dref,
+ gimple_call_arg (call, 0),
+ size);
+ return refs_may_alias_p_1 (&dref, ref, false);
+ }
/* The following builtins do not read from memory. */
case BUILT_IN_FREE:
case BUILT_IN_MALLOC:
case BUILT_IN_CALLOC:
+ case BUILT_IN_ALLOCA:
+ case BUILT_IN_ALLOCA_WITH_ALIGN:
+ case BUILT_IN_STACK_SAVE:
+ case BUILT_IN_STACK_RESTORE:
case BUILT_IN_MEMSET:
+ case BUILT_IN_TM_MEMSET:
+ case BUILT_IN_MEMSET_CHK:
case BUILT_IN_FREXP:
case BUILT_IN_FREXPF:
case BUILT_IN_FREXPL:
case BUILT_IN_SINCOS:
case BUILT_IN_SINCOSF:
case BUILT_IN_SINCOSL:
+ case BUILT_IN_ASSUME_ALIGNED:
+ case BUILT_IN_VA_END:
return false;
+ /* __sync_* builtins and some OpenMP builtins act as threading
+ barriers. */
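+ /* Enumerate them by re-including sync-builtins.def with
+ DEF_SYNC_BUILTIN expanding to a case label. */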
+#undef DEF_SYNC_BUILTIN
+#define DEF_SYNC_BUILTIN(ENUM, NAME, TYPE, ATTRS) case ENUM:
+#include "sync-builtins.def"
+#undef DEF_SYNC_BUILTIN
+ case BUILT_IN_GOMP_ATOMIC_START:
+ case BUILT_IN_GOMP_ATOMIC_END:
+ case BUILT_IN_GOMP_BARRIER:
+ case BUILT_IN_GOMP_TASKWAIT:
+ case BUILT_IN_GOMP_CRITICAL_START:
+ case BUILT_IN_GOMP_CRITICAL_END:
+ case BUILT_IN_GOMP_CRITICAL_NAME_START:
+ case BUILT_IN_GOMP_CRITICAL_NAME_END:
+ case BUILT_IN_GOMP_LOOP_END:
+ case BUILT_IN_GOMP_ORDERED_START:
+ case BUILT_IN_GOMP_ORDERED_END:
+ case BUILT_IN_GOMP_PARALLEL_END:
+ case BUILT_IN_GOMP_SECTIONS_END:
+ case BUILT_IN_GOMP_SINGLE_COPY_START:
+ case BUILT_IN_GOMP_SINGLE_COPY_END:
+ return true;
default:
/* Fallthru to general call handling. */;
/* Check if base is a global static variable that is not read
by the function. */
- if (TREE_CODE (base) == VAR_DECL
+ if (callee != NULL_TREE
+ && TREE_CODE (base) == VAR_DECL
&& TREE_STATIC (base))
{
+ struct cgraph_node *node = cgraph_get_node (callee);
bitmap not_read;
- if (callee != NULL_TREE
- && (not_read
- = ipa_reference_get_not_read_global (cgraph_node (callee)))
+ /* FIXME: Callee can be an OMP builtin that does not have a call graph
+ node yet. We should enforce that there are nodes for all decls in the
+ IL and remove this check instead. */
+ if (node
+ && (not_read = ipa_reference_get_not_read_global (node))
&& bitmap_bit_p (not_read, DECL_UID (base)))
goto process_args;
}
if (pt_solution_includes (gimple_call_use_set (call), base))
return true;
}
- else if ((INDIRECT_REF_P (base)
- || TREE_CODE (base) == MEM_REF)
+ else if ((TREE_CODE (base) == MEM_REF
+ || TREE_CODE (base) == TARGET_MEM_REF)
&& TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
{
struct ptr_info_def *pi = SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0));
}
else if (is_gimple_call (stmt))
return ref_maybe_used_by_call_p (stmt, ref);
+ else if (gimple_code (stmt) == GIMPLE_RETURN)
+ {
+ tree retval = gimple_return_retval (stmt);
+ tree base;
+ if (retval
+ && TREE_CODE (retval) != SSA_NAME
+ && !is_gimple_min_invariant (retval)
+ && refs_may_alias_p (retval, ref))
+ return true;
+ /* If ref escapes the function then the return acts as a use. */
+ base = get_base_address (ref);
+ if (!base)
+ ;
+ else if (DECL_P (base))
+ return is_global_var (base);
+ else if (TREE_CODE (base) == MEM_REF
+ || TREE_CODE (base) == TARGET_MEM_REF)
+ return ptr_deref_may_alias_global_p (TREE_OPERAND (base, 0));
+ return false;
+ }
return true;
}
|| CONSTANT_CLASS_P (base))
return false;
+ /* A call that is not free of side-effects might involve volatile
+ accesses and thus conflicts with all other volatile accesses. */
+ if (ref->volatile_p)
+ return true;
+
/* If the reference is based on a decl that is not aliased the call
cannot possibly clobber it. */
if (DECL_P (base)
case BUILT_IN_STRCAT:
case BUILT_IN_STRNCAT:
case BUILT_IN_MEMSET:
+ case BUILT_IN_TM_MEMSET:
+ CASE_BUILT_IN_TM_STORE (1):
+ CASE_BUILT_IN_TM_STORE (2):
+ CASE_BUILT_IN_TM_STORE (4):
+ CASE_BUILT_IN_TM_STORE (8):
+ CASE_BUILT_IN_TM_STORE (FLOAT):
+ CASE_BUILT_IN_TM_STORE (DOUBLE):
+ CASE_BUILT_IN_TM_STORE (LDOUBLE):
+ CASE_BUILT_IN_TM_STORE (M64):
+ CASE_BUILT_IN_TM_STORE (M128):
+ CASE_BUILT_IN_TM_STORE (M256):
+ case BUILT_IN_TM_MEMCPY:
+ case BUILT_IN_TM_MEMMOVE:
{
ao_ref dref;
tree size = NULL_TREE;
- if (gimple_call_num_args (call) == 3)
+ /* Don't pass in size for strncat: the store may cover up to
+ strlen (dest) + n + 1 bytes from dest (that is, n + 1 bytes
+ starting at dest + strlen (dest)), and strlen (dest) isn't
+ known here. */
+ if (gimple_call_num_args (call) == 3
+ && DECL_FUNCTION_CODE (callee) != BUILT_IN_STRNCAT)
+ size = gimple_call_arg (call, 2);
+ ao_ref_init_from_ptr_and_size (&dref,
+ gimple_call_arg (call, 0),
+ size);
+ return refs_may_alias_p_1 (&dref, ref, false);
+ }
+ case BUILT_IN_STRCPY_CHK:
+ case BUILT_IN_STRNCPY_CHK:
+ case BUILT_IN_MEMCPY_CHK:
+ case BUILT_IN_MEMMOVE_CHK:
+ case BUILT_IN_MEMPCPY_CHK:
+ case BUILT_IN_STPCPY_CHK:
+ case BUILT_IN_STPNCPY_CHK:
+ case BUILT_IN_STRCAT_CHK:
+ case BUILT_IN_STRNCAT_CHK:
+ case BUILT_IN_MEMSET_CHK:
+ {
+ ao_ref dref;
+ tree size = NULL_TREE;
+ /* Don't pass in size for __strncat_chk: the store may cover up to
+ strlen (dest) + n + 1 bytes from dest (that is, n + 1 bytes
+ starting at dest + strlen (dest)), and strlen (dest) isn't
+ known here. */
+ if (gimple_call_num_args (call) == 4
+ && DECL_FUNCTION_CODE (callee) != BUILT_IN_STRNCAT_CHK)
size = gimple_call_arg (call, 2);
ao_ref_init_from_ptr_and_size (&dref,
gimple_call_arg (call, 0),
being the definition point for the pointer. */
case BUILT_IN_MALLOC:
case BUILT_IN_CALLOC:
- /* Unix98 specifies that errno is set on allocation failure.
- Until we properly can track the errno location assume it
- is not a local decl but external or anonymous storage in
- a different translation unit. Also assume it is of
- type int as required by the standard. */
+ case BUILT_IN_STRDUP:
+ case BUILT_IN_STRNDUP:
+ /* Unix98 specifies that errno is set on allocation failure. */
if (flag_errno_math
- && TREE_TYPE (base) == integer_type_node)
- {
- struct ptr_info_def *pi;
- if (DECL_P (base)
- && !TREE_STATIC (base))
- return true;
- else if ((INDIRECT_REF_P (base)
- || TREE_CODE (base) == MEM_REF)
- && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
- && (pi = SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0))))
- return pi->pt.anything || pi->pt.nonlocal;
- }
+ && targetm.ref_may_alias_errno (ref))
+ return true;
+ return false;
+ case BUILT_IN_STACK_SAVE:
+ case BUILT_IN_ALLOCA:
+ case BUILT_IN_ALLOCA_WITH_ALIGN:
+ case BUILT_IN_ASSUME_ALIGNED:
return false;
/* Freeing memory kills the pointed-to memory. More importantly
the call has to serve as a barrier for moving loads and stores
across it. */
case BUILT_IN_FREE:
+ case BUILT_IN_VA_END:
{
tree ptr = gimple_call_arg (call, 0);
return ptr_deref_may_alias_ref_p_1 (ptr, ref);
return (ptr_deref_may_alias_ref_p_1 (sin, ref)
|| ptr_deref_may_alias_ref_p_1 (cos, ref));
}
+ /* __sync_* builtins and some OpenMP builtins act as threading
+ barriers. */
+#undef DEF_SYNC_BUILTIN
+#define DEF_SYNC_BUILTIN(ENUM, NAME, TYPE, ATTRS) case ENUM:
+#include "sync-builtins.def"
+#undef DEF_SYNC_BUILTIN
+ case BUILT_IN_GOMP_ATOMIC_START:
+ case BUILT_IN_GOMP_ATOMIC_END:
+ case BUILT_IN_GOMP_BARRIER:
+ case BUILT_IN_GOMP_TASKWAIT:
+ case BUILT_IN_GOMP_CRITICAL_START:
+ case BUILT_IN_GOMP_CRITICAL_END:
+ case BUILT_IN_GOMP_CRITICAL_NAME_START:
+ case BUILT_IN_GOMP_CRITICAL_NAME_END:
+ case BUILT_IN_GOMP_LOOP_END:
+ case BUILT_IN_GOMP_ORDERED_START:
+ case BUILT_IN_GOMP_ORDERED_END:
+ case BUILT_IN_GOMP_PARALLEL_END:
+ case BUILT_IN_GOMP_SECTIONS_END:
+ case BUILT_IN_GOMP_SINGLE_COPY_START:
+ case BUILT_IN_GOMP_SINGLE_COPY_END:
+ return true;
default:
/* Fallthru to general call handling. */;
}
&& TREE_CODE (base) == VAR_DECL
&& TREE_STATIC (base))
{
+ struct cgraph_node *node = cgraph_get_node (callee);
bitmap not_written;
- if ((not_written
- = ipa_reference_get_not_written_global (cgraph_node (callee)))
+ if (node
+ && (not_written = ipa_reference_get_not_written_global (node))
&& bitmap_bit_p (not_written, DECL_UID (base)))
return false;
}
/* Check if the base variable is call-clobbered. */
if (DECL_P (base))
return pt_solution_includes (gimple_call_clobber_set (call), base);
- else if ((INDIRECT_REF_P (base)
- || TREE_CODE (base) == MEM_REF)
+ else if ((TREE_CODE (base) == MEM_REF
+ || TREE_CODE (base) == TARGET_MEM_REF)
&& TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
{
struct ptr_info_def *pi = SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0));
{
tree lhs = gimple_call_lhs (stmt);
if (lhs
- && !is_gimple_reg (lhs))
+ && TREE_CODE (lhs) != SSA_NAME)
{
ao_ref r;
ao_ref_init (&r, lhs);
else if (gimple_assign_single_p (stmt))
{
tree lhs = gimple_assign_lhs (stmt);
- if (!is_gimple_reg (lhs))
+ if (TREE_CODE (lhs) != SSA_NAME)
{
ao_ref r;
- ao_ref_init (&r, gimple_assign_lhs (stmt));
+ ao_ref_init (&r, lhs);
return refs_may_alias_p_1 (ref, &r, true);
}
}
return stmt_may_clobber_ref_p_1 (stmt, &r);
}
+/* If STMT kills the memory reference REF return true, otherwise
+ return false. */
+
+static bool
+stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref)
+{
+ /* For a must-alias check we need to be able to constrain
+ the access properly. */
+ ao_ref_base (ref);
+ if (ref->max_size == -1)
+ return false;
+
+ if (gimple_has_lhs (stmt)
+ && TREE_CODE (gimple_get_lhs (stmt)) != SSA_NAME
+ /* The assignment is not necessarily carried out if it can throw
+ and we can catch it in the current function where we could inspect
+ the previous value.
+ ??? We only need to care about the RHS throwing. For aggregate
+ assignments or similar calls and non-call exceptions the LHS
+ might throw as well. */
+ && !stmt_can_throw_internal (stmt))
+ {
+ tree base, lhs = gimple_get_lhs (stmt);
+ HOST_WIDE_INT size, offset, max_size;
+ base = get_ref_base_and_extent (lhs, &offset, &size, &max_size);
+ /* We can get MEM[symbol: sZ, index: D.8862_1] here,
+ so base == ref->base does not always hold. */
+ if (base == ref->base)
+ {
+ /* For a must-alias check we need to be able to constrain
+ the access properly. */
+ if (size != -1 && size == max_size)
+ {
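+ /* The store kills REF if the range it writes,
+ [offset, offset + size), fully covers
+ [ref->offset, ref->offset + ref->max_size). */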
+ if (offset <= ref->offset
+ && offset + size >= ref->offset + ref->max_size)
+ return true;
+ }
+ }
+ }
+
+ if (is_gimple_call (stmt))
+ {
+ tree callee = gimple_call_fndecl (stmt);
+ if (callee != NULL_TREE
+ && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL)
+ switch (DECL_FUNCTION_CODE (callee))
+ {
+ case BUILT_IN_MEMCPY:
+ case BUILT_IN_MEMPCPY:
+ case BUILT_IN_MEMMOVE:
+ case BUILT_IN_MEMSET:
+ case BUILT_IN_MEMCPY_CHK:
+ case BUILT_IN_MEMPCPY_CHK:
+ case BUILT_IN_MEMMOVE_CHK:
+ case BUILT_IN_MEMSET_CHK:
+ {
+ tree dest = gimple_call_arg (stmt, 0);
+ tree len = gimple_call_arg (stmt, 2);
+ tree base = NULL_TREE;
+ HOST_WIDE_INT offset = 0;
+ if (!host_integerp (len, 0))
+ return false;
+ if (TREE_CODE (dest) == ADDR_EXPR)
+ base = get_addr_base_and_unit_offset (TREE_OPERAND (dest, 0),
+ &offset);
+ else if (TREE_CODE (dest) == SSA_NAME)
+ base = dest;
+ if (base
+ && base == ao_ref_base (ref))
+ {
+ HOST_WIDE_INT size = TREE_INT_CST_LOW (len);
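+ /* OFFSET and SIZE are in bytes while ref->offset and
+ ref->max_size are in bits; round the end of REF up to a
+ byte boundary for the containment check. */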
+ if (offset <= ref->offset / BITS_PER_UNIT
+ && (offset + size
+ >= ((ref->offset + ref->max_size + BITS_PER_UNIT - 1)
+ / BITS_PER_UNIT)))
+ return true;
+ }
+ break;
+ }
+
+ case BUILT_IN_VA_END:
+ {
+ tree ptr = gimple_call_arg (stmt, 0);
+ if (TREE_CODE (ptr) == ADDR_EXPR)
+ {
+ tree base = ao_ref_base (ref);
+ if (TREE_OPERAND (ptr, 0) == base)
+ return true;
+ }
+ break;
+ }
+
+ default:;
+ }
+ }
+ return false;
+}
+
+bool
+stmt_kills_ref_p (gimple stmt, tree ref)
+{
+ ao_ref r;
+ ao_ref_init (&r, ref);
+ return stmt_kills_ref_p_1 (stmt, &r);
+}
+
/* Walk the virtual use-def chain of VUSE until hitting the virtual operand
TARGET or a statement clobbering the memory reference REF in which
maybe_skip_until (gimple phi, tree target, ao_ref *ref,
tree vuse, bitmap *visited)
{
+ basic_block bb = gimple_bb (phi);
+
if (!*visited)
*visited = BITMAP_ALLOC (NULL);
else if (gimple_nop_p (def_stmt)
|| stmt_may_clobber_ref_p_1 (def_stmt, ref))
return false;
+ /* If we reach a new basic-block see if we already skipped it
+ in a previous walk that ended successfully. */
+ if (gimple_bb (def_stmt) != bb)
+ {
+ if (!bitmap_set_bit (*visited, SSA_NAME_VERSION (vuse)))
+ return true;
+ bb = gimple_bb (def_stmt);
+ }
vuse = gimple_vuse (def_stmt);
}
return true;
}
+/* For two PHI arguments ARG0 and ARG1 try to skip non-aliasing code
+ until we hit the phi argument definition that dominates the other one.
+ Return that, or NULL_TREE if there is no such definition. */
+
+static tree
+get_continuation_for_phi_1 (gimple phi, tree arg0, tree arg1,
+ ao_ref *ref, bitmap *visited)
+{
+ gimple def0 = SSA_NAME_DEF_STMT (arg0);
+ gimple def1 = SSA_NAME_DEF_STMT (arg1);
+ tree common_vuse;
+
+ if (arg0 == arg1)
+ return arg0;
+ else if (gimple_nop_p (def0)
+ || (!gimple_nop_p (def1)
+ && dominated_by_p (CDI_DOMINATORS,
+ gimple_bb (def1), gimple_bb (def0))))
+ {
+ if (maybe_skip_until (phi, arg0, ref, arg1, visited))
+ return arg0;
+ }
+ else if (gimple_nop_p (def1)
+ || dominated_by_p (CDI_DOMINATORS,
+ gimple_bb (def0), gimple_bb (def1)))
+ {
+ if (maybe_skip_until (phi, arg1, ref, arg0, visited))
+ return arg1;
+ }
+ /* Special case of a diamond:
+ MEM_1 = ...
+ goto (cond) ? L1 : L2
+ L1: store1 = ... #MEM_2 = vuse(MEM_1)
+ goto L3
+ L2: store2 = ... #MEM_3 = vuse(MEM_1)
+ L3: MEM_4 = PHI<MEM_2, MEM_3>
+ We were called with the PHI at L3, MEM_2 and MEM_3 don't
+ dominate each other, but still we can easily skip this PHI node
+ if we recognize that the vuse MEM operand is the same for both,
+ and that we can skip both statements (they don't clobber us).
+ This is still linear. Don't use maybe_skip_until, that might
+ potentially be slow. */
+ else if ((common_vuse = gimple_vuse (def0))
+ && common_vuse == gimple_vuse (def1))
+ {
+ if (!stmt_may_clobber_ref_p_1 (def0, ref)
+ && !stmt_may_clobber_ref_p_1 (def1, ref))
+ return common_vuse;
+ }
+
+ return NULL_TREE;
+}
+
+
/* Starting from a PHI node for the virtual operand of the memory reference
REF find a continuation virtual operand that allows us to continue walking
statements dominating PHI, skipping only statements that cannot possibly
if (nargs == 1)
return PHI_ARG_DEF (phi, 0);
- /* For two arguments try to skip non-aliasing code until we hit
- the phi argument definition that dominates the other one. */
- if (nargs == 2)
+ /* For two or more arguments try to pairwise skip non-aliasing code
+ until we hit the phi argument definition that dominates all the others. */
+ else if (nargs >= 2)
{
- tree arg0 = PHI_ARG_DEF (phi, 0);
- tree arg1 = PHI_ARG_DEF (phi, 1);
- gimple def0 = SSA_NAME_DEF_STMT (arg0);
- gimple def1 = SSA_NAME_DEF_STMT (arg1);
- tree common_vuse;
+ tree arg0, arg1;
+ unsigned i;
+
+ /* Find a candidate for the virtual operand whose definition
+ dominates the definitions of all the others. */
+ arg0 = PHI_ARG_DEF (phi, 0);
+ if (!SSA_NAME_IS_DEFAULT_DEF (arg0))
+ for (i = 1; i < nargs; ++i)
+ {
+ arg1 = PHI_ARG_DEF (phi, i);
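+ /* A default definition is made on function entry and thus
+ dominates every other definition. */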
+ if (SSA_NAME_IS_DEFAULT_DEF (arg1))
+ {
+ arg0 = arg1;
+ break;
+ }
+ if (dominated_by_p (CDI_DOMINATORS,
+ gimple_bb (SSA_NAME_DEF_STMT (arg0)),
+ gimple_bb (SSA_NAME_DEF_STMT (arg1))))
+ arg0 = arg1;
+ }
- if (arg0 == arg1)
- return arg0;
- else if (gimple_nop_p (def0)
- || (!gimple_nop_p (def1)
- && dominated_by_p (CDI_DOMINATORS,
- gimple_bb (def1), gimple_bb (def0))))
- {
- if (maybe_skip_until (phi, arg0, ref, arg1, visited))
- return arg0;
- }
- else if (gimple_nop_p (def1)
- || dominated_by_p (CDI_DOMINATORS,
- gimple_bb (def0), gimple_bb (def1)))
+ /* Then pairwise reduce against the found candidate. */
+ for (i = 0; i < nargs; ++i)
{
- if (maybe_skip_until (phi, arg1, ref, arg0, visited))
- return arg1;
- }
- /* Special case of a diamond:
- MEM_1 = ...
- goto (cond) ? L1 : L2
- L1: store1 = ... #MEM_2 = vuse(MEM_1)
- goto L3
- L2: store2 = ... #MEM_3 = vuse(MEM_1)
- L3: MEM_4 = PHI<MEM_2, MEM_3>
- We were called with the PHI at L3, MEM_2 and MEM_3 don't
- dominate each other, but still we can easily skip this PHI node
- if we recognize that the vuse MEM operand is the same for both,
- and that we can skip both statements (they don't clobber us).
- This is still linear. Don't use maybe_skip_until, that might
- potentially be slow. */
- else if ((common_vuse = gimple_vuse (def0))
- && common_vuse == gimple_vuse (def1))
- {
- if (!stmt_may_clobber_ref_p_1 (def0, ref)
- && !stmt_may_clobber_ref_p_1 (def1, ref))
- return common_vuse;
+ arg1 = PHI_ARG_DEF (phi, i);
+ arg0 = get_continuation_for_phi_1 (phi, arg0, arg1, ref, visited);
+ if (!arg0)
+ return NULL_TREE;
}
+
+ return arg0;
}
return NULL_TREE;