diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c
index 673073ee81a..e3a8ad01162 100644
--- a/gcc/tree-ssa-alias.c
+++ b/gcc/tree-ssa-alias.c
@@ -1,5 +1,5 @@
 /* Alias analysis for trees.
-   Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010
+   Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
    Free Software Foundation, Inc.
    Contributed by Diego Novillo <dnovillo@redhat.com>

@@ -196,11 +196,11 @@ ptr_deref_may_alias_decl_p (tree ptr, tree decl)
     {
       tree base = get_base_address (TREE_OPERAND (ptr, 0));
       if (base
-          && (INDIRECT_REF_P (base)
-              || TREE_CODE (base) == MEM_REF))
+          && (TREE_CODE (base) == MEM_REF
+              || TREE_CODE (base) == TARGET_MEM_REF))
         ptr = TREE_OPERAND (base, 0);
       else if (base
-               && SSA_VAR_P (base))
+               && DECL_P (base))
         return base == decl;
       else if (base
                && CONSTANT_CLASS_P (base))
@@ -219,14 +219,6 @@ ptr_deref_may_alias_decl_p (tree ptr, tree decl)
   if (!pi)
     return true;

-  /* If the decl can be used as a restrict tag and we have a restrict
-     pointer and that pointers points-to set doesn't contain this decl
-     then they can't alias.  */
-  if (DECL_RESTRICTED_P (decl)
-      && TYPE_RESTRICT (TREE_TYPE (ptr))
-      && pi->pt.vars_contains_restrict)
-    return bitmap_bit_p (pi->pt.vars, DECL_PT_UID (decl));
-
   return pt_solution_includes (&pi->pt, decl);
 }

@@ -244,17 +236,6 @@ ptr_derefs_may_alias_p (tree ptr1, tree ptr2)
   STRIP_NOPS (ptr1);
   STRIP_NOPS (ptr2);

-  /* Anything we do not explicilty handle aliases.  */
-  if ((TREE_CODE (ptr1) != SSA_NAME
-       && TREE_CODE (ptr1) != ADDR_EXPR
-       && TREE_CODE (ptr1) != POINTER_PLUS_EXPR)
-      || (TREE_CODE (ptr2) != SSA_NAME
-          && TREE_CODE (ptr2) != ADDR_EXPR
-          && TREE_CODE (ptr2) != POINTER_PLUS_EXPR)
-      || !POINTER_TYPE_P (TREE_TYPE (ptr1))
-      || !POINTER_TYPE_P (TREE_TYPE (ptr2)))
-    return true;
-
   /* Disregard pointer offsetting.  */
   if (TREE_CODE (ptr1) == POINTER_PLUS_EXPR)
     {
@@ -281,11 +262,11 @@ ptr_derefs_may_alias_p (tree ptr1, tree ptr2)
     {
       tree base = get_base_address (TREE_OPERAND (ptr1, 0));
       if (base
-          && (INDIRECT_REF_P (base)
-              || TREE_CODE (base) == MEM_REF))
-        ptr1 = TREE_OPERAND (base, 0);
+          && (TREE_CODE (base) == MEM_REF
+              || TREE_CODE (base) == TARGET_MEM_REF))
+        return ptr_derefs_may_alias_p (TREE_OPERAND (base, 0), ptr2);
       else if (base
-               && SSA_VAR_P (base))
+               && DECL_P (base))
         return ptr_deref_may_alias_decl_p (ptr2, base);
       else
         return true;
@@ -294,16 +275,23 @@ ptr_derefs_may_alias_p (tree ptr1, tree ptr2)
     {
       tree base = get_base_address (TREE_OPERAND (ptr2, 0));
       if (base
-          && (INDIRECT_REF_P (base)
-              || TREE_CODE (base) == MEM_REF))
-        ptr2 = TREE_OPERAND (base, 0);
+          && (TREE_CODE (base) == MEM_REF
+              || TREE_CODE (base) == TARGET_MEM_REF))
+        return ptr_derefs_may_alias_p (ptr1, TREE_OPERAND (base, 0));
       else if (base
-               && SSA_VAR_P (base))
+               && DECL_P (base))
         return ptr_deref_may_alias_decl_p (ptr1, base);
       else
         return true;
     }

+  /* From here we require SSA name pointers.  Anything else aliases.  */
+  if (TREE_CODE (ptr1) != SSA_NAME
+      || TREE_CODE (ptr2) != SSA_NAME
+      || !POINTER_TYPE_P (TREE_TYPE (ptr1))
+      || !POINTER_TYPE_P (TREE_TYPE (ptr2)))
+    return true;
+
   /* We may end up with two empty points-to solutions for two same pointers.
      In this case we still want to say both pointers alias, so shortcut
      that here.  */
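
(Editorial illustration, not part of the patch.  A minimal C sketch of the
query ptr_derefs_may_alias_p answers; all identifiers are invented, and
p_1/q_2 stand for the SSA names of p and q.)

/* Points-to analysis computes p_1 -> {a} and q_2 -> {b}.  The sets do
   not intersect, so ptr_derefs_may_alias_p (p_1, q_2) returns false
   and the two stores below may be reordered or scheduled freely.  */
int a, b;

void
f (void)
{
  int *p = &a;
  int *q = &b;
  *p = 1;
  *q = 2;
}
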
@@ -317,13 +305,6 @@ ptr_derefs_may_alias_p (tree ptr1, tree ptr2)
   if (!pi1 || !pi2)
     return true;

-  /* If both pointers are restrict-qualified try to disambiguate
-     with restrict information.  */
-  if (TYPE_RESTRICT (TREE_TYPE (ptr1))
-      && TYPE_RESTRICT (TREE_TYPE (ptr2))
-      && !pt_solutions_same_restrict_base (&pi1->pt, &pi2->pt))
-    return false;
-
   /* ??? This does not use TBAA to prune decls from the intersection
      that not both pointers may access.  */
   return pt_solutions_intersect (&pi1->pt, &pi2->pt);
@@ -338,10 +319,10 @@ ptr_deref_may_alias_ref_p_1 (tree ptr, ao_ref *ref)
 {
   tree base = ao_ref_base (ref);

-  if (INDIRECT_REF_P (base)
-      || TREE_CODE (base) == MEM_REF)
+  if (TREE_CODE (base) == MEM_REF
+      || TREE_CODE (base) == TARGET_MEM_REF)
     return ptr_derefs_may_alias_p (ptr, TREE_OPERAND (base, 0));
-  else if (SSA_VAR_P (base))
+  else if (DECL_P (base))
     return ptr_deref_may_alias_decl_p (ptr, base);

   return true;
@@ -429,8 +410,6 @@ dump_points_to_solution (FILE *file, struct pt_solution *pt)
       dump_decl_set (file, pt->vars);
       if (pt->vars_contains_global)
         fprintf (file, " (includes global vars)");
-      if (pt->vars_contains_restrict)
-        fprintf (file, " (includes restrict tags)");
     }
 }

@@ -473,6 +452,7 @@ ao_ref_init (ao_ref *r, tree ref)
   r->max_size = -1;
   r->ref_alias_set = -1;
   r->base_alias_set = -1;
+  r->volatile_p = ref ? TREE_THIS_VOLATILE (ref) : false;
 }

 /* Returns the base object of the memory reference *REF.  */
@@ -542,6 +522,7 @@ ao_ref_init_from_ptr_and_size (ao_ref *ref, tree ptr, tree size)
   ref->max_size = ref->size = -1;
   ref->ref_alias_set = 0;
   ref->base_alias_set = 0;
+  ref->volatile_p = false;
 }

 /* Return 1 if TYPE1 and TYPE2 are to be considered equivalent for the
@@ -688,7 +669,7 @@ decl_refs_may_alias_p (tree base1, tree base2,
                        HOST_WIDE_INT offset2, HOST_WIDE_INT max_size2)
 {
-  gcc_assert (SSA_VAR_P (base1) && SSA_VAR_P (base2));
+  gcc_checking_assert (DECL_P (base1) && DECL_P (base2));

   /* If both references are based on different variables, they cannot
      alias.  */
   if (base1 != base2)
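
(Editorial illustration, not part of the patch, assuming a target with
32-bit int.  When both bases are the same declaration,
decl_refs_may_alias_p reduces to the offset/size overlap test.)

/* Both stores are based on the decl s, so the oracle compares bit
   ranges: s.i spans [0, 32), s.j spans [32, 64), and
   ranges_overlap_p (0, 32, 32, 32) is false, hence no alias.  */
struct S { int i; int j; } s;

void
init_s (void)
{
  s.i = 0;
  s.j = 1;
}
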
@@ -718,36 +699,39 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
                                alias_set_type base2_alias_set, bool tbaa_p)
 {
   tree ptr1;
-  tree ptrtype1;
+  tree ptrtype1, dbase2;
   HOST_WIDE_INT offset1p = offset1, offset2p = offset2;
+  HOST_WIDE_INT doffset1, doffset2;
+  double_int moff;
+
+  gcc_checking_assert ((TREE_CODE (base1) == MEM_REF
+                        || TREE_CODE (base1) == TARGET_MEM_REF)
+                       && DECL_P (base2));

   ptr1 = TREE_OPERAND (base1, 0);

   /* The offset embedded in MEM_REFs can be negative.  Bias them
      so that the resulting offset adjustment is positive.  */
-  if (TREE_CODE (base1) == MEM_REF
-      || TREE_CODE (base1) == TARGET_MEM_REF)
-    {
-      double_int moff = mem_ref_offset (base1);
-      moff = double_int_lshift (moff,
-                                BITS_PER_UNIT == 8
-                                ? 3 : exact_log2 (BITS_PER_UNIT),
-                                HOST_BITS_PER_DOUBLE_INT, true);
-      if (double_int_negative_p (moff))
-        offset2p += double_int_neg (moff).low;
-      else
-        offset1p += moff.low;
-    }
+  moff = mem_ref_offset (base1);
+  moff = double_int_lshift (moff,
+                            BITS_PER_UNIT == 8
+                            ? 3 : exact_log2 (BITS_PER_UNIT),
+                            HOST_BITS_PER_DOUBLE_INT, true);
+  if (double_int_negative_p (moff))
+    offset2p += double_int_neg (moff).low;
+  else
+    offset1p += moff.low;

   /* If only one reference is based on a variable, they cannot alias if
      the pointer access is beyond the extent of the variable access.
      (the pointer base cannot validly point to an offset less than zero
      of the variable).
-     They also cannot alias if the pointer may not point to the decl.  */
-  if ((TREE_CODE (base1) != TARGET_MEM_REF
-       || (!TMR_INDEX (base1) && !TMR_INDEX2 (base1)))
+     ??? IVOPTs creates bases that do not honor this restriction,
+     so do not apply this optimization for TARGET_MEM_REFs.  */
+  if (TREE_CODE (base1) != TARGET_MEM_REF
       && !ranges_overlap_p (MAX (0, offset1p), -1, offset2p, max_size2))
     return false;

+  /* They also cannot alias if the pointer may not point to the decl.  */
   if (!ptr_deref_may_alias_decl_p (ptr1, base2))
     return false;

@@ -755,12 +739,7 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
   if (!flag_strict_aliasing || !tbaa_p)
     return true;

-  if (TREE_CODE (base1) == MEM_REF)
-    ptrtype1 = TREE_TYPE (TREE_OPERAND (base1, 1));
-  else if (TREE_CODE (base1) == TARGET_MEM_REF)
-    ptrtype1 = TREE_TYPE (TMR_OFFSET (base1));
-  else
-    ptrtype1 = TREE_TYPE (ptr1);
+  ptrtype1 = TREE_TYPE (TREE_OPERAND (base1, 1));

   /* If the alias set for a pointer access is zero all bets are off.  */
   if (base1_alias_set == -1)
@@ -770,20 +749,6 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
   if (base2_alias_set == -1)
     base2_alias_set = get_alias_set (base2);

-  /* If both references are through the same type, they do not alias
-     if the accesses do not overlap.  This does extra disambiguation
-     for mixed/pointer accesses but requires strict aliasing.
-     For MEM_REFs we require that the component-ref offset we computed
-     is relative to the start of the type which we ensure by
-     comparing rvalue and access type and disregarding the constant
-     pointer offset.  */
-  if ((TREE_CODE (base1) != TARGET_MEM_REF
-       || (!TMR_INDEX (base1) && !TMR_INDEX2 (base1)))
-      && (TREE_CODE (base1) != MEM_REF
-          || same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1)
-      && same_type_for_tbaa (TREE_TYPE (ptrtype1), TREE_TYPE (base2)) == 1)
-    return ranges_overlap_p (offset1, max_size1, offset2, max_size2);
-
   /* When we are trying to disambiguate an access with a pointer dereference
      as base versus one with a decl as base we can use both the size
      of the decl and its dynamic type for extra disambiguation.
@@ -813,13 +778,50 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
       && tree_int_cst_lt (DECL_SIZE (base2),
                           TYPE_SIZE (TREE_TYPE (ptrtype1))))
     return false;

+  if (!ref2)
+    return true;
+
+  /* If the decl is accessed via a MEM_REF, reconstruct the base
+     we can use for TBAA and an appropriately adjusted offset.  */
+  dbase2 = ref2;
+  while (handled_component_p (dbase2))
+    dbase2 = TREE_OPERAND (dbase2, 0);
+  doffset1 = offset1;
+  doffset2 = offset2;
+  if (TREE_CODE (dbase2) == MEM_REF
+      || TREE_CODE (dbase2) == TARGET_MEM_REF)
+    {
+      double_int moff = mem_ref_offset (dbase2);
+      moff = double_int_lshift (moff,
+                                BITS_PER_UNIT == 8
+                                ? 3 : exact_log2 (BITS_PER_UNIT),
+                                HOST_BITS_PER_DOUBLE_INT, true);
+      if (double_int_negative_p (moff))
+        doffset1 -= double_int_neg (moff).low;
+      else
+        doffset2 -= moff.low;
+    }
+
+  /* If either reference is view-converted, give up now.  */
+  if (same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) != 1
+      || same_type_for_tbaa (TREE_TYPE (dbase2), TREE_TYPE (base2)) != 1)
+    return true;
+
+  /* If both references are through the same type, they do not alias
+     if the accesses do not overlap.  This does extra disambiguation
+     for mixed/pointer accesses but requires strict aliasing.
+     For MEM_REFs we require that the component-ref offset we computed
+     is relative to the start of the type which we ensure by
+     comparing rvalue and access type and disregarding the constant
+     pointer offset.  */
+  if ((TREE_CODE (base1) != TARGET_MEM_REF
+       || (!TMR_INDEX (base1) && !TMR_INDEX2 (base1)))
+      && same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (dbase2)) == 1)
+    return ranges_overlap_p (doffset1, max_size1, doffset2, max_size2);
+
   /* Do access-path based disambiguation.  */
   if (ref1 && ref2
-      && handled_component_p (ref1)
-      && handled_component_p (ref2)
-      && TREE_CODE (base1) != TARGET_MEM_REF
-      && (TREE_CODE (base1) != MEM_REF
-          || same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1))
+      && (handled_component_p (ref1) || handled_component_p (ref2)))
     return aliasing_component_refs_p (ref1,
                                       ref1_alias_set, base1_alias_set,
                                       offset1, max_size1,
@@ -851,6 +853,11 @@ indirect_refs_may_alias_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
   tree ptr2;
   tree ptrtype1, ptrtype2;

+  gcc_checking_assert ((TREE_CODE (base1) == MEM_REF
+                        || TREE_CODE (base1) == TARGET_MEM_REF)
+                       && (TREE_CODE (base2) == MEM_REF
+                           || TREE_CODE (base2) == TARGET_MEM_REF));
+
   ptr1 = TREE_OPERAND (base1, 0);
   ptr2 = TREE_OPERAND (base2, 0);

@@ -878,34 +885,27 @@ indirect_refs_may_alias_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
              && operand_equal_p (TMR_INDEX2 (base1),
                                  TMR_INDEX2 (base2), 0))))))
     {
+      double_int moff;
       /* The offset embedded in MEM_REFs can be negative.  Bias them
          so that the resulting offset adjustment is positive.  */
-      if (TREE_CODE (base1) == MEM_REF
-          || TREE_CODE (base1) == TARGET_MEM_REF)
-        {
-          double_int moff = mem_ref_offset (base1);
-          moff = double_int_lshift (moff,
-                                    BITS_PER_UNIT == 8
-                                    ? 3 : exact_log2 (BITS_PER_UNIT),
-                                    HOST_BITS_PER_DOUBLE_INT, true);
-          if (double_int_negative_p (moff))
-            offset2 += double_int_neg (moff).low;
-          else
-            offset1 += moff.low;
-        }
-      if (TREE_CODE (base2) == MEM_REF
-          || TREE_CODE (base2) == TARGET_MEM_REF)
-        {
-          double_int moff = mem_ref_offset (base2);
-          moff = double_int_lshift (moff,
-                                    BITS_PER_UNIT == 8
-                                    ? 3 : exact_log2 (BITS_PER_UNIT),
-                                    HOST_BITS_PER_DOUBLE_INT, true);
-          if (double_int_negative_p (moff))
-            offset1 += double_int_neg (moff).low;
-          else
-            offset2 += moff.low;
-        }
+      moff = mem_ref_offset (base1);
+      moff = double_int_lshift (moff,
+                                BITS_PER_UNIT == 8
+                                ? 3 : exact_log2 (BITS_PER_UNIT),
+                                HOST_BITS_PER_DOUBLE_INT, true);
+      if (double_int_negative_p (moff))
+        offset2 += double_int_neg (moff).low;
+      else
+        offset1 += moff.low;
+      moff = mem_ref_offset (base2);
+      moff = double_int_lshift (moff,
+                                BITS_PER_UNIT == 8
+                                ? 3 : exact_log2 (BITS_PER_UNIT),
+                                HOST_BITS_PER_DOUBLE_INT, true);
+      if (double_int_negative_p (moff))
+        offset1 += double_int_neg (moff).low;
+      else
+        offset2 += moff.low;
       return ranges_overlap_p (offset1, max_size1, offset2, max_size2);
     }
   if (!ptr_derefs_may_alias_p (ptr1, ptr2))
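
(Editorial worked example, not part of the patch, assuming
BITS_PER_UNIT == 8.  The biasing above rewrites a negative MEM_REF
offset as a positive adjustment of the other reference; the plain C
model below reproduces the arithmetic.)

#include <stdio.h>

int
main (void)
{
  /* Bit offsets of two accesses MEM_REF[p + -4] and MEM_REF[p + 8]
     relative to their own bases.  */
  long long offset1 = 0, offset2 = 0;
  long long moff1 = -4 * 8;   /* -4 bytes, in bits */
  long long moff2 = 8 * 8;    /*  8 bytes, in bits */

  /* Bias so that no offset goes negative: add the negated amount to
     the opposite side instead of subtracting it from this side.  */
  if (moff1 < 0) offset2 += -moff1; else offset1 += moff1;
  if (moff2 < 0) offset1 += -moff2; else offset2 += moff2;

  /* Prints "0 96": the 12-byte distance is preserved and both values
     stay representable as unsigned offsets.  */
  printf ("%lld %lld\n", offset1, offset2);
  return 0;
}
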
@@ -915,18 +915,8 @@ indirect_refs_may_alias_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
   if (!flag_strict_aliasing || !tbaa_p)
     return true;

-  if (TREE_CODE (base1) == MEM_REF)
-    ptrtype1 = TREE_TYPE (TREE_OPERAND (base1, 1));
-  else if (TREE_CODE (base1) == TARGET_MEM_REF)
-    ptrtype1 = TREE_TYPE (TMR_OFFSET (base1));
-  else
-    ptrtype1 = TREE_TYPE (ptr1);
-  if (TREE_CODE (base2) == MEM_REF)
-    ptrtype2 = TREE_TYPE (TREE_OPERAND (base2, 1));
-  else if (TREE_CODE (base2) == TARGET_MEM_REF)
-    ptrtype2 = TREE_TYPE (TMR_OFFSET (base2));
-  else
-    ptrtype2 = TREE_TYPE (ptr2);
+  ptrtype1 = TREE_TYPE (TREE_OPERAND (base1, 1));
+  ptrtype2 = TREE_TYPE (TREE_OPERAND (base2, 1));

   /* If the alias set for a pointer access is zero all bets are off.  */
   if (base1_alias_set == -1)
@@ -941,12 +931,12 @@ indirect_refs_may_alias_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
   /* If both references are through the same type, they do not alias
      if the accesses do not overlap.  This does extra disambiguation
      for mixed/pointer accesses but requires strict aliasing.  */
-  if ((TREE_CODE (base1) != TARGET_MEM_REF || !TMR_INDEX (base1))
-      && (TREE_CODE (base2) != TARGET_MEM_REF || !TMR_INDEX (base2))
-      && (TREE_CODE (base1) != MEM_REF
-          || same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1)
-      && (TREE_CODE (base2) != MEM_REF
-          || same_type_for_tbaa (TREE_TYPE (base2), TREE_TYPE (ptrtype2)) == 1)
+  if ((TREE_CODE (base1) != TARGET_MEM_REF
+       || (!TMR_INDEX (base1) && !TMR_INDEX2 (base1)))
+      && (TREE_CODE (base2) != TARGET_MEM_REF
+          || (!TMR_INDEX (base2) && !TMR_INDEX2 (base2)))
+      && same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1
+      && same_type_for_tbaa (TREE_TYPE (base2), TREE_TYPE (ptrtype2)) == 1
       && same_type_for_tbaa (TREE_TYPE (ptrtype1), TREE_TYPE (ptrtype2)) == 1)
     return ranges_overlap_p (offset1, max_size1, offset2, max_size2);

@@ -958,14 +948,9 @@ indirect_refs_may_alias_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
   /* Do access-path based disambiguation.  */
   if (ref1 && ref2
-      && handled_component_p (ref1)
-      && handled_component_p (ref2)
-      && TREE_CODE (base1) != TARGET_MEM_REF
-      && TREE_CODE (base2) != TARGET_MEM_REF
-      && (TREE_CODE (base1) != MEM_REF
-          || same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1)
-      && (TREE_CODE (base2) != MEM_REF
-          || same_type_for_tbaa (TREE_TYPE (base2), TREE_TYPE (ptrtype2)) == 1))
+      && (handled_component_p (ref1) || handled_component_p (ref2))
+      && same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1
+      && same_type_for_tbaa (TREE_TYPE (base2), TREE_TYPE (ptrtype2)) == 1)
     return aliasing_component_refs_p (ref1,
                                       ref1_alias_set, base1_alias_set,
                                       offset1, max_size1,
@@ -991,7 +976,6 @@ refs_may_alias_p_1 (ao_ref *ref1, ao_ref *ref2, bool tbaa_p)
          || DECL_P (ref1->ref)
          || TREE_CODE (ref1->ref) == STRING_CST
          || handled_component_p (ref1->ref)
-          || INDIRECT_REF_P (ref1->ref)
          || TREE_CODE (ref1->ref) == MEM_REF
          || TREE_CODE (ref1->ref) == TARGET_MEM_REF)
         && (!ref2->ref
             || TREE_CODE (ref2->ref) == SSA_NAME
             || DECL_P (ref2->ref)
             || TREE_CODE (ref2->ref) == STRING_CST
             || handled_component_p (ref2->ref)
-             || INDIRECT_REF_P (ref2->ref)
             || TREE_CODE (ref2->ref) == MEM_REF
             || TREE_CODE (ref2->ref) == TARGET_MEM_REF));

@@ -1035,22 +1018,25 @@ refs_may_alias_p_1 (ao_ref *ref1, ao_ref *ref2, bool tbaa_p)
       || TREE_CODE (base2) == LABEL_DECL)
     return true;

+  /* Two volatile accesses always conflict.  */
+  if (ref1->volatile_p
+      && ref2->volatile_p)
+    return true;
+
   /* Defer to simple offset based disambiguation if we have
      references based on two decls.  Do this before defering to
      TBAA to handle must-alias cases in conformance with the
      GCC extension of allowing type-punning through unions.  */
-  var1_p = SSA_VAR_P (base1);
-  var2_p = SSA_VAR_P (base2);
+  var1_p = DECL_P (base1);
+  var2_p = DECL_P (base2);
   if (var1_p && var2_p)
     return decl_refs_may_alias_p (base1, offset1, max_size1,
                                   base2, offset2, max_size2);

-  ind1_p = (INDIRECT_REF_P (base1)
-            || (TREE_CODE (base1) == MEM_REF)
-            || (TREE_CODE (base1) == TARGET_MEM_REF));
-  ind2_p = (INDIRECT_REF_P (base2)
-            || (TREE_CODE (base2) == MEM_REF)
-            || (TREE_CODE (base2) == TARGET_MEM_REF));
+  ind1_p = (TREE_CODE (base1) == MEM_REF
+            || TREE_CODE (base1) == TARGET_MEM_REF);
+  ind2_p = (TREE_CODE (base2) == MEM_REF
+            || TREE_CODE (base2) == TARGET_MEM_REF);

   /* Canonicalize the pointer-vs-decl case.  */
   if (ind1_p && var2_p)
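
(Editorial illustration, not part of the patch.  The new volatile_p
check makes any two volatile accesses conflict, independent of TBAA.)

volatile int status;
volatile short ctl;

int
probe (void)
{
  short c = ctl;    /* volatile read */
  int s = status;   /* volatile read; never interchanged with the one
                       above, although TBAA alone would say the two
                       objects are disjoint */
  return s + c;
}
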
@@ -1160,6 +1146,11 @@ ref_maybe_used_by_call_p_1 (gimple call, ao_ref *ref)
   if (!base)
     return true;

+  /* A call that is not without side-effects might involve volatile
+     accesses and thus conflicts with all other volatile accesses.  */
+  if (ref->volatile_p)
+    return true;
+
   /* If the reference is based on a decl that is not aliased the call
      cannot possibly use it.  */
   if (DECL_P (base)
@@ -1177,8 +1168,20 @@ ref_maybe_used_by_call_p_1 (gimple call, ao_ref *ref)
       && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL)
     switch (DECL_FUNCTION_CODE (callee))
       {
-        /* All the following functions clobber memory pointed to by
-           their first argument.  */
+        /* All the following functions read memory pointed to by
+           their second argument.  strcat/strncat additionally
+           reads memory pointed to by the first argument.  */
+        case BUILT_IN_STRCAT:
+        case BUILT_IN_STRNCAT:
+          {
+            ao_ref dref;
+            ao_ref_init_from_ptr_and_size (&dref,
+                                           gimple_call_arg (call, 0),
+                                           NULL_TREE);
+            if (refs_may_alias_p_1 (&dref, ref, false))
+              return true;
+          }
+          /* FALLTHRU */
        case BUILT_IN_STRCPY:
        case BUILT_IN_STRNCPY:
        case BUILT_IN_MEMCPY:
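
(Editorial illustration, not part of the patch.  The fall-through added
above makes the oracle treat strcat's first argument as read, which is
what keeps the stores below alive.)

char buf[8];

void
prepare_and_append (const char *src)
{
  buf[0] = 'A';                 /* not dead: strcat reads it to find
                                   the end of the destination string */
  buf[1] = '\0';
  __builtin_strcat (buf, src);
}
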
@@ -1186,8 +1189,8 @@ ref_maybe_used_by_call_p_1 (gimple call, ao_ref *ref)
        case BUILT_IN_MEMPCPY:
        case BUILT_IN_STPCPY:
        case BUILT_IN_STPNCPY:
-        case BUILT_IN_STRCAT:
-        case BUILT_IN_STRNCAT:
+        case BUILT_IN_TM_MEMCPY:
+        case BUILT_IN_TM_MEMMOVE:
          {
            ao_ref dref;
            tree size = NULL_TREE;
@@ -1198,6 +1201,34 @@ ref_maybe_used_by_call_p_1 (gimple call, ao_ref *ref)
                                           size);
            return refs_may_alias_p_1 (&dref, ref, false);
          }
+        case BUILT_IN_STRCAT_CHK:
+        case BUILT_IN_STRNCAT_CHK:
+          {
+            ao_ref dref;
+            ao_ref_init_from_ptr_and_size (&dref,
+                                           gimple_call_arg (call, 0),
+                                           NULL_TREE);
+            if (refs_may_alias_p_1 (&dref, ref, false))
+              return true;
+          }
+          /* FALLTHRU */
+        case BUILT_IN_STRCPY_CHK:
+        case BUILT_IN_STRNCPY_CHK:
+        case BUILT_IN_MEMCPY_CHK:
+        case BUILT_IN_MEMMOVE_CHK:
+        case BUILT_IN_MEMPCPY_CHK:
+        case BUILT_IN_STPCPY_CHK:
+        case BUILT_IN_STPNCPY_CHK:
+          {
+            ao_ref dref;
+            tree size = NULL_TREE;
+            if (gimple_call_num_args (call) == 4)
+              size = gimple_call_arg (call, 2);
+            ao_ref_init_from_ptr_and_size (&dref,
+                                           gimple_call_arg (call, 1),
+                                           size);
+            return refs_may_alias_p_1 (&dref, ref, false);
+          }
        case BUILT_IN_BCOPY:
          {
            ao_ref dref;
            tree size = gimple_call_arg (call, 2);
@@ -1207,14 +1238,56 @@ ref_maybe_used_by_call_p_1 (gimple call, ao_ref *ref)
                                           size);
            return refs_may_alias_p_1 (&dref, ref, false);
          }
+
+        /* The following functions read memory pointed to by their
+           first argument.  */
+        CASE_BUILT_IN_TM_LOAD (1):
+        CASE_BUILT_IN_TM_LOAD (2):
+        CASE_BUILT_IN_TM_LOAD (4):
+        CASE_BUILT_IN_TM_LOAD (8):
+        CASE_BUILT_IN_TM_LOAD (FLOAT):
+        CASE_BUILT_IN_TM_LOAD (DOUBLE):
+        CASE_BUILT_IN_TM_LOAD (LDOUBLE):
+        CASE_BUILT_IN_TM_LOAD (M64):
+        CASE_BUILT_IN_TM_LOAD (M128):
+        CASE_BUILT_IN_TM_LOAD (M256):
+        case BUILT_IN_TM_LOG:
+        case BUILT_IN_TM_LOG_1:
+        case BUILT_IN_TM_LOG_2:
+        case BUILT_IN_TM_LOG_4:
+        case BUILT_IN_TM_LOG_8:
+        case BUILT_IN_TM_LOG_FLOAT:
+        case BUILT_IN_TM_LOG_DOUBLE:
+        case BUILT_IN_TM_LOG_LDOUBLE:
+        case BUILT_IN_TM_LOG_M64:
+        case BUILT_IN_TM_LOG_M128:
+        case BUILT_IN_TM_LOG_M256:
+          return ptr_deref_may_alias_ref_p_1 (gimple_call_arg (call, 0), ref);
+
+        /* These read memory pointed to by the first argument.  */
+        case BUILT_IN_STRDUP:
+        case BUILT_IN_STRNDUP:
+          {
+            ao_ref dref;
+            tree size = NULL_TREE;
+            if (gimple_call_num_args (call) == 2)
+              size = gimple_call_arg (call, 1);
+            ao_ref_init_from_ptr_and_size (&dref,
+                                           gimple_call_arg (call, 0),
+                                           size);
+            return refs_may_alias_p_1 (&dref, ref, false);
+          }
        /* The following builtins do not read from memory.  */
        case BUILT_IN_FREE:
        case BUILT_IN_MALLOC:
        case BUILT_IN_CALLOC:
        case BUILT_IN_ALLOCA:
+        case BUILT_IN_ALLOCA_WITH_ALIGN:
        case BUILT_IN_STACK_SAVE:
        case BUILT_IN_STACK_RESTORE:
        case BUILT_IN_MEMSET:
+        case BUILT_IN_TM_MEMSET:
+        case BUILT_IN_MEMSET_CHK:
        case BUILT_IN_FREXP:
        case BUILT_IN_FREXPF:
        case BUILT_IN_FREXPL:
@@ -1233,6 +1306,8 @@ ref_maybe_used_by_call_p_1 (gimple call, ao_ref *ref)
        case BUILT_IN_SINCOS:
        case BUILT_IN_SINCOSF:
        case BUILT_IN_SINCOSL:
+        case BUILT_IN_ASSUME_ALIGNED:
+        case BUILT_IN_VA_END:
          return false;
        /* __sync_* builtins and some OpenMP builtins act as threading
           barriers.  */
@@ -1285,8 +1360,8 @@ ref_maybe_used_by_call_p_1 (gimple call, ao_ref *ref)
       if (pt_solution_includes (gimple_call_use_set (call), base))
         return true;
     }
-  else if ((INDIRECT_REF_P (base)
-            || TREE_CODE (base) == MEM_REF)
+  else if ((TREE_CODE (base) == MEM_REF
+            || TREE_CODE (base) == TARGET_MEM_REF)
           && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
     {
       struct ptr_info_def *pi = SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0));
@@ -1410,6 +1485,11 @@ call_may_clobber_ref_p_1 (gimple call, ao_ref *ref)
       || CONSTANT_CLASS_P (base))
     return false;

+  /* A call that is not without side-effects might involve volatile
+     accesses and thus conflicts with all other volatile accesses.  */
+  if (ref->volatile_p)
+    return true;
+
   /* If the reference is based on a decl that is not aliased the call
      cannot possibly clobber it.  */
   if (DECL_P (base)
@@ -1442,10 +1522,53 @@ call_may_clobber_ref_p_1 (gimple call, ao_ref *ref)
        case BUILT_IN_STRCAT:
        case BUILT_IN_STRNCAT:
        case BUILT_IN_MEMSET:
+        case BUILT_IN_TM_MEMSET:
+        CASE_BUILT_IN_TM_STORE (1):
+        CASE_BUILT_IN_TM_STORE (2):
+        CASE_BUILT_IN_TM_STORE (4):
+        CASE_BUILT_IN_TM_STORE (8):
+        CASE_BUILT_IN_TM_STORE (FLOAT):
+        CASE_BUILT_IN_TM_STORE (DOUBLE):
+        CASE_BUILT_IN_TM_STORE (LDOUBLE):
+        CASE_BUILT_IN_TM_STORE (M64):
+        CASE_BUILT_IN_TM_STORE (M128):
+        CASE_BUILT_IN_TM_STORE (M256):
+        case BUILT_IN_TM_MEMCPY:
+        case BUILT_IN_TM_MEMMOVE:
          {
            ao_ref dref;
            tree size = NULL_TREE;
-            if (gimple_call_num_args (call) == 3)
+            /* Don't pass in size for strncat, as the maximum size
+               is strlen (dest) + n + 1 instead of n, resp.
+               n + 1 at dest + strlen (dest), but strlen (dest) isn't
+               known.  */
+            if (gimple_call_num_args (call) == 3
+                && DECL_FUNCTION_CODE (callee) != BUILT_IN_STRNCAT)
              size = gimple_call_arg (call, 2);
            ao_ref_init_from_ptr_and_size (&dref,
                                           gimple_call_arg (call, 0),
                                           size);
            return refs_may_alias_p_1 (&dref, ref, false);
          }
+        case BUILT_IN_STRCPY_CHK:
+        case BUILT_IN_STRNCPY_CHK:
+        case BUILT_IN_MEMCPY_CHK:
+        case BUILT_IN_MEMMOVE_CHK:
+        case BUILT_IN_MEMPCPY_CHK:
+        case BUILT_IN_STPCPY_CHK:
+        case BUILT_IN_STPNCPY_CHK:
+        case BUILT_IN_STRCAT_CHK:
+        case BUILT_IN_STRNCAT_CHK:
+        case BUILT_IN_MEMSET_CHK:
+          {
+            ao_ref dref;
+            tree size = NULL_TREE;
+            /* Don't pass in size for __strncat_chk, as the maximum size
+               is strlen (dest) + n + 1 instead of n, resp.
+               n + 1 at dest + strlen (dest), but strlen (dest) isn't
+               known.  */
+            if (gimple_call_num_args (call) == 4
+                && DECL_FUNCTION_CODE (callee) != BUILT_IN_STRNCAT_CHK)
+              size = gimple_call_arg (call, 2);
+            ao_ref_init_from_ptr_and_size (&dref,
+                                           gimple_call_arg (call, 0),
+                                           size);
+            return refs_may_alias_p_1 (&dref, ref, false);
+          }
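
(Editorial illustration, not part of the patch, of the strncat caveat in
the comment above: the clobbered region is not bounded by n.)

char buf[16] = "abcdefgh";

void
append_xy (void)
{
  /* Writes buf[8..10] -- up to n + 1 bytes at buf + strlen (buf) --
     not buf[0..2], and strlen (buf) is unknown to the oracle, so no
     size can be passed to ao_ref_init_from_ptr_and_size.  */
  __builtin_strncat (buf, "xy", 2);
}
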
@@ -1465,6 +1588,8 @@ call_may_clobber_ref_p_1 (gimple call, ao_ref *ref)
           being the definition point for the pointer.  */
        case BUILT_IN_MALLOC:
        case BUILT_IN_CALLOC:
+        case BUILT_IN_STRDUP:
+        case BUILT_IN_STRNDUP:
          /* Unix98 specifies that errno is set on allocation failure.  */
          if (flag_errno_math
              && targetm.ref_may_alias_errno (ref))
            return true;
          return false;
        case BUILT_IN_STACK_SAVE:
        case BUILT_IN_ALLOCA:
+        case BUILT_IN_ALLOCA_WITH_ALIGN:
+        case BUILT_IN_ASSUME_ALIGNED:
          return false;
        /* Freeing memory kills the pointed-to memory.  More importantly
           the call has to serve as a barrier for moving loads and stores
           across it.  */
        case BUILT_IN_FREE:
+        case BUILT_IN_VA_END:
          {
            tree ptr = gimple_call_arg (call, 0);
            return ptr_deref_may_alias_ref_p_1 (ptr, ref);
          }
@@ -1569,8 +1697,8 @@ call_may_clobber_ref_p_1 (gimple call, ao_ref *ref)
   /* Check if the base variable is call-clobbered.  */
   if (DECL_P (base))
     return pt_solution_includes (gimple_call_clobber_set (call), base);
-  else if ((INDIRECT_REF_P (base)
-            || TREE_CODE (base) == MEM_REF)
+  else if ((TREE_CODE (base) == MEM_REF
+            || TREE_CODE (base) == TARGET_MEM_REF)
           && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
     {
       struct ptr_info_def *pi = SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0));
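
(Editorial illustration, not part of the patch.  Allocation calls define
fresh memory and clobber no pre-existing object, except that errno may
be set on failure.)

int x;

int
load_around_malloc (void)
{
  int a = x;
  int *p = __builtin_malloc (sizeof *p);
  int b = x;            /* may reuse a: malloc does not clobber x */
  __builtin_free (p);
  return a + b;
}
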
@@ -1658,7 +1786,14 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref)
     return false;

   if (gimple_has_lhs (stmt)
-      && TREE_CODE (gimple_get_lhs (stmt)) != SSA_NAME)
+      && TREE_CODE (gimple_get_lhs (stmt)) != SSA_NAME
+      /* The assignment is not necessarily carried out if it can throw
+         and we can catch it in the current function where we could inspect
+         the previous value.
+         ??? We only need to care about the RHS throwing.  For aggregate
+         assignments or similar calls and non-call exceptions the LHS
+         might throw as well.  */
+      && !stmt_can_throw_internal (stmt))
     {
       tree base, lhs = gimple_get_lhs (stmt);
       HOST_WIDE_INT size, offset, max_size;
@@ -1689,6 +1824,10 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref)
          case BUILT_IN_MEMCPY:
          case BUILT_IN_MEMPCPY:
          case BUILT_IN_MEMMOVE:
          case BUILT_IN_MEMSET:
+          case BUILT_IN_MEMCPY_CHK:
+          case BUILT_IN_MEMPCPY_CHK:
+          case BUILT_IN_MEMMOVE_CHK:
+          case BUILT_IN_MEMSET_CHK:
            {
              tree dest = gimple_call_arg (stmt, 0);
              tree len = gimple_call_arg (stmt, 2);
@@ -1711,10 +1850,23 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref)
                               / BITS_PER_UNIT)))
                return true;
            }
+            break;
          }
+
+          case BUILT_IN_VA_END:
+            {
+              tree ptr = gimple_call_arg (stmt, 0);
+              if (TREE_CODE (ptr) == ADDR_EXPR)
+                {
+                  tree base = ao_ref_base (ref);
+                  if (TREE_OPERAND (ptr, 0) == base)
+                    return true;
+                }
+              break;
+            }
+
          default:;
        }
-    }

   return false;
 }
@@ -1736,6 +1888,8 @@ static bool
 maybe_skip_until (gimple phi, tree target, ao_ref *ref,
                   tree vuse, bitmap *visited)
 {
+  basic_block bb = gimple_bb (phi);
+
   if (!*visited)
     *visited = BITMAP_ALLOC (NULL);

@@ -1760,11 +1914,73 @@ maybe_skip_until (gimple phi, tree target, ao_ref *ref,
       else if (gimple_nop_p (def_stmt)
               || stmt_may_clobber_ref_p_1 (def_stmt, ref))
        return false;
+      /* If we reach a new basic-block see if we already skipped it
+         in a previous walk that ended successfully.  */
+      if (gimple_bb (def_stmt) != bb)
+        {
+          if (!bitmap_set_bit (*visited, SSA_NAME_VERSION (vuse)))
+            return true;
+          bb = gimple_bb (def_stmt);
+        }
       vuse = gimple_vuse (def_stmt);
     }
   return true;
 }

+/* For two PHI arguments ARG0 and ARG1 try to skip non-aliasing code
+   until we hit the phi argument definition that dominates the other one.
+   Return that, or NULL_TREE if there is no such definition.  */
+
+static tree
+get_continuation_for_phi_1 (gimple phi, tree arg0, tree arg1,
+                            ao_ref *ref, bitmap *visited)
+{
+  gimple def0 = SSA_NAME_DEF_STMT (arg0);
+  gimple def1 = SSA_NAME_DEF_STMT (arg1);
+  tree common_vuse;
+
+  if (arg0 == arg1)
+    return arg0;
+  else if (gimple_nop_p (def0)
+           || (!gimple_nop_p (def1)
+               && dominated_by_p (CDI_DOMINATORS,
+                                  gimple_bb (def1), gimple_bb (def0))))
+    {
+      if (maybe_skip_until (phi, arg0, ref, arg1, visited))
+        return arg0;
+    }
+  else if (gimple_nop_p (def1)
+           || dominated_by_p (CDI_DOMINATORS,
+                              gimple_bb (def0), gimple_bb (def1)))
+    {
+      if (maybe_skip_until (phi, arg1, ref, arg0, visited))
+        return arg1;
+    }
+  /* Special case of a diamond:
+       MEM_1 = ...
+       goto (cond) ? L1 : L2
+       L1: store1 = ...    #MEM_2 = vuse(MEM_1)
+           goto L3
+       L2: store2 = ...    #MEM_3 = vuse(MEM_1)
+       L3: MEM_4 = PHI<MEM_2, MEM_3>
+     We were called with the PHI at L3, MEM_2 and MEM_3 don't
+     dominate each other, but still we can easily skip this PHI node
+     if we recognize that the vuse MEM operand is the same for both,
+     and that we can skip both statements (they don't clobber us).
+     This is still linear.  Don't use maybe_skip_until, that might
+     potentially be slow.  */
+  else if ((common_vuse = gimple_vuse (def0))
+           && common_vuse == gimple_vuse (def1))
+    {
+      if (!stmt_may_clobber_ref_p_1 (def0, ref)
+          && !stmt_may_clobber_ref_p_1 (def1, ref))
+        return common_vuse;
+    }
+
+  return NULL_TREE;
+}
+
+
 /* Starting from a PHI node for the virtual operand of the memory reference
    REF find a continuation virtual operand that allows to continue walking
    statements dominating PHI skipping only statements that cannot possibly
@@ -1780,53 +1996,41 @@ get_continuation_for_phi (gimple phi, ao_ref *ref, bitmap *visited)
   if (nargs == 1)
     return PHI_ARG_DEF (phi, 0);

-  /* For two arguments try to skip non-aliasing code until we hit
-     the phi argument definition that dominates the other one.  */
-  if (nargs == 2)
+  /* For two or more arguments try to pairwise skip non-aliasing code
+     until we hit the phi argument definition that dominates the other one.  */
+  else if (nargs >= 2)
     {
-      tree arg0 = PHI_ARG_DEF (phi, 0);
-      tree arg1 = PHI_ARG_DEF (phi, 1);
-      gimple def0 = SSA_NAME_DEF_STMT (arg0);
-      gimple def1 = SSA_NAME_DEF_STMT (arg1);
-      tree common_vuse;
+      tree arg0, arg1;
+      unsigned i;
+
+      /* Find a candidate for the virtual operand which definition
+         dominates those of all others.  */
+      arg0 = PHI_ARG_DEF (phi, 0);
+      if (!SSA_NAME_IS_DEFAULT_DEF (arg0))
+        for (i = 1; i < nargs; ++i)
+          {
+            arg1 = PHI_ARG_DEF (phi, i);
+            if (SSA_NAME_IS_DEFAULT_DEF (arg1))
+              {
+                arg0 = arg1;
+                break;
+              }
+            if (dominated_by_p (CDI_DOMINATORS,
+                                gimple_bb (SSA_NAME_DEF_STMT (arg0)),
+                                gimple_bb (SSA_NAME_DEF_STMT (arg1))))
+              arg0 = arg1;
+          }

-      if (arg0 == arg1)
-        return arg0;
-      else if (gimple_nop_p (def0)
-               || (!gimple_nop_p (def1)
-                   && dominated_by_p (CDI_DOMINATORS,
-                                      gimple_bb (def1), gimple_bb (def0))))
+      /* Then pairwise reduce against the found candidate.  */
+      for (i = 0; i < nargs; ++i)
        {
-          if (maybe_skip_until (phi, arg0, ref, arg1, visited))
-            return arg0;
-        }
-      else if (gimple_nop_p (def1)
-               || dominated_by_p (CDI_DOMINATORS,
-                                  gimple_bb (def0), gimple_bb (def1)))
-        {
-          if (maybe_skip_until (phi, arg1, ref, arg0, visited))
-            return arg1;
-        }
-      /* Special case of a diamond:
-           MEM_1 = ...
-           goto (cond) ? L1 : L2
-           L1: store1 = ...    #MEM_2 = vuse(MEM_1)
-               goto L3
-           L2: store2 = ...    #MEM_3 = vuse(MEM_1)
-           L3: MEM_4 = PHI<MEM_2, MEM_3>
-         We were called with the PHI at L3, MEM_2 and MEM_3 don't
-         dominate each other, but still we can easily skip this PHI node
-         if we recognize that the vuse MEM operand is the same for both,
-         and that we can skip both statements (they don't clobber us).
-         This is still linear.  Don't use maybe_skip_until, that might
-         potentially be slow.  */
-      else if ((common_vuse = gimple_vuse (def0))
-               && common_vuse == gimple_vuse (def1))
-        {
-          if (!stmt_may_clobber_ref_p_1 (def0, ref)
-              && !stmt_may_clobber_ref_p_1 (def1, ref))
-            return common_vuse;
+          arg1 = PHI_ARG_DEF (phi, i);
+          arg0 = get_continuation_for_phi_1 (phi, arg0, arg1, ref, visited);
+          if (!arg0)
+            return NULL_TREE;
        }
+
+      return arg0;
     }

   return NULL_TREE;
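
(Editorial illustration, not part of the patch.  C source whose CFG is
the diamond from the comment in get_continuation_for_phi_1.)

int a, b, c;

int
diamond (int cond)
{
  a = 1;          /* MEM_1 */
  if (cond)
    b = 2;        /* store1: #MEM_2 = vuse(MEM_1), does not clobber a */
  else
    c = 3;        /* store2: #MEM_3 = vuse(MEM_1), does not clobber a */
  /* The final load's vuse is MEM_4 = PHI<MEM_2, MEM_3>; both arguments
     share the vuse MEM_1 and neither store clobbers a, so the walker
     skips the PHI and reaches the store a = 1.  */
  return a;
}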