X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Ftree-ssa-alias.c;h=54badda17723fee264ce9861e951d191ec1080d4;hb=89301f160ab3261dfd33f0192725621924423206;hp=f324099b2360d4e82f9490477d6a57837d599399;hpb=2622064fa2e62d2876df99f02f95d1a0c242e76a;p=pf3gnuchains%2Fgcc-fork.git

diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c
index f324099b236..54badda1772 100644
--- a/gcc/tree-ssa-alias.c
+++ b/gcc/tree-ssa-alias.c
@@ -1,5 +1,5 @@
 /* Alias analysis for trees.
-   Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010
+   Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
    Free Software Foundation, Inc.
    Contributed by Diego Novillo
 
@@ -219,14 +219,6 @@ ptr_deref_may_alias_decl_p (tree ptr, tree decl)
   if (!pi)
     return true;
 
-  /* If the decl can be used as a restrict tag and we have a restrict
-     pointer and that pointers points-to set doesn't contain this decl
-     then they can't alias.  */
-  if (DECL_RESTRICTED_P (decl)
-      && TYPE_RESTRICT (TREE_TYPE (ptr))
-      && pi->pt.vars_contains_restrict)
-    return bitmap_bit_p (pi->pt.vars, DECL_PT_UID (decl));
-
   return pt_solution_includes (&pi->pt, decl);
 }
 
@@ -317,13 +309,6 @@ ptr_derefs_may_alias_p (tree ptr1, tree ptr2)
   if (!pi1 || !pi2)
     return true;
 
-  /* If both pointers are restrict-qualified try to disambiguate
-     with restrict information.  */
-  if (TYPE_RESTRICT (TREE_TYPE (ptr1))
-      && TYPE_RESTRICT (TREE_TYPE (ptr2))
-      && !pt_solutions_same_restrict_base (&pi1->pt, &pi2->pt))
-    return false;
-
   /* ??? This does not use TBAA to prune decls from the intersection
      that not both pointers may access.  */
   return pt_solutions_intersect (&pi1->pt, &pi2->pt);
@@ -429,8 +414,6 @@ dump_points_to_solution (FILE *file, struct pt_solution *pt)
       dump_decl_set (file, pt->vars);
       if (pt->vars_contains_global)
         fprintf (file, " (includes global vars)");
-      if (pt->vars_contains_restrict)
-        fprintf (file, " (includes restrict tags)");
     }
 }
 
@@ -473,6 +456,7 @@ ao_ref_init (ao_ref *r, tree ref)
   r->max_size = -1;
   r->ref_alias_set = -1;
   r->base_alias_set = -1;
+  r->volatile_p = ref ? TREE_THIS_VOLATILE (ref) : false;
 }
 
 /* Returns the base object of the memory reference *REF.  */
@@ -542,6 +526,7 @@ ao_ref_init_from_ptr_and_size (ao_ref *ref, tree ptr, tree size)
   ref->max_size = ref->size = -1;
   ref->ref_alias_set = 0;
   ref->base_alias_set = 0;
+  ref->volatile_p = false;
 }
 
 /* Return 1 if TYPE1 and TYPE2 are to be considered equivalent for the
@@ -718,8 +703,9 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
                                alias_set_type base2_alias_set, bool tbaa_p)
 {
   tree ptr1;
-  tree ptrtype1;
+  tree ptrtype1, dbase2;
   HOST_WIDE_INT offset1p = offset1, offset2p = offset2;
+  HOST_WIDE_INT doffset1, doffset2;
   double_int moff;
 
   gcc_checking_assert ((TREE_CODE (base1) == MEM_REF
@@ -744,11 +730,12 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
      the pointer access is beyond the extent of the variable access.
      (the pointer base cannot validly point to an offset less than zero
      of the variable).
-     They also cannot alias if the pointer may not point to the decl.  */
-  if ((TREE_CODE (base1) != TARGET_MEM_REF
-       || (!TMR_INDEX (base1) && !TMR_INDEX2 (base1)))
+     ??? IVOPTs creates bases that do not honor this restriction,
+     so do not apply this optimization for TARGET_MEM_REFs.  */
+  if (TREE_CODE (base1) != TARGET_MEM_REF
       && !ranges_overlap_p (MAX (0, offset1p), -1, offset2p, max_size2))
     return false;
+  /* They also cannot alias if the pointer may not point to the decl.  */
   if (!ptr_deref_may_alias_decl_p (ptr1, base2))
     return false;
 
@@ -766,20 +753,6 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
   if (base2_alias_set == -1)
     base2_alias_set = get_alias_set (base2);
 
-  /* If both references are through the same type, they do not alias
-     if the accesses do not overlap.  This does extra disambiguation
-     for mixed/pointer accesses but requires strict aliasing.
-     For MEM_REFs we require that the component-ref offset we computed
-     is relative to the start of the type which we ensure by
-     comparing rvalue and access type and disregarding the constant
-     pointer offset.  */
-  if ((TREE_CODE (base1) != TARGET_MEM_REF
-       || (!TMR_INDEX (base1) && !TMR_INDEX2 (base1)))
-      && (TREE_CODE (base1) != MEM_REF
-          || same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1)
-      && same_type_for_tbaa (TREE_TYPE (ptrtype1), TREE_TYPE (base2)) == 1)
-    return ranges_overlap_p (offset1, max_size1, offset2, max_size2);
-
   /* When we are trying to disambiguate an access with a pointer dereference
      as base versus one with a decl as base we can use both the size
      of the decl and its dynamic type for extra disambiguation.
@@ -809,13 +782,51 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
       && tree_int_cst_lt (DECL_SIZE (base2), TYPE_SIZE (TREE_TYPE (ptrtype1))))
     return false;
 
+  if (!ref2)
+    return true;
+
+  /* If the decl is accessed via a MEM_REF, reconstruct the base
+     we can use for TBAA and an appropriately adjusted offset.  */
+  dbase2 = ref2;
+  while (handled_component_p (dbase2))
+    dbase2 = TREE_OPERAND (dbase2, 0);
+  doffset1 = offset1;
+  doffset2 = offset2;
+  if (TREE_CODE (dbase2) == MEM_REF
+      || TREE_CODE (dbase2) == TARGET_MEM_REF)
+    {
+      double_int moff = mem_ref_offset (dbase2);
+      moff = double_int_lshift (moff,
+                                BITS_PER_UNIT == 8
+                                ? 3 : exact_log2 (BITS_PER_UNIT),
+                                HOST_BITS_PER_DOUBLE_INT, true);
+      if (double_int_negative_p (moff))
+        doffset1 -= double_int_neg (moff).low;
+      else
+        doffset2 -= moff.low;
+    }
+
+  /* If either reference is view-converted, give up now.  */
+  if (same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) != 1
+      || same_type_for_tbaa (TREE_TYPE (dbase2),
+                             TREE_TYPE (reference_alias_ptr_type (dbase2))) != 1)
+    return true;
+
+  /* If both references are through the same type, they do not alias
+     if the accesses do not overlap.  This does extra disambiguation
+     for mixed/pointer accesses but requires strict aliasing.
+     For MEM_REFs we require that the component-ref offset we computed
+     is relative to the start of the type which we ensure by
+     comparing rvalue and access type and disregarding the constant
+     pointer offset.  */
+  if ((TREE_CODE (base1) != TARGET_MEM_REF
+       || (!TMR_INDEX (base1) && !TMR_INDEX2 (base1)))
+      && same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (dbase2)) == 1)
+    return ranges_overlap_p (doffset1, max_size1, doffset2, max_size2);
+
   /* Do access-path based disambiguation.  */
   if (ref1 && ref2
-      && handled_component_p (ref1)
-      && handled_component_p (ref2)
-      && TREE_CODE (base1) != TARGET_MEM_REF
-      && (TREE_CODE (base1) != MEM_REF
-          || same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1))
+      && (handled_component_p (ref1) || handled_component_p (ref2)))
     return aliasing_component_refs_p (ref1, ref1_alias_set,
                                       base1_alias_set, offset1, max_size1,
@@ -925,12 +936,12 @@ indirect_refs_may_alias_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
   /* If both references are through the same type, they do not alias
      if the accesses do not overlap.  This does extra disambiguation
      for mixed/pointer accesses but requires strict aliasing.  */
-  if ((TREE_CODE (base1) != TARGET_MEM_REF || !TMR_INDEX (base1))
-      && (TREE_CODE (base2) != TARGET_MEM_REF || !TMR_INDEX (base2))
-      && (TREE_CODE (base1) != MEM_REF
-          || same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1)
-      && (TREE_CODE (base2) != MEM_REF
-          || same_type_for_tbaa (TREE_TYPE (base2), TREE_TYPE (ptrtype2)) == 1)
+  if ((TREE_CODE (base1) != TARGET_MEM_REF
+       || (!TMR_INDEX (base1) && !TMR_INDEX2 (base1)))
+      && (TREE_CODE (base2) != TARGET_MEM_REF
+          || (!TMR_INDEX (base2) && !TMR_INDEX2 (base2)))
+      && same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1
+      && same_type_for_tbaa (TREE_TYPE (base2), TREE_TYPE (ptrtype2)) == 1
       && same_type_for_tbaa (TREE_TYPE (ptrtype1), TREE_TYPE (ptrtype2)) == 1)
     return ranges_overlap_p (offset1, max_size1, offset2, max_size2);
 
@@ -942,14 +953,9 @@ indirect_refs_may_alias_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
 
   /* Do access-path based disambiguation.  */
   if (ref1 && ref2
-      && handled_component_p (ref1)
-      && handled_component_p (ref2)
-      && TREE_CODE (base1) != TARGET_MEM_REF
-      && TREE_CODE (base2) != TARGET_MEM_REF
-      && (TREE_CODE (base1) != MEM_REF
-          || same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1)
-      && (TREE_CODE (base2) != MEM_REF
-          || same_type_for_tbaa (TREE_TYPE (base2), TREE_TYPE (ptrtype2)) == 1))
+      && (handled_component_p (ref1) || handled_component_p (ref2))
+      && same_type_for_tbaa (TREE_TYPE (base1), TREE_TYPE (ptrtype1)) == 1
+      && same_type_for_tbaa (TREE_TYPE (base2), TREE_TYPE (ptrtype2)) == 1)
     return aliasing_component_refs_p (ref1, ref1_alias_set,
                                       base1_alias_set, offset1, max_size1,
@@ -1017,6 +1023,11 @@ refs_may_alias_p_1 (ao_ref *ref1, ao_ref *ref2, bool tbaa_p)
       || TREE_CODE (base2) == LABEL_DECL)
     return true;
 
+  /* Two volatile accesses always conflict.  */
+  if (ref1->volatile_p
+      && ref2->volatile_p)
+    return true;
+
   /* Defer to simple offset based disambiguation if we have
      references based on two decls.  Do this before defering to
      TBAA to handle must-alias cases in conformance with the
@@ -1140,6 +1151,11 @@ ref_maybe_used_by_call_p_1 (gimple call, ao_ref *ref)
   if (!base)
     return true;
 
+  /* A call that is not without side-effects might involve volatile
+     accesses and thus conflicts with all other volatile accesses.  */
+  if (ref->volatile_p)
+    return true;
+
   /* If the reference is based on a decl that is not aliased the call
      cannot possibly use it.  */
   if (DECL_P (base)
@@ -1157,8 +1173,20 @@ ref_maybe_used_by_call_p_1 (gimple call, ao_ref *ref)
       && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL)
     switch (DECL_FUNCTION_CODE (callee))
       {
-        /* All the following functions clobber memory pointed to by
-           their first argument.  */
+        /* All the following functions read memory pointed to by
+           their second argument.  strcat/strncat additionally
+           reads memory pointed to by the first argument.  */
+      case BUILT_IN_STRCAT:
+      case BUILT_IN_STRNCAT:
+        {
+          ao_ref dref;
+          ao_ref_init_from_ptr_and_size (&dref,
+                                         gimple_call_arg (call, 0),
+                                         NULL_TREE);
+          if (refs_may_alias_p_1 (&dref, ref, false))
+            return true;
+        }
+        /* FALLTHRU */
       case BUILT_IN_STRCPY:
      case BUILT_IN_STRNCPY:
      case BUILT_IN_MEMCPY:
@@ -1166,8 +1194,8 @@ ref_maybe_used_by_call_p_1 (gimple call, ao_ref *ref)
       case BUILT_IN_MEMPCPY:
       case BUILT_IN_STPCPY:
       case BUILT_IN_STPNCPY:
-      case BUILT_IN_STRCAT:
-      case BUILT_IN_STRNCAT:
+      case BUILT_IN_TM_MEMCPY:
+      case BUILT_IN_TM_MEMMOVE:
         {
           ao_ref dref;
           tree size = NULL_TREE;
@@ -1178,6 +1206,34 @@ ref_maybe_used_by_call_p_1 (gimple call, ao_ref *ref)
                                          size);
           return refs_may_alias_p_1 (&dref, ref, false);
         }
+      case BUILT_IN_STRCAT_CHK:
+      case BUILT_IN_STRNCAT_CHK:
+        {
+          ao_ref dref;
+          ao_ref_init_from_ptr_and_size (&dref,
+                                         gimple_call_arg (call, 0),
+                                         NULL_TREE);
+          if (refs_may_alias_p_1 (&dref, ref, false))
+            return true;
+        }
+        /* FALLTHRU */
+      case BUILT_IN_STRCPY_CHK:
+      case BUILT_IN_STRNCPY_CHK:
+      case BUILT_IN_MEMCPY_CHK:
+      case BUILT_IN_MEMMOVE_CHK:
+      case BUILT_IN_MEMPCPY_CHK:
+      case BUILT_IN_STPCPY_CHK:
+      case BUILT_IN_STPNCPY_CHK:
+        {
+          ao_ref dref;
+          tree size = NULL_TREE;
+          if (gimple_call_num_args (call) == 4)
+            size = gimple_call_arg (call, 2);
+          ao_ref_init_from_ptr_and_size (&dref,
+                                         gimple_call_arg (call, 1),
+                                         size);
+          return refs_may_alias_p_1 (&dref, ref, false);
+        }
       case BUILT_IN_BCOPY:
         {
           ao_ref dref;
@@ -1187,14 +1243,56 @@ ref_maybe_used_by_call_p_1 (gimple call, ao_ref *ref)
                                          size);
           return refs_may_alias_p_1 (&dref, ref, false);
         }
+
+        /* The following functions read memory pointed to by their
+           first argument.  */
+      CASE_BUILT_IN_TM_LOAD (1):
+      CASE_BUILT_IN_TM_LOAD (2):
+      CASE_BUILT_IN_TM_LOAD (4):
+      CASE_BUILT_IN_TM_LOAD (8):
+      CASE_BUILT_IN_TM_LOAD (FLOAT):
+      CASE_BUILT_IN_TM_LOAD (DOUBLE):
+      CASE_BUILT_IN_TM_LOAD (LDOUBLE):
+      CASE_BUILT_IN_TM_LOAD (M64):
+      CASE_BUILT_IN_TM_LOAD (M128):
+      CASE_BUILT_IN_TM_LOAD (M256):
+      case BUILT_IN_TM_LOG:
+      case BUILT_IN_TM_LOG_1:
+      case BUILT_IN_TM_LOG_2:
+      case BUILT_IN_TM_LOG_4:
+      case BUILT_IN_TM_LOG_8:
+      case BUILT_IN_TM_LOG_FLOAT:
+      case BUILT_IN_TM_LOG_DOUBLE:
+      case BUILT_IN_TM_LOG_LDOUBLE:
+      case BUILT_IN_TM_LOG_M64:
+      case BUILT_IN_TM_LOG_M128:
+      case BUILT_IN_TM_LOG_M256:
+        return ptr_deref_may_alias_ref_p_1 (gimple_call_arg (call, 0), ref);
+
+        /* These read memory pointed to by the first argument.  */
+      case BUILT_IN_STRDUP:
+      case BUILT_IN_STRNDUP:
+        {
+          ao_ref dref;
+          tree size = NULL_TREE;
+          if (gimple_call_num_args (call) == 2)
+            size = gimple_call_arg (call, 1);
+          ao_ref_init_from_ptr_and_size (&dref,
+                                         gimple_call_arg (call, 0),
+                                         size);
+          return refs_may_alias_p_1 (&dref, ref, false);
+        }
       /* The following builtins do not read from memory.  */
       case BUILT_IN_FREE:
       case BUILT_IN_MALLOC:
       case BUILT_IN_CALLOC:
       case BUILT_IN_ALLOCA:
+      case BUILT_IN_ALLOCA_WITH_ALIGN:
       case BUILT_IN_STACK_SAVE:
       case BUILT_IN_STACK_RESTORE:
       case BUILT_IN_MEMSET:
+      case BUILT_IN_TM_MEMSET:
+      case BUILT_IN_MEMSET_CHK:
       case BUILT_IN_FREXP:
       case BUILT_IN_FREXPF:
       case BUILT_IN_FREXPL:
@@ -1213,6 +1311,8 @@ ref_maybe_used_by_call_p_1 (gimple call, ao_ref *ref)
       case BUILT_IN_SINCOS:
       case BUILT_IN_SINCOSF:
       case BUILT_IN_SINCOSL:
+      case BUILT_IN_ASSUME_ALIGNED:
+      case BUILT_IN_VA_END:
         return false;
       /* __sync_* builtins and some OpenMP builtins act as threading
          barriers.  */
@@ -1390,6 +1490,11 @@ call_may_clobber_ref_p_1 (gimple call, ao_ref *ref)
       || CONSTANT_CLASS_P (base))
     return false;
 
+  /* A call that is not without side-effects might involve volatile
+     accesses and thus conflicts with all other volatile accesses.  */
+  if (ref->volatile_p)
+    return true;
+
   /* If the reference is based on a decl that is not aliased the call
      cannot possibly clobber it.  */
   if (DECL_P (base)
@@ -1422,10 +1527,53 @@ call_may_clobber_ref_p_1 (gimple call, ao_ref *ref)
       case BUILT_IN_STRCAT:
       case BUILT_IN_STRNCAT:
       case BUILT_IN_MEMSET:
+      case BUILT_IN_TM_MEMSET:
+      CASE_BUILT_IN_TM_STORE (1):
+      CASE_BUILT_IN_TM_STORE (2):
+      CASE_BUILT_IN_TM_STORE (4):
+      CASE_BUILT_IN_TM_STORE (8):
+      CASE_BUILT_IN_TM_STORE (FLOAT):
+      CASE_BUILT_IN_TM_STORE (DOUBLE):
+      CASE_BUILT_IN_TM_STORE (LDOUBLE):
+      CASE_BUILT_IN_TM_STORE (M64):
+      CASE_BUILT_IN_TM_STORE (M128):
+      CASE_BUILT_IN_TM_STORE (M256):
+      case BUILT_IN_TM_MEMCPY:
+      case BUILT_IN_TM_MEMMOVE:
         {
           ao_ref dref;
           tree size = NULL_TREE;
-          if (gimple_call_num_args (call) == 3)
+          /* Don't pass in size for strncat, as the maximum size
+             is strlen (dest) + n + 1 instead of n, resp.
+             n + 1 at dest + strlen (dest), but strlen (dest) isn't
+             known.  */
+          if (gimple_call_num_args (call) == 3
+              && DECL_FUNCTION_CODE (callee) != BUILT_IN_STRNCAT)
+            size = gimple_call_arg (call, 2);
+          ao_ref_init_from_ptr_and_size (&dref,
+                                         gimple_call_arg (call, 0),
+                                         size);
+          return refs_may_alias_p_1 (&dref, ref, false);
+        }
+      case BUILT_IN_STRCPY_CHK:
+      case BUILT_IN_STRNCPY_CHK:
+      case BUILT_IN_MEMCPY_CHK:
+      case BUILT_IN_MEMMOVE_CHK:
+      case BUILT_IN_MEMPCPY_CHK:
+      case BUILT_IN_STPCPY_CHK:
+      case BUILT_IN_STPNCPY_CHK:
+      case BUILT_IN_STRCAT_CHK:
+      case BUILT_IN_STRNCAT_CHK:
+      case BUILT_IN_MEMSET_CHK:
+        {
+          ao_ref dref;
+          tree size = NULL_TREE;
+          /* Don't pass in size for __strncat_chk, as the maximum size
+             is strlen (dest) + n + 1 instead of n, resp.
+             n + 1 at dest + strlen (dest), but strlen (dest) isn't
+             known.  */
+          if (gimple_call_num_args (call) == 4
+              && DECL_FUNCTION_CODE (callee) != BUILT_IN_STRNCAT_CHK)
            size = gimple_call_arg (call, 2);
          ao_ref_init_from_ptr_and_size (&dref,
                                         gimple_call_arg (call, 0),
                                         size);
@@ -1445,6 +1593,8 @@ call_may_clobber_ref_p_1 (gimple call, ao_ref *ref)
          being the definition point for the pointer.  */
       case BUILT_IN_MALLOC:
       case BUILT_IN_CALLOC:
+      case BUILT_IN_STRDUP:
+      case BUILT_IN_STRNDUP:
         /* Unix98 specifies that errno is set on allocation failure.  */
         if (flag_errno_math
             && targetm.ref_may_alias_errno (ref))
@@ -1452,11 +1602,14 @@ call_may_clobber_ref_p_1 (gimple call, ao_ref *ref)
         return false;
       case BUILT_IN_STACK_SAVE:
       case BUILT_IN_ALLOCA:
+      case BUILT_IN_ALLOCA_WITH_ALIGN:
+      case BUILT_IN_ASSUME_ALIGNED:
         return false;
       /* Freeing memory kills the pointed-to memory.  More importantly
          the call has to serve as a barrier for moving loads and stores
          across it.  */
       case BUILT_IN_FREE:
+      case BUILT_IN_VA_END:
         {
           tree ptr = gimple_call_arg (call, 0);
           return ptr_deref_may_alias_ref_p_1 (ptr, ref);
         }
@@ -1638,7 +1791,14 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref)
     return false;
 
   if (gimple_has_lhs (stmt)
-      && TREE_CODE (gimple_get_lhs (stmt)) != SSA_NAME)
+      && TREE_CODE (gimple_get_lhs (stmt)) != SSA_NAME
+      /* The assignment is not necessarily carried out if it can throw
+         and we can catch it in the current function where we could inspect
+         the previous value.
+         ??? We only need to care about the RHS throwing.  For aggregate
+         assignments or similar calls and non-call exceptions the LHS
+         might throw as well.  */
+      && !stmt_can_throw_internal (stmt))
     {
       tree base, lhs = gimple_get_lhs (stmt);
       HOST_WIDE_INT size, offset, max_size;
@@ -1669,6 +1829,10 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref)
       case BUILT_IN_MEMPCPY:
       case BUILT_IN_MEMMOVE:
       case BUILT_IN_MEMSET:
+      case BUILT_IN_MEMCPY_CHK:
+      case BUILT_IN_MEMPCPY_CHK:
+      case BUILT_IN_MEMMOVE_CHK:
+      case BUILT_IN_MEMSET_CHK:
         {
           tree dest = gimple_call_arg (stmt, 0);
           tree len = gimple_call_arg (stmt, 2);
@@ -1691,10 +1855,23 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref)
                           / BITS_PER_UNIT)))
                 return true;
             }
+          break;
         }
+
+      case BUILT_IN_VA_END:
+        {
+          tree ptr = gimple_call_arg (stmt, 0);
+          if (TREE_CODE (ptr) == ADDR_EXPR)
+            {
+              tree base = ao_ref_base (ref);
+              if (TREE_OPERAND (ptr, 0) == base)
+                return true;
+            }
+          break;
+        }
+
       default:;
       }
-    }
   return false;
 }
 
@@ -1716,6 +1893,8 @@
 static bool
 maybe_skip_until (gimple phi, tree target, ao_ref *ref,
                   tree vuse, bitmap *visited)
 {
+  basic_block bb = gimple_bb (phi);
+
   if (!*visited)
     *visited = BITMAP_ALLOC (NULL);
 
@@ -1740,11 +1919,73 @@ maybe_skip_until (gimple phi, tree target, ao_ref *ref,
       else if (gimple_nop_p (def_stmt)
                || stmt_may_clobber_ref_p_1 (def_stmt, ref))
         return false;
+      /* If we reach a new basic-block see if we already skipped it
+         in a previous walk that ended successfully.  */
+      if (gimple_bb (def_stmt) != bb)
+        {
+          if (!bitmap_set_bit (*visited, SSA_NAME_VERSION (vuse)))
+            return true;
+          bb = gimple_bb (def_stmt);
+        }
       vuse = gimple_vuse (def_stmt);
     }
   return true;
 }
 
+/* For two PHI arguments ARG0 and ARG1 try to skip non-aliasing code
+   until we hit the phi argument definition that dominates the other one.
+   Return that, or NULL_TREE if there is no such definition.  */
+
+static tree
+get_continuation_for_phi_1 (gimple phi, tree arg0, tree arg1,
+                            ao_ref *ref, bitmap *visited)
+{
+  gimple def0 = SSA_NAME_DEF_STMT (arg0);
+  gimple def1 = SSA_NAME_DEF_STMT (arg1);
+  tree common_vuse;
+
+  if (arg0 == arg1)
+    return arg0;
+  else if (gimple_nop_p (def0)
+           || (!gimple_nop_p (def1)
+               && dominated_by_p (CDI_DOMINATORS,
+                                  gimple_bb (def1), gimple_bb (def0))))
+    {
+      if (maybe_skip_until (phi, arg0, ref, arg1, visited))
+        return arg0;
+    }
+  else if (gimple_nop_p (def1)
+           || dominated_by_p (CDI_DOMINATORS,
+                              gimple_bb (def0), gimple_bb (def1)))
+    {
+      if (maybe_skip_until (phi, arg1, ref, arg0, visited))
+        return arg1;
+    }
+  /* Special case of a diamond:
+       MEM_1 = ...
+       goto (cond) ? L1 : L2
+       L1: store1 = ...    #MEM_2 = vuse(MEM_1)
+           goto L3
+       L2: store2 = ...    #MEM_3 = vuse(MEM_1)
+       L3: MEM_4 = PHI<MEM_2, MEM_3>
+     We were called with the PHI at L3, MEM_2 and MEM_3 don't
+     dominate each other, but still we can easily skip this PHI node
+     if we recognize that the vuse MEM operand is the same for both,
+     and that we can skip both statements (they don't clobber us).
+     This is still linear.  Don't use maybe_skip_until, that might
+     potentially be slow.  */
+  else if ((common_vuse = gimple_vuse (def0))
+           && common_vuse == gimple_vuse (def1))
+    {
+      if (!stmt_may_clobber_ref_p_1 (def0, ref)
+          && !stmt_may_clobber_ref_p_1 (def1, ref))
+        return common_vuse;
+    }
+
+  return NULL_TREE;
+}
+
+
 /* Starting from a PHI node for the virtual operand of the memory reference
    REF find a continuation virtual operand that allows to continue walking
    statements dominating PHI skipping only statements that cannot possibly
@@ -1760,53 +2001,41 @@ get_continuation_for_phi (gimple phi, ao_ref *ref, bitmap *visited)
   if (nargs == 1)
     return PHI_ARG_DEF (phi, 0);
 
-  /* For two arguments try to skip non-aliasing code until we hit
-     the phi argument definition that dominates the other one.  */
-  if (nargs == 2)
+  /* For two or more arguments try to pairwise skip non-aliasing code
+     until we hit the phi argument definition that dominates the other one.  */
+  else if (nargs >= 2)
     {
-      tree arg0 = PHI_ARG_DEF (phi, 0);
-      tree arg1 = PHI_ARG_DEF (phi, 1);
-      gimple def0 = SSA_NAME_DEF_STMT (arg0);
-      gimple def1 = SSA_NAME_DEF_STMT (arg1);
-      tree common_vuse;
+      tree arg0, arg1;
+      unsigned i;
+
+      /* Find a candidate for the virtual operand which definition
+         dominates those of all others.  */
+      arg0 = PHI_ARG_DEF (phi, 0);
+      if (!SSA_NAME_IS_DEFAULT_DEF (arg0))
+        for (i = 1; i < nargs; ++i)
+          {
+            arg1 = PHI_ARG_DEF (phi, i);
+            if (SSA_NAME_IS_DEFAULT_DEF (arg1))
+              {
+                arg0 = arg1;
+                break;
+              }
+            if (dominated_by_p (CDI_DOMINATORS,
+                                gimple_bb (SSA_NAME_DEF_STMT (arg0)),
+                                gimple_bb (SSA_NAME_DEF_STMT (arg1))))
+              arg0 = arg1;
+          }
 
-      if (arg0 == arg1)
-        return arg0;
-      else if (gimple_nop_p (def0)
-               || (!gimple_nop_p (def1)
-                   && dominated_by_p (CDI_DOMINATORS,
-                                      gimple_bb (def1), gimple_bb (def0))))
-        {
-          if (maybe_skip_until (phi, arg0, ref, arg1, visited))
-            return arg0;
-        }
-      else if (gimple_nop_p (def1)
-               || dominated_by_p (CDI_DOMINATORS,
-                                  gimple_bb (def0), gimple_bb (def1)))
-        {
-          if (maybe_skip_until (phi, arg1, ref, arg0, visited))
-            return arg1;
-        }
-      /* Special case of a diamond:
-           MEM_1 = ...
-           goto (cond) ? L1 : L2
-           L1: store1 = ...    #MEM_2 = vuse(MEM_1)
-               goto L3
-           L2: store2 = ...    #MEM_3 = vuse(MEM_1)
-           L3: MEM_4 = PHI<MEM_2, MEM_3>
-         We were called with the PHI at L3, MEM_2 and MEM_3 don't
-         dominate each other, but still we can easily skip this PHI node
-         if we recognize that the vuse MEM operand is the same for both,
-         and that we can skip both statements (they don't clobber us).
-         This is still linear.  Don't use maybe_skip_until, that might
-         potentially be slow.  */
-      else if ((common_vuse = gimple_vuse (def0))
-               && common_vuse == gimple_vuse (def1))
+      /* Then pairwise reduce against the found candidate.  */
+      for (i = 0; i < nargs; ++i)
         {
-          if (!stmt_may_clobber_ref_p_1 (def0, ref)
-              && !stmt_may_clobber_ref_p_1 (def1, ref))
-            return common_vuse;
+          arg1 = PHI_ARG_DEF (phi, i);
+          arg0 = get_continuation_for_phi_1 (phi, arg0, arg1, ref, visited);
+          if (!arg0)
+            return NULL_TREE;
        }
+
+      return arg0;
     }
 
   return NULL_TREE;