* tree-ssa-ccp.c (insert_clobber_before_stack_restore): Recurse on copy
gcc/tree-ssa-ccp.c
index 3a2b51c..ac544d3 100644
@@ -1,6 +1,6 @@
 /* Conditional constant propagation pass for the GNU compiler.
    Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
-   2010 Free Software Foundation, Inc.
+   2010, 2011, 2012, 2013 Free Software Foundation, Inc.
    Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
    Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
 
@@ -99,81 +99,6 @@ along with GCC; see the file COPYING3.  If not see
    array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
    final substitution and folding.
 
-
-   Constant propagation in stores and loads (STORE-CCP)
-   ----------------------------------------------------
-
-   While CCP has all the logic to propagate constants in GIMPLE
-   registers, it is missing the ability to associate constants with
-   stores and loads (i.e., pointer dereferences, structures and
-   global/aliased variables).  We don't keep loads and stores in
-   SSA, but we do build a factored use-def web for them (in the
-   virtual operands).
-
-   For instance, consider the following code fragment:
-
-         struct A a;
-         const int B = 42;
-
-         void foo (int i)
-         {
-           if (i > 10)
-             a.a = 42;
-           else
-             {
-               a.b = 21;
-               a.a = a.b + 21;
-             }
-
-           if (a.a != B)
-             never_executed ();
-         }
-
-   We should be able to deduce that the predicate 'a.a != B' is always
-   false.  To achieve this, we associate constant values to the SSA
-   names in the VDEF operands for each store.  Additionally,
-   since we also glob partial loads/stores with the base symbol, we
-   also keep track of the memory reference where the constant value
-   was stored (in the MEM_REF field of PROP_VALUE_T).  For instance,
-
-        # a_5 = VDEF <a_4>
-        a.a = 2;
-
-        # VUSE <a_5>
-        x_3 = a.b;
-
-   In the example above, CCP will associate value '2' with 'a_5', but
-   it would be wrong to replace the load from 'a.b' with '2', because
-   '2' had been stored into a.a.
-
-   Note that the initial value of virtual operands is VARYING, not
-   UNDEFINED.  Consider, for instance global variables:
-
-       int A;
-
-       foo (int i)
-       {
-         if (i_3 > 10)
-           A_4 = 3;
-          # A_5 = PHI (A_4, A_2);
-
-         # VUSE <A_5>
-         A.0_6 = A;
-
-         return A.0_6;
-       }
-
-   The value of A_2 cannot be assumed to be UNDEFINED, as it may have
-   been defined outside of foo.  If we were to assume it UNDEFINED, we
-   would erroneously optimize the above into 'return 3;'.
-
-   Though STORE-CCP is not too expensive, it does have to do more work
-   than regular CCP, so it is only enabled at -O2.  Both regular CCP
-   and STORE-CCP use the exact same algorithm.  The only distinction
-   is that when doing STORE-CCP, the boolean variable DO_STORE_CCP is
-   set to true.  This affects the evaluation of statements and PHI
-   nodes.
-
    References:
 
      Constant propagation with conditional branches,
@@ -194,9 +119,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "tm_p.h"
 #include "basic-block.h"
 #include "output.h"
-#include "expr.h"
 #include "function.h"
-#include "diagnostic.h"
 #include "tree-pretty-print.h"
 #include "gimple-pretty-print.h"
 #include "timevar.h"
@@ -207,8 +130,10 @@ along with GCC; see the file COPYING3.  If not see
 #include "value-prof.h"
 #include "langhooks.h"
 #include "target.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
 #include "dbgcnt.h"
+#include "gimple-fold.h"
+#include "params.h"
 
 
 /* Possible lattice values.  */
@@ -220,6 +145,20 @@ typedef enum
   VARYING
 } ccp_lattice_t;
 
+struct prop_value_d {
+    /* Lattice value.  */
+    ccp_lattice_t lattice_val;
+
+    /* Propagated value.  */
+    tree value;
+
+    /* Mask that applies to the propagated value during CCP.  For
+       X with a CONSTANT lattice value, X & ~mask == value & ~mask.  */
+    double_int mask;
+};
+
+typedef struct prop_value_d prop_value_t;
+
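
For illustration only (not part of the patch): the struct above encodes a
partially known constant, where a set bit in MASK marks a bit of VALUE as
unknown, so the invariant for any runtime value X is X & ~mask == value & ~mask.
A minimal standalone sketch of that representation, using uint64_t in place of
GCC's double_int and hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Partially known constant: a set mask bit marks an unknown bit.  */
struct bit_value
{
  uint64_t value;   /* Known bit values; unknown positions are irrelevant.  */
  uint64_t mask;    /* 1 = unknown, 0 = known.  */
};

/* True if the concrete value X agrees with BV on all known bits.  */
static int
compatible_p (struct bit_value bv, uint64_t x)
{
  return (x & ~bv.mask) == (bv.value & ~bv.mask);
}

int
main (void)
{
  /* "Some multiple of 8": the low three bits are known to be zero.  */
  struct bit_value p = { 0, ~UINT64_C (7) };
  printf ("%d %d\n", compatible_p (p, 64), compatible_p (p, 65));  /* 1 0 */
  return 0;
}
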
 /* Array of propagated constant values.  After propagation,
    CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
    the constant is held in an SSA name representing a memory store
@@ -249,7 +188,18 @@ dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val)
       break;
     case CONSTANT:
       fprintf (outf, "%sCONSTANT ", prefix);
-      print_generic_expr (outf, val.value, dump_flags);
+      if (TREE_CODE (val.value) != INTEGER_CST
+         || double_int_zero_p (val.mask))
+       print_generic_expr (outf, val.value, dump_flags);
+      else
+       {
+         double_int cval = double_int_and_not (tree_to_double_int (val.value),
+                                               val.mask);
+         fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX,
+                  prefix, cval.high, cval.low);
+         fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")",
+                  val.mask.high, val.mask.low);
+       }
       break;
     default:
       gcc_unreachable ();
@@ -291,7 +241,7 @@ static prop_value_t
 get_default_value (tree var)
 {
   tree sym = SSA_NAME_VAR (var);
-  prop_value_t val = { UNINITIALIZED, NULL_TREE };
+  prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } };
   gimple stmt;
 
   stmt = SSA_NAME_DEF_STMT (var);
@@ -302,10 +252,14 @@ get_default_value (tree var)
         before being initialized.  If VAR is a local variable, we
         can assume initially that it is UNDEFINED, otherwise we must
         consider it VARYING.  */
-      if (is_gimple_reg (sym) && TREE_CODE (sym) != PARM_DECL)
+      if (is_gimple_reg (sym)
+         && TREE_CODE (sym) == VAR_DECL)
        val.lattice_val = UNDEFINED;
       else
-       val.lattice_val = VARYING;
+       {
+         val.lattice_val = VARYING;
+         val.mask = double_int_minus_one;
+       }
     }
   else if (is_gimple_assign (stmt)
           /* Value-returning GIMPLE_CALL statements assign to
@@ -331,6 +285,7 @@ get_default_value (tree var)
     {
       /* Otherwise, VAR will never take on a constant value.  */
       val.lattice_val = VARYING;
+      val.mask = double_int_minus_one;
     }
 
   return val;
@@ -356,6 +311,27 @@ get_value (tree var)
   return val;
 }
 
+/* Return the constant tree value associated with VAR.  */
+
+static inline tree
+get_constant_value (tree var)
+{
+  prop_value_t *val;
+  if (TREE_CODE (var) != SSA_NAME)
+    {
+      if (is_gimple_min_invariant (var))
+        return var;
+      return NULL_TREE;
+    }
+  val = get_value (var);
+  if (val
+      && val->lattice_val == CONSTANT
+      && (TREE_CODE (val->value) != INTEGER_CST
+         || double_int_zero_p (val->mask)))
+    return val->value;
+  return NULL_TREE;
+}
+
 /* Sets the value associated with VAR to VARYING.  */
 
 static inline void
@@ -365,6 +341,7 @@ set_value_varying (tree var)
 
   val->lattice_val = VARYING;
   val->value = NULL_TREE;
+  val->mask = double_int_minus_one;
 }
 
 /* For float types, modify the value of VAL to make ccp work correctly
@@ -414,27 +391,82 @@ canonicalize_float_value (prop_value_t *val)
     }
 }
 
+/* Return whether the lattice transition is valid.  */
+
+static bool
+valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
+{
+  /* Lattice transitions must always be monotonically increasing in
+     value.  */
+  if (old_val.lattice_val < new_val.lattice_val)
+    return true;
+
+  if (old_val.lattice_val != new_val.lattice_val)
+    return false;
+
+  if (!old_val.value && !new_val.value)
+    return true;
+
+  /* Now both lattice values are CONSTANT.  */
+
+  /* Allow transitioning from PHI <&x, not executable> == &x
+     to PHI <&x, &y> == common alignment.  */
+  if (TREE_CODE (old_val.value) != INTEGER_CST
+      && TREE_CODE (new_val.value) == INTEGER_CST)
+    return true;
+
+  /* Bit-lattices have to agree in the still valid bits.  */
+  if (TREE_CODE (old_val.value) == INTEGER_CST
+      && TREE_CODE (new_val.value) == INTEGER_CST)
+    return double_int_equal_p
+               (double_int_and_not (tree_to_double_int (old_val.value),
+                                    new_val.mask),
+                double_int_and_not (tree_to_double_int (new_val.value),
+                                    new_val.mask));
+
+  /* Otherwise constant values have to agree.  */
+  return operand_equal_p (old_val.value, new_val.value, 0);
+}
+
 /* Set the value for variable VAR to NEW_VAL.  Return true if the new
    value is different from VAR's previous value.  */
 
 static bool
 set_lattice_value (tree var, prop_value_t new_val)
 {
-  prop_value_t *old_val = get_value (var);
+  /* We can deal with old UNINITIALIZED values just fine here.  */
+  prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
 
   canonicalize_float_value (&new_val);
 
-  /* Lattice transitions must always be monotonically increasing in
-     value.  If *OLD_VAL and NEW_VAL are the same, return false to
-     inform the caller that this was a non-transition.  */
+  /* We have to be careful to not go up the bitwise lattice
+     represented by the mask.
+     ???  This doesn't seem to be the best place to enforce this.  */
+  if (new_val.lattice_val == CONSTANT
+      && old_val->lattice_val == CONSTANT
+      && TREE_CODE (new_val.value) == INTEGER_CST
+      && TREE_CODE (old_val->value) == INTEGER_CST)
+    {
+      double_int diff;
+      diff = double_int_xor (tree_to_double_int (new_val.value),
+                            tree_to_double_int (old_val->value));
+      new_val.mask = double_int_ior (new_val.mask,
+                                    double_int_ior (old_val->mask, diff));
+    }
 
-  gcc_assert (old_val->lattice_val < new_val.lattice_val
-              || (old_val->lattice_val == new_val.lattice_val
-                 && ((!old_val->value && !new_val.value)
-                     || operand_equal_p (old_val->value, new_val.value, 0))));
+  gcc_assert (valid_lattice_transition (*old_val, new_val));
 
-  if (old_val->lattice_val != new_val.lattice_val)
+  /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
+     caller that this was a non-transition.  */
+  if (old_val->lattice_val != new_val.lattice_val
+      || (new_val.lattice_val == CONSTANT
+         && TREE_CODE (new_val.value) == INTEGER_CST
+         && (TREE_CODE (old_val->value) != INTEGER_CST
+             || !double_int_equal_p (new_val.mask, old_val->mask))))
     {
+      /* ???  We would like to delay creation of INTEGER_CSTs from
+        partially constants here.  */
+
       if (dump_file && (dump_flags & TDF_DETAILS))
        {
          dump_lattice_value (dump_file, "Lattice value changed to ", new_val);
@@ -443,13 +475,96 @@ set_lattice_value (tree var, prop_value_t new_val)
 
       *old_val = new_val;
 
-      gcc_assert (new_val.lattice_val != UNDEFINED);
+      gcc_assert (new_val.lattice_val != UNINITIALIZED);
       return true;
     }
 
   return false;
 }
 
+static prop_value_t get_value_for_expr (tree, bool);
+static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
+static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *,
+                              tree, double_int, double_int,
+                              tree, double_int, double_int);
+
+/* Return a double_int that can be used for bitwise simplifications
+   from VAL.  */
+
+static double_int
+value_to_double_int (prop_value_t val)
+{
+  if (val.value
+      && TREE_CODE (val.value) == INTEGER_CST)
+    return tree_to_double_int (val.value);
+  else
+    return double_int_zero;
+}
+
+/* Return the value for the address expression EXPR based on alignment
+   information.  */
+
+static prop_value_t
+get_value_from_alignment (tree expr)
+{
+  tree type = TREE_TYPE (expr);
+  prop_value_t val;
+  unsigned HOST_WIDE_INT bitpos;
+  unsigned int align;
+
+  gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
+
+  align = get_object_alignment_1 (TREE_OPERAND (expr, 0), &bitpos);
+  val.mask
+    = double_int_and_not (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
+                         ? double_int_mask (TYPE_PRECISION (type))
+                         : double_int_minus_one,
+                         uhwi_to_double_int (align / BITS_PER_UNIT - 1));
+  val.lattice_val = double_int_minus_one_p (val.mask) ? VARYING : CONSTANT;
+  if (val.lattice_val == CONSTANT)
+    val.value
+      = double_int_to_tree (type, uhwi_to_double_int (bitpos / BITS_PER_UNIT));
+  else
+    val.value = NULL_TREE;
+
+  return val;
+}
+
+/* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
+   return constant bits extracted from alignment information for
+   invariant addresses.  */
+
+static prop_value_t
+get_value_for_expr (tree expr, bool for_bits_p)
+{
+  prop_value_t val;
+
+  if (TREE_CODE (expr) == SSA_NAME)
+    {
+      val = *get_value (expr);
+      if (for_bits_p
+         && val.lattice_val == CONSTANT
+         && TREE_CODE (val.value) == ADDR_EXPR)
+       val = get_value_from_alignment (val.value);
+    }
+  else if (is_gimple_min_invariant (expr)
+          && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
+    {
+      val.lattice_val = CONSTANT;
+      val.value = expr;
+      val.mask = double_int_zero;
+      canonicalize_float_value (&val);
+    }
+  else if (TREE_CODE (expr) == ADDR_EXPR)
+    val = get_value_from_alignment (expr);
+  else
+    {
+      val.lattice_val = VARYING;
+      val.mask = double_int_minus_one;
+      val.value = NULL_TREE;
+    }
+  return val;
+}
 
 /* Return the likely CCP lattice value for STMT.
 
@@ -538,14 +653,20 @@ likely_value (gimple stmt)
             the undefined operand may be promoted.  */
          return UNDEFINED;
 
+       case ADDR_EXPR:
+         /* If any part of an address is UNDEFINED, like the index
+            of an ARRAY_EXPR, then treat the result as UNDEFINED.  */
+         return UNDEFINED;
+
        default:
          ;
        }
     }
   /* If there was an UNDEFINED operand but the result may be not UNDEFINED
-     fall back to VARYING even if there were CONSTANT operands.  */
+     fall back to CONSTANT.  During iteration UNDEFINED may still drop
+     to CONSTANT.  */
   if (has_undefined_operand)
-    return VARYING;
+    return CONSTANT;
 
   /* We do not consider virtual operands here -- load from read-only
      memory may have only VARYING virtual operands, but still be
@@ -666,6 +787,7 @@ do_dbg_cnt (void)
       if (!dbg_cnt (ccp))
         {
           const_val[i].lattice_val = VARYING;
+         const_val[i].mask = double_int_minus_one;
           const_val[i].value = NULL_TREE;
         }
     }
@@ -681,10 +803,43 @@ static bool
 ccp_finalize (void)
 {
   bool something_changed;
+  unsigned i;
 
   do_dbg_cnt ();
+
+  /* Derive alignment and misalignment information from partially
+     constant pointers in the lattice.  */
+  for (i = 1; i < num_ssa_names; ++i)
+    {
+      tree name = ssa_name (i);
+      prop_value_t *val;
+      struct ptr_info_def *pi;
+      unsigned int tem, align;
+
+      if (!name
+         || !POINTER_TYPE_P (TREE_TYPE (name)))
+       continue;
+
+      val = get_value (name);
+      if (val->lattice_val != CONSTANT
+         || TREE_CODE (val->value) != INTEGER_CST)
+       continue;
+
+      /* Trailing constant bits specify the alignment, trailing value
+        bits the misalignment.  */
+      tem = val->mask.low;
+      align = (tem & -tem);
+      if (align == 1)
+       continue;
+
+      pi = get_ptr_info (name);
+      pi->align = align;
+      pi->misalign = TREE_INT_CST_LOW (val->value) & (align - 1);
+    }
+
   /* Perform substitutions based on the known constant values.  */
-  something_changed = substitute_and_fold (const_val, ccp_fold_stmt);
+  something_changed = substitute_and_fold (get_constant_value,
+                                          ccp_fold_stmt, true);
 
   free (const_val);
   const_val = NULL;
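
For illustration only (not part of the patch): the loop above recovers pointer
alignment from a lattice value whose low bits are known.  The lowest set bit of
the mask bounds the alignment, and the known low value bits give the
misalignment.  A minimal sketch of that extraction, assuming a byte-granular
mask and using uint64_t in place of double_int:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* Lattice value for a pointer: the low four bits are known to be 0x4,
     everything from bit 4 upwards is unknown.  */
  uint64_t value = 0x4;
  uint64_t mask = ~UINT64_C (0xf);

  /* As in ccp_finalize: trailing constant bits specify the alignment,
     trailing value bits the misalignment.  */
  uint64_t tem = mask;
  uint64_t align = tem & -tem;              /* 16 */
  uint64_t misalign = value & (align - 1);  /* 4: pointer == 4 (mod 16) */

  printf ("align=%llu misalign=%llu\n",
          (unsigned long long) align, (unsigned long long) misalign);
  return 0;
}
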
@@ -720,24 +875,58 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
     {
       /* any M VARYING = VARYING.  */
       val1->lattice_val = VARYING;
+      val1->mask = double_int_minus_one;
       val1->value = NULL_TREE;
     }
   else if (val1->lattice_val == CONSTANT
           && val2->lattice_val == CONSTANT
+          && TREE_CODE (val1->value) == INTEGER_CST
+          && TREE_CODE (val2->value) == INTEGER_CST)
+    {
+      /* Ci M Cj = Ci          if (i == j)
+        Ci M Cj = VARYING      if (i != j)
+
+         For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
+        drop to varying.  */
+      val1->mask
+         = double_int_ior (double_int_ior (val1->mask,
+                                           val2->mask),
+                           double_int_xor (tree_to_double_int (val1->value),
+                                           tree_to_double_int (val2->value)));
+      if (double_int_minus_one_p (val1->mask))
+       {
+         val1->lattice_val = VARYING;
+         val1->value = NULL_TREE;
+       }
+    }
+  else if (val1->lattice_val == CONSTANT
+          && val2->lattice_val == CONSTANT
           && simple_cst_equal (val1->value, val2->value) == 1)
     {
       /* Ci M Cj = Ci          if (i == j)
         Ci M Cj = VARYING      if (i != j)
 
-         If these two values come from memory stores, make sure that
-        they come from the same memory reference.  */
-      val1->lattice_val = CONSTANT;
-      val1->value = val1->value;
+         VAL1 already contains the value we want for equivalent values.  */
+    }
+  else if (val1->lattice_val == CONSTANT
+          && val2->lattice_val == CONSTANT
+          && (TREE_CODE (val1->value) == ADDR_EXPR
+              || TREE_CODE (val2->value) == ADDR_EXPR))
+    {
+      /* When not equal addresses are involved try meeting for
+        alignment.  */
+      prop_value_t tem = *val2;
+      if (TREE_CODE (val1->value) == ADDR_EXPR)
+       *val1 = get_value_for_expr (val1->value, true);
+      if (TREE_CODE (val2->value) == ADDR_EXPR)
+       tem = get_value_for_expr (val2->value, true);
+      ccp_lattice_meet (val1, &tem);
     }
   else
     {
       /* Any other combination is VARYING.  */
       val1->lattice_val = VARYING;
+      val1->mask = double_int_minus_one;
       val1->value = NULL_TREE;
     }
 }
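
For illustration only (not part of the patch): the new INTEGER_CST meet above
keeps the bits on which the two constants agree and marks every disagreeing or
already-unknown bit as unknown.  A minimal sketch with uint64_t in place of
double_int:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* Two fully known PHI arguments, 0x24 and 0x34: they differ only in bit 4.  */
  uint64_t v1 = 0x24, m1 = 0;
  uint64_t v2 = 0x34, m2 = 0;

  /* As in ccp_lattice_meet: unknown where either input is unknown or
     where the known values disagree.  */
  uint64_t mask = m1 | m2 | (v1 ^ v2);

  /* Result: value 0x24 with mask 0x10, i.e. only bit 4 is unknown.  */
  printf ("value=%#llx mask=%#llx\n",
          (unsigned long long) v1, (unsigned long long) mask);
  return 0;
}
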
@@ -798,15 +987,7 @@ ccp_visit_phi_node (gimple phi)
       if (e->flags & EDGE_EXECUTABLE)
        {
          tree arg = gimple_phi_arg (phi, i)->def;
-         prop_value_t arg_val;
-
-         if (is_gimple_min_invariant (arg))
-           {
-             arg_val.lattice_val = CONSTANT;
-             arg_val.value = arg;
-           }
-         else
-           arg_val = *(get_value (arg));
+         prop_value_t arg_val = get_value_for_expr (arg, false);
 
          ccp_lattice_meet (&new_val, &arg_val);
 
@@ -841,6 +1022,20 @@ ccp_visit_phi_node (gimple phi)
     return SSA_PROP_NOT_INTERESTING;
 }
 
+/* Return the constant value for OP or OP otherwise.  */
+
+static tree
+valueize_op (tree op)
+{
+  if (TREE_CODE (op) == SSA_NAME)
+    {
+      tree tem = get_constant_value (op);
+      if (tem)
+       return tem;
+    }
+  return op;
+}
+
 /* CCP specific front-end to the non-destructive constant folding
    routines.
 
@@ -856,464 +1051,456 @@ ccp_fold (gimple stmt)
   location_t loc = gimple_location (stmt);
   switch (gimple_code (stmt))
     {
-    case GIMPLE_ASSIGN:
-      {
-        enum tree_code subcode = gimple_assign_rhs_code (stmt);
-
-        switch (get_gimple_rhs_class (subcode))
-          {
-          case GIMPLE_SINGLE_RHS:
-            {
-              tree rhs = gimple_assign_rhs1 (stmt);
-              enum tree_code_class kind = TREE_CODE_CLASS (subcode);
-
-              if (TREE_CODE (rhs) == SSA_NAME)
-                {
-                  /* If the RHS is an SSA_NAME, return its known constant value,
-                     if any.  */
-                  return get_value (rhs)->value;
-                }
-             /* Handle propagating invariant addresses into address operations.
-                The folding we do here matches that in tree-ssa-forwprop.c.  */
-             else if (TREE_CODE (rhs) == ADDR_EXPR)
-               {
-                 tree *base;
-                 base = &TREE_OPERAND (rhs, 0);
-                 while (handled_component_p (*base))
-                   base = &TREE_OPERAND (*base, 0);
-                 if (TREE_CODE (*base) == INDIRECT_REF
-                     && TREE_CODE (TREE_OPERAND (*base, 0)) == SSA_NAME)
-                   {
-                     prop_value_t *val = get_value (TREE_OPERAND (*base, 0));
-                     if (val->lattice_val == CONSTANT
-                         && TREE_CODE (val->value) == ADDR_EXPR
-                         && may_propagate_address_into_dereference
-                              (val->value, *base))
-                       {
-                         /* We need to return a new tree, not modify the IL
-                            or share parts of it.  So play some tricks to
-                            avoid manually building it.  */
-                         tree ret, save = *base;
-                         *base = TREE_OPERAND (val->value, 0);
-                         ret = unshare_expr (rhs);
-                         recompute_tree_invariant_for_addr_expr (ret);
-                         *base = save;
-                         return ret;
-                       }
-                   }
-               }
-             else if (TREE_CODE (rhs) == CONSTRUCTOR
-                      && TREE_CODE (TREE_TYPE (rhs)) == VECTOR_TYPE
-                      && (CONSTRUCTOR_NELTS (rhs)
-                          == TYPE_VECTOR_SUBPARTS (TREE_TYPE (rhs))))
-               {
-                 unsigned i;
-                 tree val, list;
-
-                 list = NULL_TREE;
-                 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (rhs), i, val)
-                   {
-                     if (TREE_CODE (val) == SSA_NAME
-                         && get_value (val)->lattice_val == CONSTANT)
-                       val = get_value (val)->value;
-                     if (TREE_CODE (val) == INTEGER_CST
-                         || TREE_CODE (val) == REAL_CST
-                         || TREE_CODE (val) == FIXED_CST)
-                       list = tree_cons (NULL_TREE, val, list);
-                     else
-                       return NULL_TREE;
-                   }
-
-                 return build_vector (TREE_TYPE (rhs), nreverse (list));
-               }
-
-              if (kind == tcc_reference)
-               {
-                 if ((TREE_CODE (rhs) == VIEW_CONVERT_EXPR
-                      || TREE_CODE (rhs) == REALPART_EXPR
-                      || TREE_CODE (rhs) == IMAGPART_EXPR)
-                     && TREE_CODE (TREE_OPERAND (rhs, 0)) == SSA_NAME)
-                   {
-                     prop_value_t *val = get_value (TREE_OPERAND (rhs, 0));
-                     if (val->lattice_val == CONSTANT)
-                       return fold_unary_loc (EXPR_LOCATION (rhs),
-                                          TREE_CODE (rhs),
-                                          TREE_TYPE (rhs), val->value);
-                   }
-                 else if (TREE_CODE (rhs) == INDIRECT_REF
-                          && TREE_CODE (TREE_OPERAND (rhs, 0)) == SSA_NAME)
-                   {
-                     prop_value_t *val = get_value (TREE_OPERAND (rhs, 0));
-                     if (val->lattice_val == CONSTANT
-                         && TREE_CODE (val->value) == ADDR_EXPR
-                         && useless_type_conversion_p (TREE_TYPE (rhs),
-                                                       TREE_TYPE (TREE_TYPE (val->value))))
-                       rhs = TREE_OPERAND (val->value, 0);
-                   }
-                 return fold_const_aggregate_ref (rhs);
-               }
-              else if (kind == tcc_declaration)
-                return get_symbol_constant_value (rhs);
-              return rhs;
-            }
-
-          case GIMPLE_UNARY_RHS:
-            {
-              /* Handle unary operators that can appear in GIMPLE form.
-                 Note that we know the single operand must be a constant,
-                 so this should almost always return a simplified RHS.  */
-              tree lhs = gimple_assign_lhs (stmt);
-              tree op0 = gimple_assign_rhs1 (stmt);
-
-              /* Simplify the operand down to a constant.  */
-              if (TREE_CODE (op0) == SSA_NAME)
-                {
-                  prop_value_t *val = get_value (op0);
-                  if (val->lattice_val == CONSTANT)
-                    op0 = get_value (op0)->value;
-                }
-
-             /* Conversions are useless for CCP purposes if they are
-                value-preserving.  Thus the restrictions that
-                useless_type_conversion_p places for pointer type conversions
-                do not apply here.  Substitution later will only substitute to
-                allowed places.  */
-             if (CONVERT_EXPR_CODE_P (subcode)
-                 && POINTER_TYPE_P (TREE_TYPE (lhs))
-                 && POINTER_TYPE_P (TREE_TYPE (op0))
-                 /* Do not allow differences in volatile qualification
-                    as this might get us confused as to whether a
-                    propagation destination statement is volatile
-                    or not.  See PR36988.  */
-                 && (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (lhs)))
-                     == TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (op0)))))
-               {
-                 tree tem;
-                 /* Still try to generate a constant of correct type.  */
-                 if (!useless_type_conversion_p (TREE_TYPE (lhs),
-                                                 TREE_TYPE (op0))
-                     && ((tem = maybe_fold_offset_to_address
-                          (loc,
-                           op0, integer_zero_node, TREE_TYPE (lhs)))
-                         != NULL_TREE))
-                   return tem;
-                 return op0;
-               }
-
-              return
-               fold_unary_ignore_overflow_loc (loc, subcode,
-                                               gimple_expr_type (stmt), op0);
-            }
-
-          case GIMPLE_BINARY_RHS:
-            {
-              /* Handle binary operators that can appear in GIMPLE form.  */
-              tree op0 = gimple_assign_rhs1 (stmt);
-              tree op1 = gimple_assign_rhs2 (stmt);
-
-              /* Simplify the operands down to constants when appropriate.  */
-              if (TREE_CODE (op0) == SSA_NAME)
-                {
-                  prop_value_t *val = get_value (op0);
-                  if (val->lattice_val == CONSTANT)
-                    op0 = val->value;
-                }
-
-              if (TREE_CODE (op1) == SSA_NAME)
-                {
-                  prop_value_t *val = get_value (op1);
-                  if (val->lattice_val == CONSTANT)
-                    op1 = val->value;
-                }
-
-             /* Fold &foo + CST into an invariant reference if possible.  */
-             if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
-                 && TREE_CODE (op0) == ADDR_EXPR
-                 && TREE_CODE (op1) == INTEGER_CST)
-               {
-                 tree tem = maybe_fold_offset_to_address
-                   (loc, op0, op1, TREE_TYPE (op0));
-                 if (tem != NULL_TREE)
-                   return tem;
-               }
-
-              return fold_binary_loc (loc, subcode,
-                                 gimple_expr_type (stmt), op0, op1);
-            }
-
-          default:
-            gcc_unreachable ();
-          }
-      }
-      break;
-
-    case GIMPLE_CALL:
-      {
-       tree fn = gimple_call_fn (stmt);
-       prop_value_t *val;
-
-       if (TREE_CODE (fn) == SSA_NAME)
-         {
-           val = get_value (fn);
-           if (val->lattice_val == CONSTANT)
-             fn = val->value;
-         }
-       if (TREE_CODE (fn) == ADDR_EXPR
-           && TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL
-           && DECL_BUILT_IN (TREE_OPERAND (fn, 0)))
-         {
-           tree *args = XALLOCAVEC (tree, gimple_call_num_args (stmt));
-           tree call, retval;
-           unsigned i;
-           for (i = 0; i < gimple_call_num_args (stmt); ++i)
-             {
-               args[i] = gimple_call_arg (stmt, i);
-               if (TREE_CODE (args[i]) == SSA_NAME)
-                 {
-                   val = get_value (args[i]);
-                   if (val->lattice_val == CONSTANT)
-                     args[i] = val->value;
-                 }
-             }
-           call = build_call_array_loc (loc,
-                                        gimple_call_return_type (stmt),
-                                        fn, gimple_call_num_args (stmt), args);
-           retval = fold_call_expr (EXPR_LOCATION (call), call, false);
-           if (retval)
-             /* fold_call_expr wraps the result inside a NOP_EXPR.  */
-             STRIP_NOPS (retval);
-           return retval;
-         }
-       return NULL_TREE;
-      }
-
     case GIMPLE_COND:
       {
         /* Handle comparison operators that can appear in GIMPLE form.  */
-        tree op0 = gimple_cond_lhs (stmt);
-        tree op1 = gimple_cond_rhs (stmt);
+        tree op0 = valueize_op (gimple_cond_lhs (stmt));
+        tree op1 = valueize_op (gimple_cond_rhs (stmt));
         enum tree_code code = gimple_cond_code (stmt);
-
-        /* Simplify the operands down to constants when appropriate.  */
-        if (TREE_CODE (op0) == SSA_NAME)
-          {
-            prop_value_t *val = get_value (op0);
-            if (val->lattice_val == CONSTANT)
-              op0 = val->value;
-          }
-
-        if (TREE_CODE (op1) == SSA_NAME)
-          {
-            prop_value_t *val = get_value (op1);
-            if (val->lattice_val == CONSTANT)
-              op1 = val->value;
-          }
-
         return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
       }
 
     case GIMPLE_SWITCH:
       {
-        tree rhs = gimple_switch_index (stmt);
-
-        if (TREE_CODE (rhs) == SSA_NAME)
-          {
-            /* If the RHS is an SSA_NAME, return its known constant value,
-               if any.  */
-            return get_value (rhs)->value;
-          }
-
-        return rhs;
+       /* Return the constant switch index.  */
+        return valueize_op (gimple_switch_index (stmt));
       }
 
+    case GIMPLE_ASSIGN:
+    case GIMPLE_CALL:
+      return gimple_fold_stmt_to_constant_1 (stmt, valueize_op);
+
     default:
       gcc_unreachable ();
     }
 }
 
+/* Apply the operation CODE in type TYPE to the value, mask pair
+   RVAL and RMASK representing a value of type RTYPE and set
+   the value, mask pair *VAL and *MASK to the result.  */
 
-/* Return the tree representing the element referenced by T if T is an
-   ARRAY_REF or COMPONENT_REF into constant aggregates.  Return
-   NULL_TREE otherwise.  */
-
-tree
-fold_const_aggregate_ref (tree t)
+static void
+bit_value_unop_1 (enum tree_code code, tree type,
+                 double_int *val, double_int *mask,
+                 tree rtype, double_int rval, double_int rmask)
 {
-  prop_value_t *value;
-  tree base, ctor, idx, field;
-  unsigned HOST_WIDE_INT cnt;
-  tree cfield, cval;
-
-  if (TREE_CODE_CLASS (TREE_CODE (t)) == tcc_declaration)
-    return get_symbol_constant_value (t);
-
-  switch (TREE_CODE (t))
+  switch (code)
     {
-    case ARRAY_REF:
-      /* Get a CONSTRUCTOR.  If BASE is a VAR_DECL, get its
-        DECL_INITIAL.  If BASE is a nested reference into another
-        ARRAY_REF or COMPONENT_REF, make a recursive call to resolve
-        the inner reference.  */
-      base = TREE_OPERAND (t, 0);
-      switch (TREE_CODE (base))
-       {
-       case VAR_DECL:
-         if (!TREE_READONLY (base)
-             || TREE_CODE (TREE_TYPE (base)) != ARRAY_TYPE
-             || !targetm.binds_local_p (base))
-           return NULL_TREE;
-
-         ctor = DECL_INITIAL (base);
-         break;
+    case BIT_NOT_EXPR:
+      *mask = rmask;
+      *val = double_int_not (rval);
+      break;
 
-       case ARRAY_REF:
-       case COMPONENT_REF:
-         ctor = fold_const_aggregate_ref (base);
-         break;
+    case NEGATE_EXPR:
+      {
+       double_int temv, temm;
+       /* Return ~rval + 1.  */
+       bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
+       bit_value_binop_1 (PLUS_EXPR, type, val, mask,
+                        type, temv, temm,
+                        type, double_int_one, double_int_zero);
+       break;
+      }
 
-       case STRING_CST:
-       case CONSTRUCTOR:
-         ctor = base;
-         break;
+    CASE_CONVERT:
+      {
+       bool uns;
+
+       /* First extend mask and value according to the original type.  */
+       uns = (TREE_CODE (rtype) == INTEGER_TYPE && TYPE_IS_SIZETYPE (rtype)
+              ? 0 : TYPE_UNSIGNED (rtype));
+       *mask = double_int_ext (rmask, TYPE_PRECISION (rtype), uns);
+       *val = double_int_ext (rval, TYPE_PRECISION (rtype), uns);
+
+       /* Then extend mask and value according to the target type.  */
+       uns = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)
+              ? 0 : TYPE_UNSIGNED (type));
+       *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
+       *val = double_int_ext (*val, TYPE_PRECISION (type), uns);
+       break;
+      }
 
-       default:
-         return NULL_TREE;
-       }
+    default:
+      *mask = double_int_minus_one;
+      break;
+    }
+}
 
-      if (ctor == NULL_TREE
-         || (TREE_CODE (ctor) != CONSTRUCTOR
-             && TREE_CODE (ctor) != STRING_CST)
-         || !TREE_STATIC (ctor))
-       return NULL_TREE;
+/* Apply the operation CODE in type TYPE to the value, mask pairs
+   R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
+   and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */
 
-      /* Get the index.  If we have an SSA_NAME, try to resolve it
-        with the current lattice value for the SSA_NAME.  */
-      idx = TREE_OPERAND (t, 1);
-      switch (TREE_CODE (idx))
-       {
-       case SSA_NAME:
-         if ((value = get_value (idx))
-             && value->lattice_val == CONSTANT
-             && TREE_CODE (value->value) == INTEGER_CST)
-           idx = value->value;
-         else
-           return NULL_TREE;
-         break;
+static void
+bit_value_binop_1 (enum tree_code code, tree type,
+                  double_int *val, double_int *mask,
+                  tree r1type, double_int r1val, double_int r1mask,
+                  tree r2type, double_int r2val, double_int r2mask)
+{
+  bool uns = (TREE_CODE (type) == INTEGER_TYPE
+             && TYPE_IS_SIZETYPE (type) ? 0 : TYPE_UNSIGNED (type));
+  /* Assume we'll get a constant result.  Use an initial varying value,
+     we fall back to varying in the end if necessary.  */
+  *mask = double_int_minus_one;
+  switch (code)
+    {
+    case BIT_AND_EXPR:
+      /* The mask is constant where there is a known not
+        set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
+      *mask = double_int_and (double_int_ior (r1mask, r2mask),
+                             double_int_and (double_int_ior (r1val, r1mask),
+                                             double_int_ior (r2val, r2mask)));
+      *val = double_int_and (r1val, r2val);
+      break;
 
-       case INTEGER_CST:
-         break;
+    case BIT_IOR_EXPR:
+      /* The mask is constant where there is a known
+        set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
+      *mask = double_int_and_not
+               (double_int_ior (r1mask, r2mask),
+                double_int_ior (double_int_and_not (r1val, r1mask),
+                                double_int_and_not (r2val, r2mask)));
+      *val = double_int_ior (r1val, r2val);
+      break;
 
-       default:
-         return NULL_TREE;
-       }
+    case BIT_XOR_EXPR:
+      /* m1 | m2  */
+      *mask = double_int_ior (r1mask, r2mask);
+      *val = double_int_xor (r1val, r2val);
+      break;
 
-      /* Fold read from constant string.  */
-      if (TREE_CODE (ctor) == STRING_CST)
+    case LROTATE_EXPR:
+    case RROTATE_EXPR:
+      if (double_int_zero_p (r2mask))
        {
-         if ((TYPE_MODE (TREE_TYPE (t))
-              == TYPE_MODE (TREE_TYPE (TREE_TYPE (ctor))))
-             && (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_TYPE (ctor))))
-                 == MODE_INT)
-             && GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (ctor)))) == 1
-             && compare_tree_int (idx, TREE_STRING_LENGTH (ctor)) < 0)
-           return build_int_cst_type (TREE_TYPE (t),
-                                      (TREE_STRING_POINTER (ctor)
-                                       [TREE_INT_CST_LOW (idx)]));
-         return NULL_TREE;
+         HOST_WIDE_INT shift = r2val.low;
+         if (code == RROTATE_EXPR)
+           shift = -shift;
+         *mask = double_int_lrotate (r1mask, shift, TYPE_PRECISION (type));
+         *val = double_int_lrotate (r1val, shift, TYPE_PRECISION (type));
        }
-
-      /* Whoo-hoo!  I'll fold ya baby.  Yeah!  */
-      FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), cnt, cfield, cval)
-       if (tree_int_cst_equal (cfield, idx))
-         {
-           STRIP_NOPS (cval);
-           if (TREE_CODE (cval) == ADDR_EXPR)
-             {
-               tree base = get_base_address (TREE_OPERAND (cval, 0));
-               if (base && TREE_CODE (base) == VAR_DECL)
-                 add_referenced_var (base);
-             }
-           return cval;
-         }
       break;
 
-    case COMPONENT_REF:
-      /* Get a CONSTRUCTOR.  If BASE is a VAR_DECL, get its
-        DECL_INITIAL.  If BASE is a nested reference into another
-        ARRAY_REF or COMPONENT_REF, make a recursive call to resolve
-        the inner reference.  */
-      base = TREE_OPERAND (t, 0);
-      switch (TREE_CODE (base))
+    case LSHIFT_EXPR:
+    case RSHIFT_EXPR:
+      /* ???  We can handle partially known shift counts if we know
+        their sign.  That way we can tell that (x << (y | 8)) & 255
+        is zero.  */
+      if (double_int_zero_p (r2mask))
        {
-       case VAR_DECL:
-         if (!TREE_READONLY (base)
-             || TREE_CODE (TREE_TYPE (base)) != RECORD_TYPE
-             || !targetm.binds_local_p (base))
-           return NULL_TREE;
-
-         ctor = DECL_INITIAL (base);
-         break;
-
-       case ARRAY_REF:
-       case COMPONENT_REF:
-         ctor = fold_const_aggregate_ref (base);
-         break;
+         HOST_WIDE_INT shift = r2val.low;
+         if (code == RSHIFT_EXPR)
+           shift = -shift;
+         /* We need to know if we are doing a left or a right shift
+            to properly shift in zeros for left shift and unsigned
+            right shifts and the sign bit for signed right shifts.
+            For signed right shifts we shift in varying in case
+            the sign bit was varying.  */
+         if (shift > 0)
+           {
+             *mask = double_int_lshift (r1mask, shift,
+                                        TYPE_PRECISION (type), false);
+             *val = double_int_lshift (r1val, shift,
+                                       TYPE_PRECISION (type), false);
+           }
+         else if (shift < 0)
+           {
+             /* ???  We can have sizetype related inconsistencies in
+                the IL.  */
+             if ((TREE_CODE (r1type) == INTEGER_TYPE
+                  && (TYPE_IS_SIZETYPE (r1type)
+                      ? 0 : TYPE_UNSIGNED (r1type))) != uns)
+               break;
 
-       default:
-         return NULL_TREE;
+             shift = -shift;
+             *mask = double_int_rshift (r1mask, shift,
+                                        TYPE_PRECISION (type), !uns);
+             *val = double_int_rshift (r1val, shift,
+                                       TYPE_PRECISION (type), !uns);
+           }
+         else
+           {
+             *mask = r1mask;
+             *val = r1val;
+           }
        }
+      break;
 
-      if (ctor == NULL_TREE
-         || TREE_CODE (ctor) != CONSTRUCTOR
-         || !TREE_STATIC (ctor))
-       return NULL_TREE;
+    case PLUS_EXPR:
+    case POINTER_PLUS_EXPR:
+      {
+       double_int lo, hi;
+       /* Do the addition with unknown bits set to zero, to give carry-ins of
+          zero wherever possible.  */
+       lo = double_int_add (double_int_and_not (r1val, r1mask),
+                            double_int_and_not (r2val, r2mask));
+       lo = double_int_ext (lo, TYPE_PRECISION (type), uns);
+       /* Do the addition with unknown bits set to one, to give carry-ins of
+          one wherever possible.  */
+       hi = double_int_add (double_int_ior (r1val, r1mask),
+                            double_int_ior (r2val, r2mask));
+       hi = double_int_ext (hi, TYPE_PRECISION (type), uns);
+       /* Each bit in the result is known if (a) the corresponding bits in
+          both inputs are known, and (b) the carry-in to that bit position
+          is known.  We can check condition (b) by seeing if we got the same
+          result with minimised carries as with maximised carries.  */
+       *mask = double_int_ior (double_int_ior (r1mask, r2mask),
+                               double_int_xor (lo, hi));
+       *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
+       /* It shouldn't matter whether we choose lo or hi here.  */
+       *val = lo;
+       break;
+      }
 
-      field = TREE_OPERAND (t, 1);
+    case MINUS_EXPR:
+      {
+       double_int temv, temm;
+       bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
+                         r2type, r2val, r2mask);
+       bit_value_binop_1 (PLUS_EXPR, type, val, mask,
+                          r1type, r1val, r1mask,
+                          r2type, temv, temm);
+       break;
+      }
 
-      FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), cnt, cfield, cval)
-       if (cfield == field
-           /* FIXME: Handle bit-fields.  */
-           && ! DECL_BIT_FIELD (cfield))
+    case MULT_EXPR:
+      {
+       /* Just track trailing zeros in both operands and transfer
+          them to the other.  */
+       int r1tz = double_int_ctz (double_int_ior (r1val, r1mask));
+       int r2tz = double_int_ctz (double_int_ior (r2val, r2mask));
+       if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT)
          {
-           STRIP_NOPS (cval);
-           if (TREE_CODE (cval) == ADDR_EXPR)
-             {
-               tree base = get_base_address (TREE_OPERAND (cval, 0));
-               if (base && TREE_CODE (base) == VAR_DECL)
-                 add_referenced_var (base);
-             }
-           return cval;
+           *mask = double_int_zero;
+           *val = double_int_zero;
          }
-      break;
+       else if (r1tz + r2tz > 0)
+         {
+           *mask = double_int_not (double_int_mask (r1tz + r2tz));
+           *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
+           *val = double_int_zero;
+         }
+       break;
+      }
 
-    case REALPART_EXPR:
-    case IMAGPART_EXPR:
+    case EQ_EXPR:
+    case NE_EXPR:
       {
-       tree c = fold_const_aggregate_ref (TREE_OPERAND (t, 0));
-       if (c && TREE_CODE (c) == COMPLEX_CST)
-         return fold_build1_loc (EXPR_LOCATION (t),
-                             TREE_CODE (t), TREE_TYPE (t), c);
+       double_int m = double_int_ior (r1mask, r2mask);
+       if (!double_int_equal_p (double_int_and_not (r1val, m),
+                                double_int_and_not (r2val, m)))
+         {
+           *mask = double_int_zero;
+           *val = ((code == EQ_EXPR) ? double_int_zero : double_int_one);
+         }
+       else
+         {
+           /* We know the result of a comparison is always one or zero.  */
+           *mask = double_int_one;
+           *val = double_int_zero;
+         }
        break;
       }
 
-    case INDIRECT_REF:
+    case GE_EXPR:
+    case GT_EXPR:
+      {
+       double_int tem = r1val;
+       r1val = r2val;
+       r2val = tem;
+       tem = r1mask;
+       r1mask = r2mask;
+       r2mask = tem;
+       code = swap_tree_comparison (code);
+      }
+      /* Fallthru.  */
+    case LT_EXPR:
+    case LE_EXPR:
       {
-       tree base = TREE_OPERAND (t, 0);
-       if (TREE_CODE (base) == SSA_NAME
-           && (value = get_value (base))
-           && value->lattice_val == CONSTANT
-           && TREE_CODE (value->value) == ADDR_EXPR
-           && useless_type_conversion_p (TREE_TYPE (t),
-                                         TREE_TYPE (TREE_TYPE (value->value))))
-         return fold_const_aggregate_ref (TREE_OPERAND (value->value, 0));
+       int minmax, maxmin;
+       /* If the most significant bits are not known we know nothing.  */
+       if (double_int_negative_p (r1mask) || double_int_negative_p (r2mask))
+         break;
+
+       /* For comparisons the signedness is in the comparison operands.  */
+       uns = (TREE_CODE (r1type) == INTEGER_TYPE
+              && TYPE_IS_SIZETYPE (r1type) ? 0 : TYPE_UNSIGNED (r1type));
+       /* ???  We can have sizetype related inconsistencies in the IL.  */
+       if ((TREE_CODE (r2type) == INTEGER_TYPE
+            && TYPE_IS_SIZETYPE (r2type) ? 0 : TYPE_UNSIGNED (r2type)) != uns)
+         break;
+
+       /* If we know the most significant bits we know the value
+          ranges by means of treating varying bits as zero
+          or one.  Do a cross comparison of the max/min pairs.  */
+       maxmin = double_int_cmp (double_int_ior (r1val, r1mask),
+                                double_int_and_not (r2val, r2mask), uns);
+       minmax = double_int_cmp (double_int_and_not (r1val, r1mask),
+                                double_int_ior (r2val, r2mask), uns);
+       if (maxmin < 0)  /* r1 is less than r2.  */
+         {
+           *mask = double_int_zero;
+           *val = double_int_one;
+         }
+       else if (minmax > 0)  /* r1 is not less or equal to r2.  */
+         {
+           *mask = double_int_zero;
+           *val = double_int_zero;
+         }
+       else if (maxmin == minmax)  /* r1 and r2 are equal.  */
+         {
+           /* This probably should never happen as we'd have
+              folded the thing during fully constant value folding.  */
+           *mask = double_int_zero;
+           *val = (code == LE_EXPR ? double_int_one :  double_int_zero);
+         }
+       else
+         {
+           /* We know the result of a comparison is always one or zero.  */
+           *mask = double_int_one;
+           *val = double_int_zero;
+         }
        break;
       }
 
-    default:
-      break;
+    default:;
     }
+}
 
-  return NULL_TREE;
+/* Return the propagation value when applying the operation CODE to
+   the value RHS yielding type TYPE.  */
+
+static prop_value_t
+bit_value_unop (enum tree_code code, tree type, tree rhs)
+{
+  prop_value_t rval = get_value_for_expr (rhs, true);
+  double_int value, mask;
+  prop_value_t val;
+
+  if (rval.lattice_val == UNDEFINED)
+    return rval;
+
+  gcc_assert ((rval.lattice_val == CONSTANT
+              && TREE_CODE (rval.value) == INTEGER_CST)
+             || double_int_minus_one_p (rval.mask));
+  bit_value_unop_1 (code, type, &value, &mask,
+                   TREE_TYPE (rhs), value_to_double_int (rval), rval.mask);
+  if (!double_int_minus_one_p (mask))
+    {
+      val.lattice_val = CONSTANT;
+      val.mask = mask;
+      /* ???  Delay building trees here.  */
+      val.value = double_int_to_tree (type, value);
+    }
+  else
+    {
+      val.lattice_val = VARYING;
+      val.value = NULL_TREE;
+      val.mask = double_int_minus_one;
+    }
+  return val;
+}
+
+/* Return the propagation value when applying the operation CODE to
+   the values RHS1 and RHS2 yielding type TYPE.  */
+
+static prop_value_t
+bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
+{
+  prop_value_t r1val = get_value_for_expr (rhs1, true);
+  prop_value_t r2val = get_value_for_expr (rhs2, true);
+  double_int value, mask;
+  prop_value_t val;
+
+  if (r1val.lattice_val == UNDEFINED
+      || r2val.lattice_val == UNDEFINED)
+    {
+      val.lattice_val = VARYING;
+      val.value = NULL_TREE;
+      val.mask = double_int_minus_one;
+      return val;
+    }
+
+  gcc_assert ((r1val.lattice_val == CONSTANT
+              && TREE_CODE (r1val.value) == INTEGER_CST)
+             || double_int_minus_one_p (r1val.mask));
+  gcc_assert ((r2val.lattice_val == CONSTANT
+              && TREE_CODE (r2val.value) == INTEGER_CST)
+             || double_int_minus_one_p (r2val.mask));
+  bit_value_binop_1 (code, type, &value, &mask,
+                    TREE_TYPE (rhs1), value_to_double_int (r1val), r1val.mask,
+                    TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask);
+  if (!double_int_minus_one_p (mask))
+    {
+      val.lattice_val = CONSTANT;
+      val.mask = mask;
+      /* ???  Delay building trees here.  */
+      val.value = double_int_to_tree (type, value);
+    }
+  else
+    {
+      val.lattice_val = VARYING;
+      val.value = NULL_TREE;
+      val.mask = double_int_minus_one;
+    }
+  return val;
+}
+
+/* Return the propagation value when applying __builtin_assume_aligned to
+   its arguments.  */
+
+static prop_value_t
+bit_value_assume_aligned (gimple stmt)
+{
+  tree ptr = gimple_call_arg (stmt, 0), align, misalign = NULL_TREE;
+  tree type = TREE_TYPE (ptr);
+  unsigned HOST_WIDE_INT aligni, misaligni = 0;
+  prop_value_t ptrval = get_value_for_expr (ptr, true);
+  prop_value_t alignval;
+  double_int value, mask;
+  prop_value_t val;
+  if (ptrval.lattice_val == UNDEFINED)
+    return ptrval;
+  gcc_assert ((ptrval.lattice_val == CONSTANT
+              && TREE_CODE (ptrval.value) == INTEGER_CST)
+             || double_int_minus_one_p (ptrval.mask));
+  align = gimple_call_arg (stmt, 1);
+  if (!host_integerp (align, 1))
+    return ptrval;
+  aligni = tree_low_cst (align, 1);
+  if (aligni <= 1
+      || (aligni & (aligni - 1)) != 0)
+    return ptrval;
+  if (gimple_call_num_args (stmt) > 2)
+    {
+      misalign = gimple_call_arg (stmt, 2);
+      if (!host_integerp (misalign, 1))
+       return ptrval;
+      misaligni = tree_low_cst (misalign, 1);
+      if (misaligni >= aligni)
+       return ptrval;
+    }
+  align = build_int_cst_type (type, -aligni);
+  alignval = get_value_for_expr (align, true);
+  bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
+                    type, value_to_double_int (ptrval), ptrval.mask,
+                    type, value_to_double_int (alignval), alignval.mask);
+  if (!double_int_minus_one_p (mask))
+    {
+      val.lattice_val = CONSTANT;
+      val.mask = mask;
+      gcc_assert ((mask.low & (aligni - 1)) == 0);
+      gcc_assert ((value.low & (aligni - 1)) == 0);
+      value.low |= misaligni;
+      /* ???  Delay building trees here.  */
+      val.value = double_int_to_tree (type, value);
+    }
+  else
+    {
+      val.lattice_val = VARYING;
+      val.value = NULL_TREE;
+      val.mask = double_int_minus_one;
+    }
+  return val;
 }
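
For illustration only (not part of the patch): bit_value_binop_1 above tracks
known bits through arithmetic.  For BIT_AND_EXPR a result bit stays unknown
only where neither operand supplies a known zero; for PLUS_EXPR the sum is
computed once with unknown bits as zeros and once as ones, and bits where the
two sums agree (and both inputs are known) are known.  A minimal sketch of
those two cases, using uint64_t in place of double_int and ignoring type
precision:

#include <stdint.h>
#include <stdio.h>

/* Value/mask pair: a set mask bit marks an unknown bit.  */
struct bv { uint64_t val, mask; };

/* BIT_AND_EXPR: mask = (m1 | m2) & (v1 | m1) & (v2 | m2).  */
static struct bv
bv_and (struct bv a, struct bv b)
{
  struct bv r;
  r.mask = (a.mask | b.mask) & (a.val | a.mask) & (b.val | b.mask);
  r.val = a.val & b.val;
  return r;
}

/* PLUS_EXPR: compare the all-carries-zero and all-carries-one sums.  */
static struct bv
bv_add (struct bv a, struct bv b)
{
  struct bv r;
  uint64_t lo = (a.val & ~a.mask) + (b.val & ~b.mask);
  uint64_t hi = (a.val | a.mask) + (b.val | b.mask);
  r.mask = a.mask | b.mask | (lo ^ hi);
  r.val = lo;
  return r;
}

int
main (void)
{
  struct bv x = { 0, ~UINT64_C (0xff) };  /* low byte known to be zero */
  struct bv y = { 0x0f, 0 };              /* constant 15 */
  struct bv a = bv_and (x, y);            /* fully known: 0 */
  struct bv s = bv_add (x, y);            /* low byte known: 0x0f */
  printf ("and: val=%#llx mask=%#llx\n",
          (unsigned long long) a.val, (unsigned long long) a.mask);
  printf ("add: val=%#llx mask=%#llx\n",
          (unsigned long long) s.val, (unsigned long long) s.mask);
  return 0;
}
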
 
 /* Evaluate statement STMT.
@@ -1325,9 +1512,27 @@ evaluate_stmt (gimple stmt)
   prop_value_t val;
   tree simplified = NULL_TREE;
   ccp_lattice_t likelyvalue = likely_value (stmt);
-  bool is_constant;
+  bool is_constant = false;
+  unsigned int align;
 
-  fold_defer_overflow_warnings ();
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    {
+      fprintf (dump_file, "which is likely ");
+      switch (likelyvalue)
+       {
+       case CONSTANT:
+         fprintf (dump_file, "CONSTANT");
+         break;
+       case UNDEFINED:
+         fprintf (dump_file, "UNDEFINED");
+         break;
+       case VARYING:
+         fprintf (dump_file, "VARYING");
+         break;
+       default:;
+       }
+      fprintf (dump_file, "\n");
+    }
 
   /* If the statement is likely to have a CONSTANT result, then try
      to fold the statement to determine the constant value.  */
@@ -1335,7 +1540,19 @@ evaluate_stmt (gimple stmt)
      Since likely_value never returns CONSTANT for calls, we will
      not attempt to fold them, including builtins that may profit.  */
   if (likelyvalue == CONSTANT)
-    simplified = ccp_fold (stmt);
+    {
+      fold_defer_overflow_warnings ();
+      simplified = ccp_fold (stmt);
+      is_constant = simplified && is_gimple_min_invariant (simplified);
+      fold_undefer_overflow_warnings (is_constant, stmt, 0);
+      if (is_constant)
+       {
+         /* The statement produced a constant value.  */
+         val.lattice_val = CONSTANT;
+         val.value = simplified;
+         val.mask = double_int_zero;
+       }
+    }
   /* If the statement is likely to have a VARYING result, then do not
      bother folding the statement.  */
   else if (likelyvalue == VARYING)
@@ -1355,46 +1572,136 @@ evaluate_stmt (gimple stmt)
       else
        /* These cannot satisfy is_gimple_min_invariant without folding.  */
        gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
+      is_constant = simplified && is_gimple_min_invariant (simplified);
+      if (is_constant)
+       {
+         /* The statement produced a constant value.  */
+         val.lattice_val = CONSTANT;
+         val.value = simplified;
+         val.mask = double_int_zero;
+       }
     }
 
-  is_constant = simplified && is_gimple_min_invariant (simplified);
-
-  fold_undefer_overflow_warnings (is_constant, stmt, 0);
-
-  if (dump_file && (dump_flags & TDF_DETAILS))
+  /* Resort to simplification for bitwise tracking.  */
+  if (flag_tree_bit_ccp
+      && (likelyvalue == CONSTANT || is_gimple_call (stmt))
+      && !is_constant)
     {
-      fprintf (dump_file, "which is likely ");
-      switch (likelyvalue)
+      enum gimple_code code = gimple_code (stmt);
+      val.lattice_val = VARYING;
+      val.value = NULL_TREE;
+      val.mask = double_int_minus_one;
+      if (code == GIMPLE_ASSIGN)
        {
-       case CONSTANT:
-         fprintf (dump_file, "CONSTANT");
-         break;
-       case UNDEFINED:
-         fprintf (dump_file, "UNDEFINED");
-         break;
-       case VARYING:
-         fprintf (dump_file, "VARYING");
-         break;
-       default:;
+         enum tree_code subcode = gimple_assign_rhs_code (stmt);
+         tree rhs1 = gimple_assign_rhs1 (stmt);
+         switch (get_gimple_rhs_class (subcode))
+           {
+           case GIMPLE_SINGLE_RHS:
+             if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
+                 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
+               val = get_value_for_expr (rhs1, true);
+             break;
+
+           case GIMPLE_UNARY_RHS:
+             if ((INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
+                  || POINTER_TYPE_P (TREE_TYPE (rhs1)))
+                 && (INTEGRAL_TYPE_P (gimple_expr_type (stmt))
+                     || POINTER_TYPE_P (gimple_expr_type (stmt))))
+               val = bit_value_unop (subcode, gimple_expr_type (stmt), rhs1);
+             break;
+
+           case GIMPLE_BINARY_RHS:
+             if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
+                 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
+               {
+                 tree lhs = gimple_assign_lhs (stmt);
+                 tree rhs2 = gimple_assign_rhs2 (stmt);
+                 val = bit_value_binop (subcode,
+                                        TREE_TYPE (lhs), rhs1, rhs2);
+               }
+             break;
+
+           default:;
+           }
        }
-      fprintf (dump_file, "\n");
+      else if (code == GIMPLE_COND)
+       {
+         enum tree_code code = gimple_cond_code (stmt);
+         tree rhs1 = gimple_cond_lhs (stmt);
+         tree rhs2 = gimple_cond_rhs (stmt);
+         if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
+             || POINTER_TYPE_P (TREE_TYPE (rhs1)))
+           val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
+       }
+      else if (gimple_call_builtin_class_p (stmt, BUILT_IN_NORMAL))
+       {
+         tree fndecl = gimple_call_fndecl (stmt);
+         switch (DECL_FUNCTION_CODE (fndecl))
+           {
+           case BUILT_IN_MALLOC:
+           case BUILT_IN_REALLOC:
+           case BUILT_IN_CALLOC:
+           case BUILT_IN_STRDUP:
+           case BUILT_IN_STRNDUP:
+             val.lattice_val = CONSTANT;
+             val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
+             val.mask = shwi_to_double_int
+                          (~(((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT)
+                             / BITS_PER_UNIT - 1));
+             break;
+
+           case BUILT_IN_ALLOCA:
+           case BUILT_IN_ALLOCA_WITH_ALIGN:
+             align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
+                      ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
+                      : BIGGEST_ALIGNMENT);
+             val.lattice_val = CONSTANT;
+             val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
+             val.mask = shwi_to_double_int
+                          (~(((HOST_WIDE_INT) align)
+                             / BITS_PER_UNIT - 1));
+             break;
+
+           /* These builtins return their first argument, unmodified.  */
+           case BUILT_IN_MEMCPY:
+           case BUILT_IN_MEMMOVE:
+           case BUILT_IN_MEMSET:
+           case BUILT_IN_STRCPY:
+           case BUILT_IN_STRNCPY:
+           case BUILT_IN_MEMCPY_CHK:
+           case BUILT_IN_MEMMOVE_CHK:
+           case BUILT_IN_MEMSET_CHK:
+           case BUILT_IN_STRCPY_CHK:
+           case BUILT_IN_STRNCPY_CHK:
+             val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
+             break;
+
+           case BUILT_IN_ASSUME_ALIGNED:
+             val = bit_value_assume_aligned (stmt);
+             break;
+
+           default:;
+           }
+       }
+      is_constant = (val.lattice_val == CONSTANT);
     }
 
-  if (is_constant)
-    {
-      /* The statement produced a constant value.  */
-      val.lattice_val = CONSTANT;
-      val.value = simplified;
-    }
-  else
+  if (!is_constant)
     {
       /* The statement produced a nonconstant value.  If the statement
         had UNDEFINED operands, then the result of the statement
         should be UNDEFINED.  Otherwise, the statement is VARYING.  */
       if (likelyvalue == UNDEFINED)
-       val.lattice_val = likelyvalue;
+       {
+         val.lattice_val = likelyvalue;
+         val.mask = double_int_zero;
+       }
       else
-       val.lattice_val = VARYING;
+       {
+         val.lattice_val = VARYING;
+         val.mask = double_int_minus_one;
+       }
 
       val.value = NULL_TREE;
     }
@@ -1402,6 +1709,157 @@ evaluate_stmt (gimple stmt)
   return val;
 }
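
The CONSTANT lattice values produced above carry a value/mask pair: a set bit in the mask means that bit of the value is unknown, so the allocation builtins are modelled as a zero value whose mask clears exactly the low alignment bits, i.e. "a pointer whose low bits are known to be zero".  A minimal standalone sketch of that arithmetic, using plain host integers instead of double_int and a hypothetical 128-bit MALLOC_ABI_ALIGNMENT:

#include <stdio.h>

int
main (void)
{
  /* Hypothetical target parameters, both in bits.  */
  long malloc_abi_alignment = 128;
  long bits_per_unit = 8;

  /* Same expression as in evaluate_stmt: value 0, mask bits set where the
     bit is unknown.  */
  long value = 0;
  long mask = ~(malloc_abi_alignment / bits_per_unit - 1);

  long known_zero_low_bits = 0;
  while (!(mask & (1L << known_zero_low_bits)))
    known_zero_low_bits++;

  printf ("value = %#lx, mask = %#lx\n", value, mask);
  printf ("low %ld bits known zero => at least %ld-byte aligned\n",
          known_zero_low_bits, 1L << known_zero_low_bits);
  return 0;
}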
 
+/* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
+   each matching BUILT_IN_STACK_RESTORE.  Mark visited phis in VISITED.  */
+
+static void
+insert_clobber_before_stack_restore (tree saved_val, tree var, htab_t *visited)
+{
+  gimple stmt, clobber_stmt;
+  tree clobber;
+  imm_use_iterator iter;
+  gimple_stmt_iterator i;
+  gimple *slot;
+
+  FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
+    if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
+      {
+       clobber = build_constructor (TREE_TYPE (var), NULL);
+       TREE_THIS_VOLATILE (clobber) = 1;
+       clobber_stmt = gimple_build_assign (var, clobber);
+
+       i = gsi_for_stmt (stmt);
+       gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
+      }
+    else if (gimple_code (stmt) == GIMPLE_PHI)
+      {
+       if (*visited == NULL)
+         *visited = htab_create (10, htab_hash_pointer, htab_eq_pointer, NULL);
+
+       slot = (gimple *)htab_find_slot (*visited, stmt, INSERT);
+       if (*slot != NULL)
+         continue;
+
+       *slot = stmt;
+       insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
+                                            visited);
+      }
+    else if (gimple_assign_ssa_name_copy_p (stmt))
+      insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
+                                          visited);
+    else
+      gcc_assert (is_gimple_debug (stmt));
+}
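
insert_clobber_before_stack_restore follows the immediate uses of the __builtin_stack_save result, recursing through SSA-name copies and PHI nodes (with a visited table to break PHI cycles) until it reaches the matching __builtin_stack_restore calls, and places a clobber of VAR in front of each one.  A toy model of that walk, entirely outside of GCC and with made-up data structures (none of these names are GCC APIs):

#include <stdbool.h>
#include <stdio.h>

enum kind { RESTORE, PHI, COPY, OTHER };

struct node
{
  enum kind kind;
  const char *name;
  struct node *uses[4];   /* Statements using this node's result.  */
  bool visited;
};

static void
walk (struct node *def)
{
  for (int i = 0; def->uses[i]; i++)
    {
      struct node *use = def->uses[i];
      if (use->kind == RESTORE)
        printf ("insert clobber before %s\n", use->name);
      else if (use->kind == PHI || use->kind == COPY)
        {
          if (use->visited)
            continue;           /* PHIs can form cycles.  */
          use->visited = true;
          walk (use);           /* Recurse on the copy/PHI result.  */
        }
    }
}

int
main (void)
{
  struct node restore = { RESTORE, "stack_restore", { 0 }, false };
  struct node copy = { COPY, "copy", { &restore, 0 }, false };
  struct node phi = { PHI, "phi", { &copy, 0 }, false };
  struct node save = { OTHER, "stack_save", { &phi, 0 }, false };
  walk (&save);
  return 0;
}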
+
+/* Advance the iterator to the previous non-debug gimple statement in the same
+   or dominating basic block.  */
+
+static inline void
+gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
+{
+  basic_block dom;
+
+  gsi_prev_nondebug (i);
+  while (gsi_end_p (*i))
+    {
+      dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
+      if (dom == NULL || dom == ENTRY_BLOCK_PTR)
+       return;
+
+      *i = gsi_last_bb (dom);
+    }
+}
+
+/* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
+   a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
+
+   It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when a
+   previous pass (such as DOM) duplicated it along multiple paths to a BB.  In
+   that case the function gives up without inserting the clobbers.  */
+
+static void
+insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
+{
+  gimple stmt;
+  tree saved_val;
+  htab_t visited = NULL;
+
+  for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
+    {
+      stmt = gsi_stmt (i);
+
+      if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
+       continue;
+
+      saved_val = gimple_call_lhs (stmt);
+      if (saved_val == NULL_TREE)
+       continue;
+
+      insert_clobber_before_stack_restore (saved_val, var, &visited);
+      break;
+    }
+
+  if (visited != NULL)
+    htab_delete (visited);
+}
+
+/* Detect a __builtin_alloca_with_align call with a constant size argument.
+   If one is found, declare a fixed-size array in its place and return the
+   address of that array; otherwise return NULL_TREE.  */
+
+static tree
+fold_builtin_alloca_with_align (gimple stmt)
+{
+  unsigned HOST_WIDE_INT size, threshold, n_elem;
+  tree lhs, arg, block, var, elem_type, array_type;
+
+  /* Get lhs.  */
+  lhs = gimple_call_lhs (stmt);
+  if (lhs == NULL_TREE)
+    return NULL_TREE;
+
+  /* Detect constant argument.  */
+  arg = get_constant_value (gimple_call_arg (stmt, 0));
+  if (arg == NULL_TREE
+      || TREE_CODE (arg) != INTEGER_CST
+      || !host_integerp (arg, 1))
+    return NULL_TREE;
+
+  size = TREE_INT_CST_LOW (arg);
+
+  /* Heuristic: don't fold large allocas.  */
+  threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
+  /* In case the alloca is located at function entry, it has the same lifetime
+     as a declared array, so we allow a larger size.  */
+  block = gimple_block (stmt);
+  if (!(cfun->after_inlining
+        && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
+    threshold /= 10;
+  if (size > threshold)
+    return NULL_TREE;
+
+  /* Declare array.  */
+  elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
+  n_elem = size * 8 / BITS_PER_UNIT;
+  array_type = build_array_type_nelts (elem_type, n_elem);
+  var = create_tmp_var (array_type, NULL);
+  DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1));
+  {
+    struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
+    if (pi != NULL && !pi->pt.anything)
+      {
+       bool singleton_p;
+       unsigned uid;
+       singleton_p = pt_solution_singleton_p (&pi->pt, &uid);
+       gcc_assert (singleton_p);
+       SET_DECL_PT_UID (var, uid);
+      }
+  }
+
+  /* Fold alloca to the address of the array.  */
+  return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
+}
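
The folding only fires when the size argument has become a constant and stays under the large-stack-frame parameter; the full threshold is used only after inlining and when the alloca's block sits directly at function scope, otherwise it is divided by 10.  A hypothetical source-level trigger, assuming -O2 on a GCC that lowers this constant-bound VLA to __builtin_alloca_with_align:

/* Hypothetical test case (not from the GCC testsuite).  The VLA is lowered
   to __builtin_stack_save / __builtin_alloca_with_align /
   __builtin_stack_restore; once CCP proves n == 32 the alloca is folded
   into a fixed 32-byte array, and insert_clobbers_for_var then ends the
   array's lifetime before each stack_restore.  */
extern void use (char *p);

void
foo (void)
{
  for (int i = 0; i < 2; i++)
    {
      int n = 32;     /* Becomes a known constant under CCP.  */
      char buf[n];    /* Constant-bound VLA.  */
      use (buf);
    }
}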
+
 /* Fold the stmt at *GSI with CCP specific information that propagating
    and regular folding does not catch.  */
 
@@ -1420,9 +1878,18 @@ ccp_fold_stmt (gimple_stmt_iterator *gsi)
           fold more conditionals here.  */
        val = evaluate_stmt (stmt);
        if (val.lattice_val != CONSTANT
-           || TREE_CODE (val.value) != INTEGER_CST)
+           || !double_int_zero_p (val.mask))
          return false;
 
+       if (dump_file)
+         {
+           fprintf (dump_file, "Folding predicate ");
+           print_gimple_expr (dump_file, stmt, 0, 0);
+           fprintf (dump_file, " to ");
+           print_generic_expr (dump_file, val.value, 0);
+           fprintf (dump_file, "\n");
+         }
+
        if (integer_zerop (val.value))
          gimple_cond_make_false (stmt);
        else
@@ -1434,7 +1901,8 @@ ccp_fold_stmt (gimple_stmt_iterator *gsi)
     case GIMPLE_CALL:
       {
        tree lhs = gimple_call_lhs (stmt);
-       prop_value_t *val;
+       int flags = gimple_call_flags (stmt);
+       tree val;
        tree argt;
        bool changed = false;
        unsigned i;
@@ -1444,10 +1912,12 @@ ccp_fold_stmt (gimple_stmt_iterator *gsi)
           type issues.  */
        if (lhs
            && TREE_CODE (lhs) == SSA_NAME
-           && (val = get_value (lhs))
-           && val->lattice_val == CONSTANT)
+           && (val = get_constant_value (lhs))
+           /* Don't optimize away calls that have side-effects.  */
+           && (flags & (ECF_CONST|ECF_PURE)) != 0
+           && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
          {
-           tree new_rhs = unshare_expr (val->value);
+           tree new_rhs = unshare_expr (val);
            bool res;
            if (!useless_type_conversion_p (TREE_TYPE (lhs),
                                            TREE_TYPE (new_rhs)))
@@ -1457,23 +1927,43 @@ ccp_fold_stmt (gimple_stmt_iterator *gsi)
            return true;
          }
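
The flag checks added above mean a call is replaced by its known return value only when it is declared const or pure (and not looping-const-or-pure): deleting a call with side effects would change behavior even if its result is fully known.  A hedged illustration with hypothetical functions:

/* 'pure_answer' and 'noisy_answer' are hypothetical.  Even if CCP proves
   both results equal to 42, only the const call may be dropped.  */
extern int pure_answer (void) __attribute__ ((const));
extern int noisy_answer (void);   /* May write memory or do I/O.  */

int
use_answers (void)
{
  int a = pure_answer ();   /* Call can go away once 'a' is known.  */
  int b = noisy_answer ();  /* Call must stay even if 'b' is known.  */
  return a + b;
}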
 
+       /* Internal calls provide no argument types, so the extra laxity
+          for normal calls does not apply.  */
+       if (gimple_call_internal_p (stmt))
+         return false;
+
+        /* The heuristic of fold_builtin_alloca_with_align differs before and
+          after inlining, so we don't require the arg to have changed into a
+          constant in order to fold, only that it is constant.  */
+        if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
+          {
+            tree new_rhs = fold_builtin_alloca_with_align (stmt);
+            if (new_rhs)
+             {
+               bool res = update_call_from_tree (gsi, new_rhs);
+               tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0), 0);
+               gcc_assert (res);
+               insert_clobbers_for_var (*gsi, var);
+               return true;
+             }
+          }
+
        /* Propagate into the call arguments.  Compared to replace_uses_in
           this can use the argument slot types for type verification
           instead of the current argument type.  We also can safely
           drop qualifiers here as we are dealing with constants anyway.  */
-       argt = TYPE_ARG_TYPES (TREE_TYPE (TREE_TYPE (gimple_call_fn (stmt))));
+       argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
        for (i = 0; i < gimple_call_num_args (stmt) && argt;
             ++i, argt = TREE_CHAIN (argt))
          {
            tree arg = gimple_call_arg (stmt, i);
            if (TREE_CODE (arg) == SSA_NAME
-               && (val = get_value (arg))
-               && val->lattice_val == CONSTANT
+               && (val = get_constant_value (arg))
                && useless_type_conversion_p
                     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
-                     TYPE_MAIN_VARIANT (TREE_TYPE (val->value))))
+                     TYPE_MAIN_VARIANT (TREE_TYPE (val))))
              {
-               gimple_call_set_arg (stmt, i, unshare_expr (val->value));
+               gimple_call_set_arg (stmt, i, unshare_expr (val));
                changed = true;
              }
          }
@@ -1484,18 +1974,17 @@ ccp_fold_stmt (gimple_stmt_iterator *gsi)
     case GIMPLE_ASSIGN:
       {
        tree lhs = gimple_assign_lhs (stmt);
-       prop_value_t *val;
+       tree val;
 
        /* If we have a load that turned out to be constant replace it
           as we cannot propagate into all uses in all cases.  */
        if (gimple_assign_single_p (stmt)
            && TREE_CODE (lhs) == SSA_NAME
-           && (val = get_value (lhs))
-           && val->lattice_val == CONSTANT)
+           && (val = get_constant_value (lhs)))
          {
-           tree rhs = unshare_expr (val->value);
+           tree rhs = unshare_expr (val);
            if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
-             rhs = fold_convert (TREE_TYPE (lhs), rhs);
+             rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
            gimple_assign_set_rhs_from_tree (gsi, rhs);
            return true;
          }
@@ -1526,19 +2015,10 @@ visit_assignment (gimple stmt, tree *output_p)
   gcc_assert (gimple_code (stmt) != GIMPLE_CALL
               || gimple_call_lhs (stmt) != NULL_TREE);
 
-  if (gimple_assign_copy_p (stmt))
-    {
-      tree rhs = gimple_assign_rhs1 (stmt);
-
-      if  (TREE_CODE (rhs) == SSA_NAME)
-        {
-          /* For a simple copy operation, we copy the lattice values.  */
-          prop_value_t *nval = get_value (rhs);
-          val = *nval;
-        }
-      else
-        val = evaluate_stmt (stmt);
-    }
+  if (gimple_assign_single_p (stmt)
+      && gimple_assign_rhs_code (stmt) == SSA_NAME)
+    /* For a simple copy operation, we copy the lattice values.  */
+    val = *get_value (gimple_assign_rhs1 (stmt));
   else
     /* Evaluate the statement, which could be
        either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
@@ -1577,12 +2057,15 @@ visit_cond_stmt (gimple stmt, edge *taken_edge_p)
 
   block = gimple_bb (stmt);
   val = evaluate_stmt (stmt);
+  if (val.lattice_val != CONSTANT
+      || !double_int_zero_p (val.mask))
+    return SSA_PROP_VARYING;
 
   /* Find which edge out of the conditional block will be taken and add it
      to the worklist.  If no single edge can be determined statically,
      return SSA_PROP_VARYING to feed all the outgoing edges to the
      propagation engine.  */
-  *taken_edge_p = val.value ? find_taken_edge (block, val.value) : 0;
+  *taken_edge_p = find_taken_edge (block, val.value);
   if (*taken_edge_p)
     return SSA_PROP_INTERESTING;
   else
@@ -1647,7 +2130,7 @@ ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
      Mark them VARYING.  */
   FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
     {
-      prop_value_t v = { VARYING, NULL_TREE };
+      prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } };
       set_lattice_value (def, v);
     }
 
@@ -1660,12 +2143,14 @@ ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
 static unsigned int
 do_ssa_ccp (void)
 {
+  unsigned int todo = 0;
+  calculate_dominance_info (CDI_DOMINATORS);
   ccp_initialize ();
   ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
   if (ccp_finalize ())
-    return (TODO_cleanup_cfg | TODO_update_ssa | TODO_remove_unused_locals);
-  else
-    return 0;
+    todo = (TODO_cleanup_cfg | TODO_update_ssa | TODO_remove_unused_locals);
+  free_dominance_info (CDI_DOMINATORS);
+  return todo;
 }
 
 
@@ -1691,7 +2176,7 @@ struct gimple_opt_pass pass_ccp =
   0,                                   /* properties_provided */
   0,                                   /* properties_destroyed */
   0,                                   /* todo_flags_start */
-  TODO_dump_func | TODO_verify_ssa
+  TODO_verify_ssa
   | TODO_verify_stmts | TODO_ggc_collect/* todo_flags_finish */
  }
 };
@@ -1731,7 +2216,8 @@ optimize_stack_restore (gimple_stmt_iterator i)
       if (!callee
          || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
          /* All regular builtins are ok, just obviously not alloca.  */
-         || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA)
+         || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
+         || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
        return NULL_TREE;
 
       if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
@@ -1810,7 +2296,7 @@ optimize_stdarg_builtin (gimple call)
     case BUILT_IN_VA_START:
       if (!va_list_simple_ptr
          || targetm.expand_builtin_va_start != NULL
-          || built_in_decls[BUILT_IN_NEXT_ARG] == NULL)
+         || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
        return NULL_TREE;
 
       if (gimple_call_num_args (call) != 2)
@@ -1823,7 +2309,7 @@ optimize_stdarg_builtin (gimple call)
        return NULL_TREE;
 
       lhs = build_fold_indirect_ref_loc (loc, lhs);
-      rhs = build_call_expr_loc (loc, built_in_decls[BUILT_IN_NEXT_ARG],
+      rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
                              1, integer_zero_node);
       rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
       return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
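
For targets whose va_list is a plain pointer and that do not override expand_builtin_va_start, the BUILT_IN_VA_START case rewrites the call into an assignment from __builtin_next_arg, which later folding can simplify further.  A minimal function this could apply to (target-dependent, shown only as an illustration):

#include <stdarg.h>

/* On such a target the statement "va_start (ap, count);" becomes roughly
   "ap = __builtin_next_arg (0);" (the exact form depends on the target).  */
int
sum_ints (int count, ...)
{
  va_list ap;
  int s = 0;
  va_start (ap, count);
  for (int i = 0; i < count; i++)
    s += va_arg (ap, int);
  va_end (ap);
  return s;
}
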
@@ -1908,6 +2394,11 @@ execute_fold_all_builtins (void)
                 result = integer_zero_node;
                break;
 
+             case BUILT_IN_ASSUME_ALIGNED:
+               /* Remove __builtin_assume_aligned.  */
+               result = gimple_call_arg (stmt, 0);
+               break;
+
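The BUILT_IN_ASSUME_ALIGNED case above replaces the call with its first argument once the alignment promise has been consumed, so nothing of the builtin remains in the IL.  A small, hypothetical user of the builtin:

double
first_elem (const double *p)
{
  /* Promise 32-byte alignment; CCP records the known-zero low bits of q,
     and the fold-builtins pass later deletes the call, leaving q equal
     to p.  */
  const double *q = __builtin_assume_aligned (p, 32);
  return q[0];
}
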
              case BUILT_IN_STACK_RESTORE:
                result = optimize_stack_restore (i);
                if (result)
@@ -1994,8 +2485,7 @@ struct gimple_opt_pass pass_fold_builtins =
   0,                                   /* properties_provided */
   0,                                   /* properties_destroyed */
   0,                                   /* todo_flags_start */
-  TODO_dump_func
-    | TODO_verify_ssa
+  TODO_verify_ssa
     | TODO_update_ssa                  /* todo_flags_finish */
  }
 };