don't use build_function_type in the ObjC/C++ frontends
[pf3gnuchains/gcc-fork.git] / gcc / tree-vect-stmts.c
index 8fba200..f9164ef 100644
@@ -38,11 +38,86 @@ along with GCC; see the file COPYING3.  If not see
 #include "recog.h"
 #include "optabs.h"
 #include "diagnostic-core.h"
-#include "toplev.h"
 #include "tree-vectorizer.h"
 #include "langhooks.h"
 
 
+/* Return a variable of type ELEM_TYPE[NELEMS].  */
+
+static tree
+create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
+{
+  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
+                        "vect_array");
+}
+
+/* ARRAY is an array of vectors created by create_vector_array.
+   Return an SSA_NAME for the vector in index N.  The reference
+   is part of the vectorization of STMT and the vector is associated
+   with scalar destination SCALAR_DEST.  */
+
+static tree
+read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
+                  tree array, unsigned HOST_WIDE_INT n)
+{
+  tree vect_type, vect, vect_name, array_ref;
+  gimple new_stmt;
+
+  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
+  vect_type = TREE_TYPE (TREE_TYPE (array));
+  vect = vect_create_destination_var (scalar_dest, vect_type);
+  array_ref = build4 (ARRAY_REF, vect_type, array,
+                     build_int_cst (size_type_node, n),
+                     NULL_TREE, NULL_TREE);
+
+  new_stmt = gimple_build_assign (vect, array_ref);
+  vect_name = make_ssa_name (vect, new_stmt);
+  gimple_assign_set_lhs (new_stmt, vect_name);
+  vect_finish_stmt_generation (stmt, new_stmt, gsi);
+  mark_symbols_for_renaming (new_stmt);
+
+  return vect_name;
+}
+
+/* ARRAY is an array of vectors created by create_vector_array.
+   Emit code to store SSA_NAME VECT in index N of the array.
+   The store is part of the vectorization of STMT.  */
+
+static void
+write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
+                   tree array, unsigned HOST_WIDE_INT n)
+{
+  tree array_ref;
+  gimple new_stmt;
+
+  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
+                     build_int_cst (size_type_node, n),
+                     NULL_TREE, NULL_TREE);
+
+  new_stmt = gimple_build_assign (array_ref, vect);
+  vect_finish_stmt_generation (stmt, new_stmt, gsi);
+  mark_symbols_for_renaming (new_stmt);
+}
+
+/* PTR is a pointer to an array of type TYPE.  Return a representation
+   of *PTR.  The memory reference replaces those in FIRST_DR
+   (and its group).  */
+
+static tree
+create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
+{
+  struct ptr_info_def *pi;
+  tree mem_ref, alias_ptr_type;
+
+  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
+  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
+  /* Arrays have the same alignment as their type.  */
+  pi = get_ptr_info (ptr);
+  pi->align = TYPE_ALIGN_UNIT (type);
+  pi->misalign = 0;
+  return mem_ref;
+}
+
 /* Utility functions used by vect_mark_stmts_to_be_vectorized.  */
 
 /* Function vect_mark_relevant.
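
Taken together, the three helpers above carry the new store-lanes path in
vectorizable_store further down: the per-vector operands are written into
one array, and the whole array is stored by a single internal-function
call.  A condensed sketch, assembled from the vectorizable_store hunk
below (every call shown here appears in this patch):

      vec_array = create_vector_array (vectype, vec_num);
      for (i = 0; i < vec_num; i++)
        write_vector_array (stmt, gsi, VEC_index (tree, dr_chain, i),
                            vec_array, i);
      data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
      new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
      gimple_call_set_lhs (new_stmt, data_ref);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);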
@@ -649,7 +724,8 @@ vect_cost_strided_group_size (stmt_vec_info stmt_info)
 
 void
 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
-                      enum vect_def_type dt, slp_tree slp_node)
+                      bool store_lanes_p, enum vect_def_type dt,
+                      slp_tree slp_node)
 {
   int group_size;
   unsigned int inside_cost = 0, outside_cost = 0;
@@ -686,9 +762,11 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
       first_dr = STMT_VINFO_DATA_REF (stmt_info);
     }
 
-  /* Is this an access in a group of stores, which provide strided access?
-     If so, add in the cost of the permutes.  */
-  if (group_size > 1)
+  /* We assume that the cost of a single store-lanes instruction is
+     equivalent to the cost of GROUP_SIZE separate stores.  If a strided
+     access is instead being provided by a permute-and-store operation,
+     include the cost of the permutes.  */
+  if (!store_lanes_p && group_size > 1)
     {
       /* Uses a high and low interleave operation for each needed permute.  */
       inside_cost = ncopies * exact_log2(group_size) * group_size
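
Worked example (illustrative numbers): for a store group of GROUP_SIZE = 4
with NCOPIES = 2, the permute-and-store path above adds
2 * log2(4) * 4 = 16 interleave operations on top of the stores themselves,
whereas the store-lanes path is costed as the GROUP_SIZE stores alone.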
@@ -764,8 +842,8 @@ vect_get_store_cost (struct data_reference *dr, int ncopies,
    access scheme chosen.  */
 
 void
-vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
-
+vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, bool load_lanes_p,
+                     slp_tree slp_node)
 {
   int group_size;
   gimple first_stmt;
@@ -790,9 +868,11 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
       first_dr = dr;
     }
 
-  /* Is this an access in a group of loads providing strided access?
-     If so, add in the cost of the permutes.  */
-  if (group_size > 1)
+  /* We assume that the cost of a single load-lanes instruction is
+     equivalent to the cost of GROUP_SIZE separate loads.  If a strided
+     access is instead being provided by a load-and-permute operation,
+     include the cost of the permutes.  */
+  if (!load_lanes_p && group_size > 1)
     {
       /* Uses even and odd extract operations for each needed permute.  */
       inside_cost = ncopies * exact_log2(group_size) * group_size
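
Symmetrically (illustrative numbers): a load group of GROUP_SIZE = 4 with
NCOPIES = 2 is charged 2 * log2(4) * 4 = 16 extra even/odd extract
operations unless a load-lanes instruction covers the group.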
@@ -983,8 +1063,7 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
   gimple def_stmt;
   stmt_vec_info def_stmt_info = NULL;
   stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
-  tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
-  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
+  unsigned int nunits;
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
   tree vec_inv;
   tree vec_cst;
@@ -1025,6 +1104,7 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
       {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
        gcc_assert (vector_type);
+       nunits = TYPE_VECTOR_SUBPARTS (vector_type);
 
        if (scalar_def)
          *scalar_def = op;
@@ -1033,11 +1113,7 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
         if (vect_print_dump_info (REPORT_DETAILS))
           fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
 
-        for (i = nunits - 1; i >= 0; --i)
-          {
-            t = tree_cons (NULL_TREE, op, t);
-          }
-        vec_cst = build_vector (vector_type, t);
+        vec_cst = build_vector_from_val (vector_type, op);
         return vect_init_vector (stmt, vec_cst, vector_type, NULL);
       }
 
@@ -1107,8 +1183,10 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
         /* Get the def from the vectorized stmt.  */
         def_stmt_info = vinfo_for_stmt (def_stmt);
         vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
-       gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
-        vec_oprnd = PHI_RESULT (vec_stmt);
+       if (gimple_code (vec_stmt) == GIMPLE_PHI)
+         vec_oprnd = PHI_RESULT (vec_stmt);
+       else
+         vec_oprnd = gimple_get_lhs (vec_stmt);
         return vec_oprnd;
       }
 
@@ -1229,7 +1307,7 @@ vect_get_vec_defs (tree op0, tree op1, gimple stmt,
                   slp_tree slp_node)
 {
   if (slp_node)
-    vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1, -1);
+    vect_get_slp_defs (op0, op1, slp_node, vec_oprnds0, vec_oprnds1, -1);
   else
     {
       tree vec_oprnd;
@@ -1320,7 +1398,8 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
   tree fndecl, new_temp, def, rhs_type;
   gimple def_stmt;
-  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
+  enum vect_def_type dt[3]
+    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
   gimple new_stmt = NULL;
   int ncopies, j;
   VEC(tree, heap) *vargs = NULL;
@@ -1347,7 +1426,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
   if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
     return false;
 
-  if (stmt_could_throw_p (stmt))
+  if (stmt_can_throw_internal (stmt))
     return false;
 
   vectype_out = STMT_VINFO_VECTYPE (stmt_info);
@@ -1357,10 +1436,10 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
   vectype_in = NULL_TREE;
   nargs = gimple_call_num_args (stmt);
 
-  /* Bail out if the function has more than two arguments, we
-     do not have interesting builtin functions to vectorize with
-     more than two arguments.  No arguments is also not good.  */
-  if (nargs == 0 || nargs > 2)
+  /* Bail out if the function has more than three arguments; we do not have
+     interesting builtin functions to vectorize with more than two arguments
+     except for fma.  Having no arguments is not good either.  */
+  if (nargs == 0 || nargs > 3)
     return false;
 
   for (i = 0; i < nargs; i++)
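
As a concrete illustration (hypothetical user code, not from the patch),
the relaxed check admits three-argument calls such as fma, which can now
be considered when the target provides a vector fused multiply-add:

      void
      fma_loop (double *restrict a, const double *restrict b,
                const double *restrict c, int n)
      {
        int i;
        /* Three arguments: rejected by the old nargs > 2 check,
           considered by the new nargs > 3 check.  */
        for (i = 0; i < n; i++)
          a[i] = __builtin_fma (b[i], c[i], a[i]);
      }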
@@ -1583,7 +1662,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
 
   type = TREE_TYPE (scalar_dest);
   new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
-                                 fold_convert (type, integer_zero_node));
+                                 build_zero_cst (type));
   set_vinfo_for_stmt (new_stmt, stmt_info);
   set_vinfo_for_stmt (stmt, NULL);
   STMT_VINFO_STMT (stmt_info) = new_stmt;
@@ -1749,7 +1828,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
   /* Multiple types in SLP are handled by creating the appropriate number of
      vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
      case of SLP.  */
-  if (slp_node)
+  if (slp_node || PURE_SLP_STMT (stmt_info))
     ncopies = 1;
 
   /* Sanity check: make sure that at least one copy of the vectorized stmt
@@ -1882,7 +1961,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
            }
 
-         /* Arguments are ready. Create the new vector stmt.  */
+         /* Arguments are ready.  Create the new vector stmt.  */
          new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
                                                   vec_oprnd1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
@@ -1942,7 +2021,7 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
   /* Multiple types in SLP are handled by creating the appropriate number of
      vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
      case of SLP.  */
-  if (slp_node)
+  if (slp_node || PURE_SLP_STMT (stmt_info))
     ncopies = 1;
   else
     ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
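
Worked instance of the NCOPIES formula (illustrative numbers): with a
vectorization factor of 8 and V4SI vectors (nunits = 4), ncopies = 8 / 4
= 2, i.e. two vector statements are generated per scalar statement.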
@@ -2041,16 +2120,17 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
   return true;
 }
 
-/* Function vectorizable_operation.
 
-   Check if STMT performs a binary or unary operation that can be vectorized.
+/* Function vectorizable_shift.
+
+   Check if STMT performs a shift operation that can be vectorized.
    If VEC_STMT is also passed, vectorize the STMT: create a vectorized
    stmt to replace it, put it in VEC_STMT, and insert it at BSI.
    Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
 
 static bool
-vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
-                       gimple *vec_stmt, slp_tree slp_node)
+vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
+                    gimple *vec_stmt, slp_tree slp_node)
 {
   tree vec_dest;
   tree scalar_dest;
@@ -2062,7 +2142,6 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
   enum tree_code code;
   enum machine_mode vec_mode;
   tree new_temp;
-  int op_type;
   optab optab;
   int icode;
   enum machine_mode optab_op2_mode;
@@ -2076,10 +2155,332 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
   tree vectype_out;
   int ncopies;
   int j, i;
-  VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
+  VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
   tree vop0, vop1;
   unsigned int k;
-  bool scalar_shift_arg = false;
+  bool scalar_shift_arg = true;
+  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
+  int vf;
+
+  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
+    return false;
+
+  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
+    return false;
+
+  /* Is STMT a vectorizable shift/rotate operation?  */
+  if (!is_gimple_assign (stmt))
+    return false;
+
+  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
+    return false;
+
+  code = gimple_assign_rhs_code (stmt);
+
+  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
+      || code == RROTATE_EXPR))
+    return false;
+
+  scalar_dest = gimple_assign_lhs (stmt);
+  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
+
+  op0 = gimple_assign_rhs1 (stmt);
+  if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
+                             &def_stmt, &def, &dt[0], &vectype))
+    {
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "use not simple.");
+      return false;
+    }
+  /* If op0 is an external or constant def, use a vector type with
+     the same size as the output vector type.  */
+  if (!vectype)
+    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
+  if (vec_stmt)
+    gcc_assert (vectype);
+  if (!vectype)
+    {
+      if (vect_print_dump_info (REPORT_DETAILS))
+        {
+          fprintf (vect_dump, "no vectype for scalar type ");
+          print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
+        }
+
+      return false;
+    }
+
+  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
+  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
+  if (nunits_out != nunits_in)
+    return false;
+
+  op1 = gimple_assign_rhs2 (stmt);
+  if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[1]))
+    {
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "use not simple.");
+      return false;
+    }
+
+  if (loop_vinfo)
+    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+  else
+    vf = 1;
+
+  /* Multiple types in SLP are handled by creating the appropriate number of
+     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
+     case of SLP.  */
+  if (slp_node || PURE_SLP_STMT (stmt_info))
+    ncopies = 1;
+  else
+    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
+
+  gcc_assert (ncopies >= 1);
+
+  /* Determine whether the shift amount is a vector, or scalar.  If the
+     shift/rotate amount is a vector, use the vector/vector shift optabs.  */
+
+  if (dt[1] == vect_internal_def && !slp_node)
+    scalar_shift_arg = false;
+  else if (dt[1] == vect_constant_def
+          || dt[1] == vect_external_def
+          || dt[1] == vect_internal_def)
+    {
+      /* In SLP, we need to check whether the shift count is the same
+        for all statements; in loops, a constant or invariant shift
+        count is always a scalar shift.  */
+      if (slp_node)
+       {
+         VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (slp_node);
+         gimple slpstmt;
+
+         FOR_EACH_VEC_ELT (gimple, stmts, k, slpstmt)
+           if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
+             scalar_shift_arg = false;
+       }
+    }
+  else
+    {
+      if (vect_print_dump_info (REPORT_DETAILS))
+       fprintf (vect_dump, "operand mode requires invariant argument.");
+      return false;
+    }
+
+  /* Vector shifted by vector.  */
+  if (!scalar_shift_arg)
+    {
+      optab = optab_for_tree_code (code, vectype, optab_vector);
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "vector/vector shift/rotate found.");
+    }
+  /* See if the machine has a vector-shifted-by-scalar insn and, if not,
+     whether it has a vector-shifted-by-vector insn.  */
+  else
+    {
+      optab = optab_for_tree_code (code, vectype, optab_scalar);
+      if (optab
+          && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
+        {
+          if (vect_print_dump_info (REPORT_DETAILS))
+            fprintf (vect_dump, "vector/scalar shift/rotate found.");
+        }
+      else
+        {
+          optab = optab_for_tree_code (code, vectype, optab_vector);
+          if (optab
+               && (optab_handler (optab, TYPE_MODE (vectype))
+                      != CODE_FOR_nothing))
+            {
+             scalar_shift_arg = false;
+
+              if (vect_print_dump_info (REPORT_DETAILS))
+                fprintf (vect_dump, "vector/vector shift/rotate found.");
+
+              /* Unlike the other binary operators, shifts/rotates have
+                 the rhs being int, instead of the same type as the lhs,
+                 so make sure the scalar is the right type if we are
+                 dealing with vectors of short/char.  */
+              if (dt[1] == vect_constant_def)
+                op1 = fold_convert (TREE_TYPE (vectype), op1);
+            }
+        }
+    }
+
+  /* Supportable by target?  */
+  if (!optab)
+    {
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "no optab.");
+      return false;
+    }
+  vec_mode = TYPE_MODE (vectype);
+  icode = (int) optab_handler (optab, vec_mode);
+  if (icode == CODE_FOR_nothing)
+    {
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "op not supported by target.");
+      /* Check only during analysis.  */
+      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
+          || (vf < vect_min_worthwhile_factor (code)
+              && !vec_stmt))
+        return false;
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "proceeding using word mode.");
+    }
+
+  /* Worthwhile without SIMD support?  Check only during analysis.  */
+  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
+      && vf < vect_min_worthwhile_factor (code)
+      && !vec_stmt)
+    {
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "not worthwhile without SIMD support.");
+      return false;
+    }
+
+  if (!vec_stmt) /* transformation not required.  */
+    {
+      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "=== vectorizable_shift ===");
+      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
+      return true;
+    }
+
+  /** Transform.  **/
+
+  if (vect_print_dump_info (REPORT_DETAILS))
+    fprintf (vect_dump, "transform binary/unary operation.");
+
+  /* Handle def.  */
+  vec_dest = vect_create_destination_var (scalar_dest, vectype);
+
+  /* Allocate VECs for vector operands.  In case of SLP, vector operands are
+     created in the previous stages of the recursion, so no allocation is
+     needed, except for the case of shift with scalar shift argument.  In that
+     case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
+     be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
+     In case of loop-based vectorization we allocate VECs of size 1.  We
+     allocate VEC_OPRNDS1 only in case of binary operation.  */
+  if (!slp_node)
+    {
+      vec_oprnds0 = VEC_alloc (tree, heap, 1);
+      vec_oprnds1 = VEC_alloc (tree, heap, 1);
+    }
+  else if (scalar_shift_arg)
+    vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
+
+  prev_stmt_info = NULL;
+  for (j = 0; j < ncopies; j++)
+    {
+      /* Handle uses.  */
+      if (j == 0)
+        {
+          if (scalar_shift_arg)
+            {
+              /* Vector shl and shr insn patterns can be defined with scalar
+                 operand 2 (shift operand).  In this case, use constant or loop
+                 invariant op1 directly, without extending it to vector mode
+                 first.  */
+              optab_op2_mode = insn_data[icode].operand[2].mode;
+              if (!VECTOR_MODE_P (optab_op2_mode))
+                {
+                  if (vect_print_dump_info (REPORT_DETAILS))
+                    fprintf (vect_dump, "operand 1 using scalar mode.");
+                  vec_oprnd1 = op1;
+                  VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
+                  if (slp_node)
+                    {
+                      /* Store vec_oprnd1 for every vector stmt to be created
+                         for SLP_NODE.  We check during the analysis that all
+                         the shift arguments are the same.
+                         TODO: Allow different constants for different vector
+                         stmts generated for an SLP instance.  */
+                      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
+                        VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
+                    }
+                }
+            }
+
+          /* vec_oprnd1 is available if operand 1 should be of a scalar type
+             (a special case for certain kinds of vector shifts); otherwise,
+             operand 1 should be of a vector type (the usual case).  */
+          if (vec_oprnd1)
+            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
+                               slp_node);
+          else
+            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
+                               slp_node);
+        }
+      else
+        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
+
+      /* Arguments are ready.  Create the new vector stmt.  */
+      FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
+        {
+          vop1 = VEC_index (tree, vec_oprnds1, i);
+          new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
+          new_temp = make_ssa_name (vec_dest, new_stmt);
+          gimple_assign_set_lhs (new_stmt, new_temp);
+          vect_finish_stmt_generation (stmt, new_stmt, gsi);
+          if (slp_node)
+            VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
+        }
+
+      if (slp_node)
+        continue;
+
+      if (j == 0)
+        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
+      else
+        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+      prev_stmt_info = vinfo_for_stmt (new_stmt);
+    }
+
+  VEC_free (tree, heap, vec_oprnds0);
+  VEC_free (tree, heap, vec_oprnds1);
+
+  return true;
+}
+
+
+/* Function vectorizable_operation.
+
+   Check if STMT performs a binary, unary or ternary operation that can
+   be vectorized.
+   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
+   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
+   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
+
+static bool
+vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
+                       gimple *vec_stmt, slp_tree slp_node)
+{
+  tree vec_dest;
+  tree scalar_dest;
+  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
+  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+  tree vectype;
+  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+  enum tree_code code;
+  enum machine_mode vec_mode;
+  tree new_temp;
+  int op_type;
+  optab optab;
+  int icode;
+  tree def;
+  gimple def_stmt;
+  enum vect_def_type dt[3]
+    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
+  gimple new_stmt = NULL;
+  stmt_vec_info prev_stmt_info;
+  int nunits_in;
+  int nunits_out;
+  tree vectype_out;
+  int ncopies;
+  int j, i;
+  VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vec_oprnds2 = NULL;
+  tree vop0, vop1, vop2;
   bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
   int vf;
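
For orientation, hypothetical user loops (not from the patch) showing the
two operand shapes that the scalar_shift_arg logic in vectorizable_shift
above distinguishes:

      /* Invariant shift amount: stays a scalar shift (optab_scalar
         preferred).  */
      void
      shift_by_scalar (int *restrict a, int n, int k)
      {
        int i;
        for (i = 0; i < n; i++)
          a[i] = a[i] << k;
      }

      /* Loop-varying shift amount: vector/vector shift (optab_vector).  */
      void
      shift_by_vector (int *restrict a, const int *restrict b, int n)
      {
        int i;
        for (i = 0; i < n; i++)
          a[i] = a[i] << b[i];
      }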
 
@@ -2105,10 +2506,11 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
 
   /* Support only unary, binary or ternary operations.  */
   op_type = TREE_CODE_LENGTH (code);
-  if (op_type != unary_op && op_type != binary_op)
+  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
     {
       if (vect_print_dump_info (REPORT_DETAILS))
-       fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
+       fprintf (vect_dump, "num. args = %d (not unary/binary/ternary op).",
+                op_type);
       return false;
     }
 
@@ -2145,7 +2547,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
   if (nunits_out != nunits_in)
     return false;
 
-  if (op_type == binary_op)
+  if (op_type == binary_op || op_type == ternary_op)
     {
       op1 = gimple_assign_rhs2 (stmt);
       if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
@@ -2156,6 +2558,17 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
          return false;
        }
     }
+  if (op_type == ternary_op)
+    {
+      op2 = gimple_assign_rhs3 (stmt);
+      if (!vect_is_simple_use (op2, loop_vinfo, bb_vinfo, &def_stmt, &def,
+                               &dt[2]))
+       {
+         if (vect_print_dump_info (REPORT_DETAILS))
+           fprintf (vect_dump, "use not simple.");
+         return false;
+       }
+    }
 
   if (loop_vinfo)
     vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
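
(For context, our reading rather than something the patch states: the
ternary case is aimed at codes such as FMA_EXPR, which the multiply-add
recognition machinery can form from source like

      a[i] = b[i] * c[i] + d[i];

so rhs3 here would be the addend of the fused multiply-add.)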
@@ -2165,68 +2578,19 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
   /* Multiple types in SLP are handled by creating the appropriate number of
      vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
      case of SLP.  */
-  if (slp_node)
+  if (slp_node || PURE_SLP_STMT (stmt_info))
     ncopies = 1;
   else
     ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
 
   gcc_assert (ncopies >= 1);
 
-  /* If this is a shift/rotate, determine whether the shift amount is a vector,
-     or scalar.  If the shift/rotate amount is a vector, use the vector/vector
-     shift optabs.  */
+  /* Shifts are handled in vectorizable_shift ().  */
   if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
       || code == RROTATE_EXPR)
-    {
-      /* vector shifted by vector */
-      if (dt[1] == vect_internal_def)
-       {
-         optab = optab_for_tree_code (code, vectype, optab_vector);
-         if (vect_print_dump_info (REPORT_DETAILS))
-           fprintf (vect_dump, "vector/vector shift/rotate found.");
-       }
+    return false;
 
-      /* See if the machine has a vector shifted by scalar insn and if not
-        then see if it has a vector shifted by vector insn */
-      else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
-       {
-         optab = optab_for_tree_code (code, vectype, optab_scalar);
-         if (optab
-             && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
-           {
-             scalar_shift_arg = true;
-             if (vect_print_dump_info (REPORT_DETAILS))
-               fprintf (vect_dump, "vector/scalar shift/rotate found.");
-           }
-         else
-           {
-             optab = optab_for_tree_code (code, vectype, optab_vector);
-             if (optab
-                 && (optab_handler (optab, TYPE_MODE (vectype))
-                     != CODE_FOR_nothing))
-               {
-                 if (vect_print_dump_info (REPORT_DETAILS))
-                   fprintf (vect_dump, "vector/vector shift/rotate found.");
-
-                 /* Unlike the other binary operators, shifts/rotates have
-                    the rhs being int, instead of the same type as the lhs,
-                    so make sure the scalar is the right type if we are
-                    dealing with vectors of short/char.  */
-                 if (dt[1] == vect_constant_def)
-                   op1 = fold_convert (TREE_TYPE (vectype), op1);
-               }
-           }
-       }
-
-      else
-       {
-         if (vect_print_dump_info (REPORT_DETAILS))
-           fprintf (vect_dump, "operand mode requires invariant argument.");
-         return false;
-       }
-    }
-  else
-    optab = optab_for_tree_code (code, vectype, optab_default);
+  optab = optab_for_tree_code (code, vectype, optab_default);
 
   /* Supportable by target?  */
   if (!optab)
@@ -2287,11 +2651,11 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
   if (!slp_node)
     {
       vec_oprnds0 = VEC_alloc (tree, heap, 1);
-      if (op_type == binary_op)
+      if (op_type == binary_op || op_type == ternary_op)
         vec_oprnds1 = VEC_alloc (tree, heap, 1);
+      if (op_type == ternary_op)
+        vec_oprnds2 = VEC_alloc (tree, heap, 1);
     }
-  else if (scalar_shift_arg)
-    vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
 
   /* In case the vectorization factor (VF) is bigger than the number
      of elements that we can fit in a vectype (nunits), we have to generate
@@ -2352,51 +2716,40 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
       /* Handle uses.  */
       if (j == 0)
        {
-         if (op_type == binary_op && scalar_shift_arg)
-           {
-             /* Vector shl and shr insn patterns can be defined with scalar
-                operand 2 (shift operand).  In this case, use constant or loop
-                invariant op1 directly, without extending it to vector mode
-                first.  */
-             optab_op2_mode = insn_data[icode].operand[2].mode;
-             if (!VECTOR_MODE_P (optab_op2_mode))
-               {
-                 if (vect_print_dump_info (REPORT_DETAILS))
-                   fprintf (vect_dump, "operand 1 using scalar mode.");
-                 vec_oprnd1 = op1;
-                 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
-                 if (slp_node)
-                   {
-                     /* Store vec_oprnd1 for every vector stmt to be created
-                        for SLP_NODE.  We check during the analysis that all
-                         the shift arguments are the same.
-                        TODO: Allow different constants for different vector
-                        stmts generated for an SLP instance.  */
-                     for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
-                       VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
-                   }
-               }
-           }
-
-          /* vec_oprnd1 is available if operand 1 should be of a scalar-type
-             (a special case for certain kind of vector shifts); otherwise,
-             operand 1 should be of a vector type (the usual case).  */
-         if (op_type == binary_op && !vec_oprnd1)
+         if (op_type == binary_op || op_type == ternary_op)
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node);
          else
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node);
+         if (op_type == ternary_op)
+           {
+             vec_oprnds2 = VEC_alloc (tree, heap, 1);
+             VEC_quick_push (tree, vec_oprnds2,
+                             vect_get_vec_def_for_operand (op2, stmt, NULL));
+           }
        }
       else
-       vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
+       {
+         vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
+         if (op_type == ternary_op)
+           {
+             tree vec_oprnd = VEC_pop (tree, vec_oprnds2);
+             VEC_quick_push (tree, vec_oprnds2,
+                             vect_get_vec_def_for_stmt_copy (dt[2],
+                                                             vec_oprnd));
+           }
+       }
 
-      /* Arguments are ready. Create the new vector stmt.  */
+      /* Arguments are ready.  Create the new vector stmt.  */
       FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
         {
-         vop1 = ((op_type == binary_op)
-                 ? VEC_index (tree, vec_oprnds1, i) : NULL);
-         new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
+         vop1 = ((op_type == binary_op || op_type == ternary_op)
+                 ? VEC_index (tree, vec_oprnds1, i) : NULL_TREE);
+         vop2 = ((op_type == ternary_op)
+                 ? VEC_index (tree, vec_oprnds2, i) : NULL_TREE);
+         new_stmt = gimple_build_assign_with_ops3 (code, vec_dest,
+                                                   vop0, vop1, vop2);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
@@ -2417,6 +2770,8 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
   VEC_free (tree, heap, vec_oprnds0);
   if (vec_oprnds1)
     VEC_free (tree, heap, vec_oprnds1);
+  if (vec_oprnds2)
+    VEC_free (tree, heap, vec_oprnds2);
 
   return true;
 }
@@ -2621,7 +2976,7 @@ vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
   /* Multiple types in SLP are handled by creating the appropriate number of
      vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
      case of SLP.  */
-  if (slp_node)
+  if (slp_node || PURE_SLP_STMT (stmt_info))
     ncopies = 1;
   else
     ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
@@ -2680,7 +3035,7 @@ vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
     {
       /* Handle uses.  */
       if (slp_node)
-        vect_get_slp_defs (slp_node, &vec_oprnds0, NULL, -1);
+        vect_get_slp_defs (op0, NULL_TREE, slp_node, &vec_oprnds0, NULL, -1);
       else
         {
           VEC_free (tree, heap, vec_oprnds0);
@@ -2690,7 +3045,7 @@ vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
                                     vect_pow2 (multi_step_cvt) - 1);
         }
 
-      /* Arguments are ready. Create the new vector stmts.  */
+      /* Arguments are ready.  Create the new vector stmts.  */
       tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
       vect_create_vectorized_demotion_stmts (&vec_oprnds0,
                                              multi_step_cvt, stmt, tmp_vec_dsts,
@@ -2901,7 +3256,7 @@ vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
   /* Multiple types in SLP are handled by creating the appropriate number of
      vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
      case of SLP.  */
-  if (slp_node)
+  if (slp_node || PURE_SLP_STMT (stmt_info))
     ncopies = 1;
   else
     ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
@@ -2991,7 +3346,8 @@ vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
       if (j == 0)
         {
           if (slp_node)
-              vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1, -1);
+              vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0,
+                                 &vec_oprnds1, -1);
           else
             {
               vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
@@ -3014,7 +3370,7 @@ vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
             }
         }
 
-      /* Arguments are ready. Create the new vector stmts.  */
+      /* Arguments are ready.  Create the new vector stmts.  */
       tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
       vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
                                               multi_step_cvt, stmt,
@@ -3054,6 +3410,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
   stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
   struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+  tree elem_type;
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
   struct loop *loop = NULL;
   enum machine_mode vec_mode;
@@ -3069,6 +3426,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
   int j;
   gimple next_stmt, first_stmt = NULL;
   bool strided_store = false;
+  bool store_lanes_p = false;
   unsigned int group_size, i;
   VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
   bool inv_p;
@@ -3076,6 +3434,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
   bool slp = (slp_node != NULL);
   unsigned int vec_num;
   bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
+  tree aggr_type;
 
   if (loop_vinfo)
     loop = LOOP_VINFO_LOOP (loop_vinfo);
@@ -3083,7 +3442,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
   /* Multiple types in SLP are handled by creating the appropriate number of
      vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
      case of SLP.  */
-  if (slp)
+  if (slp || PURE_SLP_STMT (stmt_info))
     ncopies = 1;
   else
     ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
@@ -3129,7 +3488,8 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
 
   /* The scalar rhs type needs to be trivially convertible to the vector
      component type.  This should always be the case.  */
-  if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
+  elem_type = TREE_TYPE (vectype);
+  if (!useless_type_conversion_p (elem_type, TREE_TYPE (op)))
     {
       if (vect_print_dump_info (REPORT_DETAILS))
         fprintf (vect_dump, "???  operands of different types");
@@ -3156,9 +3516,14 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
     {
       strided_store = true;
       first_stmt = DR_GROUP_FIRST_DR (stmt_info);
-      if (!vect_strided_store_supported (vectype)
-         && !PURE_SLP_STMT (stmt_info) && !slp)
-       return false;
+      if (!slp && !PURE_SLP_STMT (stmt_info))
+       {
+         group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
+         if (vect_store_lanes_supported (vectype, group_size))
+           store_lanes_p = true;
+         else if (!vect_strided_store_supported (vectype, group_size))
+           return false;
+       }
 
       if (first_stmt == stmt)
        {
@@ -3184,7 +3549,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
-      vect_model_store_cost (stmt_info, ncopies, dt, NULL);
+      vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt, NULL);
       return true;
     }
 
@@ -3239,6 +3604,16 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
 
   alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
   gcc_assert (alignment_support_scheme);
+  /* Targets with store-lane instructions must not require explicit
+     realignment.  */
+  gcc_assert (!store_lanes_p
+             || alignment_support_scheme == dr_aligned
+             || alignment_support_scheme == dr_unaligned_supported);
+
+  if (store_lanes_p)
+    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
+  else
+    aggr_type = vectype;
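
Illustrative shape of AGGR_TYPE: with V4SI vectors (nunits = 4) and an
interleaving group of vec_num = 3, the array type is int[12], i.e. the
unit consumed by one store-lanes instruction (such as ARM NEON's vst3,
assuming such a target).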
 
   /* In case the vectorization factor (VF) is bigger than the number
      of elements that we can fit in a vectype (nunits), we have to generate
@@ -3290,7 +3665,8 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
           if (slp)
             {
              /* Get vectorized arguments for SLP_NODE.  */
-              vect_get_slp_defs (slp_node, &vec_oprnds, NULL, -1);
+              vect_get_slp_defs (NULL_TREE, NULL_TREE, slp_node, &vec_oprnds,
+                                 NULL, -1);
 
               vec_oprnd = VEC_index (tree, vec_oprnds, 0);
             }
@@ -3326,9 +3702,9 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
          /* We should have caught mismatched types earlier.  */
          gcc_assert (useless_type_conversion_p (vectype,
                                                 TREE_TYPE (vec_oprnd)));
-         dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
-                                                 &dummy, &ptr_incr, false,
-                                                 &inv_p);
+         dataref_ptr = vect_create_data_ref_ptr (first_stmt, aggr_type, NULL,
+                                                 NULL_TREE, &dummy, gsi,
+                                                 &ptr_incr, false, &inv_p);
          gcc_assert (bb_vinfo || !inv_p);
        }
       else
@@ -3349,76 +3725,101 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
              VEC_replace(tree, dr_chain, i, vec_oprnd);
              VEC_replace(tree, oprnds, i, vec_oprnd);
            }
-         dataref_ptr =
-               bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
+         dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
+                                        TYPE_SIZE_UNIT (aggr_type));
        }
 
-      if (strided_store)
+      if (store_lanes_p)
        {
-         result_chain = VEC_alloc (tree, heap, group_size);
-         /* Permute.  */
-         if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
-                                        &result_chain))
-           return false;
-       }
+         tree vec_array;
 
-      next_stmt = first_stmt;
-      for (i = 0; i < vec_num; i++)
-       {
-         struct ptr_info_def *pi;
-
-         if (i > 0)
-           /* Bump the vector pointer.  */
-           dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
-                                          NULL_TREE);
-
-         if (slp)
-           vec_oprnd = VEC_index (tree, vec_oprnds, i);
-         else if (strided_store)
-           /* For strided stores vectorized defs are interleaved in
-              vect_permute_store_chain().  */
-           vec_oprnd = VEC_index (tree, result_chain, i);
-
-         data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
-                            build_int_cst (reference_alias_ptr_type
-                                           (DR_REF (first_dr)), 0));
-         pi = get_ptr_info (dataref_ptr);
-         pi->align = TYPE_ALIGN_UNIT (vectype);
-          if (aligned_access_p (first_dr))
-           pi->misalign = 0;
-          else if (DR_MISALIGNMENT (first_dr) == -1)
-           {
-             TREE_TYPE (data_ref)
-               = build_aligned_type (TREE_TYPE (data_ref),
-                                     TYPE_ALIGN (TREE_TYPE (vectype)));
-             pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
-             pi->misalign = 0;
-           }
-         else
+         /* Combine all the vectors into an array.  */
+         vec_array = create_vector_array (vectype, vec_num);
+         for (i = 0; i < vec_num; i++)
            {
-             TREE_TYPE (data_ref)
-               = build_aligned_type (TREE_TYPE (data_ref),
-                                     TYPE_ALIGN (TREE_TYPE (vectype)));
-             pi->misalign = DR_MISALIGNMENT (first_dr);
+             vec_oprnd = VEC_index (tree, dr_chain, i);
+             write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
            }
 
-         /* Arguments are ready. Create the new vector stmt.  */
-         new_stmt = gimple_build_assign (data_ref, vec_oprnd);
+         /* Emit:
+              MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
+         data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
+         new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
+         gimple_call_set_lhs (new_stmt, data_ref);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);
+       }
+      else
+       {
+         new_stmt = NULL;
+         if (strided_store)
+           {
+             result_chain = VEC_alloc (tree, heap, group_size);
+             /* Permute.  */
+             vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
+                                       &result_chain);
+           }
 
-          if (slp)
-            continue;
+         next_stmt = first_stmt;
+         for (i = 0; i < vec_num; i++)
+           {
+             struct ptr_info_def *pi;
+
+             if (i > 0)
+               /* Bump the vector pointer.  */
+               dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
+                                              stmt, NULL_TREE);
+
+             if (slp)
+               vec_oprnd = VEC_index (tree, vec_oprnds, i);
+             else if (strided_store)
+               /* For strided stores vectorized defs are interleaved in
+                  vect_permute_store_chain().  */
+               vec_oprnd = VEC_index (tree, result_chain, i);
+
+             data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
+                                build_int_cst (reference_alias_ptr_type
+                                               (DR_REF (first_dr)), 0));
+             pi = get_ptr_info (dataref_ptr);
+             pi->align = TYPE_ALIGN_UNIT (vectype);
+             if (aligned_access_p (first_dr))
+               pi->misalign = 0;
+             else if (DR_MISALIGNMENT (first_dr) == -1)
+               {
+                 TREE_TYPE (data_ref)
+                   = build_aligned_type (TREE_TYPE (data_ref),
+                                         TYPE_ALIGN (elem_type));
+                 pi->align = TYPE_ALIGN_UNIT (elem_type);
+                 pi->misalign = 0;
+               }
+             else
+               {
+                 TREE_TYPE (data_ref)
+                   = build_aligned_type (TREE_TYPE (data_ref),
+                                         TYPE_ALIGN (elem_type));
+                 pi->misalign = DR_MISALIGNMENT (first_dr);
+               }
+
+             /* Arguments are ready.  Create the new vector stmt.  */
+             new_stmt = gimple_build_assign (data_ref, vec_oprnd);
+             vect_finish_stmt_generation (stmt, new_stmt, gsi);
+             mark_symbols_for_renaming (new_stmt);
 
-          if (j == 0)
-            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt =  new_stmt;
+             if (slp)
+               continue;
+
+             next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
+             if (!next_stmt)
+               break;
+           }
+       }
+      if (!slp)
+       {
+         if (j == 0)
+           STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
-
          prev_stmt_info = vinfo_for_stmt (new_stmt);
-         next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
-         if (!next_stmt)
-           break;
        }
     }
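
Schematically (pseudo-GIMPLE, dump syntax approximate), the store-lanes
branch above emits, for a group of two V4SI vectors:

      vect_array[0] = vect_x1;
      vect_array[1] = vect_x2;
      MEM_REF[(int[8] *)dataref_ptr] = STORE_LANES (vect_array);

while the else branch keeps the permute-then-store sequence, one vector
store per group member.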
 
@@ -3455,7 +3856,8 @@ perm_mask_for_reverse (tree vectype, tree *mask)
 
   mask_type = get_vectype_for_scalar_type (mask_element_type);
   nunits = TYPE_VECTOR_SUBPARTS (vectype);
-  if (TYPE_VECTOR_SUBPARTS (vectype) != TYPE_VECTOR_SUBPARTS (mask_type))
+  if (!mask_type
+      || TYPE_VECTOR_SUBPARTS (vectype) != TYPE_VECTOR_SUBPARTS (mask_type))
     return NULL;
 
   for (i = 0; i < nunits; i++)
@@ -3487,8 +3889,19 @@ reverse_vec_elements (tree x, gimple stmt, gimple_stmt_iterator *gsi)
 
   /* Generate the permute statement.  */
   perm_stmt = gimple_build_call (builtin_decl, 3, x, x, mask_vec);
+  if (!useless_type_conversion_p (vectype,
+                                 TREE_TYPE (TREE_TYPE (builtin_decl))))
+    {
+      tree tem = create_tmp_reg (TREE_TYPE (TREE_TYPE (builtin_decl)), NULL);
+      tem = make_ssa_name (tem, perm_stmt);
+      gimple_call_set_lhs (perm_stmt, tem);
+      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      perm_stmt = gimple_build_assign (NULL_TREE,
+                                      build1 (VIEW_CONVERT_EXPR,
+                                              vectype, tem));
+    }
   data_ref = make_ssa_name (perm_dest, perm_stmt);
-  gimple_call_set_lhs (perm_stmt, data_ref);
+  gimple_set_lhs (perm_stmt, data_ref);
   vect_finish_stmt_generation (stmt, perm_stmt, gsi);
 
   return data_ref;
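
(The VIEW_CONVERT_EXPR guard added above covers targets whose
builtin_vec_perm declaration returns a vector type other than VECTYPE;
for example, a permute builtin declared to return V16QI while the data
being reversed is V4SF, assuming such a target.  The call result is
stored in a temporary of the builtin's return type and then
view-converted back to VECTYPE.)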
@@ -3517,6 +3930,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
   bool nested_in_vect_loop = false;
   struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+  tree elem_type;
   tree new_temp;
   enum machine_mode mode;
   gimple new_stmt = NULL;
@@ -3533,6 +3947,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
   gimple phi = NULL;
   VEC(tree,heap) *dr_chain = NULL;
   bool strided_load = false;
+  bool load_lanes_p = false;
   gimple first_stmt;
   tree scalar_type;
   bool inv_p;
@@ -3545,6 +3960,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
   enum tree_code code;
   bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
   int vf;
+  tree aggr_type;
 
   if (loop_vinfo)
     {
@@ -3558,7 +3974,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
   /* Multiple types in SLP are handled by creating the appropriate number of
      vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
      case of SLP.  */
-  if (slp)
+  if (slp || PURE_SLP_STMT (stmt_info))
     ncopies = 1;
   else
     ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
@@ -3621,7 +4037,8 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
 
   /* The vector component type needs to be trivially convertible to the
      scalar lhs.  This should always be the case.  */
-  if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
+  elem_type = TREE_TYPE (vectype);
+  if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), elem_type))
     {
       if (vect_print_dump_info (REPORT_DETAILS))
         fprintf (vect_dump, "???  operands of different types");
@@ -3635,10 +4052,15 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
       /* FORNOW */
       gcc_assert (! nested_in_vect_loop);
 
-      /* Check if interleaving is supported.  */
-      if (!vect_strided_load_supported (vectype)
-         && !PURE_SLP_STMT (stmt_info) && !slp)
-       return false;
+      first_stmt = DR_GROUP_FIRST_DR (stmt_info);
+      if (!slp && !PURE_SLP_STMT (stmt_info))
+       {
+         group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
+         if (vect_load_lanes_supported (vectype, group_size))
+           load_lanes_p = true;
+         else if (!vect_strided_load_supported (vectype, group_size))
+           return false;
+       }
     }
 
   if (negative)
@@ -3663,12 +4085,12 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
-      vect_model_load_cost (stmt_info, ncopies, NULL);
+      vect_model_load_cost (stmt_info, ncopies, load_lanes_p, NULL);
       return true;
     }
 
   if (vect_print_dump_info (REPORT_DETAILS))
-    fprintf (vect_dump, "transform load.");
+    fprintf (vect_dump, "transform load. ncopies = %d", ncopies);
 
   /** Transform.  **/
 
@@ -3694,8 +4116,6 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
        }
       else
        vec_num = group_size;
-
-      dr_chain = VEC_alloc (tree, heap, vec_num);
     }
   else
     {
@@ -3706,6 +4126,11 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
 
   alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
   gcc_assert (alignment_support_scheme);
+  /* Targets with load-lane instructions must not require explicit
+     realignment.  */
+  gcc_assert (!load_lanes_p
+             || alignment_support_scheme == dr_aligned
+             || alignment_support_scheme == dr_unaligned_supported);
 
   /* In case the vectorization factor (VF) is bigger than the number
      of elements that we can fit in a vectype (nunits), we have to generate
@@ -3837,208 +4262,250 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
   if (negative)
     offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
 
+  if (load_lanes_p)
+    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
+  else
+    aggr_type = vectype;
+
   prev_stmt_info = NULL;
   for (j = 0; j < ncopies; j++)
     {
-      /* 1. Create the vector pointer update chain.  */
+      /* 1. Create the vector or array pointer update chain.  */
       if (j == 0)
-        dataref_ptr = vect_create_data_ref_ptr (first_stmt,
-                                               at_loop, offset,
-                                               &dummy, &ptr_incr, false,
-                                               &inv_p);
+        dataref_ptr = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
+                                               offset, &dummy, gsi,
+                                               &ptr_incr, false, &inv_p);
       else
-        dataref_ptr =
-               bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
+        dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
+                                      TYPE_SIZE_UNIT (aggr_type));
+
+      if (strided_load || slp_perm)
+       dr_chain = VEC_alloc (tree, heap, vec_num);
 
-      for (i = 0; i < vec_num; i++)
+      if (load_lanes_p)
        {
-         if (i > 0)
-           dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
-                                          NULL_TREE);
+         tree vec_array;
 
-         /* 2. Create the vector-load in the loop.  */
-         switch (alignment_support_scheme)
-           {
-           case dr_aligned:
-           case dr_unaligned_supported:
-             {
-               struct ptr_info_def *pi;
-               data_ref
-                 = build2 (MEM_REF, vectype, dataref_ptr,
-                           build_int_cst (reference_alias_ptr_type
-                                          (DR_REF (first_dr)), 0));
-               pi = get_ptr_info (dataref_ptr);
-               pi->align = TYPE_ALIGN_UNIT (vectype);
-               if (alignment_support_scheme == dr_aligned)
-                 {
-                   gcc_assert (aligned_access_p (first_dr));
-                   pi->misalign = 0;
-                 }
-               else if (DR_MISALIGNMENT (first_dr) == -1)
-                 {
-                   TREE_TYPE (data_ref)
-                     = build_aligned_type (TREE_TYPE (data_ref),
-                                           TYPE_ALIGN (TREE_TYPE (vectype)));
-                   pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
-                   pi->misalign = 0;
-                 }
-               else
-                 {
-                   TREE_TYPE (data_ref)
-                     = build_aligned_type (TREE_TYPE (data_ref),
-                                           TYPE_ALIGN (TREE_TYPE (vectype)));
-                   pi->misalign = DR_MISALIGNMENT (first_dr);
-                 }
-               break;
-             }
-           case dr_explicit_realign:
-             {
-               tree ptr, bump;
-               tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
-
-               if (compute_in_loop)
-                 msq = vect_setup_realignment (first_stmt, gsi,
-                                               &realignment_token,
-                                               dr_explicit_realign,
-                                               dataref_ptr, NULL);
-
-               new_stmt = gimple_build_assign_with_ops
-                            (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
-                             build_int_cst
-                               (TREE_TYPE (dataref_ptr),
-                                -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
-               ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
-               gimple_assign_set_lhs (new_stmt, ptr);
-               vect_finish_stmt_generation (stmt, new_stmt, gsi);
-               data_ref
-                 = build2 (MEM_REF, vectype, ptr,
-                           build_int_cst (reference_alias_ptr_type
-                                            (DR_REF (first_dr)), 0));
-               vec_dest = vect_create_destination_var (scalar_dest, vectype);
-               new_stmt = gimple_build_assign (vec_dest, data_ref);
-               new_temp = make_ssa_name (vec_dest, new_stmt);
-               gimple_assign_set_lhs (new_stmt, new_temp);
-               gimple_set_vdef (new_stmt, gimple_vdef (stmt));
-               gimple_set_vuse (new_stmt, gimple_vuse (stmt));
-               vect_finish_stmt_generation (stmt, new_stmt, gsi);
-               msq = new_temp;
-
-               bump = size_binop (MULT_EXPR, vs_minus_1,
-                                  TYPE_SIZE_UNIT (scalar_type));
-               ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
-               new_stmt = gimple_build_assign_with_ops
-                            (BIT_AND_EXPR, NULL_TREE, ptr,
-                             build_int_cst
-                               (TREE_TYPE (ptr),
-                                -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
-               ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
-               gimple_assign_set_lhs (new_stmt, ptr);
-               vect_finish_stmt_generation (stmt, new_stmt, gsi);
-               data_ref
-                 = build2 (MEM_REF, vectype, ptr,
-                           build_int_cst (reference_alias_ptr_type
-                                            (DR_REF (first_dr)), 0));
-               break;
-             }
-           case dr_explicit_realign_optimized:
-             new_stmt = gimple_build_assign_with_ops
-                          (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
-                           build_int_cst
-                             (TREE_TYPE (dataref_ptr),
-                              -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
-             new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
-             gimple_assign_set_lhs (new_stmt, new_temp);
-             vect_finish_stmt_generation (stmt, new_stmt, gsi);
-             data_ref
-               = build2 (MEM_REF, vectype, new_temp,
-                         build_int_cst (reference_alias_ptr_type
-                                          (DR_REF (first_dr)), 0));
-             break;
-           default:
-             gcc_unreachable ();
-           }
-         vec_dest = vect_create_destination_var (scalar_dest, vectype);
-         new_stmt = gimple_build_assign (vec_dest, data_ref);
-         new_temp = make_ssa_name (vec_dest, new_stmt);
-         gimple_assign_set_lhs (new_stmt, new_temp);
+         vec_array = create_vector_array (vectype, vec_num);
+
+         /* Emit:
+              VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
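+         /* A single LOAD_LANES call loads the whole interleaved group
+            and splits it into VEC_NUM vectors; targets such as ARM NEON
+            can expand it to one structure-load (vld2/vld3/vld4-style)
+            instruction, though the expansion is a target detail.  */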
+         data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
+         new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
+         gimple_call_set_lhs (new_stmt, vec_array);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);
 
-         /* 3. Handle explicit realignment if necessary/supported.  Create in
-               loop: vec_dest = realign_load (msq, lsq, realignment_token)  */
-         if (alignment_support_scheme == dr_explicit_realign_optimized
-             || alignment_support_scheme == dr_explicit_realign)
+         /* Extract each vector into an SSA_NAME.  */
+         for (i = 0; i < vec_num; i++)
            {
-             tree tmp;
+             new_temp = read_vector_array (stmt, gsi, scalar_dest,
+                                           vec_array, i);
+             VEC_quick_push (tree, dr_chain, new_temp);
+           }
 
-             lsq = gimple_assign_lhs (new_stmt);
-             if (!realignment_token)
-               realignment_token = dataref_ptr;
+         /* Record the mapping between SSA_NAMEs and statements.  */
+         vect_record_strided_load_vectors (stmt, dr_chain);
+       }
+      else
+       {
+         for (i = 0; i < vec_num; i++)
+           {
+             if (i > 0)
+               dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
+                                              stmt, NULL_TREE);
+
+             /* 2. Create the vector-load in the loop.  */
+             switch (alignment_support_scheme)
+               {
+               case dr_aligned:
+               case dr_unaligned_supported:
+                 {
+                   struct ptr_info_def *pi;
+                   data_ref
+                     = build2 (MEM_REF, vectype, dataref_ptr,
+                               build_int_cst (reference_alias_ptr_type
+                                              (DR_REF (first_dr)), 0));
+                   pi = get_ptr_info (dataref_ptr);
+                   pi->align = TYPE_ALIGN_UNIT (vectype);
+                   if (alignment_support_scheme == dr_aligned)
+                     {
+                       gcc_assert (aligned_access_p (first_dr));
+                       pi->misalign = 0;
+                     }
+                   else if (DR_MISALIGNMENT (first_dr) == -1)
+                     {
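+                       /* Negative DR_MISALIGNMENT means the misalignment
+                          is unknown at compile time; conservatively
+                          assume only the element alignment.  */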
+                       TREE_TYPE (data_ref)
+                         = build_aligned_type (TREE_TYPE (data_ref),
+                                               TYPE_ALIGN (elem_type));
+                       pi->align = TYPE_ALIGN_UNIT (elem_type);
+                       pi->misalign = 0;
+                     }
+                   else
+                     {
+                       TREE_TYPE (data_ref)
+                         = build_aligned_type (TREE_TYPE (data_ref),
+                                               TYPE_ALIGN (elem_type));
+                       pi->misalign = DR_MISALIGNMENT (first_dr);
+                     }
+                   break;
+                 }
+               case dr_explicit_realign:
+                 {
+                   tree ptr, bump;
+                   tree vs_minus_1;
+
+                   vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
+
+                   if (compute_in_loop)
+                     msq = vect_setup_realignment (first_stmt, gsi,
+                                                   &realignment_token,
+                                                   dr_explicit_realign,
+                                                   dataref_ptr, NULL);
+
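+                   /* Mask DATAREF_PTR down to the nearest vector-aligned
+                      address and load the first aligned vector (MSQ)
+                      from it.  */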
+                   new_stmt = gimple_build_assign_with_ops
+                                (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
+                                 build_int_cst
+                                 (TREE_TYPE (dataref_ptr),
+                                  -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
+                   ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
+                   gimple_assign_set_lhs (new_stmt, ptr);
+                   vect_finish_stmt_generation (stmt, new_stmt, gsi);
+                   data_ref
+                     = build2 (MEM_REF, vectype, ptr,
+                               build_int_cst (reference_alias_ptr_type
+                                                (DR_REF (first_dr)), 0));
+                   vec_dest = vect_create_destination_var (scalar_dest,
+                                                           vectype);
+                   new_stmt = gimple_build_assign (vec_dest, data_ref);
+                   new_temp = make_ssa_name (vec_dest, new_stmt);
+                   gimple_assign_set_lhs (new_stmt, new_temp);
+                   gimple_set_vdef (new_stmt, gimple_vdef (stmt));
+                   gimple_set_vuse (new_stmt, gimple_vuse (stmt));
+                   vect_finish_stmt_generation (stmt, new_stmt, gsi);
+                   msq = new_temp;
+
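+                   /* Likewise compute the aligned address covering the
+                      last byte of the unaligned access and load the
+                      second aligned vector (LSQ) from it.  */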
+                   bump = size_binop (MULT_EXPR, vs_minus_1,
+                                      TYPE_SIZE_UNIT (scalar_type));
+                   ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
+                   new_stmt = gimple_build_assign_with_ops
+                                (BIT_AND_EXPR, NULL_TREE, ptr,
+                                 build_int_cst
+                                 (TREE_TYPE (ptr),
+                                  -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
+                   ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
+                   gimple_assign_set_lhs (new_stmt, ptr);
+                   vect_finish_stmt_generation (stmt, new_stmt, gsi);
+                   data_ref
+                     = build2 (MEM_REF, vectype, ptr,
+                               build_int_cst (reference_alias_ptr_type
+                                                (DR_REF (first_dr)), 0));
+                   break;
+                 }
+               case dr_explicit_realign_optimized:
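+                 /* The aligned MSQ load was emitted outside the loop by
+                    vect_setup_realignment; only the aligned LSQ load is
+                    needed here.  */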
+                 new_stmt = gimple_build_assign_with_ops
+                              (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
+                               build_int_cst
+                                 (TREE_TYPE (dataref_ptr),
+                                  -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
+                 new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr),
+                                           new_stmt);
+                 gimple_assign_set_lhs (new_stmt, new_temp);
+                 vect_finish_stmt_generation (stmt, new_stmt, gsi);
+                 data_ref
+                   = build2 (MEM_REF, vectype, new_temp,
+                             build_int_cst (reference_alias_ptr_type
+                                              (DR_REF (first_dr)), 0));
+                 break;
+               default:
+                 gcc_unreachable ();
+               }
              vec_dest = vect_create_destination_var (scalar_dest, vectype);
-             tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
-                           realignment_token);
-             new_stmt = gimple_build_assign (vec_dest, tmp);
+             new_stmt = gimple_build_assign (vec_dest, data_ref);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
+             mark_symbols_for_renaming (new_stmt);
 
-             if (alignment_support_scheme == dr_explicit_realign_optimized)
+             /* 3. Handle explicit realignment if necessary/supported.
+                Create in loop:
+                  vec_dest = realign_load (msq, lsq, realignment_token)  */
+             if (alignment_support_scheme == dr_explicit_realign_optimized
+                 || alignment_support_scheme == dr_explicit_realign)
                {
-                 gcc_assert (phi);
-                 if (i == vec_num - 1 && j == ncopies - 1)
-                   add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
-                                UNKNOWN_LOCATION);
-                 msq = lsq;
+                 lsq = gimple_assign_lhs (new_stmt);
+                 if (!realignment_token)
+                   realignment_token = dataref_ptr;
+                 vec_dest = vect_create_destination_var (scalar_dest, vectype);
+                 new_stmt
+                   = gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR,
+                                                    vec_dest, msq, lsq,
+                                                    realignment_token);
+                 new_temp = make_ssa_name (vec_dest, new_stmt);
+                 gimple_assign_set_lhs (new_stmt, new_temp);
+                 vect_finish_stmt_generation (stmt, new_stmt, gsi);
+
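+                 /* In the optimized scheme this iteration's LSQ becomes
+                    the next iteration's MSQ, fed back through the loop
+                    phi created for the realignment.  */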
+                 if (alignment_support_scheme == dr_explicit_realign_optimized)
+                   {
+                     gcc_assert (phi);
+                     if (i == vec_num - 1 && j == ncopies - 1)
+                       add_phi_arg (phi, lsq,
+                                    loop_latch_edge (containing_loop),
+                                    UNKNOWN_LOCATION);
+                     msq = lsq;
+                   }
                }
-           }
 
-         /* 4. Handle invariant-load.  */
-         if (inv_p && !bb_vinfo)
-           {
-             gcc_assert (!strided_load);
-             gcc_assert (nested_in_vect_loop_p (loop, stmt));
-             if (j == 0)
+             /* 4. Handle invariant-load.  */
+             if (inv_p && !bb_vinfo)
                {
-                 int k;
-                 tree t = NULL_TREE;
-                 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
-
-                 /* CHECKME: bitpos depends on endianess?  */
-                 bitpos = bitsize_zero_node;
-                 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
-                                   bitsize, bitpos);
-                 vec_dest =
-                       vect_create_destination_var (scalar_dest, NULL_TREE);
-                 new_stmt = gimple_build_assign (vec_dest, vec_inv);
-                  new_temp = make_ssa_name (vec_dest, new_stmt);
-                 gimple_assign_set_lhs (new_stmt, new_temp);
-                 vect_finish_stmt_generation (stmt, new_stmt, gsi);
+                 gcc_assert (!strided_load);
+                 gcc_assert (nested_in_vect_loop_p (loop, stmt));
+                 if (j == 0)
+                   {
+                     int k;
+                     tree t = NULL_TREE;
+                     tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
+
+                     /* CHECKME: bitpos depends on endianness?  */
+                     bitpos = bitsize_zero_node;
+                     vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
+                                       bitsize, bitpos);
+                     vec_dest = vect_create_destination_var (scalar_dest,
+                                                             NULL_TREE);
+                     new_stmt = gimple_build_assign (vec_dest, vec_inv);
+                     new_temp = make_ssa_name (vec_dest, new_stmt);
+                     gimple_assign_set_lhs (new_stmt, new_temp);
+                     vect_finish_stmt_generation (stmt, new_stmt, gsi);
+
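+                     /* Replicate the extracted scalar across all NUNITS
+                        elements to build the invariant vector.  */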
+                     for (k = nunits - 1; k >= 0; --k)
+                       t = tree_cons (NULL_TREE, new_temp, t);
+                     /* FIXME: use build_constructor directly.  */
+                     vec_inv = build_constructor_from_list (vectype, t);
+                     new_temp = vect_init_vector (stmt, vec_inv,
+                                                  vectype, gsi);
+                     new_stmt = SSA_NAME_DEF_STMT (new_temp);
+                   }
+                 else
+                   gcc_unreachable (); /* FORNOW. */
+               }
 
-                 for (k = nunits - 1; k >= 0; --k)
-                   t = tree_cons (NULL_TREE, new_temp, t);
-                 /* FIXME: use build_constructor directly.  */
-                 vec_inv = build_constructor_from_list (vectype, t);
-                 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
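+             /* For a negative (downward) step the elements were loaded
+                in reverse order; permute them back into the original
+                order.  */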
+             if (negative)
+               {
+                 new_temp = reverse_vec_elements (new_temp, stmt, gsi);
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                }
-             else
-               gcc_unreachable (); /* FORNOW. */
-           }
 
-         if (negative)
-           {
-             new_temp = reverse_vec_elements (new_temp, stmt, gsi);
-             new_stmt = SSA_NAME_DEF_STMT (new_temp);
-           }
+             /* Collect vector loads and later create their permutation in
+                vect_transform_strided_load ().  */
+             if (strided_load || slp_perm)
+               VEC_quick_push (tree, dr_chain, new_temp);
 
-         /* Collect vector loads and later create their permutation in
-            vect_transform_strided_load ().  */
-          if (strided_load || slp_perm)
-            VEC_quick_push (tree, dr_chain, new_temp);
-
-         /* Store vector loads in the corresponding SLP_NODE.  */
-         if (slp && !slp_perm)
-           VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
+             /* Store vector loads in the corresponding SLP_NODE.  */
+             if (slp && !slp_perm)
+               VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
+                               new_stmt);
+           }
        }
 
       if (slp && !slp_perm)
@@ -4057,12 +4524,9 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
         {
           if (strided_load)
            {
-             if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
-               return false;
-
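+             /* With LOAD_LANES the load instruction itself de-interleaves
+                the group, so the explicit permutation is needed only in
+                the other cases.  */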
+             if (!load_lanes_p)
+               vect_transform_strided_load (stmt, dr_chain, group_size, gsi);
              *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
-              VEC_free (tree, heap, dr_chain);
-             dr_chain = VEC_alloc (tree, heap, group_size);
            }
           else
            {
@@ -4073,11 +4537,10 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
         }
+      if (dr_chain)
+       VEC_free (tree, heap, dr_chain);
     }
 
-  if (dr_chain)
-    VEC_free (tree, heap, dr_chain);
-
   return true;
 }
 
@@ -4168,6 +4631,10 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
   /* FORNOW: unsupported in basic block SLP.  */
   gcc_assert (loop_vinfo);
 
+  /* FORNOW: SLP not supported.  */
+  if (STMT_SLP_TYPE (stmt_info))
+    return false;
+
   gcc_assert (ncopies >= 1);
   if (reduc_index && ncopies > 1)
     return false; /* FORNOW */
@@ -4180,10 +4647,6 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
            && reduc_def))
     return false;
 
-  /* FORNOW: SLP not supported.  */
-  if (STMT_SLP_TYPE (stmt_info))
-    return false;
-
   /* FORNOW: not yet supported.  */
   if (STMT_VINFO_LIVE_P (stmt_info))
     {
@@ -4301,7 +4764,7 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
                                                            vec_else_clause);
        }
 
-      /* Arguments are ready. Create the new vector stmt.  */
+      /* Arguments are ready.  Create the new vector stmt.  */
       vec_compare = build2 (TREE_CODE (cond_expr), vectype,
                            vec_cond_lhs, vec_cond_rhs);
       vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
@@ -4430,6 +4893,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
       ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
             || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
             || vectorizable_conversion (stmt, NULL, NULL, NULL)
+            || vectorizable_shift (stmt, NULL, NULL, NULL)
             || vectorizable_operation (stmt, NULL, NULL, NULL)
             || vectorizable_assignment (stmt, NULL, NULL, NULL)
             || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
@@ -4440,7 +4904,8 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
     else
       {
         if (bb_vinfo)
-          ok = (vectorizable_operation (stmt, NULL, NULL, node)
+          ok = (vectorizable_shift (stmt, NULL, NULL, node)
+                || vectorizable_operation (stmt, NULL, NULL, node)
                 || vectorizable_assignment (stmt, NULL, NULL, node)
                 || vectorizable_load (stmt, NULL, NULL, node, NULL)
                 || vectorizable_store (stmt, NULL, NULL, node));
@@ -4479,27 +4944,6 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
        return false;
     }
 
-  if (!PURE_SLP_STMT (stmt_info))
-    {
-      /* Groups of strided accesses whose size is not a power of 2 are not
-         vectorizable yet using loop-vectorization.  Therefore, if this stmt
-        feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
-        loop-based vectorized), the loop cannot be vectorized.  */
-      if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
-          && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
-                                        DR_GROUP_FIRST_DR (stmt_info)))) == -1)
-        {
-          if (vect_print_dump_info (REPORT_DETAILS))
-            {
-              fprintf (vect_dump, "not vectorized: the size of group "
-                                  "of strided accesses is not a power of 2");
-              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
-            }
-
-          return false;
-        }
-    }
-
   return true;
 }
 
@@ -4542,6 +4986,11 @@ vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
       gcc_assert (done);
       break;
 
+    case shift_vec_info_type:
+      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
+      gcc_assert (done);
+      break;
+
     case op_vec_info_type:
       done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
       gcc_assert (done);
@@ -4784,13 +5233,13 @@ free_stmt_vec_info (gimple stmt)
 }
 
 
-/* Function get_vectype_for_scalar_type.
+/* Function get_vectype_for_scalar_type_and_size.
 
-   Returns the vector type corresponding to SCALAR_TYPE as supported
+   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
    by the target.  */
 
-tree
-get_vectype_for_scalar_type (tree scalar_type)
+static tree
+get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
 {
   enum machine_mode inner_mode = TYPE_MODE (scalar_type);
   enum machine_mode simd_mode;
@@ -4818,7 +5267,12 @@ get_vectype_for_scalar_type (tree scalar_type)
       && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
     return NULL_TREE;
 
-  simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
+  /* If no size was supplied, use the mode the target prefers.  Otherwise
+     look up a vector mode of the specified size.  */
+  if (size == 0)
+    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
+  else
+    simd_mode = mode_for_vector (inner_mode, size / nbytes);
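+  /* E.g. a 4-byte int with SIZE == 16 asks for a four-element vector
+     mode such as V4SImode, if the target provides one.  */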
   nunits = GET_MODE_SIZE (simd_mode) / nbytes;
   if (nunits <= 1)
     return NULL_TREE;
@@ -4850,15 +5304,35 @@ get_vectype_for_scalar_type (tree scalar_type)
   return vectype;
 }
 
+/* The vector size (in bytes) the vectorizer currently uses; zero until
+   the first get_vectype_for_scalar_type call latches the preferred size.  */
+unsigned int current_vector_size;
+
+/* Function get_vectype_for_scalar_type.
+
+   Returns the vector type corresponding to SCALAR_TYPE as supported
+   by the target.  */
+
+tree
+get_vectype_for_scalar_type (tree scalar_type)
+{
+  tree vectype;
+  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
+                                                 current_vector_size);
+  if (vectype
+      && current_vector_size == 0)
+    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
+  return vectype;
+}
+
 /* Function get_same_sized_vectype
 
    Returns a vector type corresponding to SCALAR_TYPE with the same size
    as VECTOR_TYPE, if supported by the target.  */
 
 tree
-get_same_sized_vectype (tree scalar_type, tree vector_type ATTRIBUTE_UNUSED)
+get_same_sized_vectype (tree scalar_type, tree vector_type)
 {
-  return get_vectype_for_scalar_type (scalar_type);
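+  /* E.g. pairing a double with a 16-byte V4SF vector type yields V2DF,
+     provided the target supports that mode.  */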
+  return get_vectype_for_scalar_type_and_size
+          (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
 }
 
 /* Function vect_is_simple_use.