2010-07-24 Tobias Burnus <burnus@net-b.de>
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index a70ebfe..e0ae559 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -28,7 +28,6 @@ along with GCC; see the file COPYING3.  If not see
 #include "tree.h"
 #include "target.h"
 #include "basic-block.h"
-#include "diagnostic.h"
 #include "tree-pretty-print.h"
 #include "gimple-pretty-print.h"
 #include "tree-flow.h"
@@ -38,6 +37,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "expr.h"
 #include "recog.h"
 #include "optabs.h"
+#include "diagnostic-core.h"
 #include "toplev.h"
 #include "tree-vectorizer.h"
 #include "langhooks.h"
@@ -546,6 +546,18 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
 }
 
 
+/* Get the cost of a statement by calling the target's cost builtin.  */
+
+static inline int
+vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
+{
+  tree dummy_type = NULL;
+  int dummy = 0;
+
+  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
+                                                       dummy_type, dummy);
+}
+
 int
 cost_for_stmt (gimple stmt)
 {
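
All statement-kind costs now funnel through the targetm.vectorize.builtin_vectorization_cost hook, whose shape can be read off the call above: a cost kind, a vector type, and a misalignment value. A minimal sketch of a default implementation, covering only the cost kinds this patch uses (the real default in targhooks.c may differ in its table of constants):

/* Sketch of a default cost hook: flat costs, with unaligned accesses
   priced higher.  The switch covers the kinds referenced in this patch.  */
int
default_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
                                    tree vectype ATTRIBUTE_UNUSED,
                                    int misalign ATTRIBUTE_UNUSED)
{
  switch (type_of_cost)
    {
    case scalar_stmt:
    case scalar_load:
    case scalar_store:
    case vector_stmt:
    case vector_load:
    case vector_store:
    case scalar_to_vec:
      return 1;

    case unaligned_load:
    case unaligned_store:
      return 2;

    default:
      gcc_unreachable ();
    }
}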
@@ -554,9 +566,9 @@ cost_for_stmt (gimple stmt)
   switch (STMT_VINFO_TYPE (stmt_info))
   {
   case load_vec_info_type:
-    return TARG_SCALAR_LOAD_COST;
+    return vect_get_stmt_cost (scalar_load);
   case store_vec_info_type:
-    return TARG_SCALAR_STORE_COST;
+    return vect_get_stmt_cost (scalar_store);
   case op_vec_info_type:
   case condition_vec_info_type:
   case assignment_vec_info_type:
@@ -566,7 +578,7 @@ cost_for_stmt (gimple stmt)
   case type_demotion_vec_info_type:
   case type_conversion_vec_info_type:
   case call_vec_info_type:
-    return TARG_SCALAR_STMT_COST;
+    return vect_get_stmt_cost (scalar_stmt);
   case undef_vec_info_type:
   default:
     gcc_unreachable ();
@@ -590,13 +602,13 @@ vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
   if (PURE_SLP_STMT (stmt_info))
     return;
 
-  inside_cost = ncopies * TARG_VEC_STMT_COST;
+  inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);
 
   /* FORNOW: Assuming maximum 2 args per stmts.  */
   for (i = 0; i < 2; i++)
     {
       if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
-       outside_cost += TARG_SCALAR_TO_VEC_COST;
+       outside_cost += vect_get_stmt_cost (scalar_to_vec);
     }
 
   if (vect_print_dump_info (REPORT_COST))
@@ -637,21 +649,39 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       enum vect_def_type dt, slp_tree slp_node)
 {
   int group_size;
-  int inside_cost = 0, outside_cost = 0;
+  unsigned int inside_cost = 0, outside_cost = 0;
+  struct data_reference *first_dr;
+  gimple first_stmt;
 
   /* The SLP costs were already calculated during SLP tree build.  */
   if (PURE_SLP_STMT (stmt_info))
     return;
 
   if (dt == vect_constant_def || dt == vect_external_def)
-    outside_cost = TARG_SCALAR_TO_VEC_COST;
+    outside_cost = vect_get_stmt_cost (scalar_to_vec);
 
   /* Strided access?  */
-  if (DR_GROUP_FIRST_DR (stmt_info) && !slp_node)
-    group_size = vect_cost_strided_group_size (stmt_info);
+  if (DR_GROUP_FIRST_DR (stmt_info))
+    {
+      if (slp_node)
+        {
+          first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
+          group_size = 1;
+        }
+      else
+        {
+          first_stmt = DR_GROUP_FIRST_DR (stmt_info);
+          group_size = vect_cost_strided_group_size (stmt_info);
+        }
+
+      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
+    }
   /* Not a strided access.  */
   else
-    group_size = 1;
+    {
+      group_size = 1;
+      first_dr = STMT_VINFO_DATA_REF (stmt_info);
+    }
 
   /* Is this an access in a group of stores, which provide strided access?
      If so, add in the cost of the permutes.  */
@@ -659,7 +689,7 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
     {
       /* Uses a high and low interleave operation for each needed permute.  */
       inside_cost = ncopies * exact_log2(group_size) * group_size
-             * TARG_VEC_STMT_COST;
+        * vect_get_stmt_cost (vector_stmt);
 
       if (vect_print_dump_info (REPORT_COST))
         fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
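
As a worked example of the interleave term above: a store group of size 4 with ncopies = 1 needs exact_log2 (4) = 2 interleave stages over 4 vectors, so the model charges 1 * 2 * 4 = 8 vector_stmt operations. The same term factored into a helper, purely for clarity (a sketch, not part of the patch):

/* Cost of the high/low interleaves for one strided store group.  */
static int
strided_permute_cost (int ncopies, int group_size)
{
  return ncopies * exact_log2 (group_size) * group_size
         * vect_get_stmt_cost (vector_stmt);
}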
@@ -668,7 +698,7 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
     }
 
   /* Costs of the stores.  */
-  inside_cost += ncopies * TARG_VEC_STORE_COST;
+  vect_get_store_cost (first_dr, ncopies, &inside_cost);
 
   if (vect_print_dump_info (REPORT_COST))
     fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
@@ -680,6 +710,49 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
 }
 
 
+/* Calculate the cost of the memory access (a store) described by DR.  */
+void
+vect_get_store_cost (struct data_reference *dr, int ncopies,
+                     unsigned int *inside_cost)
+{
+  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
+
+  switch (alignment_support_scheme)
+    {
+    case dr_aligned:
+      {
+        *inside_cost += ncopies * vect_get_stmt_cost (vector_store);
+
+        if (vect_print_dump_info (REPORT_COST))
+          fprintf (vect_dump, "vect_model_store_cost: aligned.");
+
+        break;
+      }
+
+    case dr_unaligned_supported:
+      {
+        gimple stmt = DR_STMT (dr);
+        stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+        tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+
+        /* Here, we assign an additional cost for the unaligned store.  */
+        *inside_cost += ncopies
+          * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
+                                 vectype, DR_MISALIGNMENT (dr));
+
+        if (vect_print_dump_info (REPORT_COST))
+          fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
+                   "hardware.");
+
+        break;
+      }
+
+    default:
+      gcc_unreachable ();
+    }
+}
+
+
 /* Function vect_model_load_cost
 
    Models cost for loads.  In the case of strided accesses, the last access
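
Note that the dr_unaligned_supported case in vect_get_store_cost above bypasses vect_get_stmt_cost and calls the hook directly, so the target sees the vector type and the recorded misalignment (DR_MISALIGNMENT is -1 when unknown). A hypothetical hook fragment showing why that matters; the names here are illustrative, not any real target's code:

/* Illustrative only: price unaligned stores by how bad the known
   misalignment is, deferring everything else to the default hook.  */
static int
example_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
                                    tree vectype, int misalign)
{
  if (type_of_cost == unaligned_store)
    return (misalign == -1 || misalign % 8 != 0) ? 4 : 2;
  return default_builtin_vectorization_cost (type_of_cost, vectype,
                                             misalign);
}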
@@ -692,10 +765,9 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
 
 {
   int group_size;
-  int alignment_support_cheme;
   gimple first_stmt;
   struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
-  int inside_cost = 0, outside_cost = 0;
+  unsigned int inside_cost = 0, outside_cost = 0;
 
   /* The SLP costs were already calculated during SLP tree build.  */
   if (PURE_SLP_STMT (stmt_info))
@@ -715,28 +787,47 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
       first_dr = dr;
     }
 
-  alignment_support_cheme = vect_supportable_dr_alignment (first_dr);
-
   /* Is this an access in a group of loads providing strided access?
      If so, add in the cost of the permutes.  */
   if (group_size > 1)
     {
       /* Uses an even and odd extract operations for each needed permute.  */
       inside_cost = ncopies * exact_log2(group_size) * group_size
-       * TARG_VEC_STMT_COST;
+       * vect_get_stmt_cost (vector_stmt);
 
       if (vect_print_dump_info (REPORT_COST))
         fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
                  group_size);
-
     }
 
   /* The loads themselves.  */
-  switch (alignment_support_cheme)
+  vect_get_load_cost (first_dr, ncopies,
+         ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node),
+         &inside_cost, &outside_cost);
+
+  if (vect_print_dump_info (REPORT_COST))
+    fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
+             "outside_cost = %d .", inside_cost, outside_cost);
+
+  /* Set the costs either in STMT_INFO or SLP_NODE (if it exists).  */
+  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
+  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
+}
+
+
+/* Calculate the cost of the memory access (a load) described by DR.  */
+void
+vect_get_load_cost (struct data_reference *dr, int ncopies,
+                    bool add_realign_cost, unsigned int *inside_cost,
+                    unsigned int *outside_cost)
+{
+  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
+
+  switch (alignment_support_scheme)
     {
     case dr_aligned:
       {
-        inside_cost += ncopies * TARG_VEC_LOAD_COST;
+        *inside_cost += ncopies * vect_get_stmt_cost (vector_load);
 
         if (vect_print_dump_info (REPORT_COST))
           fprintf (vect_dump, "vect_model_load_cost: aligned.");
@@ -745,9 +836,14 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
       }
     case dr_unaligned_supported:
       {
-        /* Here, we assign an additional cost for the unaligned load.  */
-        inside_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST;
+        gimple stmt = DR_STMT (dr);
+        stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+        tree vectype = STMT_VINFO_VECTYPE (stmt_info);
 
+        /* Here, we assign an additional cost for the unaligned load.  */
+        *inside_cost += ncopies
+          * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
+                                           vectype, DR_MISALIGNMENT (dr));
         if (vect_print_dump_info (REPORT_COST))
           fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
                    "hardware.");
@@ -756,13 +852,14 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
       }
     case dr_explicit_realign:
       {
-        inside_cost += ncopies * (2*TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
+        *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
+           + vect_get_stmt_cost (vector_stmt));
 
         /* FIXME: If the misalignment remains fixed across the iterations of
            the containing loop, the following cost should be added to the
            outside costs.  */
         if (targetm.vectorize.builtin_mask_for_load)
-          inside_cost += TARG_VEC_STMT_COST;
+          *inside_cost += vect_get_stmt_cost (vector_stmt);
 
         break;
       }
@@ -779,29 +876,21 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
            access in the group. Inside the loop, there is a load op
            and a realignment op.  */
 
-        if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node)
+        if (add_realign_cost)
           {
-            outside_cost = 2*TARG_VEC_STMT_COST;
+            *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
             if (targetm.vectorize.builtin_mask_for_load)
-              outside_cost += TARG_VEC_STMT_COST;
+              *outside_cost += vect_get_stmt_cost (vector_stmt);
           }
 
-        inside_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
-
+        *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
+          + vect_get_stmt_cost (vector_stmt));
         break;
       }
 
     default:
       gcc_unreachable ();
     }
-
-  if (vect_print_dump_info (REPORT_COST))
-    fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
-             "outside_cost = %d .", inside_cost, outside_cost);
-
-  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
-  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
-  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
 }
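
Factoring the load costing out of vect_model_load_cost gives the SLP and loop paths one entry point; the caller decides via ADD_REALIGN_COST whether the one-time realignment setup may be charged outside the loop. A minimal usage sketch mirroring the call above:

/* Price NCOPIES vector loads through FIRST_DR; realignment setup,
   if any, is charged to the loop prologue.  */
unsigned int inside_cost = 0, outside_cost = 0;
vect_get_load_cost (first_dr, ncopies, /*add_realign_cost=*/true,
                    &inside_cost, &outside_cost);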
 
 
@@ -1829,7 +1918,7 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
   tree def;
   gimple def_stmt;
   enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
-  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
+  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
   int ncopies;
   int i, j;
   VEC(tree,heap) *vec_oprnds = NULL;
@@ -1837,6 +1926,8 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
   bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
   gimple new_stmt = NULL;
   stmt_vec_info prev_stmt_info = NULL;
+  enum tree_code code;
+  tree vectype_in;
 
   /* Multiple types in SLP are handled by creating the appropriate number of
      vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
@@ -1862,19 +1953,31 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
   if (TREE_CODE (scalar_dest) != SSA_NAME)
     return false;
 
+  code = gimple_assign_rhs_code (stmt);
   if (gimple_assign_single_p (stmt)
-      || gimple_assign_rhs_code (stmt) == PAREN_EXPR)
+      || code == PAREN_EXPR
+      || CONVERT_EXPR_CODE_P (code))
     op = gimple_assign_rhs1 (stmt);
   else
     return false;
 
-  if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[0]))
+  if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
+                            &def_stmt, &def, &dt[0], &vectype_in))
     {
       if (vect_print_dump_info (REPORT_DETAILS))
         fprintf (vect_dump, "use not simple.");
       return false;
     }
 
+  /* We can handle NOP_EXPR conversions that do not change the number
+     of elements or the vector size.  */
+  if (CONVERT_EXPR_CODE_P (code)
+      && (!vectype_in
+         || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
+         || (GET_MODE_SIZE (TYPE_MODE (vectype))
+             != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
+    return false;
+
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
@@ -1903,6 +2006,8 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
       /* Arguments are ready. create the new vector stmt.  */
       for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++)
        {
+        if (CONVERT_EXPR_CODE_P (code))
+          vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
          new_stmt = gimple_build_assign (vec_dest, vop);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
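
The net effect of the vectorizable_assignment changes: a conversion that changes neither the element count nor the vector size is handled as a plain vector copy through a VIEW_CONVERT_EXPR. An illustrative C loop of the kind this should now vectorize (not a testcase taken from the patch):

void
copy_convert (int *a, unsigned int *b, int n)
{
  int i;
  /* int -> unsigned int keeps both the number of vector elements
     and the vector size, so only a view-convert is required.  */
  for (i = 0; i < n; i++)
    b[i] = (unsigned int) a[i];
}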
@@ -2077,8 +2182,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
        {
          optab = optab_for_tree_code (code, vectype, optab_scalar);
          if (optab
-             && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
-                 != CODE_FOR_nothing))
+             && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
            {
              scalar_shift_arg = true;
              if (vect_print_dump_info (REPORT_DETAILS))
@@ -2088,7 +2192,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
            {
              optab = optab_for_tree_code (code, vectype, optab_vector);
              if (optab
-                 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
+                 && (optab_handler (optab, TYPE_MODE (vectype))
                      != CODE_FOR_nothing))
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
@@ -2122,7 +2226,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
       return false;
     }
   vec_mode = TYPE_MODE (vectype);
-  icode = (int) optab_handler (optab, vec_mode)->insn_code;
+  icode = (int) optab_handler (optab, vec_mode);
   if (icode == CODE_FOR_nothing)
     {
       if (vect_print_dump_info (REPORT_DETAILS))
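
These hunks track an optabs interface change: optab_handler now returns the insn code itself rather than a struct whose insn_code field must be read. A simplified sketch of the accessor this patch assumes (the real definition lives in optabs.h and may bias the stored codes so that zero-initialized entries read back as CODE_FOR_nothing):

static inline enum insn_code
optab_handler (optab op, enum machine_mode mode)
{
  /* Simplified: return the handler's insn code for MODE directly.  */
  return (enum insn_code) op->handlers[(int) mode].insn_code;
}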
@@ -2999,7 +3103,8 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
       && TREE_CODE (scalar_dest) != INDIRECT_REF
       && TREE_CODE (scalar_dest) != COMPONENT_REF
       && TREE_CODE (scalar_dest) != IMAGPART_EXPR
-      && TREE_CODE (scalar_dest) != REALPART_EXPR)
+      && TREE_CODE (scalar_dest) != REALPART_EXPR
+      && TREE_CODE (scalar_dest) != MEM_REF)
     return false;
 
   gcc_assert (gimple_assign_single_p (stmt));
@@ -3023,7 +3128,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
   vec_mode = TYPE_MODE (vectype);
   /* FORNOW. In some cases can vectorize even if data-type not supported
      (e.g. - array initialization with 0).  */
-  if (optab_handler (mov_optab, (int)vec_mode)->insn_code == CODE_FOR_nothing)
+  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
     return false;
 
   if (!STMT_VINFO_DATA_REF (stmt_info))
@@ -3114,7 +3219,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
   dr_chain = VEC_alloc (tree, heap, group_size);
   oprnds = VEC_alloc (tree, heap, group_size);
 
-  alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
+  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
   gcc_assert (alignment_support_scheme);
 
   /* In case the vectorization factor (VF) is bigger than the number
@@ -3255,7 +3360,10 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
            vec_oprnd = VEC_index (tree, result_chain, i);
 
           if (aligned_access_p (first_dr))
-            data_ref = build_fold_indirect_ref (dataref_ptr);
+           data_ref
+             = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
+                       build_int_cst (reference_alias_ptr_type
+                                        (DR_REF (first_dr)), 0));
           else
           {
             int mis = DR_MISALIGNMENT (first_dr);
@@ -3264,11 +3372,6 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
             data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
            }
 
-         /* If accesses through a pointer to vectype do not alias the original
-            memory reference we have a problem.  This should never happen.  */
-         gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
-                     get_alias_set (gimple_assign_lhs (stmt))));
-
          /* Arguments are ready. Create the new vector stmt.  */
          new_stmt = gimple_build_assign (data_ref, vec_oprnd);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
@@ -3321,7 +3424,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
   struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
   tree new_temp;
-  int mode;
+  enum machine_mode mode;
   gimple new_stmt = NULL;
   tree dummy;
   enum dr_alignment_support alignment_support_scheme;
@@ -3394,18 +3497,19 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
       && code != INDIRECT_REF
       && code != COMPONENT_REF
       && code != IMAGPART_EXPR
-      && code != REALPART_EXPR)
+      && code != REALPART_EXPR
+      && code != MEM_REF)
     return false;
 
   if (!STMT_VINFO_DATA_REF (stmt_info))
     return false;
 
   scalar_type = TREE_TYPE (DR_REF (dr));
-  mode = (int) TYPE_MODE (vectype);
+  mode = TYPE_MODE (vectype);
 
   /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - data copies).  */
-  if (optab_handler (mov_optab, mode)->insn_code == CODE_FOR_nothing)
+  if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
     {
       if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Aligned load, but unsupported type.");
@@ -3478,7 +3582,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
       group_size = vec_num = 1;
     }
 
-  alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
+  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
   gcc_assert (alignment_support_scheme);
 
   /* In case the vectorization factor (VF) is bigger than the number
@@ -3632,7 +3736,10 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
            {
            case dr_aligned:
              gcc_assert (aligned_access_p (first_dr));
-             data_ref = build_fold_indirect_ref (dataref_ptr);
+             data_ref
+               = build2 (MEM_REF, vectype, dataref_ptr,
+                         build_int_cst (reference_alias_ptr_type
+                                          (DR_REF (first_dr)), 0));
              break;
            case dr_unaligned_supported:
              {
@@ -3655,7 +3762,18 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
                                                dr_explicit_realign,
                                                dataref_ptr, NULL);
 
-               data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
+               new_stmt = gimple_build_assign_with_ops
+                            (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
+                             build_int_cst
+                               (TREE_TYPE (dataref_ptr),
+                                -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
+               ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
+               gimple_assign_set_lhs (new_stmt, ptr);
+               vect_finish_stmt_generation (stmt, new_stmt, gsi);
+               data_ref
+                 = build2 (MEM_REF, vectype, ptr,
+                           build_int_cst (reference_alias_ptr_type
+                                            (DR_REF (first_dr)), 0));
                vec_dest = vect_create_destination_var (scalar_dest, vectype);
                new_stmt = gimple_build_assign (vec_dest, data_ref);
                new_temp = make_ssa_name (vec_dest, new_stmt);
@@ -3668,19 +3786,37 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
                bump = size_binop (MULT_EXPR, vs_minus_1,
                                   TYPE_SIZE_UNIT (scalar_type));
                ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
-               data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
+               new_stmt = gimple_build_assign_with_ops
+                            (BIT_AND_EXPR, NULL_TREE, ptr,
+                             build_int_cst
+                               (TREE_TYPE (ptr),
+                                -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
+               ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
+               gimple_assign_set_lhs (new_stmt, ptr);
+               vect_finish_stmt_generation (stmt, new_stmt, gsi);
+               data_ref
+                 = build2 (MEM_REF, vectype, ptr,
+                           build_int_cst (reference_alias_ptr_type
+                                            (DR_REF (first_dr)), 0));
                break;
              }
            case dr_explicit_realign_optimized:
-             data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
+             new_stmt = gimple_build_assign_with_ops
+                          (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
+                           build_int_cst
+                             (TREE_TYPE (dataref_ptr),
+                              -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
+             new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
+             gimple_assign_set_lhs (new_stmt, new_temp);
+             vect_finish_stmt_generation (stmt, new_stmt, gsi);
+             data_ref
+               = build2 (MEM_REF, vectype, new_temp,
+                         build_int_cst (reference_alias_ptr_type
+                                          (DR_REF (first_dr)), 0));
              break;
            default:
              gcc_unreachable ();
            }
-         /* If accesses through a pointer to vectype do not alias the original
-            memory reference we have a problem.  This should never happen. */
-         gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
-                     get_alias_set (gimple_assign_rhs1 (stmt))));
          vec_dest = vect_create_destination_var (scalar_dest, vectype);
          new_stmt = gimple_build_assign (vec_dest, data_ref);
          new_temp = make_ssa_name (vec_dest, new_stmt);
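
Each gimple_build_assign_with_ops (BIT_AND_EXPR, ...) above replaces the old ALIGN_INDIRECT_REF by masking the low bits off the pointer before building the MEM_REF. In plain C the computation is an align-down; a sketch with 16 standing in for TYPE_ALIGN_UNIT (vectype):

#include <stdint.h>

static inline void *
align_down_16 (void *p)
{
  /* -16 is ...11110000 in two's complement, so the AND clears the low
     four bits and rounds P down to a 16-byte boundary.  */
  return (void *) ((uintptr_t) p & -(uintptr_t) 16);
}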
@@ -4857,9 +4993,8 @@ supportable_widening_operation (enum tree_code code, gimple stmt,
     return false;
 
   vec_mode = TYPE_MODE (vectype);
-  if ((icode1 = optab_handler (optab1, vec_mode)->insn_code) == CODE_FOR_nothing
-       || (icode2 = optab_handler (optab2, vec_mode)->insn_code)
-                                                       == CODE_FOR_nothing)
+  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
+       || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
     return false;
 
   /* Check if it's a multi-step conversion that can be done using intermediate
@@ -4891,16 +5026,16 @@ supportable_widening_operation (enum tree_code code, gimple stmt,
           optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
 
           if (!optab3 || !optab4
-              || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
-                                                        == CODE_FOR_nothing
+              || ((icode1 = optab_handler (optab1, prev_mode))
+                 == CODE_FOR_nothing)
               || insn_data[icode1].operand[0].mode != intermediate_mode
-              || (icode2 = optab2->handlers[(int) prev_mode].insn_code)
-                                                        == CODE_FOR_nothing
+              || ((icode2 = optab_handler (optab2, prev_mode))
+                 == CODE_FOR_nothing)
               || insn_data[icode2].operand[0].mode != intermediate_mode
-              || (icode1 = optab3->handlers[(int) intermediate_mode].insn_code)
-                                                        == CODE_FOR_nothing
-              || (icode2 = optab4->handlers[(int) intermediate_mode].insn_code)
-                                                        == CODE_FOR_nothing)
+              || ((icode1 = optab_handler (optab3, intermediate_mode))
+                 == CODE_FOR_nothing)
+              || ((icode2 = optab_handler (optab4, intermediate_mode))
+                 == CODE_FOR_nothing))
             return false;
 
           VEC_quick_push (tree, *interm_types, intermediate_type);
@@ -4987,8 +5122,7 @@ supportable_narrowing_operation (enum tree_code code,
     return false;
 
   vec_mode = TYPE_MODE (vectype);
-  if ((icode1 = optab_handler (optab1, vec_mode)->insn_code)
-       == CODE_FOR_nothing)
+  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
     return false;
 
   /* Check if it's a multi-step conversion that can be done using intermediate
@@ -5011,12 +5145,11 @@ supportable_narrowing_operation (enum tree_code code,
           interm_optab = optab_for_tree_code (c1, intermediate_type,
                                               optab_default);
           if (!interm_optab
-              || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
-                                                        == CODE_FOR_nothing
+              || ((icode1 = optab_handler (optab1, prev_mode))
+                 == CODE_FOR_nothing)
               || insn_data[icode1].operand[0].mode != intermediate_mode
-              || (icode1
-                  = interm_optab->handlers[(int) intermediate_mode].insn_code)
-                 == CODE_FOR_nothing)
+              || ((icode1 = optab_handler (interm_optab, intermediate_mode))
+                 == CODE_FOR_nothing))
             return false;
 
           VEC_quick_push (tree, *interm_types, intermediate_type);