diff --git a/gcc/tree-vect-transform.c b/gcc/tree-vect-transform.c
index 846d52b..5fdbbe1 100644
--- a/gcc/tree-vect-transform.c
+++ b/gcc/tree-vect-transform.c
@@ -1,5 +1,5 @@
 /* Transformation Utilities for Loop Vectorization.
-   Copyright (C) 2003,2004,2005,2006 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
    Contributed by Dorit Naishlos <dorit@il.ibm.com>
 
 This file is part of GCC.
@@ -74,6 +74,490 @@ static void vect_update_inits_of_drs (loop_vec_info, tree);
 static int vect_min_worthwhile_factor (enum tree_code);
 
 
+/* Function vect_estimate_min_profitable_iters
+
+   Return the number of iterations required for the vector version of the
+   loop to be profitable relative to the cost of the scalar version of the
+   loop.
+
+   TODO: Take profile info into account before making vectorization
+   decisions, if available.  */
+
+int
+vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
+{
+  int i;
+  int min_profitable_iters;
+  int peel_iters_prologue;
+  int peel_iters_epilogue;
+  int vec_inside_cost = 0;
+  int vec_outside_cost = 0;
+  int scalar_single_iter_cost = 0;
+  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
+  int nbbs = loop->num_nodes;
+
+  /* Cost model disabled.  */
+  if (!flag_vect_cost_model)
+    {
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "cost model disabled.");      
+      return 0;
+    }
+
+  /* Requires loop versioning tests to handle misalignment.
+     FIXME: Make cost depend on number of stmts in may_misalign list.  */
+
+  if (LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
+    {
+      vec_outside_cost += TARG_COND_BRANCH_COST;
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "cost model: Adding cost of checks for loop "
+                 "versioning.\n");
+    }
+
+  /* Requires a prologue loop when peeling to handle misalignment. Add cost of
+     two guards, one for the peeled loop and one for the vector loop.  */
+
+  peel_iters_prologue = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
+  if (peel_iters_prologue)
+    {
+      vec_outside_cost += 2 * TARG_COND_BRANCH_COST;
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "cost model: Adding cost of checks for "
+                 "prologue.\n");
+    }
+
+  /* Requires an epilogue loop to finish up remaining iterations after the
+     vector loop.  Add cost of two guards, one for the peeled loop and one
+     for the vector loop.  */
+
+  if ((peel_iters_prologue < 0)
+      || !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
+      || LOOP_VINFO_INT_NITERS (loop_vinfo) % vf)
+    {
+      vec_outside_cost += 2 * TARG_COND_BRANCH_COST;
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "cost model : Adding cost of checks for "
+                 "epilogue.\n");
+    }
+
+  /* Count statements in scalar loop.  Using this as scalar cost for a single
+     iteration for now.
+
+     TODO: Add outer loop support.
+
+     TODO: Consider assigning different costs to different scalar
+     statements.  */
+
+  for (i = 0; i < nbbs; i++)
+    {
+      block_stmt_iterator si;
+      basic_block bb = bbs[i];
+
+      for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
+        {
+          tree stmt = bsi_stmt (si);
+          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+          if (!STMT_VINFO_RELEVANT_P (stmt_info)
+              && !STMT_VINFO_LIVE_P (stmt_info))
+            continue;
+          scalar_single_iter_cost++;
+          vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info);
+          vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
+        }
+    }
+
+  /* Add additional cost for the peeled instructions in prologue and epilogue
+     loop.
+
+     FORNOW: If we don't know the value of peel_iters for prologue or epilogue
+     at compile-time, we assume the worst.
+
+     TODO: Build an expression that represents peel_iters for prologue and
+     epilogue to be used in a run-time test.  */
+
+  peel_iters_prologue = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
+
+  if (peel_iters_prologue < 0)
+    {
+      peel_iters_prologue = vf - 1;
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "cost model: "
+                 "prologue peel iters set conservatively.");
+
+      /* If peeling for alignment is unknown, loop bound of main loop becomes
+         unknown.  */
+      peel_iters_epilogue = vf - 1;
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "cost model: "
+                 "epilogue peel iters set conservatively because "
+                 "peeling for alignment is unknown .");
+    }
+  else 
+    {
+      if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
+        {
+          peel_iters_epilogue = vf - 1;
+          if (vect_print_dump_info (REPORT_DETAILS))
+            fprintf (vect_dump, "cost model: "
+                     "epilogue peel iters set conservatively because "
+                     "loop iterations are unknown .");
+        }
+      else
+        peel_iters_epilogue
+          = (LOOP_VINFO_INT_NITERS (loop_vinfo) - peel_iters_prologue) % vf;
+    }
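+  /* E.g. (illustrative numbers only, not from any real target): with
+     niters = 17, a prologue of 3 iterations and vf = 4, the epilogue gets
+     (17 - 3) % 4 = 2 iterations.  */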
+
+  vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
+                      + (peel_iters_epilogue * scalar_single_iter_cost);
+
+  /* Calculate number of iterations required to make the vector version 
+     profitable, relative to the loop bodies only. The following condition
+     must hold true: ((SIC*VF)-VIC)*niters > VOC*VF, where
+     SIC = scalar iteration cost, VIC = vector iteration cost,
+     VOC = vector outside cost and VF = vectorization factor.  */
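+
+  /* Worked example (illustrative costs, not taken from any real target):
+     with SIC = 4, VIC = 6, VOC = 10 and VF = 4:
+       min_profitable_iters = (10 * 4) / ((4 * 4) - 6) = 40 / 10 = 4;
+     since 4 * 4 * 4 = 64 <= (6 * 4) + (10 * 4) = 64, the result below is
+     then bumped to 5.  */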
+
+  if ((scalar_single_iter_cost * vf) > vec_inside_cost)
+    {
+      if (vec_outside_cost == 0)
+        min_profitable_iters = 1;
+      else
+        {
+          min_profitable_iters = (vec_outside_cost * vf)
+                                 / ((scalar_single_iter_cost * vf)
+                                    - vec_inside_cost);
+
+          if ((scalar_single_iter_cost * vf * min_profitable_iters)
+              <= ((vec_inside_cost * min_profitable_iters)
+                  + (vec_outside_cost * vf)))
+            min_profitable_iters++;
+        }
+    }
+  /* Vector version will never be profitable.  */
+  else
+    {
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "cost model: vector iteration cost = %d "
+                 "is divisible by scalar iteration cost = %d by a factor "
+                 "greater than or equal to the vectorization factor = %d .",
+                 vec_inside_cost, scalar_single_iter_cost, vf);
+      return -1;
+    }
+
+  if (vect_print_dump_info (REPORT_DETAILS))
+    {
+      fprintf (vect_dump, "Cost model analysis: \n");
+      fprintf (vect_dump, "  Vector inside of loop cost: %d\n",
+              vec_inside_cost);
+      fprintf (vect_dump, "  Vector outside of loop cost: %d\n",
+              vec_outside_cost);
+      fprintf (vect_dump, "  Scalar cost: %d\n", scalar_single_iter_cost);
+      fprintf (vect_dump, "  prologue iterations: %d\n",
+               peel_iters_prologue);
+      fprintf (vect_dump, "  epilogue iterations: %d\n",
+               peel_iters_epilogue);
+      fprintf (vect_dump, "  Calculated minimum iters for profitability: %d\n",
+              min_profitable_iters);
+      fprintf (vect_dump, "  Actual minimum iters for profitability: %d\n",
+              min_profitable_iters < vf ? vf : min_profitable_iters);
+    }
+
+  return min_profitable_iters < vf ? vf : min_profitable_iters;
+}
+
+
+/* TODO: There is a close dependency between the vect_model_*_cost and the
+   vectorizable_* functions.  Redesign to avoid such maintenance issues.  */
+    
+/* Function vect_model_reduction_cost.  
+
+   Models cost for a reduction operation, including the vector ops 
+   generated within the strip-mine loop, the initial definition before
+   the loop, and the epilogue code that must be generated.  */
+
+static void
+vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
+                          int ncopies)
+{
+  int outer_cost = 0;
+  enum tree_code code;
+  optab optab;
+  tree vectype;
+  tree orig_stmt;
+  tree reduction_op;
+  enum machine_mode mode;
+  tree operation = GIMPLE_STMT_OPERAND (STMT_VINFO_STMT (stmt_info), 1);
+  int op_type = TREE_CODE_LENGTH (TREE_CODE (operation));
+
+  /* Cost of reduction op inside loop.  */
+  STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) += ncopies * TARG_VEC_STMT_COST;
+
+  reduction_op = TREE_OPERAND (operation, op_type-1);
+  vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
+  mode = TYPE_MODE (vectype);
+  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
+
+  if (!orig_stmt) 
+    orig_stmt = STMT_VINFO_STMT (stmt_info);
+
+  code = TREE_CODE (GIMPLE_STMT_OPERAND (orig_stmt, 1));
+
+  /* Add in cost for initial definition.  */
+  outer_cost += TARG_VEC_STMT_COST;
+
+  /* Determine cost of epilogue code.
+
+     We have a reduction operator that will reduce the vector in one statement.
+     Also requires scalar extract.  */
+
+  if (reduc_code < NUM_TREE_CODES) 
+    outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST;
+  else 
+    {
+      int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
+      tree bitsize =
+        TYPE_SIZE (TREE_TYPE (GIMPLE_STMT_OPERAND (orig_stmt, 0)));
+      int element_bitsize = tree_low_cst (bitsize, 1);
+      int nelements = vec_size_in_bits / element_bitsize;
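+
+      /* E.g. (hypothetical target): a 128-bit vector of 32-bit elements gives
+         nelements = 128 / 32 = 4; the shift-based scheme below then costs
+         2 * log2 (4) + 1 = 5 vector statements, the extract-based scheme
+         4 + 3 = 7.  */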
+
+      optab = optab_for_tree_code (code, vectype);
+
+      /* If we have a whole vector shift available, use it.  */
+      if (VECTOR_MODE_P (mode)
+          && optab->handlers[mode].insn_code != CODE_FOR_nothing)
+        /* Final reduction via vector shifts and the reduction operator.  Also
+           requires scalar extract.  */
+        outer_cost += ((exact_log2 (nelements) * 2 + 1) * TARG_VEC_STMT_COST);
+      else
+        /* Use extracts and reduction op for final reduction.  For N elements,
+           we have N extracts and N-1 reduction ops.  */
+        outer_cost += ((nelements + nelements - 1) * TARG_VEC_STMT_COST);
+    }
+
+  STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
+
+  if (vect_print_dump_info (REPORT_DETAILS))
+    fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
+             "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
+             STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
+}
+
+
+/* Function vect_model_induction_cost.
+
+   Models cost for induction operations.  */
+
+static void
+vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
+{
+  /* loop cost for vec_loop.  */
+  STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST;
+  /* prologue cost for vec_init and vec_step.  */
+  STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = 2 * TARG_VEC_STMT_COST;
+  
+  if (vect_print_dump_info (REPORT_DETAILS))
+    fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
+             "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
+             STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
+}
+
+
+/* Function vect_model_simple_cost.  
+
+   Models cost for simple operations, i.e. those that only emit ncopies of a 
+   single op.  Right now, this does not account for multiple insns that could
+   be generated for the single vector op.  We will handle that shortly.  */
+
+static void
+vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies)
+{
+  STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST;
+
+  if (vect_print_dump_info (REPORT_DETAILS))
+    fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
+             "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
+             STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
+}
+
+
+/* Function vect_cost_strided_group_size.
+
+   For strided load or store, return the group_size only if it is the first
+   load or store of a group, else return 1.  This ensures that group size is
+   only returned once per group.  */
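+
+/* Illustration (hypothetical group): for interleaved stmts {S1, S2, S3, S4}
+   with DR_GROUP_FIRST_DR == S1, this returns 4 for S1 and 1 for S2, S3 and
+   S4, so the whole-group cost is charged exactly once.  */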
+
+static int
+vect_cost_strided_group_size (stmt_vec_info stmt_info)
+{
+  tree first_stmt = DR_GROUP_FIRST_DR (stmt_info);
+
+  if (first_stmt == STMT_VINFO_STMT (stmt_info))
+    return DR_GROUP_SIZE (stmt_info);
+
+  return 1;
+}
+
+
+/* Function vect_model_store_cost
+
+   Models cost for stores.  In the case of strided accesses, one access
+   has the overhead of the strided access attributed to it.  */
+
+static void
+vect_model_store_cost (stmt_vec_info stmt_info, int ncopies)
+{
+  int cost = 0;
+  int group_size;
+
+  /* Strided access?  */
+  if (DR_GROUP_FIRST_DR (stmt_info)) 
+    group_size = vect_cost_strided_group_size (stmt_info);
+  /* Not a strided access.  */
+  else
+    group_size = 1;
+
+  /* Is this an access in a group of stores, which provide strided access?  
+     If so, add in the cost of the permutes.  */
+  if (group_size > 1) 
+    {
+      /* Uses a high and low interleave operation for each needed permute.  */
+      cost = ncopies * exact_log2(group_size) * group_size 
+             * TARG_VEC_STMT_COST;
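+
+      /* E.g. (illustrative): group_size = 4 and ncopies = 1 give
+        log2 (4) * 4 = 8 interleave statements.  */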
+
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
+                 group_size);
+
+    }
+
+  /* Costs of the stores.  */
+  cost += ncopies * TARG_VEC_STORE_COST;
+
+  STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = cost;
+
+  if (vect_print_dump_info (REPORT_DETAILS))
+    fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
+             "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
+             STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
+}
+
+
+/* Function vect_model_load_cost
+
+   Models cost for loads.  In the case of strided accesses, one access in the
+   group has the overhead of the strided access attributed to it.  Since
+   unaligned accesses are supported for loads, we also account for the cost
+   of the access scheme chosen.  */
+
+static void
+vect_model_load_cost (stmt_vec_info stmt_info, int ncopies)
+{
+  int inner_cost = 0;
+  int group_size;
+  int alignment_support_scheme;
+  tree first_stmt;
+  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
+
+  /* Strided accesses?  */
+  first_stmt = DR_GROUP_FIRST_DR (stmt_info);
+  if (first_stmt)
+    {
+      group_size = vect_cost_strided_group_size (stmt_info);
+      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
+    }
+  /* Not a strided access.  */
+  else
+    {
+      group_size = 1;
+      first_dr = dr;
+    }
+
+  alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
+
+  /* Is this an access in a group of loads providing strided access?  
+     If so, add in the cost of the permutes.  */
+  if (group_size > 1) 
+    {
+      /* Uses even and odd extract operations for each needed permute.  */
+      inner_cost = ncopies * exact_log2(group_size) * group_size
+                   * TARG_VEC_STMT_COST;
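+
+      /* E.g. (illustrative): as for stores, group_size = 4 and ncopies = 1
+        give log2 (4) * 4 = 8 extract statements.  */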
+
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
+                 group_size);
+
+    }
+
+  /* The loads themselves.  */
+  switch (alignment_support_scheme)
+    {
+    case dr_aligned:
+      {
+        inner_cost += ncopies * TARG_VEC_LOAD_COST;
+
+        if (vect_print_dump_info (REPORT_DETAILS))
+          fprintf (vect_dump, "vect_model_load_cost: aligned.");
+
+        break;
+      }
+    case dr_unaligned_supported:
+      {
+        /* Here, we assign an additional cost for the unaligned load.  */
+        inner_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST;
+
+        if (vect_print_dump_info (REPORT_DETAILS))
+          fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
+                   "hardware.");
+
+        break;
+      }
+    case dr_unaligned_software_pipeline:
+      {
+        int outer_cost = 0;
+
+        if (vect_print_dump_info (REPORT_DETAILS))
+          fprintf (vect_dump, "vect_model_load_cost: unaligned software "
+                   "pipelined.");
+
+        /* Unaligned software pipeline has a load of an address, an initial
+           load, and possibly a mask operation to "prime" the loop. However,
+           if this is an access in a group of loads, which provide strided
+           access, then the above cost should only be considered for one
+           access in the group. Inside the loop, there is a load op
+           and a realignment op.  */
+
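+        /* E.g. (illustrative): the prologue below costs 3 vector statements
+           when the mask builtin is available (address load, initial load and
+           mask computation), and 2 otherwise.  */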
+        if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1)
+          {
+            outer_cost = 2 * TARG_VEC_STMT_COST;
+            if (targetm.vectorize.builtin_mask_for_load)
+              outer_cost += TARG_VEC_STMT_COST;
+          }
+        
+        STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
+
+        inner_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
+
+        break;
+      }
+
+    default:
+      gcc_unreachable ();
+    }
+
+  STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = inner_cost;
+
+  if (vect_print_dump_info (REPORT_DETAILS))
+    fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
+             "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
+             STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
+}
+
+
 /* Function vect_get_new_vect_var.
 
    Returns a name for a new variable. The current naming scheme appends the 
@@ -151,6 +635,7 @@ vect_create_addr_base_for_vector_ref (tree stmt,
 
   /* Create base_offset */
   base_offset = size_binop (PLUS_EXPR, base_offset, init);
+  base_offset = fold_convert (sizetype, base_offset);
   dest = create_tmp_var (TREE_TYPE (base_offset), "base_off");
   add_referenced_var (dest);
   base_offset = force_gimple_operand (base_offset, &new_stmt, false, dest);  
@@ -158,7 +643,7 @@ vect_create_addr_base_for_vector_ref (tree stmt,
 
   if (offset)
     {
-      tree tmp = create_tmp_var (TREE_TYPE (base_offset), "offset");
+      tree tmp = create_tmp_var (sizetype, "offset");
       tree step; 
 
       /* For interleaved access step we divide STEP by the size of the
@@ -174,12 +659,12 @@ vect_create_addr_base_for_vector_ref (tree stmt,
       offset = fold_build2 (MULT_EXPR, TREE_TYPE (offset), offset, step);
       base_offset = fold_build2 (PLUS_EXPR, TREE_TYPE (base_offset),
                                 base_offset, offset);
-      base_offset = force_gimple_operand (base_offset, &new_stmt, false, tmp);  
+      base_offset = force_gimple_operand (base_offset, &new_stmt, false, tmp);
       append_to_statement_list_force (new_stmt, new_stmt_list);
     }
   
   /* base + base_offset */
-  addr_base = fold_build2 (PLUS_EXPR, TREE_TYPE (data_ref_base), data_ref_base,
+  addr_base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (data_ref_base), data_ref_base,
                           base_offset);
 
   vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
@@ -210,7 +695,7 @@ vect_create_addr_base_for_vector_ref (tree stmt,
    accessed in the loop by STMT, along with the def-use update chain to 
    appropriately advance the pointer through the loop iterations. Also set
    aliasing information for the pointer.  This vector pointer is used by the
-   callers to this function to create a memory reference expression for vector 
+   callers to this function to create a memory reference expression for vector
    load/store access.
 
    Input:
@@ -298,7 +783,7 @@ vect_create_data_ref_ptr (tree stmt,
   /** (2) Add aliasing information to the new vector-pointer:
           (The points-to info (DR_PTR_INFO) may be defined later.)  **/
   
-  tag = DR_MEMTAG (dr);
+  tag = DR_SYMBOL_TAG (dr);
   gcc_assert (tag);
 
   /* If tag is a variable (and NOT_A_TAG) than a new symbol memory
@@ -323,7 +808,7 @@ vect_create_data_ref_ptr (tree stmt,
 
   /* Create: p = (vectype *) initial_base  */
   vec_stmt = fold_convert (vect_ptr_type, new_temp);
-  vec_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, vect_ptr, vec_stmt);
+  vec_stmt = build_gimple_modify_stmt (vect_ptr, vec_stmt);
   vect_ptr_init = make_ssa_name (vect_ptr, vec_stmt);
   GIMPLE_STMT_OPERAND (vec_stmt, 0) = vect_ptr_init;
   new_bb = bsi_insert_on_edge_immediate (pe, vec_stmt);
@@ -410,14 +895,15 @@ bump_vector_ptr (tree dataref_ptr, tree ptr_incr, block_stmt_iterator *bsi,
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
   tree vptr_type = TREE_TYPE (dataref_ptr);
   tree ptr_var = SSA_NAME_VAR (dataref_ptr);
-  tree update = fold_convert (vptr_type, TYPE_SIZE_UNIT (vectype));
+  tree update = TYPE_SIZE_UNIT (vectype);
   tree incr_stmt;
   ssa_op_iter iter;
   use_operand_p use_p;
   tree new_dataref_ptr;
 
-  incr_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, ptr_var,
-                build2 (PLUS_EXPR, vptr_type, dataref_ptr, update));
+  incr_stmt = build_gimple_modify_stmt (ptr_var,
+                                       build2 (POINTER_PLUS_EXPR, vptr_type,
+                                               dataref_ptr, update));
   new_dataref_ptr = make_ssa_name (ptr_var, incr_stmt);
   GIMPLE_STMT_OPERAND (incr_stmt, 0) = new_dataref_ptr;
   vect_finish_stmt_generation (stmt, incr_stmt, bsi);
@@ -462,7 +948,7 @@ vect_create_destination_var (tree scalar_dest, tree vectype)
   new_name = get_name (scalar_dest);
   if (!new_name)
     new_name = "var_";
-  vec_dest = vect_get_new_vect_var (type, vect_simple_var, new_name);
+  vec_dest = vect_get_new_vect_var (type, kind, new_name);
   add_referenced_var (vec_dest);
 
   return vec_dest;
@@ -491,7 +977,7 @@ vect_init_vector (tree stmt, tree vector_var, tree vector_type)
   new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
   add_referenced_var (new_var); 
  
-  init_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, new_var, vector_var);
+  init_stmt = build_gimple_modify_stmt (new_var, vector_var);
   new_temp = make_ssa_name (new_var, init_stmt);
   GIMPLE_STMT_OPERAND (init_stmt, 0) = new_temp;
 
@@ -510,42 +996,227 @@ vect_init_vector (tree stmt, tree vector_var, tree vector_type)
 }
 
 
-/* Function vect_get_vec_def_for_operand.
-
-   OP is an operand in STMT. This function returns a (vector) def that will be
-   used in the vectorized stmt for STMT.
+/* Function get_initial_def_for_induction
 
-   In the case that OP is an SSA_NAME which is defined in the loop, then
-   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
+   Input:
+   IV_PHI - the initial value of the induction variable
 
-   In case OP is an invariant or constant, a new stmt that creates a vector def
-   needs to be introduced.  */
+   Output:
+   Return a vector variable, initialized with the first VF values of
+   the induction variable. E.g., for an iv with IV_PHI='X' and
+   evolution S, for a vector of 4 units, we want to return: 
+   [X, X + S, X + 2*S, X + 3*S].  */
 
 static tree
-vect_get_vec_def_for_operand (tree op, tree stmt, tree *scalar_def)
+get_initial_def_for_induction (tree iv_phi)
 {
-  tree vec_oprnd;
-  tree vec_stmt;
-  tree def_stmt;
-  stmt_vec_info def_stmt_info = NULL;
-  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
-  tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
-  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
+  stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
-  tree vec_inv;
-  tree vec_cst;
-  tree t = NULL_TREE;
-  tree def;
+  tree scalar_type = TREE_TYPE (PHI_RESULT_TREE (iv_phi));
+  tree vectype = get_vectype_for_scalar_type (scalar_type);
+  int nunits =  TYPE_VECTOR_SUBPARTS (vectype);
+  edge pe = loop_preheader_edge (loop);
+  basic_block new_bb;
+  block_stmt_iterator bsi;
+  tree vec, vec_init, vec_step, t;
+  tree access_fn;
+  tree new_var;
+  tree new_name;
+  tree init_stmt;
+  tree induction_phi, induc_def, new_stmt, vec_def, vec_dest;
+  tree init_expr, step_expr;
+  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
   int i;
-  enum vect_def_type dt;
-  bool is_simple_use;
-  tree vector_type;
+  bool ok;
+  int ncopies = vf / nunits;
+  tree expr;
+  stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
+  tree stmts;
+  tree stmt = NULL_TREE;
+  block_stmt_iterator si;
+  basic_block bb = bb_for_stmt (iv_phi);
 
-  if (vect_print_dump_info (REPORT_DETAILS))
-    {
-      fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
-      print_generic_expr (vect_dump, op, TDF_SLIM);
+  gcc_assert (phi_info);
+  gcc_assert (ncopies >= 1);
+
+  /* Find the first insertion point in the BB.  */
+  si = bsi_after_labels (bb);
+  stmt = bsi_stmt (si);
+
+  access_fn = analyze_scalar_evolution (loop, PHI_RESULT (iv_phi));
+  gcc_assert (access_fn);
+  ok = vect_is_simple_iv_evolution (loop->num, access_fn,
+                                   &init_expr, &step_expr);
+  gcc_assert (ok);
+
+  /* Create the vector that holds the initial_value of the induction.  */
+  new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
+  add_referenced_var (new_var);
+
+  new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
+  if (stmts)
+    {
+      new_bb = bsi_insert_on_edge_immediate (pe, stmts);
+      gcc_assert (!new_bb);
+    }
+
+  t = NULL_TREE;
+  t = tree_cons (NULL_TREE, new_name, t);
+  for (i = 1; i < nunits; i++)
+    {
+      tree tmp;
+
+      /* Create: new_name = new_name + step_expr  */
+      tmp = fold_build2 (PLUS_EXPR, scalar_type, new_name, step_expr);
+      init_stmt = build_gimple_modify_stmt (new_var, tmp);
+      new_name = make_ssa_name (new_var, init_stmt);
+      GIMPLE_STMT_OPERAND (init_stmt, 0) = new_name;
+
+      new_bb = bsi_insert_on_edge_immediate (pe, init_stmt);
+      gcc_assert (!new_bb);
+
+      if (vect_print_dump_info (REPORT_DETAILS))
+        {
+          fprintf (vect_dump, "created new init_stmt: ");
+          print_generic_expr (vect_dump, init_stmt, TDF_SLIM);
+        }
+      t = tree_cons (NULL_TREE, new_name, t);
+    }
+  vec = build_constructor_from_list (vectype, nreverse (t));
+  vec_init = vect_init_vector (stmt, vec, vectype);
+
+
+  /* Create the vector that holds the step of the induction.  */
+  expr = build_int_cst (scalar_type, vf);
+  new_name = fold_build2 (MULT_EXPR, scalar_type, expr, step_expr);
+  t = NULL_TREE;
+  for (i = 0; i < nunits; i++)
+    t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
+  vec = build_constructor_from_list (vectype, t);
+  vec_step = vect_init_vector (stmt, vec, vectype);
+
+
+  /* Create the following def-use cycle:
+     loop prolog:
+         vec_init = [X, X+S, X+2*S, X+3*S]
+         vec_step = [VF*S, VF*S, VF*S, VF*S]
+     loop:
+         vec_iv = PHI <vec_init, vec_loop>
+         ...
+         STMT
+         ...
+         vec_loop = vec_iv + vec_step;  */
+
+  /* Create the induction-phi that defines the induction-operand.  */
+  vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
+  add_referenced_var (vec_dest);
+  induction_phi = create_phi_node (vec_dest, loop->header);
+  set_stmt_info (get_stmt_ann (induction_phi),
+                 new_stmt_vec_info (induction_phi, loop_vinfo));
+  induc_def = PHI_RESULT (induction_phi);
+
+  /* Create the iv update inside the loop.  */
+  new_stmt = build_gimple_modify_stmt (NULL_TREE,
+                                      build2 (PLUS_EXPR, vectype,
+                                              induc_def, vec_step));
+  vec_def = make_ssa_name (vec_dest, new_stmt);
+  GIMPLE_STMT_OPERAND (new_stmt, 0) = vec_def;
+  bsi = bsi_for_stmt (stmt);
+  vect_finish_stmt_generation (stmt, new_stmt, &bsi);
+
+  /* Set the arguments of the phi node:  */
+  add_phi_arg (induction_phi, vec_init, loop_preheader_edge (loop));
+  add_phi_arg (induction_phi, vec_def, loop_latch_edge (loop));
+
+
+  /* In case the vectorization factor (VF) is bigger than the number
+     of elements that we can fit in a vectype (nunits), we have to generate
+     more than one vector stmt - i.e - we need to "unroll" the
+     vector stmt by a factor VF/nunits.  For more details see documentation
+     in vectorizable_operation.  */
+  
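+  /* E.g. (illustrative): VF = 8 with a V4SI vectype (nunits = 4) gives
+     ncopies = 8 / 4 = 2, so one extra copy of the induction update is
+     created below.  */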
+  if (ncopies > 1)
+    {
+      stmt_vec_info prev_stmt_vinfo;
+
+      /* Create the vector that holds the step of the induction.  */
+      expr = build_int_cst (scalar_type, nunits);
+      new_name = fold_build2 (MULT_EXPR, scalar_type, expr, step_expr);
+      t = NULL_TREE;
+      for (i = 0; i < nunits; i++)
+       t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
+      vec = build_constructor_from_list (vectype, t);
+      vec_step = vect_init_vector (stmt, vec, vectype);
+
+      vec_def = induc_def;
+      prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
+      for (i = 1; i < ncopies; i++)
+       {
+         tree tmp;
+
+         /* vec_i = vec_prev + vec_{step*nunits}  */
+         tmp = build2 (PLUS_EXPR, vectype, vec_def, vec_step);
+         new_stmt = build_gimple_modify_stmt (NULL_TREE, tmp);
+         vec_def = make_ssa_name (vec_dest, new_stmt);
+         GIMPLE_STMT_OPERAND (new_stmt, 0) = vec_def;
+         bsi = bsi_for_stmt (stmt);
+         vect_finish_stmt_generation (stmt, new_stmt, &bsi);
+
+         STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
+         prev_stmt_vinfo = vinfo_for_stmt (new_stmt); 
+       }
+    }
+
+  if (vect_print_dump_info (REPORT_DETAILS))
+    {
+      fprintf (vect_dump, "transform induction: created def-use cycle:");
+      print_generic_expr (vect_dump, induction_phi, TDF_SLIM);
+      fprintf (vect_dump, "\n");
+      print_generic_expr (vect_dump, SSA_NAME_DEF_STMT (vec_def), TDF_SLIM);
+    }
+
+  STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
+  return induc_def;
+}
+
+
+/* Function vect_get_vec_def_for_operand.
+
+   OP is an operand in STMT. This function returns a (vector) def that will be
+   used in the vectorized stmt for STMT.
+
+   In the case that OP is an SSA_NAME which is defined in the loop, then
+   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
+
+   In case OP is an invariant or constant, a new stmt that creates a vector def
+   needs to be introduced.  */
+
+static tree
+vect_get_vec_def_for_operand (tree op, tree stmt, tree *scalar_def)
+{
+  tree vec_oprnd;
+  tree vec_stmt;
+  tree def_stmt;
+  stmt_vec_info def_stmt_info = NULL;
+  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
+  tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
+  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
+  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
+  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+  tree vec_inv;
+  tree vec_cst;
+  tree t = NULL_TREE;
+  tree def;
+  int i;
+  enum vect_def_type dt;
+  bool is_simple_use;
+  tree vector_type;
+
+  if (vect_print_dump_info (REPORT_DETAILS))
+    {
+      fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
+      print_generic_expr (vect_dump, op, TDF_SLIM);
     }
 
   is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt);
@@ -634,9 +1305,10 @@ vect_get_vec_def_for_operand (tree op, tree stmt, tree *scalar_def)
     /* Case 5: operand is defined by loop-header phi - induction.  */
     case vect_induction_def:
       {
-        if (vect_print_dump_info (REPORT_DETAILS))
-          fprintf (vect_dump, "induction - unsupported.");
-        internal_error ("no support for induction"); /* FORNOW */
+       gcc_assert (TREE_CODE (def_stmt) == PHI_NODE);
+
+       /* Get the def before the loop.  */
+       return get_initial_def_for_induction (def_stmt);
       }
 
     default:
@@ -651,7 +1323,7 @@ vect_get_vec_def_for_operand (tree op, tree stmt, tree *scalar_def)
    vectorized stmt to be created (by the caller to this function) is a "copy" 
    created in case the vectorized result cannot fit in one vector, and several 
    copies of the vector-stmt are required. In this case the vector-def is 
-   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field 
+   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
    of the stmt that defines VEC_OPRND. 
    DT is the type of the vector def VEC_OPRND.
 
@@ -707,11 +1379,9 @@ vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
   tree vec_stmt_for_operand;
   stmt_vec_info def_stmt_info;
 
-  if (dt == vect_invariant_def || dt == vect_constant_def)
-    {
-      /* Do nothing; can reuse same def.  */ ;
-      return vec_oprnd;
-    }
+  /* Do nothing; can reuse same def.  */
+  if (dt == vect_invariant_def || dt == vect_constant_def)
+    return vec_oprnd;
 
   vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
   def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
@@ -756,8 +1426,6 @@ vect_finish_stmt_generation (tree stmt, tree vec_stmt,
 }
 
 
-#define ADJUST_IN_EPILOG 1
-
 /* Function get_initial_def_for_reduction
 
    Input:
@@ -765,18 +1433,18 @@ vect_finish_stmt_generation (tree stmt, tree vec_stmt,
    INIT_VAL - the initial value of the reduction variable
 
    Output:
-   SCALAR_DEF - a tree that holds a value to be added to the final result
-       of the reduction (used for "ADJUST_IN_EPILOG" - see below).
+   ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
+        of the reduction (used for adjusting the epilog - see below).
    Return a vector variable, initialized according to the operation that STMT
-       performs. This vector will be used as the initial value of the
-       vector of partial results.
+        performs. This vector will be used as the initial value of the
+        vector of partial results.
 
-   Option1 ("ADJUST_IN_EPILOG"): Initialize the vector as follows:
+   Option1 (adjust in epilog): Initialize the vector as follows:
      add:         [0,0,...,0,0]
      mult:        [1,1,...,1,1]
      min/max:     [init_val,init_val,..,init_val,init_val]
      bit and/or:  [init_val,init_val,..,init_val,init_val]
-   and when necessary (e.g. add/mult case) let the caller know 
+   and when necessary (e.g. add/mult case) let the caller know
    that it needs to adjust the result by init_val.
 
    Option2: Initialize the vector as follows:
@@ -797,84 +1465,56 @@ vect_finish_stmt_generation (tree stmt, tree vec_stmt,
    or [0,0,0,0] and let the caller know that it needs to adjust
    the result at the end by 'init_val'.
 
-   FORNOW: We use the "ADJUST_IN_EPILOG" scheme.
-   TODO: Use some cost-model to estimate which scheme is more profitable.
-*/
+   FORNOW, we are using the 'adjust in epilog' scheme, because this way the
+   initialization vector is simpler (same element in all entries).
+   A cost model should help decide between these two schemes.  */
 
 static tree
-get_initial_def_for_reduction (tree stmt, tree init_val, tree *scalar_def)
+get_initial_def_for_reduction (tree stmt, tree init_val, tree *adjustment_def)
 {
   stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
   tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
-  int nunits = GET_MODE_NUNITS (TYPE_MODE (vectype));
-  int nelements;
+  int nunits =  TYPE_VECTOR_SUBPARTS (vectype);
   enum tree_code code = TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 1));
   tree type = TREE_TYPE (init_val);
-  tree def;
-  tree vec, t = NULL_TREE;
-  bool need_epilog_adjust;
+  tree vecdef;
+  tree def_for_init;
+  tree init_def;
+  tree t = NULL_TREE;
   int i;
   tree vector_type;
 
   gcc_assert (INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type));
+  vecdef = vect_get_vec_def_for_operand (init_val, stmt, NULL);
 
   switch (code)
   {
   case WIDEN_SUM_EXPR:
   case DOT_PROD_EXPR:
   case PLUS_EXPR:
+    *adjustment_def = init_val;
+    /* Create a vector of zeros for init_def.  */
     if (INTEGRAL_TYPE_P (type))
-      def = build_int_cst (type, 0);
+      def_for_init = build_int_cst (type, 0);
     else
-      def = build_real (type, dconst0);
-
-#ifdef ADJUST_IN_EPILOG
-    /* All the 'nunits' elements are set to 0. The final result will be
-       adjusted by 'init_val' at the loop epilog.  */
-    nelements = nunits;
-    need_epilog_adjust = true;
-#else
-    /* 'nunits - 1' elements are set to 0; The last element is set to 
-        'init_val'.  No further adjustments at the epilog are needed.  */
-    nelements = nunits - 1;
-    need_epilog_adjust = false;
-#endif
+      def_for_init = build_real (type, dconst0);
+      for (i = nunits - 1; i >= 0; --i)
+    t = tree_cons (NULL_TREE, def_for_init, t);
+    vector_type = get_vectype_for_scalar_type (TREE_TYPE (def_for_init));
+    init_def = build_vector (vector_type, t);
     break;
 
   case MIN_EXPR:
   case MAX_EXPR:
-    def = init_val;
-    nelements = nunits;
-    need_epilog_adjust = false;
+    *adjustment_def = NULL_TREE;
+    init_def = vecdef;
     break;
 
   default:
     gcc_unreachable ();
   }
 
-  for (i = nelements - 1; i >= 0; --i)
-    t = tree_cons (NULL_TREE, def, t);
-
-  if (nelements == nunits - 1)
-    {
-      /* Set the last element of the vector.  */
-      t = tree_cons (NULL_TREE, init_val, t);
-      nelements += 1;
-    }
-  gcc_assert (nelements == nunits);
-
-  vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
-  if (TREE_CODE (init_val) == INTEGER_CST || TREE_CODE (init_val) == REAL_CST)
-    vec = build_vector (vector_type, t);
-  else
-    vec = build_constructor_from_list (vector_type, t);
-    
-  if (!need_epilog_adjust)
-    *scalar_def = NULL_TREE;
-  else
-    *scalar_def = init_val;
-
-  return vect_init_vector (stmt, vec, vector_type);
+  return init_def;
 }
 
 
@@ -889,7 +1529,7 @@ get_initial_def_for_reduction (tree stmt, tree init_val, tree *scalar_def)
    REDUCTION_PHI is the phi-node that carries the reduction computation.
 
    This function:
-   1. Creates the reduction def-use cycle: sets the the arguments for 
+   1. Creates the reduction def-use cycle: sets the arguments for 
       REDUCTION_PHI:
       The loop-entry argument is the vectorized initial-value of the reduction.
       The loop-latch argument is VECT_DEF - the vector of partial sums.
@@ -903,7 +1543,7 @@ get_initial_def_for_reduction (tree stmt, tree init_val, tree *scalar_def)
     
         loop:
           vec_def = phi <null, null>            # REDUCTION_PHI
-          VECT_DEF = vector_stmt                # vectorized form of STMT       
+          VECT_DEF = vector_stmt                # vectorized form of STMT
           s_loop = scalar_stmt                  # (scalar) STMT
         loop_exit:
           s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
@@ -959,7 +1599,7 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt,
   tree operation = GIMPLE_STMT_OPERAND (stmt, 1);
   int op_type;
   
-  op_type = TREE_CODE_LENGTH (TREE_CODE (operation));
+  op_type = TREE_OPERAND_LENGTH (operation);
   reduction_op = TREE_OPERAND (operation, op_type-1);
   vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
   mode = TYPE_MODE (vectype);
@@ -1019,11 +1659,11 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt,
   exit_bb = single_exit (loop)->dest;
   new_phi = create_phi_node (SSA_NAME_VAR (vect_def), exit_bb);
   SET_PHI_ARG_DEF (new_phi, single_exit (loop)->dest_idx, vect_def);
-  exit_bsi = bsi_start (exit_bb);
+  exit_bsi = bsi_after_labels (exit_bb);
 
   /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3 
-         (i.e. when reduc_code is not available) and in the final adjustment code
-         (if needed).  Also get the original scalar reduction variable as
+         (i.e. when reduc_code is not available) and in the final adjustment
+        code (if needed).  Also get the original scalar reduction variable as
          defined in the loop.  In case STMT is a "pattern-stmt" (i.e. - it 
          represents a reduction pattern), the tree-code and scalar-def are 
          taken from the original stmt that the pattern-stmt (STMT) replaces.  
@@ -1055,6 +1695,8 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt,
 
   if (reduc_code < NUM_TREE_CODES)
     {
+      tree tmp;
+
       /*** Case 1:  Create:
           v_out2 = reduc_expr <v_out1>  */
 
@@ -1062,11 +1704,11 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt,
        fprintf (vect_dump, "Reduce using direct vector reduction.");
 
       vec_dest = vect_create_destination_var (scalar_dest, vectype);
-      epilog_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, vec_dest,
-                       build1 (reduc_code, vectype,  PHI_RESULT (new_phi)));
+      tmp = build1 (reduc_code, vectype,  PHI_RESULT (new_phi));
+      epilog_stmt = build_gimple_modify_stmt (vec_dest, tmp);
       new_temp = make_ssa_name (vec_dest, epilog_stmt);
       GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp;
-      bsi_insert_after (&exit_bsi, epilog_stmt, BSI_NEW_STMT);
+      bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
 
       extract_scalar_result = true;
     }
@@ -1119,22 +1761,17 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt,
               bit_offset /= 2)
            {
              tree bitpos = size_int (bit_offset);
-
-             epilog_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node,
-                                   vec_dest,
-                                   build2 (shift_code, vectype,
-                                           new_temp, bitpos));
+             tree tmp = build2 (shift_code, vectype, new_temp, bitpos);
+             epilog_stmt = build_gimple_modify_stmt (vec_dest, tmp);
              new_name = make_ssa_name (vec_dest, epilog_stmt);
              GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_name;
-             bsi_insert_after (&exit_bsi, epilog_stmt, BSI_NEW_STMT);
+             bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
 
-             epilog_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node,
-                                   vec_dest,
-                                   build2 (code, vectype,
-                                           new_name, new_temp));
+             tmp = build2 (code, vectype, new_name, new_temp);
+             epilog_stmt = build_gimple_modify_stmt (vec_dest, tmp);
              new_temp = make_ssa_name (vec_dest, epilog_stmt);
              GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp;
-             bsi_insert_after (&exit_bsi, epilog_stmt, BSI_NEW_STMT);
+             bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
            }
 
          extract_scalar_result = true;
@@ -1161,33 +1798,31 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt,
          rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
                         bitsize_zero_node);
          BIT_FIELD_REF_UNSIGNED (rhs) = TYPE_UNSIGNED (scalar_type);
-         epilog_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node,
-                               new_scalar_dest, rhs);
+         epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, rhs);
          new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
          GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp;
-         bsi_insert_after (&exit_bsi, epilog_stmt, BSI_NEW_STMT);
+         bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
              
          for (bit_offset = element_bitsize;
               bit_offset < vec_size_in_bits;
               bit_offset += element_bitsize)
            { 
+             tree tmp;
              tree bitpos = bitsize_int (bit_offset);
              tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
                                 bitpos);
                
              BIT_FIELD_REF_UNSIGNED (rhs) = TYPE_UNSIGNED (scalar_type);
-             epilog_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node,
-                                   new_scalar_dest, rhs);      
+             epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, rhs);
              new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
              GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_name;
-             bsi_insert_after (&exit_bsi, epilog_stmt, BSI_NEW_STMT);
+             bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
 
-             epilog_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node,
-                               new_scalar_dest,
-                               build2 (code, scalar_type, new_name, new_temp));
+             tmp = build2 (code, scalar_type, new_name, new_temp);
+             epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, tmp);
              new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
              GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp;
-             bsi_insert_after (&exit_bsi, epilog_stmt, BSI_NEW_STMT);
+             bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
            }
 
          extract_scalar_result = false;
@@ -1213,11 +1848,10 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt,
 
       rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
       BIT_FIELD_REF_UNSIGNED (rhs) = TYPE_UNSIGNED (scalar_type);
-      epilog_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node,
-                           new_scalar_dest, rhs);
+      epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, rhs);
       new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
       GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp; 
-      bsi_insert_after (&exit_bsi, epilog_stmt, BSI_NEW_STMT);
+      bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
     }
 
   /* 2.4 Adjust the final result by the initial value of the reduction
@@ -1229,12 +1863,11 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt,
   
   if (scalar_initial_def)
     {
-      epilog_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node,
-                     new_scalar_dest,
-                      build2 (code, scalar_type, new_temp, scalar_initial_def));
+      tree tmp = build2 (code, scalar_type, new_temp, scalar_initial_def);
+      epilog_stmt = build_gimple_modify_stmt (new_scalar_dest, tmp);
       new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
       GIMPLE_STMT_OPERAND (epilog_stmt, 0) = new_temp;
-      bsi_insert_after (&exit_bsi, epilog_stmt, BSI_NEW_STMT);
+      bsi_insert_before (&exit_bsi, epilog_stmt, BSI_SAME_STMT);
     }
 
   /* 2.6 Replace uses of s_out0 with uses of s_out3  */
@@ -1275,28 +1908,28 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt,
    and it's STMT_VINFO_RELATED_STMT points to the last stmt in the original
    sequence that had been detected and replaced by the pattern-stmt (STMT).
   
-   In some cases of reduction patterns, the type of the reduction variable X is 
+   In some cases of reduction patterns, the type of the reduction variable X is
    different than the type of the other arguments of STMT.
    In such cases, the vectype that is used when transforming STMT into a vector
-   stmt is different than the vectype that is used to determine the 
+   stmt is different than the vectype that is used to determine the
    vectorization factor, because it consists of a different number of elements 
    than the actual number of elements that are being operated upon in parallel.
 
-   For example, consider an accumulation of shorts into an int accumulator. 
+   For example, consider an accumulation of shorts into an int accumulator.
    On some targets it's possible to vectorize this pattern operating on 8
    shorts at a time (hence, the vectype for purposes of determining the
    vectorization factor should be V8HI); on the other hand, the vectype that
-   is used to create the vector form is actually V4SI (the type of the result). 
+   is used to create the vector form is actually V4SI (the type of the result).
 
-   Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that 
-   indicates what is the actual level of parallelism (V8HI in the example), so 
-   that the right vectorization factor would be derived. This vectype 
-   corresponds to the type of arguments to the reduction stmt, and should *NOT* 
+   Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
+   indicates what is the actual level of parallelism (V8HI in the example), so
+   that the right vectorization factor would be derived. This vectype
+   corresponds to the type of arguments to the reduction stmt, and should *NOT*
    be used to create the vectorized stmt. The right vectype for the vectorized
-   stmt is obtained from the type of the result X: 
+   stmt is obtained from the type of the result X:
         get_vectype_for_scalar_type (TREE_TYPE (X))
 
-   This means that, contrary to "regular" reductions (or "regular" stmts in 
+   This means that, contrary to "regular" reductions (or "regular" stmts in
    general), the following equation:
       STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
    does *NOT* necessarily hold for reduction patterns.  */
@@ -1373,7 +2006,7 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
 
   operation = GIMPLE_STMT_OPERAND (stmt, 1);
   code = TREE_CODE (operation);
-  op_type = TREE_CODE_LENGTH (code);
+  op_type = TREE_OPERAND_LENGTH (operation);
   if (op_type != binary_op && op_type != ternary_op)
     return false;
   scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
@@ -1386,8 +2019,11 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
       op = TREE_OPERAND (operation, i);
       is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt);
       gcc_assert (is_simple_use);
-      gcc_assert (dt == vect_loop_def || dt == vect_invariant_def ||
-                  dt == vect_constant_def);
+      if (dt != vect_loop_def
+         && dt != vect_invariant_def
+         && dt != vect_constant_def
+         && dt != vect_induction_def)
+       return false;
     }
 
   op = TREE_OPERAND (operation, i);
@@ -1504,6 +2140,7 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
+      vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies);
       return true;
     }
 
@@ -1537,7 +2174,7 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
               op = TREE_OPERAND (operation, 1);
               loop_vec_def1 = vect_get_vec_def_for_operand (op, stmt, NULL);
             }
-                                                                                
+
           /* Get the vector def for the reduction variable from the phi node */
           reduc_def = PHI_RESULT (new_phi);
         }
@@ -1547,79 +2184,59 @@ vectorizable_reduction (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
           loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def0);
           if (op_type == ternary_op)
             loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def1);
-                                                                                
+
           /* Get the vector def for the reduction variable from the vectorized
              reduction operation generated in the previous iteration (j-1)  */
           reduc_def = GIMPLE_STMT_OPERAND (new_stmt ,0);
         }
-                                                                                
+
       /* Arguments are ready. create the new vector stmt.  */
-                                                                                
       if (op_type == binary_op)
         expr = build2 (code, vectype, loop_vec_def0, reduc_def);
       else
         expr = build3 (code, vectype, loop_vec_def0, loop_vec_def1, 
-                                                               reduc_def);
-      new_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, vec_dest, expr);
+                      reduc_def);
+      new_stmt = build_gimple_modify_stmt (vec_dest, expr);
       new_temp = make_ssa_name (vec_dest, new_stmt);
       GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
       vect_finish_stmt_generation (stmt, new_stmt, bsi);
-                                                                                
+
       if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
       else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
       prev_stmt_info = vinfo_for_stmt (new_stmt);
     }
-                                                                                
+
   /* Finalize the reduction-phi (set it's arguments) and create the
      epilog reduction code.  */
-  vect_create_epilog_for_reduction (new_temp, stmt, epilog_reduc_code, new_phi);                                                                                
+  vect_create_epilog_for_reduction (new_temp, stmt, epilog_reduc_code, new_phi);
   return true;
 }
 
 /* Checks if CALL can be vectorized in type VECTYPE.  Returns
-   true if the target has a vectorized version of the function,
-   or false if the function cannot be vectorized.  */
+   a function declaration if the target has a vectorized version
+   of the function, or NULL_TREE if the function cannot be vectorized.  */
 
-bool
-vectorizable_function (tree call, tree vectype)
+tree
+vectorizable_function (tree call, tree vectype_out, tree vectype_in)
 {
   tree fndecl = get_callee_fndecl (call);
+  enum built_in_function code;
 
   /* We only handle functions that do not read or clobber memory -- i.e.
      const or novops ones.  */
   if (!(call_expr_flags (call) & (ECF_CONST | ECF_NOVOPS)))
-    return false;
+    return NULL_TREE;
 
   if (!fndecl
       || TREE_CODE (fndecl) != FUNCTION_DECL
       || !DECL_BUILT_IN (fndecl))
-    return false;
-
-  if (targetm.vectorize.builtin_vectorized_function (DECL_FUNCTION_CODE (fndecl), vectype))
-    return true;
+    return NULL_TREE;
 
-  return false;
-}
-
-/* Returns an expression that performs a call to vectorized version
-   of FNDECL in type VECTYPE, with the arguments given by ARGS.
-   If extra statements need to be generated, they are inserted
-   before BSI.  */
-
-static tree
-build_vectorized_function_call (tree fndecl,
-                               tree vectype, tree args)
-{
-  tree vfndecl;
-  enum built_in_function code = DECL_FUNCTION_CODE (fndecl);
-
-  /* The target specific builtin should be available.  */
-  vfndecl = targetm.vectorize.builtin_vectorized_function (code, vectype);
-  gcc_assert (vfndecl != NULL_TREE);
-
-  return build_function_call_expr (vfndecl, args);
+  code = DECL_FUNCTION_CODE (fndecl);
+  return targetm.vectorize.builtin_vectorized_function (code, vectype_out,
+                                                       vectype_in);
 }
 
 /* Function vectorizable_call.
@@ -1635,13 +2252,34 @@ vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
   tree vec_dest;
   tree scalar_dest;
   tree operation;
-  tree op, args, type;
-  tree vec_oprnd, vargs, *pvargs_end;
-  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
-  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+  tree op, type;
+  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
+  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
+  tree vectype_out, vectype_in;
+  int nunits_in;
+  int nunits_out;
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
-  tree fndecl, rhs, new_temp, def, def_stmt;
-  enum vect_def_type dt;
+  tree fndecl, rhs, new_temp, def, def_stmt, rhs_type, lhs_type;
+  enum vect_def_type dt[2];
+  tree new_stmt;
+  int ncopies, j, nargs;
+  call_expr_arg_iterator iter;
+  tree vargs;
+  enum { NARROW, NONE, WIDEN } modifier;
+
+  if (!STMT_VINFO_RELEVANT_P (stmt_info))
+    return false;
+
+  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
+    return false;
+
+  /* FORNOW: not yet supported.  */
+  if (STMT_VINFO_LIVE_P (stmt_info))
+    {
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "value used after loop.");
+      return false;
+    }
 
   /* Is STMT a vectorizable call?   */
   if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
@@ -1653,35 +2291,89 @@ vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
   operation = GIMPLE_STMT_OPERAND (stmt, 1);
   if (TREE_CODE (operation) != CALL_EXPR)
     return false;
-   
+
+  /* Process function arguments.  */
+  rhs_type = NULL_TREE;
+  nargs = 0;
+  FOR_EACH_CALL_EXPR_ARG (op, iter, operation)
+    {
+      /* Bail out if the function has more than two arguments; we do not
+        have interesting builtin functions to vectorize with more than
+        two arguments.  */
+      if (nargs >= 2)
+       return false;
+
+      /* We can only handle calls with arguments of the same type.  */
+      if (rhs_type
+         && rhs_type != TREE_TYPE (op))
+       {
+         if (vect_print_dump_info (REPORT_DETAILS))
+           fprintf (vect_dump, "argument types differ.");
+         return false;
+       }
+      rhs_type = TREE_TYPE (op);
+
+      if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt[nargs]))
+       {
+         if (vect_print_dump_info (REPORT_DETAILS))
+           fprintf (vect_dump, "use not simple.");
+         return false;
+       }
+
+      ++nargs;
+    }
+
+  /* A call with no arguments is not vectorizable either.  */
+  if (nargs == 0)
+    return false;
+
+  vectype_in = get_vectype_for_scalar_type (rhs_type);
+  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
+
+  lhs_type = TREE_TYPE (GIMPLE_STMT_OPERAND (stmt, 0));
+  vectype_out = get_vectype_for_scalar_type (lhs_type);
+  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
+
+  /* FORNOW */
+  if (nunits_in == nunits_out / 2)
+    modifier = NARROW;
+  else if (nunits_out == nunits_in)
+    modifier = NONE;
+  else if (nunits_out == nunits_in / 2)
+    modifier = WIDEN;
+  else
+    return false;
+
   /* For now, we only vectorize functions if a target specific builtin
      is available.  TODO -- in some cases, it might be profitable to
      insert the calls for pieces of the vector, in order to be able
      to vectorize other operations in the loop.  */
-  if (!vectorizable_function (operation, vectype))
+  fndecl = vectorizable_function (operation, vectype_out, vectype_in);
+  if (fndecl == NULL_TREE)
     {
       if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "function is not vectorizable.");
 
       return false;
     }
+
   gcc_assert (ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS));
 
-  for (args = TREE_OPERAND (operation, 1); args; args = TREE_CHAIN (args))
-    {
-      op = TREE_VALUE (args);
+  if (modifier == NARROW)
+    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
+  else
+    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
 
-      if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt))
-       {
-         if (vect_print_dump_info (REPORT_DETAILS))
-           fprintf (vect_dump, "use not simple.");
-         return false;
-       }
-    }
+  /* Sanity check: make sure that at least one copy of the vectorized stmt
+     needs to be generated.  */
+  gcc_assert (ncopies >= 1);
 
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "=== vectorizable_call ===");
+      vect_model_simple_cost (stmt_info, ncopies);
       return true;
     }
 
@@ -1692,39 +2384,429 @@ vectorizable_call (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
 
   /* Handle def.  */
   scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
-  vec_dest = vect_create_destination_var (scalar_dest, vectype);
+  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
 
-  /* Handle uses.  */
-  vargs = NULL_TREE;
-  pvargs_end = &vargs;
-  for (args = TREE_OPERAND (operation, 1); args; args = TREE_CHAIN (args))
+  prev_stmt_info = NULL;
+  switch (modifier)
     {
-      op = TREE_VALUE (args);
-      vec_oprnd = vect_get_vec_def_for_operand (op, stmt, NULL);
-         
-      *pvargs_end = tree_cons (NULL_TREE, vec_oprnd, NULL_TREE);
-      pvargs_end = &TREE_CHAIN (*pvargs_end);
-    }
+    case NONE:
+      for (j = 0; j < ncopies; ++j)
+       {
+         /* Build argument list for the vectorized call.  */
+         /* FIXME: Rewrite this so that it doesn't
+            construct a temporary list.  */
+         vargs = NULL_TREE;
+         nargs = 0;
+         FOR_EACH_CALL_EXPR_ARG (op, iter, operation)
+           {
+             if (j == 0)
+               vec_oprnd0
+                 = vect_get_vec_def_for_operand (op, stmt, NULL);
+             else
+               vec_oprnd0
+                 = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0);
 
-  fndecl = get_callee_fndecl (operation);
-  rhs = build_vectorized_function_call (fndecl, vectype, vargs);
-  *vec_stmt = build2 (GIMPLE_MODIFY_STMT, vectype, vec_dest, rhs);
-  new_temp = make_ssa_name (vec_dest, *vec_stmt);
-  GIMPLE_STMT_OPERAND (*vec_stmt, 0) = new_temp;
+             vargs = tree_cons (NULL_TREE, vec_oprnd0, vargs);
 
-  vect_finish_stmt_generation (stmt, *vec_stmt, bsi);
+             ++nargs;
+           }
+         vargs = nreverse (vargs);
+
+         rhs = build_function_call_expr (fndecl, vargs);
+         new_stmt = build_gimple_modify_stmt (vec_dest, rhs);
+         new_temp = make_ssa_name (vec_dest, new_stmt);
+         GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
+
+         vect_finish_stmt_generation (stmt, new_stmt, bsi);
+
+         if (j == 0)
+           STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
+         else
+           STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+
+         prev_stmt_info = vinfo_for_stmt (new_stmt);
+       }
+
+      break;
+
+    case NARROW:
+      for (j = 0; j < ncopies; ++j)
+       {
+         /* Build argument list for the vectorized call.  */
+         /* FIXME: Rewrite this so that it doesn't
+            construct a temporary list.  */
+         vargs = NULL_TREE;
+         nargs = 0;
+         FOR_EACH_CALL_EXPR_ARG (op, iter, operation)
+           {
+             if (j == 0)
+               {
+                 vec_oprnd0
+                   = vect_get_vec_def_for_operand (op, stmt, NULL);
+                 vec_oprnd1
+                   = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0);
+               }
+             else
+               {
+                 vec_oprnd0
+                   = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd1);
+                 vec_oprnd1
+                   = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0);
+               }
+
+             vargs = tree_cons (NULL_TREE, vec_oprnd0, vargs);
+             vargs = tree_cons (NULL_TREE, vec_oprnd1, vargs);
+
+             ++nargs;
+           }
+         vargs = nreverse (vargs);
+
+         rhs = build_function_call_expr (fndecl, vargs);
+         new_stmt = build_gimple_modify_stmt (vec_dest, rhs);
+         new_temp = make_ssa_name (vec_dest, new_stmt);
+         GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
 
-  /* The call in STMT might prevent it from being removed in dce.  We however
-     cannot remove it here, due to the way the ssa name it defines is mapped
-     to the new definition.  So just replace rhs of the statement with something
-     harmless.  */
+         vect_finish_stmt_generation (stmt, new_stmt, bsi);
+
+         if (j == 0)
+           STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
+         else
+           STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+
+         prev_stmt_info = vinfo_for_stmt (new_stmt);
+       }
+
+      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
+
+      break;
+
+    case WIDEN:
+      /* No current target implements this case.  */
+      return false;
+    }
+
+  /* The call in STMT might prevent it from being removed in dce.
+     We cannot remove it here, however, because of the way the ssa
+     name it defines is mapped to the new definition.  So just
+     replace the rhs of the statement with something harmless.  */
   type = TREE_TYPE (scalar_dest);
   GIMPLE_STMT_OPERAND (stmt, 1) = fold_convert (type, integer_zero_node);
+  update_stmt (stmt);
 
   return true;
 }
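
/* A minimal sketch restating the NONE/NARROW/WIDEN classification used
   by vectorizable_call; the enum and function names are illustrative
   assumptions.  In the NARROW case each output vector consumes two
   input vectors, which is why the NARROW loop above gathers both
   vec_oprnd0 and vec_oprnd1 for every argument.  */

enum example_modifier { EXAMPLE_NARROW, EXAMPLE_NONE,
                        EXAMPLE_WIDEN, EXAMPLE_UNSUPPORTED };

static enum example_modifier
example_classify_modifier (int nunits_in, int nunits_out)
{
  if (nunits_in == nunits_out / 2)
    return EXAMPLE_NARROW;      /* e.g. V2DF args -> V4SF result */
  else if (nunits_out == nunits_in)
    return EXAMPLE_NONE;        /* e.g. V4SF args -> V4SF result */
  else if (nunits_out == nunits_in / 2)
    return EXAMPLE_WIDEN;       /* e.g. V4SF args -> V2DF result */
  else
    return EXAMPLE_UNSUPPORTED; /* FORNOW: other ratios are rejected */
}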
 
 
+/* Function vect_gen_widened_results_half
+
+   Create a vector stmt whose code, type, number of arguments, and result
+   variable are CODE, VECTYPE, OP_TYPE, and VEC_DEST, and its arguments are 
+   VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
+   In the case that CODE is a CALL_EXPR, this means that a call to DECL
+   needs to be created (DECL is a function-decl of a target-builtin).
+   STMT is the original scalar stmt that we are vectorizing.  */
+
+static tree
+vect_gen_widened_results_half (enum tree_code code, tree vectype, tree decl,
+                               tree vec_oprnd0, tree vec_oprnd1, int op_type,
+                               tree vec_dest, block_stmt_iterator *bsi,
+                              tree stmt)
+{ 
+  tree expr; 
+  tree new_stmt; 
+  tree new_temp; 
+  tree sym; 
+  ssa_op_iter iter;
+  /* Generate half of the widened result:  */ 
+  if (code == CALL_EXPR) 
+    {  
+      /* Target specific support.  */
+      if (op_type == binary_op)
+       expr = build_call_expr (decl, 2, vec_oprnd0, vec_oprnd1);
+      else
+       expr = build_call_expr (decl, 1, vec_oprnd0);
+    } 
+  else 
+    { 
+      /* Generic support.  */
+      gcc_assert (op_type == TREE_CODE_LENGTH (code)); 
+      if (op_type == binary_op) 
+        expr = build2 (code, vectype, vec_oprnd0, vec_oprnd1); 
+      else  
+        expr = build1 (code, vectype, vec_oprnd0); 
+    } 
+  new_stmt = build_gimple_modify_stmt (vec_dest, expr);
+  new_temp = make_ssa_name (vec_dest, new_stmt); 
+  GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; 
+  vect_finish_stmt_generation (stmt, new_stmt, bsi); 
+
+  if (code == CALL_EXPR)
+    {
+      FOR_EACH_SSA_TREE_OPERAND (sym, new_stmt, iter, SSA_OP_ALL_VIRTUALS)
+        {
+          if (TREE_CODE (sym) == SSA_NAME)
+            sym = SSA_NAME_VAR (sym);
+          mark_sym_for_renaming (sym);
+        }
+    }
+
+  return new_stmt;
+}
+
+
+/* Function vectorizable_conversion.
+
+   Check if STMT performs a conversion operation that can be vectorized.
+   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
+   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
+   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
+
+bool
+vectorizable_conversion (tree stmt, block_stmt_iterator * bsi,
+                                  tree * vec_stmt)
+{
+  tree vec_dest;
+  tree scalar_dest;
+  tree operation;
+  tree op0;
+  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
+  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
+  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
+  tree new_temp;
+  tree def, def_stmt;
+  enum vect_def_type dt0;
+  tree new_stmt;
+  stmt_vec_info prev_stmt_info;
+  int nunits_in;
+  int nunits_out;
+  tree vectype_out, vectype_in;
+  int ncopies, j;
+  tree expr;
+  tree rhs_type, lhs_type;
+  tree builtin_decl;
+  enum { NARROW, NONE, WIDEN } modifier;
+
+  /* Is STMT a vectorizable conversion?   */
+
+  if (!STMT_VINFO_RELEVANT_P (stmt_info))
+    return false;
+
+  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
+    return false;
+
+  if (STMT_VINFO_LIVE_P (stmt_info))
+    {
+      /* FORNOW: not yet supported.  */
+      if (vect_print_dump_info (REPORT_DETAILS))
+       fprintf (vect_dump, "value used after loop.");
+      return false;
+    }
+
+  if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
+    return false;
+
+  if (TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) != SSA_NAME)
+    return false;
+
+  operation = GIMPLE_STMT_OPERAND (stmt, 1);
+  code = TREE_CODE (operation);
+  if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
+    return false;
+
+  /* Check the types of the lhs and the rhs.  */
+  op0 = TREE_OPERAND (operation, 0);
+  rhs_type = TREE_TYPE (op0);
+  vectype_in = get_vectype_for_scalar_type (rhs_type);
+  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
+
+  scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
+  lhs_type = TREE_TYPE (scalar_dest);
+  vectype_out = get_vectype_for_scalar_type (lhs_type);
+  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
+
+  /* FORNOW */
+  if (nunits_in == nunits_out / 2)
+    modifier = NARROW;
+  else if (nunits_out == nunits_in)
+    modifier = NONE;
+  else if (nunits_out == nunits_in / 2)
+    modifier = WIDEN;
+  else
+    return false;
+
+  if (modifier == NONE)
+    gcc_assert (STMT_VINFO_VECTYPE (stmt_info) == vectype_out);
+
+  /* Bail out if the types are both integral or both non-integral.  */
+  if ((INTEGRAL_TYPE_P (rhs_type) && INTEGRAL_TYPE_P (lhs_type))
+      || (!INTEGRAL_TYPE_P (rhs_type) && !INTEGRAL_TYPE_P (lhs_type)))
+    return false;
+
+  if (modifier == NARROW)
+    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
+  else
+    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
+
+  /* Sanity check: make sure that at least one copy of the vectorized stmt
+     needs to be generated.  */
+  gcc_assert (ncopies >= 1);
+
+  /* Check the operands of the operation.  */
+  if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt0))
+    {
+      if (vect_print_dump_info (REPORT_DETAILS))
+       fprintf (vect_dump, "use not simple.");
+      return false;
+    }
+
+  /* Supportable by target?  */
+  if ((modifier == NONE
+       && !targetm.vectorize.builtin_conversion (code, vectype_in))
+      || (modifier == WIDEN
+         && !supportable_widening_operation (code, stmt, vectype_in,
+                                             &decl1, &decl2,
+                                             &code1, &code2))
+      || (modifier == NARROW
+         && !supportable_narrowing_operation (code, stmt, vectype_in,
+                                              &code1)))
+    {
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "op not supported by target.");
+      return false;
+    }
+
+  if (modifier != NONE)
+    STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
+
+  if (!vec_stmt)               /* transformation not required.  */
+    {
+      STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
+      return true;
+    }
+
+  /** Transform.  **/
+  if (vect_print_dump_info (REPORT_DETAILS))
+    fprintf (vect_dump, "transform conversion.");
+
+  /* Handle def.  */
+  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
+
+  prev_stmt_info = NULL;
+  switch (modifier)
+    {
+    case NONE:
+      for (j = 0; j < ncopies; j++)
+       {
+         tree sym;
+         ssa_op_iter iter;
+
+         if (j == 0)
+           vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
+         else
+           vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0);
+
+         builtin_decl =
+           targetm.vectorize.builtin_conversion (code, vectype_in);
+         new_stmt = build_call_expr (builtin_decl, 1, vec_oprnd0);
+
+         /* Arguments are ready. Create the new vector stmt.  */
+         new_stmt = build_gimple_modify_stmt (vec_dest, new_stmt);
+         new_temp = make_ssa_name (vec_dest, new_stmt);
+         GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
+         vect_finish_stmt_generation (stmt, new_stmt, bsi);
+         FOR_EACH_SSA_TREE_OPERAND (sym, new_stmt, iter, SSA_OP_ALL_VIRTUALS)
+           {
+             if (TREE_CODE (sym) == SSA_NAME)
+               sym = SSA_NAME_VAR (sym);
+             mark_sym_for_renaming (sym);
+           }
+
+         if (j == 0)
+           STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
+         else
+           STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+         prev_stmt_info = vinfo_for_stmt (new_stmt);
+       }
+      break;
+
+    case WIDEN:
+      /* In case the vectorization factor (VF) is bigger than the number
+        of elements that we can fit in a vectype (nunits), we have to
+        generate more than one vector stmt, i.e., we need to "unroll"
+        the vector stmt by a factor VF/nunits.  */
+      for (j = 0; j < ncopies; j++)
+       {
+         if (j == 0)
+           vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
+         else
+           vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0);
+
+         STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
+
+         /* Generate first half of the widened result:  */
+         new_stmt
+           = vect_gen_widened_results_half (code1, vectype_out, decl1, 
+                                            vec_oprnd0, vec_oprnd1,
+                                            unary_op, vec_dest, bsi, stmt);
+         if (j == 0)
+           STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
+         else
+           STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+         prev_stmt_info = vinfo_for_stmt (new_stmt);
+
+         /* Generate second half of the widened result:  */
+         new_stmt
+           = vect_gen_widened_results_half (code2, vectype_out, decl2,
+                                            vec_oprnd0, vec_oprnd1,
+                                            unary_op, vec_dest, bsi, stmt);
+         STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+         prev_stmt_info = vinfo_for_stmt (new_stmt);
+       }
+      break;
+
+    case NARROW:
+      /* In case the vectorization factor (VF) is bigger than the number
+        of elements that we can fit in a vectype (nunits), we have to
+        generate more than one vector stmt, i.e., we need to "unroll"
+        the vector stmt by a factor VF/nunits.  */
+      for (j = 0; j < ncopies; j++)
+       {
+         /* Handle uses.  */
+         if (j == 0)
+           {
+             vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
+             vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0);
+           }
+         else
+           {
+             vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd1);
+             vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0);
+           }
+
+         /* Arguments are ready. Create the new vector stmt.  */
+         expr = build2 (code1, vectype_out, vec_oprnd0, vec_oprnd1);
+         new_stmt = build_gimple_modify_stmt (vec_dest, expr);
+         new_temp = make_ssa_name (vec_dest, new_stmt);
+         GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
+         vect_finish_stmt_generation (stmt, new_stmt, bsi);
+
+         if (j == 0)
+           STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
+         else
+           STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
+
+         prev_stmt_info = vinfo_for_stmt (new_stmt);
+       }
+
+      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
+    }
+  return true;
+}
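
/* A minimal sketch of a scalar loop that vectorizable_conversion
   targets (illustrative function name).  With V4SI/V4SF vectors the
   int -> float conversion below is the NONE case (nunits_in ==
   nunits_out == 4) and is handed to the target's builtin_conversion
   hook; with a vectorization factor of 8 it would be emitted as
   ncopies = 8 / 4 = 2 vector statements.  */

void
example_convert (int *a, float *b, int n)
{
  int i;
  for (i = 0; i < n; i++)
    b[i] = (float) a[i];  /* FLOAT_EXPR: int -> float conversion */
}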
+
+
 /* Function vectorizable_assignment.
 
    Check if STMT performs an assignment (copy) that can be vectorized. 
@@ -1752,12 +2834,21 @@ vectorizable_assignment (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
   if (ncopies > 1)
     return false; /* FORNOW */
 
-  /* Is vectorizable assignment?  */
   if (!STMT_VINFO_RELEVANT_P (stmt_info))
     return false;
 
-  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_loop_def);
+  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
+    return false;
+
+  /* FORNOW: not yet supported.  */
+  if (STMT_VINFO_LIVE_P (stmt_info))
+    {
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "value used after loop.");
+      return false;
+    }
 
+  /* Is vectorizable assignment?  */
   if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
     return false;
 
@@ -1776,6 +2867,9 @@ vectorizable_assignment (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "=== vectorizable_assignment ===");
+      vect_model_simple_cost (stmt_info, ncopies);
       return true;
     }
 
@@ -1791,7 +2885,7 @@ vectorizable_assignment (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
   vec_oprnd = vect_get_vec_def_for_operand (op, stmt, NULL);
 
   /* Arguments are ready. Create the new vector stmt.  */
-  *vec_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, vec_dest, vec_oprnd);
+  *vec_stmt = build_gimple_modify_stmt (vec_dest, vec_oprnd);
   new_temp = make_ssa_name (vec_dest, *vec_stmt);
   GIMPLE_STMT_OPERAND (*vec_stmt, 0) = new_temp;
   vect_finish_stmt_generation (stmt, *vec_stmt, bsi);
@@ -1800,30 +2894,86 @@ vectorizable_assignment (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
 }
 
 
-/* Function vect_min_worthwhile_factor.
+/* Function vect_min_worthwhile_factor.
+
+   For a loop where we could vectorize the operation indicated by CODE,
+   return the minimum vectorization factor that makes it worthwhile
+   to use generic vectors.  */
+static int
+vect_min_worthwhile_factor (enum tree_code code)
+{
+  switch (code)
+    {
+    case PLUS_EXPR:
+    case MINUS_EXPR:
+    case NEGATE_EXPR:
+      return 4;
+
+    case BIT_AND_EXPR:
+    case BIT_IOR_EXPR:
+    case BIT_XOR_EXPR:
+    case BIT_NOT_EXPR:
+      return 2;
+
+    default:
+      return INT_MAX;
+    }
+}
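
/* A minimal sketch of how the threshold above is meant to be used,
   assuming the guard shape found in the operation vectorizer: when the
   target has no native vector mode for the operation, emulating it
   with generic word-mode vectors only pays off for a large enough
   vectorization factor.  */

static bool
example_worthwhile_p (enum machine_mode vec_mode, int vf,
                      enum tree_code code)
{
  /* Vectorize if the target supports the vector mode natively, or if
     the vectorization factor reaches the per-operation threshold.  */
  return VECTOR_MODE_P (vec_mode)
         || vf >= vect_min_worthwhile_factor (code);
}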
+
+
+/* Function vectorizable_induction
+
+   Check if PHI performs an induction computation that can be vectorized.
+   If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
+   phi to replace it, put it in VEC_STMT, and add it to the same basic block.
+   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
+
+bool
+vectorizable_induction (tree phi, block_stmt_iterator *bsi ATTRIBUTE_UNUSED,
+                        tree *vec_stmt)
+{
+  stmt_vec_info stmt_info = vinfo_for_stmt (phi);
+  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
+  int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
+  tree vec_def;
+
+  gcc_assert (ncopies >= 1);
+
+  if (!STMT_VINFO_RELEVANT_P (stmt_info))
+    return false;
+
+  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
+
+  if (STMT_VINFO_LIVE_P (stmt_info))
+    {
+      /* FORNOW: not yet supported.  */
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "value used after loop.");
+      return false;
+    }
+
+  if (TREE_CODE (phi) != PHI_NODE)
+    return false;
 
-   For a loop where we could vectorize the operation indicated by CODE,
-   return the minimum vectorization factor that makes it worthwhile
-   to use generic vectors.  */
-static int
-vect_min_worthwhile_factor (enum tree_code code)
-{
-  switch (code)
+  if (!vec_stmt) /* transformation not required.  */
     {
-    case PLUS_EXPR:
-    case MINUS_EXPR:
-    case NEGATE_EXPR:
-      return 4;
+      STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "=== vectorizable_induction ===");
+      vect_model_induction_cost (stmt_info, ncopies);
+      return true;
+    }
 
-    case BIT_AND_EXPR:
-    case BIT_IOR_EXPR:
-    case BIT_XOR_EXPR:
-    case BIT_NOT_EXPR:
-      return 2;
+  /** Transform.  **/
 
-    default:
-      return INT_MAX;
-    }
+  if (vect_print_dump_info (REPORT_DETAILS))
+    fprintf (vect_dump, "transform induction phi.");
+
+  vec_def = get_initial_def_for_induction (phi);
+  *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
+  return true;
 }
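
/* A minimal sketch of a scalar induction that vectorizable_induction
   handles (illustrative function name).  For V4SI vectors the phi for
   i would get, via get_initial_def_for_induction, an initial vector
   value of {0, 1, 2, 3} stepped by {4, 4, 4, 4} on each vector
   iteration; this describes the helper's assumed behavior from its
   use above, not from its definition.  */

void
example_induction (int *a, int n)
{
  int i;
  for (i = 0; i < n; i++)
    a[i] = i;  /* i is an induction variable defined by a loop phi */
}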
 
 
@@ -1864,20 +3014,21 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
 
   gcc_assert (ncopies >= 1);
 
-  /* Is STMT a vectorizable binary/unary operation?   */
   if (!STMT_VINFO_RELEVANT_P (stmt_info))
     return false;
 
-  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_loop_def);
+  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
+    return false;
 
+  /* FORNOW: not yet supported.  */
   if (STMT_VINFO_LIVE_P (stmt_info))
     {
-      /* FORNOW: not yet supported.  */
       if (vect_print_dump_info (REPORT_DETAILS))
         fprintf (vect_dump, "value used after loop.");
       return false;
     }
 
+  /* Is STMT a vectorizable binary/unary operation?   */
   if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
     return false;
 
@@ -1892,10 +3043,16 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
 
   operation = GIMPLE_STMT_OPERAND (stmt, 1);
   code = TREE_CODE (operation);
+
+  /* For pointer addition, we should use the normal plus for
+     the vector addition.  */
+  if (code == POINTER_PLUS_EXPR)
+    code = PLUS_EXPR;
+
   optab = optab_for_tree_code (code, vectype);
 
   /* Support only unary or binary operations.  */
-  op_type = TREE_CODE_LENGTH (code);
+  op_type = TREE_OPERAND_LENGTH (operation);
   if (op_type != unary_op && op_type != binary_op)
     {
       if (vect_print_dump_info (REPORT_DETAILS))
@@ -1910,7 +3067,7 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
         fprintf (vect_dump, "use not simple.");
       return false;
     }
-                                                                                
+
   if (op_type == binary_op)
     {
       op1 = TREE_OPERAND (operation, 1);
@@ -1975,6 +3132,9 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "=== vectorizable_operation ===");
+      vect_model_simple_cost (stmt_info, ncopies);
       return true;
     }
 
@@ -1996,12 +3156,12 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
      stmts that use the defs of the current stmt. The example below illustrates
     the vectorization process when VF=16 and nunits=4 (i.e., we need to create
      4 vectorized stmts):
-                                                                                
+
      before vectorization:
                                 RELATED_STMT    VEC_STMT
         S1:     x = memref      -               -
         S2:     z = x + 1       -               -
-                                                                                
+
      step 1: vectorize stmt S1 (done in vectorizable_load. See more details
              there):
                                 RELATED_STMT    VEC_STMT
@@ -2011,7 +3171,7 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
         VS1_3:  vx3 = memref3   -               -
         S1:     x = load        -               VS1_0
         S2:     z = x + 1       -               -
-                                                                                
+
      step2: vectorize stmt S2 (done here):
         To vectorize stmt S2 we first need to find the relevant vector
         def for the first operand 'x'. This is, as usual, obtained from
@@ -2038,7 +3198,7 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
         VS2_2:  vz2 = vx2 + v1  VS2_3           -
         VS2_3:  vz3 = vx3 + v1  -               -
         S2:     z = x + 1       -               VS2_0  */
-                                                                                
+
   prev_stmt_info = NULL;
   for (j = 0; j < ncopies; j++)
     {
@@ -2074,17 +3234,17 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
        }
 
       /* Arguments are ready. Create the new vector stmt.  */
-                                                                                
+
       if (op_type == binary_op)
-        new_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, vec_dest,
+        new_stmt = build_gimple_modify_stmt (vec_dest,
                     build2 (code, vectype, vec_oprnd0, vec_oprnd1));
       else
-        new_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, vec_dest,
+        new_stmt = build_gimple_modify_stmt (vec_dest,
                     build1 (code, vectype, vec_oprnd0));
       new_temp = make_ssa_name (vec_dest, new_stmt);
       GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
       vect_finish_stmt_generation (stmt, new_stmt, bsi);
-                                                                                
+
       if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
       else
@@ -2097,16 +3257,16 @@ vectorizable_operation (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
 
 
 /* Function vectorizable_type_demotion
-                                                                                
+
    Check if STMT performs a binary or unary operation that involves
    type demotion, and if it can be vectorized.
    If VEC_STMT is also passed, vectorize the STMT: create a vectorized
    stmt to replace it, put it in VEC_STMT, and insert it at BSI.
    Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
-                                                                                
+
 bool
 vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi,
-                             tree *vec_stmt)
+                           tree *vec_stmt)
 {
   tree vec_dest;
   tree scalar_dest;
@@ -2115,7 +3275,7 @@ vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi,
   tree vec_oprnd0=NULL, vec_oprnd1=NULL;
   stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
-  enum tree_code code;
+  enum tree_code code, code1 = ERROR_MARK;
   tree new_temp;
   tree def, def_stmt;
   enum vect_def_type dt0;
@@ -2128,54 +3288,53 @@ vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi,
   int j;
   tree expr;
   tree vectype_in;
-  tree scalar_type;
-  optab optab;
-  enum machine_mode vec_mode;
-                                                                                
-  /* Is STMT a vectorizable type-demotion operation?  */
-                                                                                
+
   if (!STMT_VINFO_RELEVANT_P (stmt_info))
     return false;
-                                                                                
-  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_loop_def);
-                                                                                
+
+  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
+    return false;
+
+  /* FORNOW: not yet supported.  */
   if (STMT_VINFO_LIVE_P (stmt_info))
     {
-      /* FORNOW: not yet supported.  */
       if (vect_print_dump_info (REPORT_DETAILS))
         fprintf (vect_dump, "value used after loop.");
       return false;
     }
-                                                                                
+
+  /* Is STMT a vectorizable type-demotion operation?  */
   if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
     return false;
-                                                                                
+
   if (TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) != SSA_NAME)
     return false;
-                                                                                
+
   operation = GIMPLE_STMT_OPERAND (stmt, 1);
   code = TREE_CODE (operation);
   if (code != NOP_EXPR && code != CONVERT_EXPR)
     return false;
-                                                                                
+
   op0 = TREE_OPERAND (operation, 0);
   vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
   nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
-                                                                                
+
   scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
-  scalar_type = TREE_TYPE (scalar_dest);
-  vectype_out = get_vectype_for_scalar_type (scalar_type);
+  vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
   nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
   if (nunits_in != nunits_out / 2) /* FORNOW */
     return false;
-                                                                                
+
   ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
   gcc_assert (ncopies >= 1);
 
-  if (! INTEGRAL_TYPE_P (scalar_type)
-      || !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
+  if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
+         && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
+        || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
+            && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
+            && (code == NOP_EXPR || code == CONVERT_EXPR))))
     return false;
-                                                                                
+
   /* Check the operands of the operation.  */
   if (!vect_is_simple_use (op0, loop_vinfo, &def_stmt, &def, &dt0))
     {
@@ -2183,31 +3342,27 @@ vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi,
         fprintf (vect_dump, "use not simple.");
       return false;
     }
-                                                                                
+
   /* Supportable by target?  */
-  code = VEC_PACK_MOD_EXPR;
-  optab = optab_for_tree_code (VEC_PACK_MOD_EXPR, vectype_in);
-  if (!optab)
-    return false;
-                                                                                
-  vec_mode = TYPE_MODE (vectype_in);
-  if (optab->handlers[(int) vec_mode].insn_code == CODE_FOR_nothing)
+  if (!supportable_narrowing_operation (code, stmt, vectype_in, &code1))
     return false;
-                                                                                
+
   STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
-                                                                                
+
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "=== vectorizable_demotion ===");
+      vect_model_simple_cost (stmt_info, ncopies);
       return true;
     }
-                                                                                
+
   /** Transform.  **/
-                                                                                
   if (vect_print_dump_info (REPORT_DETAILS))
     fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
-                        ncopies);
-                                                                                
+            ncopies);
+
   /* Handle def.  */
   vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
   
@@ -2221,95 +3376,35 @@ vectorizable_type_demotion (tree stmt, block_stmt_iterator *bsi,
       /* Handle uses.  */
       if (j == 0)
        {
-         enum vect_def_type dt = vect_unknown_def_type; /* Dummy */
          vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
-         vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd0);
+         vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0);
        }
       else
        {
          vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd1);
          vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt0, vec_oprnd0);
        }
-                                                                                
+
       /* Arguments are ready. Create the new vector stmt.  */
-      expr = build2 (code, vectype_out, vec_oprnd0, vec_oprnd1);
-      new_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, vec_dest, expr);
+      expr = build2 (code1, vectype_out, vec_oprnd0, vec_oprnd1);
+      new_stmt = build_gimple_modify_stmt (vec_dest, expr);
       new_temp = make_ssa_name (vec_dest, new_stmt);
       GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
       vect_finish_stmt_generation (stmt, new_stmt, bsi);
-                                                                                
+
       if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
       else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
-                                                                                
+
       prev_stmt_info = vinfo_for_stmt (new_stmt);
     }
-                                                                                
+
   *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
   return true;
 }
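
/* A minimal sketch of a scalar loop that vectorizable_type_demotion
   handles (illustrative function name).  Demoting int to short halves
   the element width, so nunits_in == nunits_out / 2 (e.g. V4SI in,
   V8HI out) and each output vector packs two input vectors, which is
   why the transform above fetches vec_oprnd0 and vec_oprnd1 on every
   copy.  */

void
example_demotion (int *a, short *b, int n)
{
  int i;
  for (i = 0; i < n; i++)
    b[i] = (short) a[i];  /* narrowing conversion: NOP_EXPR/CONVERT_EXPR */
}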
 
 
-/* Function vect_gen_widened_results_half
-
-   Create a vector stmt whose code, type, number of arguments, and result
-   variable are CODE, VECTYPE, OP_TYPE, and VEC_DEST, and its arguments are 
-   VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
-   In the case that CODE is a CALL_EXPR, this means that a call to DECL
-   needs to be created (DECL is a function-decl of a target-builtin).
-   STMT is the original scalar stmt that we are vectorizing.  */
-
-static tree
-vect_gen_widened_results_half (enum tree_code code, tree vectype, tree decl,
-                               tree vec_oprnd0, tree vec_oprnd1, int op_type,
-                               tree vec_dest, block_stmt_iterator *bsi,
-                              tree stmt)
-{ 
-  tree vec_params;
-  tree expr; 
-  tree new_stmt; 
-  tree new_temp; 
-  tree sym; 
-  ssa_op_iter iter;
-  /* Generate half of the widened result:  */ 
-  if (code == CALL_EXPR) 
-    {  
-      /* Target specific support  */ 
-      vec_params = build_tree_list (NULL_TREE, vec_oprnd0); 
-      if (op_type == binary_op) 
-        vec_params = tree_cons (NULL_TREE, vec_oprnd1, vec_params); 
-      expr = build_function_call_expr (decl, vec_params); 
-    } 
-  else 
-    { 
-      /* Generic support */ 
-      gcc_assert (op_type == TREE_CODE_LENGTH (code)); 
-      if (op_type == binary_op) 
-        expr = build2 (code, vectype, vec_oprnd0, vec_oprnd1); 
-      else  
-        expr = build1 (code, vectype, vec_oprnd0); 
-    } 
-  new_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, vec_dest, expr);
-  new_temp = make_ssa_name (vec_dest, new_stmt); 
-  GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp; 
-  vect_finish_stmt_generation (stmt, new_stmt, bsi); 
-
-  if (code == CALL_EXPR)
-    {
-      FOR_EACH_SSA_TREE_OPERAND (sym, new_stmt, iter, SSA_OP_ALL_VIRTUALS)
-        {
-          if (TREE_CODE (sym) == SSA_NAME)
-            sym = SSA_NAME_VAR (sym);
-          mark_sym_for_renaming (sym);
-        }
-    }
-
-  return new_stmt;
-}
-
-
 /* Function vectorizable_type_promotion
 
    Check if STMT performs a binary or unary operation that involves
@@ -2329,7 +3424,7 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi,
   tree vec_oprnd0=NULL, vec_oprnd1=NULL;
   stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
-  enum tree_code code, code1 = CODE_FOR_nothing, code2 = CODE_FOR_nothing;
+  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
   tree decl1 = NULL_TREE, decl2 = NULL_TREE;
   int op_type; 
   tree def, def_stmt;
@@ -2343,21 +3438,21 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi,
   int j;
   tree vectype_in;
   
-  /* Is STMT a vectorizable type-promotion operation?  */
-
   if (!STMT_VINFO_RELEVANT_P (stmt_info))
     return false;
 
-  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_loop_def);
+  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
+    return false;
 
+  /* FORNOW: not yet supported.  */
   if (STMT_VINFO_LIVE_P (stmt_info))
     {
-      /* FORNOW: not yet supported.  */
       if (vect_print_dump_info (REPORT_DETAILS))
-       fprintf (vect_dump, "value used after loop.");
+        fprintf (vect_dump, "value used after loop.");
       return false;
     }
 
+  /* Is STMT a vectorizable type-promotion operation?  */
   if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
     return false;
 
@@ -2366,14 +3461,13 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi,
 
   operation = GIMPLE_STMT_OPERAND (stmt, 1);
   code = TREE_CODE (operation);
-  if (code != NOP_EXPR && code != WIDEN_MULT_EXPR)
+  if (code != NOP_EXPR && code != CONVERT_EXPR
+      && code != WIDEN_MULT_EXPR)
     return false;
 
   op0 = TREE_OPERAND (operation, 0);
   vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
   nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
-  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
-  gcc_assert (ncopies >= 1);
 
   scalar_dest = GIMPLE_STMT_OPERAND (stmt, 0);
   vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
@@ -2381,8 +3475,14 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi,
   if (nunits_out != nunits_in / 2) /* FORNOW */
     return false;
 
-  if (! INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
-      || !INTEGRAL_TYPE_P (TREE_TYPE (op0))) 
+  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
+  gcc_assert (ncopies >= 1);
+
+  if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
+         && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
+        || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
+            && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
+            && (code == CONVERT_EXPR || code == NOP_EXPR))))
     return false;
 
   /* Check the operands of the operation.  */
@@ -2415,6 +3515,9 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi,
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "=== vectorizable_promotion ===");
+      vect_model_simple_cost (stmt_info, 2*ncopies);
       return true;
     }
 
@@ -2583,7 +3686,7 @@ vect_permute_store_chain (VEC(tree,heap) *dr_chain,
 {
   tree perm_dest, perm_stmt, vect1, vect2, high, low;
   tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
-  tree scalar_dest;
+  tree scalar_dest, tmp;
   int i;
   unsigned int j;
   VEC(tree,heap) *first, *second;
@@ -2614,13 +3717,10 @@ vect_permute_store_chain (VEC(tree,heap) *dr_chain,
          DECL_GIMPLE_REG_P (perm_dest) = 1;
          add_referenced_var (perm_dest);
           if (BYTES_BIG_ENDIAN)
-           perm_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, perm_dest,
-                               build2 (VEC_INTERLEAVE_HIGH_EXPR, vectype, 
-                                        vect1, vect2)); 
+           tmp = build2 (VEC_INTERLEAVE_HIGH_EXPR, vectype, vect1, vect2); 
          else
-            perm_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, perm_dest,
-                                build2 (VEC_INTERLEAVE_LOW_EXPR, vectype, 
-                                        vect1, vect2));
+           tmp = build2 (VEC_INTERLEAVE_LOW_EXPR, vectype, vect1, vect2);
+         perm_stmt = build_gimple_modify_stmt (perm_dest, tmp);
          high = make_ssa_name (perm_dest, perm_stmt);
          GIMPLE_STMT_OPERAND (perm_stmt, 0) = high;
          vect_finish_stmt_generation (stmt, perm_stmt, bsi);
@@ -2635,13 +3735,10 @@ vect_permute_store_chain (VEC(tree,heap) *dr_chain,
          DECL_GIMPLE_REG_P (perm_dest) = 1;
          add_referenced_var (perm_dest);
          if (BYTES_BIG_ENDIAN)
-           perm_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, perm_dest,
-                               build2 (VEC_INTERLEAVE_LOW_EXPR, vectype, 
-                                       vect1, vect2));
+           tmp = build2 (VEC_INTERLEAVE_LOW_EXPR, vectype, vect1, vect2);
          else
-            perm_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, perm_dest,
-                                build2 (VEC_INTERLEAVE_HIGH_EXPR, vectype, 
-                                        vect1, vect2));
+           tmp = build2 (VEC_INTERLEAVE_HIGH_EXPR, vectype, vect1, vect2);
+         perm_stmt = build_gimple_modify_stmt (perm_dest, tmp);
          low = make_ssa_name (perm_dest, perm_stmt);
          GIMPLE_STMT_OPERAND (perm_stmt, 0) = low;
          vect_finish_stmt_generation (stmt, perm_stmt, bsi);
@@ -2690,6 +3787,19 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
   VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
   gcc_assert (ncopies >= 1);
 
+  if (!STMT_VINFO_RELEVANT_P (stmt_info))
+    return false;
+
+  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
+    return false;
+
+  if (STMT_VINFO_LIVE_P (stmt_info))
+    {
+      if (vect_print_dump_info (REPORT_DETAILS))
+        fprintf (vect_dump, "value used after loop.");
+      return false;
+    }
+
   /* Is vectorizable store? */
 
   if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
@@ -2728,14 +3838,12 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
+      vect_model_store_cost (stmt_info, ncopies);
       return true;
     }
 
   /** Transform.  **/
 
-  if (vect_print_dump_info (REPORT_DETAILS))
-    fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
-
   if (strided_store)
     {
       first_stmt = DR_GROUP_FIRST_DR (stmt_info);
@@ -2760,6 +3868,9 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
       group_size = 1;
     }
   
+  if (vect_print_dump_info (REPORT_DETAILS))
+    fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
+
   dr_chain = VEC_alloc (tree, heap, group_size);
   oprnds = VEC_alloc (tree, heap, group_size);
 
@@ -2844,7 +3955,7 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
          /* For interleaved stores we created vectorized defs for all the 
             defs stored in OPRNDS in the previous iteration (previous copy). 
             DR_CHAIN is then used as an input to vect_permute_store_chain(), 
-            and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the 
+            and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
             next copy.
             If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
             OPRNDS are of size 1.  */
@@ -2877,8 +3988,7 @@ vectorizable_store (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
 
          data_ref = build_fold_indirect_ref (dataref_ptr);
          /* Arguments are ready. Create the new vector stmt.  */
-         new_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, data_ref, 
-                            vec_oprnd);
+         new_stmt = build_gimple_modify_stmt (data_ref, vec_oprnd);
          vect_finish_stmt_generation (stmt, new_stmt, bsi);
 
          /* Set the VDEFs for the vector pointer. If this virtual def
@@ -2989,7 +4099,7 @@ vect_setup_realignment (tree stmt, block_stmt_iterator *bsi,
   ptr = vect_create_data_ref_ptr (stmt, bsi, NULL_TREE, &init_addr, &inc, true,
                                  NULL_TREE);
   data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
-  new_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, vec_dest, data_ref);
+  new_stmt = build_gimple_modify_stmt (vec_dest, data_ref);
   new_temp = make_ssa_name (vec_dest, new_stmt);
   GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
   new_bb = bsi_insert_on_edge_immediate (pe, new_stmt);
@@ -3002,14 +4112,12 @@ vect_setup_realignment (tree stmt, block_stmt_iterator *bsi,
   if (targetm.vectorize.builtin_mask_for_load)
     {
       tree builtin_decl;
-      tree params = build_tree_list (NULL_TREE, init_addr);
 
       builtin_decl = targetm.vectorize.builtin_mask_for_load ();
-      new_stmt = build_function_call_expr (builtin_decl, params);
+      new_stmt = build_call_expr (builtin_decl, 1, init_addr);
       vec_dest = vect_create_destination_var (scalar_dest, 
                                              TREE_TYPE (new_stmt));
-      new_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, vec_dest,
-                        new_stmt);
+      new_stmt = build_gimple_modify_stmt (vec_dest, new_stmt);
       new_temp = make_ssa_name (vec_dest, new_stmt);
       GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
       new_bb = bsi_insert_on_edge_immediate (pe, new_stmt);
@@ -3166,6 +4274,7 @@ vect_permute_load_chain (VEC(tree,heap) *dr_chain,
 {
   tree perm_dest, perm_stmt, data_ref, first_vect, second_vect;
   tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
+  tree tmp;
   int i;
   unsigned int j;
 
@@ -3185,10 +4294,10 @@ vect_permute_load_chain (VEC(tree,heap) *dr_chain,
          perm_dest = create_tmp_var (vectype, "vect_perm_even");
          DECL_GIMPLE_REG_P (perm_dest) = 1;
          add_referenced_var (perm_dest);
-        
-         perm_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, perm_dest,
-                             build2 (VEC_EXTRACT_EVEN_EXPR, vectype, 
-                                     first_vect, second_vect));
+
+         tmp = build2 (VEC_EXTRACT_EVEN_EXPR, vectype,
+                       first_vect, second_vect);
+         perm_stmt = build_gimple_modify_stmt (perm_dest, tmp);
 
          data_ref = make_ssa_name (perm_dest, perm_stmt);
          GIMPLE_STMT_OPERAND (perm_stmt, 0) = data_ref;
@@ -3202,9 +4311,9 @@ vect_permute_load_chain (VEC(tree,heap) *dr_chain,
          DECL_GIMPLE_REG_P (perm_dest) = 1;
          add_referenced_var (perm_dest);
 
-         perm_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, perm_dest,
-                             build2 (VEC_EXTRACT_ODD_EXPR, vectype, 
-                                     first_vect, second_vect));
+         tmp = build2 (VEC_EXTRACT_ODD_EXPR, vectype, 
+                       first_vect, second_vect);
+         perm_stmt = build_gimple_modify_stmt (perm_dest, tmp);
          data_ref = make_ssa_name (perm_dest, perm_stmt);
          GIMPLE_STMT_OPERAND (perm_stmt, 0) = data_ref;
          vect_finish_stmt_generation (stmt, perm_stmt, bsi);
@@ -3338,20 +4447,21 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
   bool strided_load = false;
   tree first_stmt;
 
-  /* Is vectorizable load? */
   if (!STMT_VINFO_RELEVANT_P (stmt_info))
     return false;
 
-  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_loop_def);
+  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
+    return false;
 
+  /* FORNOW: not yet supported.  */
   if (STMT_VINFO_LIVE_P (stmt_info))
     {
-      /* FORNOW: not yet supported.  */
       if (vect_print_dump_info (REPORT_DETAILS))
         fprintf (vect_dump, "value used after loop.");
       return false;
     }
 
+  /* Is vectorizable load? */
   if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
     return false;
 
@@ -3392,14 +4502,15 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
   if (!vec_stmt) /* transformation not required.  */
     {
       STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
+      vect_model_load_cost (stmt_info, ncopies);
       return true;
     }
 
-  /** Transform.  **/
-
   if (vect_print_dump_info (REPORT_DETAILS))
     fprintf (vect_dump, "transform load.");
 
+  /** Transform.  **/
+
   if (strided_load)
     {
       first_stmt = DR_GROUP_FIRST_DR (stmt_info);
@@ -3566,8 +4677,7 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
              gcc_unreachable ();
            }
          vec_dest = vect_create_destination_var (scalar_dest, vectype);
-         new_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, vec_dest,
-                            data_ref);
+         new_stmt = build_gimple_modify_stmt (vec_dest, data_ref);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
          vect_finish_stmt_generation (stmt, new_stmt, bsi);
@@ -3585,8 +4695,7 @@ vectorizable_load (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
              vec_dest = vect_create_destination_var (scalar_dest, vectype);
              new_stmt =
                build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq, realignment_token);
-             new_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, vec_dest,
-                                new_stmt);
+             new_stmt = build_gimple_modify_stmt (vec_dest, new_stmt);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              GIMPLE_STMT_OPERAND (new_stmt, 0) = new_temp;
              vect_finish_stmt_generation (stmt, new_stmt, bsi);
@@ -3635,13 +4744,14 @@ vectorizable_live_operation (tree stmt,
   stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
   int i;
-  enum tree_code code;
   int op_type;
   tree op;
   tree def, def_stmt;
   enum vect_def_type dt; 
 
-  if (!STMT_VINFO_LIVE_P (stmt_info))
+  gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
+
+  if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
     return false;
 
   if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
@@ -3651,9 +4761,7 @@ vectorizable_live_operation (tree stmt,
     return false;
 
   operation = GIMPLE_STMT_OPERAND (stmt, 1);
-  code = TREE_CODE (operation);
-
-  op_type = TREE_CODE_LENGTH (code);
+  op_type = TREE_OPERAND_LENGTH (operation);
 
   /* FORNOW: support only if all uses are invariant. This means
      that the scalar operations can remain in place, unvectorized.
@@ -3756,16 +4864,18 @@ vectorizable_condition (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
   if (!STMT_VINFO_RELEVANT_P (stmt_info))
     return false;
 
-  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_loop_def);
+  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_loop_def)
+    return false;
 
+  /* FORNOW: not yet supported.  */
   if (STMT_VINFO_LIVE_P (stmt_info))
     {
-      /* FORNOW: not yet supported.  */
       if (vect_print_dump_info (REPORT_DETAILS))
         fprintf (vect_dump, "value used after loop.");
       return false;
     }
 
+  /* Is vectorizable conditional operation?  */
   if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
     return false;
 
@@ -3837,8 +4947,7 @@ vectorizable_condition (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
   vec_cond_expr = build3 (VEC_COND_EXPR, vectype, 
                          vec_compare, vec_then_clause, vec_else_clause);
 
-  *vec_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node, vec_dest,
-                     vec_cond_expr);
+  *vec_stmt = build_gimple_modify_stmt (vec_dest, vec_cond_expr);
   new_temp = make_ssa_name (vec_dest, *vec_stmt);
   GIMPLE_STMT_OPERAND (*vec_stmt, 0) = new_temp;
   vect_finish_stmt_generation (stmt, *vec_stmt, bsi);
@@ -3859,105 +4968,108 @@ vect_transform_stmt (tree stmt, block_stmt_iterator *bsi, bool *strided_store)
   tree orig_stmt_in_pattern;
   bool done;
 
-  if (STMT_VINFO_RELEVANT_P (stmt_info))
+  switch (STMT_VINFO_TYPE (stmt_info))
     {
-      switch (STMT_VINFO_TYPE (stmt_info))
-      {
-      case type_demotion_vec_info_type:
-        done = vectorizable_type_demotion (stmt, bsi, &vec_stmt);
-        gcc_assert (done);
-        break;
-                                                                                
-      case type_promotion_vec_info_type:
-       done = vectorizable_type_promotion (stmt, bsi, &vec_stmt);
-       gcc_assert (done);
-       break;
-
-      case op_vec_info_type:
-       done = vectorizable_operation (stmt, bsi, &vec_stmt);
-       gcc_assert (done);
-       break;
-
-      case assignment_vec_info_type:
-       done = vectorizable_assignment (stmt, bsi, &vec_stmt);
-       gcc_assert (done);
-       break;
-
-      case load_vec_info_type:
-       done = vectorizable_load (stmt, bsi, &vec_stmt);
-       gcc_assert (done);
-       break;
-
-      case store_vec_info_type:
-       done = vectorizable_store (stmt, bsi, &vec_stmt);
-       gcc_assert (done);
-       if (DR_GROUP_FIRST_DR (stmt_info))
-         {
-           /* In case of interleaving, the whole chain is vectorized when the
-              last store in the chain is reached. Store stmts before the last
-              one are skipped, and there vec_stmt_info shouldn't be freed
-              meanwhile.  */
-           *strided_store = true;
-           if (STMT_VINFO_VEC_STMT (stmt_info))
-             is_store = true;
+    case type_demotion_vec_info_type:
+      done = vectorizable_type_demotion (stmt, bsi, &vec_stmt);
+      gcc_assert (done);
+      break;
+
+    case type_promotion_vec_info_type:
+      done = vectorizable_type_promotion (stmt, bsi, &vec_stmt);
+      gcc_assert (done);
+      break;
+
+    case type_conversion_vec_info_type:
+      done = vectorizable_conversion (stmt, bsi, &vec_stmt);
+      gcc_assert (done);
+      break;
+
+    case induc_vec_info_type:
+      done = vectorizable_induction (stmt, bsi, &vec_stmt);
+      gcc_assert (done);
+      break;
+
+    case op_vec_info_type:
+      done = vectorizable_operation (stmt, bsi, &vec_stmt);
+      gcc_assert (done);
+      break;
+
+    case assignment_vec_info_type:
+      done = vectorizable_assignment (stmt, bsi, &vec_stmt);
+      gcc_assert (done);
+      break;
+
+    case load_vec_info_type:
+      done = vectorizable_load (stmt, bsi, &vec_stmt);
+      gcc_assert (done);
+      break;
+
+    case store_vec_info_type:
+      done = vectorizable_store (stmt, bsi, &vec_stmt);
+      gcc_assert (done);
+      if (DR_GROUP_FIRST_DR (stmt_info))
+       {
+         /* In case of interleaving, the whole chain is vectorized when the
+            last store in the chain is reached. Store stmts before the last
+            one are skipped, and their vec_stmt_info shouldn't be freed
+            meanwhile.  */
+         *strided_store = true;
+         if (STMT_VINFO_VEC_STMT (stmt_info))
+           is_store = true;
          }
-       else
-         is_store = true;
-       break;
+      else
+       is_store = true;
+      break;
 
-      case condition_vec_info_type:
-       done = vectorizable_condition (stmt, bsi, &vec_stmt);
-       gcc_assert (done);
-       break;
+    case condition_vec_info_type:
+      done = vectorizable_condition (stmt, bsi, &vec_stmt);
+      gcc_assert (done);
+      break;
 
-      case call_vec_info_type:
-       done = vectorizable_call (stmt, bsi, &vec_stmt);
-       break;
+    case call_vec_info_type:
+      done = vectorizable_call (stmt, bsi, &vec_stmt);
+      break;
 
-      default:
-       if (vect_print_dump_info (REPORT_DETAILS))
-         fprintf (vect_dump, "stmt not supported.");
-       gcc_unreachable ();
-      }
+    case reduc_vec_info_type:
+      done = vectorizable_reduction (stmt, bsi, &vec_stmt);
+      gcc_assert (done);
+      break;
 
-      gcc_assert (vec_stmt || *strided_store);
-      if (vec_stmt)
+    default:
+      if (!STMT_VINFO_LIVE_P (stmt_info))
        {
-         STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
-         orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
-         if (orig_stmt_in_pattern)
-           {
-             stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
-             if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
-               {
-                 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
-                 
-                 /* STMT was inserted by the vectorizer to replace a 
-                    computation idiom.  ORIG_STMT_IN_PATTERN is a stmt in the 
-                    original sequence that computed this idiom.  We need to 
-                    record a pointer to VEC_STMT in the stmt_info of 
-                    ORIG_STMT_IN_PATTERN.  See more details in the 
-                    documentation of vect_pattern_recog.  */
-
-                 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
-               }
-           }
+         if (vect_print_dump_info (REPORT_DETAILS))
+           fprintf (vect_dump, "stmt not supported.");
+         gcc_unreachable ();
        }
     }
 
-  if (STMT_VINFO_LIVE_P (stmt_info))
+  if (STMT_VINFO_LIVE_P (stmt_info)
+      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
     {
-      switch (STMT_VINFO_TYPE (stmt_info))
-      {
-      case reduc_vec_info_type:
-        done = vectorizable_reduction (stmt, bsi, &vec_stmt);
-        gcc_assert (done);
-        break;
+      done = vectorizable_live_operation (stmt, bsi, &vec_stmt);
+      gcc_assert (done);
+    }
 
-      default:
-        done = vectorizable_live_operation (stmt, bsi, &vec_stmt);
-        gcc_assert (done);
-      }
+  if (vec_stmt)
+    {
+      STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
+      orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
+      if (orig_stmt_in_pattern)
+       {
+         stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
+         /* STMT was inserted by the vectorizer to replace a computation idiom.
+            ORIG_STMT_IN_PATTERN is a stmt in the original sequence that 
+            computed this idiom.  We need to record a pointer to VEC_STMT in 
+            the stmt_info of ORIG_STMT_IN_PATTERN.  See more details in the 
+            documentation of vect_pattern_recog.  */
+         if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
+           {
+             gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
+             STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
+           }
+       }
     }
 
   return is_store; 
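For orientation, a minimal sketch of the contract the restructured function
keeps with its caller (mirroring the consumer loop in vect_transform_loop
further below; the surrounding declarations are assumed):

    bool strided_store = false;
    bool is_store = vect_transform_stmt (stmt, &si, &strided_store);
    if (is_store)
      {
        /* A plain vectorized store: the scalar stmt is dead now.  For an
           interleaved chain (strided_store), removal is deferred until
           the last store of the chain has been transformed, and then the
           whole chain is taken out at once.  */
      }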
@@ -4123,7 +5235,7 @@ update_vuses_to_preheader (tree stmt, struct loop *loop)
          tree phi;
          bool updated = false;
 
-         for (phi = phi_nodes (header_bb); phi; phi = TREE_CHAIN (phi))
+         for (phi = phi_nodes (header_bb); phi; phi = PHI_CHAIN (phi))
            {
              if (SSA_NAME_VAR (PHI_RESULT (phi)) == name_var)
                {
@@ -4240,12 +5352,21 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
       init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, 
                                                               loop->num));
 
-      ni = fold_build2 (PLUS_EXPR, TREE_TYPE (init_expr),
-                       fold_build2 (MULT_EXPR, TREE_TYPE (init_expr),
-                                    fold_convert (TREE_TYPE (init_expr), 
-                                                  niters), 
-                                    step_expr),
-                       init_expr);
+      if (POINTER_TYPE_P (TREE_TYPE (init_expr)))
+       ni = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (init_expr), 
+                         init_expr, 
+                         fold_convert (sizetype, 
+                                       fold_build2 (MULT_EXPR, TREE_TYPE (niters),
+                                                    niters, step_expr)));
+      else
+       ni = fold_build2 (PLUS_EXPR, TREE_TYPE (init_expr),
+                         fold_build2 (MULT_EXPR, TREE_TYPE (init_expr),
+                                      fold_convert (TREE_TYPE (init_expr),
+                                                    niters),
+                                      step_expr),
+                         init_expr);
 
       var = create_tmp_var (TREE_TYPE (init_expr), "tmp");
       add_referenced_var (var);
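The new POINTER_PLUS_EXPR branch matters because GIMPLE pointer arithmetic
takes a byte offset of type sizetype rather than an addend in the pointer's
own type.  A worked instance under assumed values, an int *p induction
variable with step_expr = 4 bytes and niters = 16 peeled iterations:

    /* Integer IV:  ni = init + niters * step          (in the IV's type)
       Pointer IV:  ni = init +p (sizetype) (16 * 4),  i.e. init + 64
       bytes, where +p stands for POINTER_PLUS_EXPR.  */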
@@ -4283,6 +5404,8 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio)
   basic_block preheader;
   int loop_num;
   unsigned int th;
+  int min_scalar_loop_bound;
+  int min_profitable_iters;
 
   if (vect_print_dump_info (REPORT_DETAILS))
     fprintf (vect_dump, "=== vect_do_peeling_for_loop_bound ===");
@@ -4298,11 +5421,28 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio)
                                   &ratio_mult_vf_name, ratio);
 
   loop_num  = loop->num; 
-  /* Threshold for vectorized loop.  */
-  th = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)) * 
-                       LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+
+  /* Analyze cost to set threshold for the vectorized loop.  */
+  min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo);
+
+  min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND))
+                          * LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+
+  /* Use the cost model only if it is more conservative than the
+     user-specified threshold.  */
+
+  th = (unsigned) min_scalar_loop_bound;
+  if (min_profitable_iters
+      && (!min_scalar_loop_bound
+          || min_profitable_iters > min_scalar_loop_bound))
+    th = (unsigned) min_profitable_iters;
+
+  if (th && vect_print_dump_info (REPORT_DETAILS))
+    fprintf (vect_dump, "vectorization may not be profitable for fewer "
+             "than %d iterations.", (int) th);
+
   new_loop = slpeel_tree_peel_loop_to_edge (loop, single_exit (loop),
-                                           ratio_mult_vf_name, ni_name, false, th);
+                                            ratio_mult_vf_name, ni_name, false,
+                                            th);
   gcc_assert (new_loop);
   gcc_assert (loop_num == loop->num);
 #ifdef ENABLE_CHECKING
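Numerically, with illustrative values not taken from the source: given
PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND) = 2 and a vectorization factor of 4,
min_scalar_loop_bound = 8; if the cost model returns
min_profitable_iters = 12, the more conservative value wins:

    /* th = the stricter of the two bounds, treating 0 as "unset".  */
    th = (unsigned) min_scalar_loop_bound;        /* 2 * 4 = 8  */
    if (min_profitable_iters > min_scalar_loop_bound)
      th = (unsigned) min_profitable_iters;       /* th = 12    */

so the runtime guard created by slpeel_tree_peel_loop_to_edge sends
iteration counts below 12 to the scalar version of the loop.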
@@ -4357,13 +5497,17 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio)
    prolog_niters = min ( LOOP_NITERS , 
                         (VF/group_size - addr_mis/elem_size)&(VF/group_size-1) )
         where group_size is the size of the interleaved group.
-*/
+
+   The above formulas assume that VF == number of elements in the vector.
+   This may not hold when there are multiple types in the loop: in that
+   case, for some of the data references in the loop the VF does not
+   represent the number of elements that fit in the vector.  Therefore,
+   instead of VF we use TYPE_VECTOR_SUBPARTS.  */
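An illustrative case, assuming 16-byte vectors: a loop touching both
short (V8HI, 8 subparts) and int (V4SI, 4 subparts) data gets VF = 8, yet
each V4SI vector holds only 4 elements, so the computation below must use
TYPE_VECTOR_SUBPARTS (vectype) = 4 for such a data reference rather than
VF = 8.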
 
 static tree 
 vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
 {
   struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
-  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
   tree var, stmt;
   tree iters, iters_name;
@@ -4376,6 +5520,7 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
   tree niters_type = TREE_TYPE (loop_niters);
   int group_size = 1;
   int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
+  int nelements = TYPE_VECTOR_SUBPARTS (vectype);
 
   if (DR_GROUP_FIRST_DR (stmt_info))
     {
@@ -4396,7 +5541,7 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
       if (vect_print_dump_info (REPORT_DETAILS))
         fprintf (vect_dump, "known alignment = %d.", byte_misalign);
       iters = build_int_cst (niters_type, 
-                            (vf - elem_misalign)&(vf/group_size-1));
+                            (nelements - elem_misalign)
+                            & (nelements / group_size - 1));
     }
   else
     {
@@ -4408,9 +5553,9 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
       tree type = lang_hooks.types.type_for_size (tree_low_cst (size, 1), 1);
       tree vectype_size_minus_1 = build_int_cst (type, vectype_align - 1);
       tree elem_size_log =
-        build_int_cst (type, exact_log2 (vectype_align/vf));
-      tree vf_minus_1 = build_int_cst (type, vf - 1);
-      tree vf_tree = build_int_cst (type, vf);
+        build_int_cst (type, exact_log2 (vectype_align/nelements));
+      tree nelements_minus_1 = build_int_cst (type, nelements - 1);
+      tree nelements_tree = build_int_cst (type, nelements);
       tree byte_misalign;
       tree elem_misalign;
 
@@ -4419,15 +5564,15 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
   
       /* Create:  byte_misalign = addr & (vectype_size - 1)  */
       byte_misalign = 
-        fold_build2 (BIT_AND_EXPR, type, start_addr, vectype_size_minus_1);
+        fold_build2 (BIT_AND_EXPR, type, fold_convert (type, start_addr),
+                     vectype_size_minus_1);
   
       /* Create:  elem_misalign = byte_misalign / element_size  */
       elem_misalign =
         fold_build2 (RSHIFT_EXPR, type, byte_misalign, elem_size_log);
 
-      /* Create:  (niters_type) (VF - elem_misalign)&(VF - 1)  */
-      iters = fold_build2 (MINUS_EXPR, type, vf_tree, elem_misalign);
-      iters = fold_build2 (BIT_AND_EXPR, type, iters, vf_minus_1);
+      /* Create:  (niters_type) (nelements - elem_misalign)&(nelements - 1)  */
+      iters = fold_build2 (MINUS_EXPR, type, nelements_tree, elem_misalign);
+      iters = fold_build2 (BIT_AND_EXPR, type, iters, nelements_minus_1);
       iters = fold_convert (niters_type, iters);
     }
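A worked trace of both paths, assuming 16-byte vectors of 4-byte ints
(vectype_align = 16, nelements = 4, elem_size_log = 2) and group_size = 1:

    /* Compile-time case:  elem_misalign = 2
         iters = (4 - 2) & (4/1 - 1) = 2.
       Runtime case, start_addr = 0x1008:
         byte_misalign = 0x1008 & 15 = 8
         elem_misalign = 8 >> 2      = 2
         iters = (4 - 2) & 3         = 2
       i.e. two scalar iterations (8 bytes) bring the access to a
       16-byte boundary.  */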
 
@@ -4482,8 +5627,8 @@ vect_update_init_of_dr (struct data_reference *dr, tree niters)
    NITERS iterations were peeled from the loop represented by LOOP_VINFO.  
    This function updates the information recorded for the data references in 
    the loop to account for the fact that the first NITERS iterations had 
-   already been executed.  Specifically, it updates the initial_condition of the
-   access_function of all the data_references in the loop.  */
+   already been executed.  Specifically, it updates the initial_condition of
+   the access_function of all the data_references in the loop.  */
 
 static void
 vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters)
@@ -4492,7 +5637,7 @@ vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters)
   VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
   struct data_reference *dr;
 
-  if (vect_dump && (dump_flags & TDF_DETAILS))
+  if (vect_print_dump_info (REPORT_DETAILS))
     fprintf (vect_dump, "=== vect_update_inits_of_drs ===");
 
   for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
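Each data reference is then adjusted by vect_update_init_of_dr (partially
shown above): the access function's initial condition is advanced past the
peeled iterations.  Illustratively, under assumed values: for an int a[N]
accessed as a[i] with a step of 4 bytes, peeling niters = 2 moves the
initial byte offset from 0 to 2 * 4 = 8.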
@@ -4577,7 +5722,7 @@ vect_create_cond_for_align_checks (loop_vec_info loop_vinfo,
 {
   VEC(tree,heap) *may_misalign_stmts
     = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
-  tree ref_stmt;
+  tree ref_stmt, tmp;
   int mask = LOOP_VINFO_PTR_MASK (loop_vinfo);
   tree mask_cst;
   unsigned int i;
@@ -4621,8 +5766,7 @@ vect_create_cond_for_align_checks (loop_vec_info loop_vinfo,
       add_referenced_var (addr_tmp);
       addr_tmp_name = make_ssa_name (addr_tmp, NULL_TREE);
       addr_stmt = fold_convert (int_ptrsize_type, addr_base);
-      addr_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node,
-                          addr_tmp_name, addr_stmt);
+      addr_stmt = build_gimple_modify_stmt (addr_tmp_name, addr_stmt);
       SSA_NAME_DEF_STMT (addr_tmp_name) = addr_stmt;
       append_to_statement_list_force (addr_stmt, cond_expr_stmt_list);
 
@@ -4635,11 +5779,9 @@ vect_create_cond_for_align_checks (loop_vec_info loop_vinfo,
           or_tmp = create_tmp_var (int_ptrsize_type, tmp_name);
           add_referenced_var (or_tmp);
           new_or_tmp_name = make_ssa_name (or_tmp, NULL_TREE);
-          or_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node,
-                           new_or_tmp_name,
-                            build2 (BIT_IOR_EXPR, int_ptrsize_type,
-                                   or_tmp_name,
-                                    addr_tmp_name));
+         tmp = build2 (BIT_IOR_EXPR, int_ptrsize_type,
+                       or_tmp_name, addr_tmp_name);
+          or_stmt = build_gimple_modify_stmt (new_or_tmp_name, tmp);
           SSA_NAME_DEF_STMT (new_or_tmp_name) = or_stmt;
           append_to_statement_list_force (or_stmt, cond_expr_stmt_list);
           or_tmp_name = new_or_tmp_name;
@@ -4656,10 +5798,8 @@ vect_create_cond_for_align_checks (loop_vec_info loop_vinfo,
   add_referenced_var (and_tmp);
   and_tmp_name = make_ssa_name (and_tmp, NULL_TREE);
 
-  and_stmt = build2 (GIMPLE_MODIFY_STMT, void_type_node,
-                     and_tmp_name,
-                     build2 (BIT_AND_EXPR, int_ptrsize_type,
-                             or_tmp_name, mask_cst));
+  tmp = build2 (BIT_AND_EXPR, int_ptrsize_type, or_tmp_name, mask_cst);
+  and_stmt = build_gimple_modify_stmt (and_tmp_name, tmp);
   SSA_NAME_DEF_STMT (and_tmp_name) = and_stmt;
   append_to_statement_list_force (and_stmt, cond_expr_stmt_list);
 
@@ -4683,7 +5823,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
   basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
   int nbbs = loop->num_nodes;
-  block_stmt_iterator si;
+  block_stmt_iterator si, next_si;
   int i;
   tree ratio = NULL;
   int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
@@ -4790,11 +5930,39 @@ vect_transform_loop (loop_vec_info loop_vinfo)
   for (i = 0; i < nbbs; i++)
     {
       basic_block bb = bbs[i];
+      stmt_vec_info stmt_info;
+      tree phi;
+
+      for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
+        {
+         if (vect_print_dump_info (REPORT_DETAILS))
+           {
+             fprintf (vect_dump, "------>vectorizing phi: ");
+             print_generic_expr (vect_dump, phi, TDF_SLIM);
+           }
+         stmt_info = vinfo_for_stmt (phi);
+         if (!stmt_info)
+           continue;
+         if (!STMT_VINFO_RELEVANT_P (stmt_info)
+             && !STMT_VINFO_LIVE_P (stmt_info))
+           continue;
+
+         if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
+               != (unsigned HOST_WIDE_INT) vectorization_factor)
+             && vect_print_dump_info (REPORT_DETAILS))
+           fprintf (vect_dump, "multiple-types.");
+
+         if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
+           {
+             if (vect_print_dump_info (REPORT_DETAILS))
+               fprintf (vect_dump, "transform phi.");
+             vect_transform_stmt (phi, NULL, NULL);
+           }
+       }
 
       for (si = bsi_start (bb); !bsi_end_p (si);)
        {
          tree stmt = bsi_stmt (si);
-         stmt_vec_info stmt_info;
          bool is_store;
 
          if (vect_print_dump_info (REPORT_DETAILS))
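An induction phi accepted above reaches the new vectorizable_induction case
of vect_transform_stmt.  A hedged picture of the result for a simple
counter i = 0, 1, 2, ... with a vectorization factor of 4 (values
illustrative):

    /* The vector IV is seeded with the first VF scalar values and is
       stepped by VF times the scalar step once per vector iteration:
         vect_iv      = { 0, 1, 2, 3 }
         vect_iv_step = { 4, 4, 4, 4 }  */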
@@ -4811,6 +5979,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
              continue;
            }
 
+         gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
          if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
                 != (unsigned HOST_WIDE_INT) vectorization_factor)
              && vect_print_dump_info (REPORT_DETAILS))
@@ -4836,12 +6005,14 @@ vect_transform_loop (loop_vec_info loop_vinfo)
 
                  while (next)
                    {
+                     next_si = bsi_for_stmt (next);
                      next_stmt_info = vinfo_for_stmt (next);
                      /* Free the attached stmt_vec_info and remove the stmt.  */
                      ann = stmt_ann (next);
                      tmp = DR_GROUP_NEXT_DR (next_stmt_info);
                      free (next_stmt_info);
                      set_stmt_info (ann, NULL);
+                     bsi_remove (&next_si, true);
                      next = tmp;
                    }
                  bsi_remove (&si, true);
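Note the ordering in the walk above: DR_GROUP_NEXT_DR (next_stmt_info) is
copied into tmp before free (next_stmt_info), because the chain link lives
inside the info being freed, and each scalar store needs its own iterator
from bsi_for_stmt since si points only at the last store of the chain.
Schematically, for a chain S1 -> S2 -> S3:

    /* At S3 (the last store): remove S1, S2, S3 in one pass;
       per element:  tmp = DR_GROUP_NEXT_DR (info);  free (info);
                     bsi_remove (&next_si, true);    next = tmp;  */

This one-pass deletion is also why the "skipped interleaved store" branch
removed in the following hunk is no longer needed.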
@@ -4857,16 +6028,6 @@ vect_transform_loop (loop_vec_info loop_vinfo)
                  continue;
                }
            }
-         else
-           {
-             if (strided_store)
-               {
-                 /* This is case of skipped interleaved store. We don't free
-                    its stmt_vec_info.  */
-                 bsi_remove (&si, true);
-                 continue;
-               }
-            }
          bsi_next (&si);
        }                       /* stmts in BB */
     }                          /* BBs in loop */