data: scalars (which are represented by SSA_NAMES), and memory references
("data-refs"). These two types of data require different handling both
during analysis and transformation. The types of data-refs that the
- vectorizer currently supports are ARRAY_REFS that are one dimensional
- arrays which base is an array DECL (not a pointer), and INDIRECT_REFS
- through pointers; both array and pointer accesses are required to have a
- simple (consecutive) access pattern.
+ vectorizer currently supports are ARRAY_REFS whose base is an array DECL
+ (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
+ accesses are required to have a simple (consecutive) access pattern.
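+
+ For example, accesses such as the following are supported, provided the
+ address advances consecutively from one iteration to the next (a sketch;
+ 'a' is an array DECL, 'p' a pointer):
+ a[i] = x; (an ARRAY_REF whose base is the array DECL 'a')
+ *p = x; (an INDIRECT_REF through the pointer 'p')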
Analysis phase:
===============
#include "cfglayout.h"
#include "expr.h"
#include "optabs.h"
+#include "toplev.h"
#include "tree-chrec.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "tree-pass.h"
+
+/*************************************************************************
+ Simple Loop Peeling Utilities
+ *************************************************************************/
+
+/* Entry point for peeling of simple loops.
+ Peel the first/last iterations of a loop.
+ It can be used outside of the vectorizer for loops that are simple enough
+ (see function documentation). In the vectorizer it is used to peel the
+ last few iterations when the loop bound is unknown or does not evenly
+ divide by the vectorization factor, and to peel the first few iterations
+ to force the alignment of data references in the loop. */
+struct loop *slpeel_tree_peel_loop_to_edge
+ (struct loop *, struct loops *, edge, tree, tree, bool);
+static struct loop *slpeel_tree_duplicate_loop_to_edge_cfg
+ (struct loop *, struct loops *, edge);
+static void slpeel_update_phis_for_duplicate_loop
+ (struct loop *, struct loop *, bool after);
+static void slpeel_update_phi_nodes_for_guard (edge, struct loop *, bool, bool);
+static void slpeel_make_loop_iterate_ntimes (struct loop *, tree);
+static edge slpeel_add_loop_guard (basic_block, tree, basic_block, basic_block);
+static bool slpeel_can_duplicate_loop_p (struct loop *, edge);
+static void allocate_new_names (bitmap);
+static void rename_use_op (use_operand_p);
+static void rename_def_op (def_operand_p, tree);
+static void rename_variables_in_bb (basic_block);
+static void free_new_names (bitmap);
+static void rename_variables_in_loop (struct loop *);
+#ifdef ENABLE_CHECKING
+static void slpeel_verify_cfg_after_peeling (struct loop *, struct loop *);
+#endif
+
+
+/*************************************************************************
+ Vectorization Utilities.
+ *************************************************************************/
+
/* Main analysis functions. */
static loop_vec_info vect_analyze_loop (struct loop *);
static loop_vec_info vect_analyze_loop_form (struct loop *);
static bool vect_analyze_scalar_cycles (loop_vec_info);
static bool vect_analyze_data_ref_accesses (loop_vec_info);
static bool vect_analyze_data_refs_alignment (loop_vec_info);
-static void vect_compute_data_refs_alignment (loop_vec_info);
+static bool vect_compute_data_refs_alignment (loop_vec_info);
static bool vect_analyze_operations (loop_vec_info);
/* Main code transformation functions. */
static void vect_transform_loop (loop_vec_info, struct loops *);
-static void vect_transform_loop_bound (loop_vec_info);
+static void vect_transform_loop_bound (loop_vec_info, tree niters);
static bool vect_transform_stmt (tree, block_stmt_iterator *);
static bool vectorizable_load (tree, block_stmt_iterator *, tree *);
static bool vectorizable_store (tree, block_stmt_iterator *, tree *);
static bool vectorizable_operation (tree, block_stmt_iterator *, tree *);
static bool vectorizable_assignment (tree, block_stmt_iterator *, tree *);
+static enum dr_alignment_support vect_supportable_dr_alignment
+ (struct data_reference *);
static void vect_align_data_ref (tree);
static void vect_enhance_data_refs_alignment (loop_vec_info);
static bool vect_is_simple_iv_evolution (unsigned, tree, tree *, tree *, bool);
static void vect_mark_relevant (varray_type, tree);
static bool vect_stmt_relevant_p (tree, loop_vec_info);
-static tree vect_get_loop_niters (struct loop *, HOST_WIDE_INT *);
-static void vect_compute_data_ref_alignment
+static tree vect_get_loop_niters (struct loop *, tree *);
+static bool vect_compute_data_ref_alignment
(struct data_reference *, loop_vec_info);
static bool vect_analyze_data_ref_access (struct data_reference *);
static bool vect_get_first_index (tree, tree *);
static bool vect_can_force_dr_alignment_p (tree, unsigned int);
-static tree vect_get_base_decl_and_bit_offset (tree, tree *);
-static struct data_reference * vect_analyze_pointer_ref_access (tree, tree, bool);
+static struct data_reference * vect_analyze_pointer_ref_access
+ (tree, tree, bool);
+static bool vect_can_advance_ivs_p (struct loop *);
+static tree vect_get_base_and_bit_offset
+ (struct data_reference *, tree, tree, loop_vec_info, tree *, bool*);
+static tree vect_compute_array_base_alignment (tree, tree, tree *, tree *);
+static tree vect_compute_array_ref_alignment
+ (struct data_reference *, loop_vec_info, tree, tree *);
+static tree vect_get_ptr_offset (tree, tree, tree *);
+static tree vect_get_symbl_and_dr
+ (tree, tree, bool, loop_vec_info, struct data_reference **);
/* Utility functions for the code transformation. */
static tree vect_create_destination_var (tree, tree);
-static tree vect_create_data_ref (tree, block_stmt_iterator *);
-static tree vect_create_index_for_array_ref (tree, block_stmt_iterator *);
+static tree vect_create_data_ref_ptr
+ (tree, block_stmt_iterator *, tree, tree *, bool);
+static tree vect_create_index_for_vector_ref
+ (struct loop *, block_stmt_iterator *);
+static tree vect_create_addr_base_for_vector_ref (tree, tree *, tree);
static tree get_vectype_for_scalar_type (tree);
static tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
static tree vect_get_vec_def_for_operand (tree, tree);
static tree vect_init_vector (tree, tree);
+static tree vect_build_symbol_bound (tree, int, struct loop *);
static void vect_finish_stmt_generation
(tree stmt, tree vec_stmt, block_stmt_iterator *bsi);
+/* Utility functions dealing with loop peeling (not the peeling itself). */
+static void vect_generate_tmps_on_preheader
+ (loop_vec_info, tree *, tree *, tree *);
+static tree vect_build_loop_niters (loop_vec_info);
+static void vect_update_ivs_after_vectorizer (struct loop *, tree, edge);
+static tree vect_gen_niters_for_prolog_loop (loop_vec_info, tree);
+static void vect_update_inits_of_dr
+ (struct data_reference *, struct loop *, tree niters);
+static void vect_update_inits_of_drs (loop_vec_info, tree);
+static void vect_do_peeling_for_alignment (loop_vec_info, struct loops *);
+static void vect_do_peeling_for_loop_bound
+ (loop_vec_info, tree *, struct loops *);
+
/* Utilities for creation and deletion of vec_info structs. */
loop_vec_info new_loop_vec_info (struct loop *loop);
void destroy_loop_vec_info (loop_vec_info);
static bool vect_debug_stats (struct loop *loop);
static bool vect_debug_details (struct loop *loop);
+\f
+/*************************************************************************
+ Simple Loop Peeling Utilities
+
+ Utilities to support loop peeling for vectorization purposes.
+ *************************************************************************/
+
+
+/* For each definition in DEFINITIONS this function allocates
+ a new ssa name. */
+
+static void
+allocate_new_names (bitmap definitions)
+{
+ unsigned ver;
+ bitmap_iterator bi;
+
+ EXECUTE_IF_SET_IN_BITMAP (definitions, 0, ver, bi)
+ {
+ tree def = ssa_name (ver);
+ tree *new_name_ptr = xmalloc (sizeof (tree));
+
+ bool abnormal = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def);
+
+ *new_name_ptr = duplicate_ssa_name (def, SSA_NAME_DEF_STMT (def));
+ SSA_NAME_OCCURS_IN_ABNORMAL_PHI (*new_name_ptr) = abnormal;
+
+ SSA_NAME_AUX (def) = new_name_ptr;
+ }
+}
+
+
+/* Renames the use *OP_P. */
+
+static void
+rename_use_op (use_operand_p op_p)
+{
+ tree *new_name_ptr;
+
+ if (TREE_CODE (USE_FROM_PTR (op_p)) != SSA_NAME)
+ return;
+
+ new_name_ptr = SSA_NAME_AUX (USE_FROM_PTR (op_p));
+
+ /* Something defined outside of the loop. */
+ if (!new_name_ptr)
+ return;
+
+ /* An ordinary ssa name defined in the loop. */
+
+ SET_USE (op_p, *new_name_ptr);
+}
+
+
+/* Renames the def *OP_P in statement STMT. */
+
+static void
+rename_def_op (def_operand_p op_p, tree stmt)
+{
+ tree *new_name_ptr;
+
+ if (TREE_CODE (DEF_FROM_PTR (op_p)) != SSA_NAME)
+ return;
+
+ new_name_ptr = SSA_NAME_AUX (DEF_FROM_PTR (op_p));
+
+ /* Something defined outside of the loop. */
+ if (!new_name_ptr)
+ return;
+
+ /* An ordinary ssa name defined in the loop. */
+
+ SET_DEF (op_p, *new_name_ptr);
+ SSA_NAME_DEF_STMT (DEF_FROM_PTR (op_p)) = stmt;
+}
+
+
+/* Renames the variables in basic block BB. */
+
+static void
+rename_variables_in_bb (basic_block bb)
+{
+ tree phi;
+ block_stmt_iterator bsi;
+ tree stmt;
+ stmt_ann_t ann;
+ use_optype uses;
+ vuse_optype vuses;
+ def_optype defs;
+ v_may_def_optype v_may_defs;
+ v_must_def_optype v_must_defs;
+ unsigned i;
+ edge e;
+ edge_iterator ei;
+ struct loop *loop = bb->loop_father;
+
+ for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
+ rename_def_op (PHI_RESULT_PTR (phi), phi);
+
+ for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
+ {
+ stmt = bsi_stmt (bsi);
+ get_stmt_operands (stmt);
+ ann = stmt_ann (stmt);
+
+ uses = USE_OPS (ann);
+ for (i = 0; i < NUM_USES (uses); i++)
+ rename_use_op (USE_OP_PTR (uses, i));
+
+ defs = DEF_OPS (ann);
+ for (i = 0; i < NUM_DEFS (defs); i++)
+ rename_def_op (DEF_OP_PTR (defs, i), stmt);
+
+ vuses = VUSE_OPS (ann);
+ for (i = 0; i < NUM_VUSES (vuses); i++)
+ rename_use_op (VUSE_OP_PTR (vuses, i));
+
+ v_may_defs = V_MAY_DEF_OPS (ann);
+ for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++)
+ {
+ rename_use_op (V_MAY_DEF_OP_PTR (v_may_defs, i));
+ rename_def_op (V_MAY_DEF_RESULT_PTR (v_may_defs, i), stmt);
+ }
+
+ v_must_defs = V_MUST_DEF_OPS (ann);
+ for (i = 0; i < NUM_V_MUST_DEFS (v_must_defs); i++)
+ {
+ rename_use_op (V_MUST_DEF_KILL_PTR (v_must_defs, i));
+ rename_def_op (V_MUST_DEF_RESULT_PTR (v_must_defs, i), stmt);
+ }
+ }
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ {
+ if (!flow_bb_inside_loop_p (loop, e->dest))
+ continue;
+ for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi))
+ rename_use_op (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e));
+ }
+}
+
+
+/* Releases the structures holding the new ssa names. */
+
+static void
+free_new_names (bitmap definitions)
+{
+ unsigned ver;
+ bitmap_iterator bi;
+
+ EXECUTE_IF_SET_IN_BITMAP (definitions, 0, ver, bi)
+ {
+ tree def = ssa_name (ver);
+
+ if (SSA_NAME_AUX (def))
+ {
+ free (SSA_NAME_AUX (def));
+ SSA_NAME_AUX (def) = NULL;
+ }
+ }
+}
+
+
+/* Renames the variables in the newly generated LOOP. */
+
+static void
+rename_variables_in_loop (struct loop *loop)
+{
+ unsigned i;
+ basic_block *bbs;
+
+ bbs = get_loop_body (loop);
+
+ for (i = 0; i < loop->num_nodes; i++)
+ rename_variables_in_bb (bbs[i]);
+
+ free (bbs);
+}
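+
+/* The renaming utilities above are used together as follows (a sketch;
+   this mirrors their use in slpeel_tree_peel_loop_to_edge below):
+
+     definitions = marked_ssa_names ();
+     allocate_new_names (definitions);
+     ... duplicate the loop and update its phi nodes ...
+     rename_variables_in_loop (new_loop);
+     ...
+     free_new_names (definitions);
+     BITMAP_XFREE (definitions);
+*/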
+
+
+/* Update the PHI nodes of NEW_LOOP.
+
+ NEW_LOOP is a duplicate of ORIG_LOOP.
+ AFTER indicates whether NEW_LOOP executes before or after ORIG_LOOP:
+ AFTER is true if NEW_LOOP executes after ORIG_LOOP, and false if it
+ executes before it. */
+
+static void
+slpeel_update_phis_for_duplicate_loop (struct loop *orig_loop,
+ struct loop *new_loop, bool after)
+{
+ tree *new_name_ptr, new_ssa_name;
+ tree phi_new, phi_orig;
+ tree def;
+ edge orig_loop_latch = loop_latch_edge (orig_loop);
+ edge orig_entry_e = loop_preheader_edge (orig_loop);
+ edge new_loop_exit_e = new_loop->exit_edges[0];
+ edge new_loop_entry_e = loop_preheader_edge (new_loop);
+ edge entry_arg_e = (after ? orig_loop_latch : orig_entry_e);
+
+ /*
+ step 1. For each loop-header-phi:
+ Add the first phi argument for the phi in NEW_LOOP
+ (the one associated with the entry of NEW_LOOP)
+
+ step 2. For each loop-header-phi:
+ Add the second phi argument for the phi in NEW_LOOP
+ (the one associated with the latch of NEW_LOOP)
+
+ step 3. Update the phis in the successor block of NEW_LOOP.
+
+ case 1: NEW_LOOP was placed before ORIG_LOOP:
+ The successor block of NEW_LOOP is the header of ORIG_LOOP.
+ Updating the phis in the successor block can therefore be done
+ along with the scanning of the loop header phis, because the
+ header blocks of ORIG_LOOP and NEW_LOOP have exactly the same
+ phi nodes, organized in the same order.
+
+ case 2: NEW_LOOP was placed after ORIG_LOOP:
+ The successor block of NEW_LOOP is the original exit block of
+ ORIG_LOOP - the phis to be updated are the loop-closed-ssa phis.
+ We postpone updating these phis to a later stage (when
+ loop guards are added).
+ */
+
+
+ /* Scan the phis in the headers of the old and new loops
+ (they are organized in exactly the same order). */
+
+ for (phi_new = phi_nodes (new_loop->header),
+ phi_orig = phi_nodes (orig_loop->header);
+ phi_new && phi_orig;
+ phi_new = PHI_CHAIN (phi_new), phi_orig = PHI_CHAIN (phi_orig))
+ {
+ /* step 1. */
+ def = PHI_ARG_DEF_FROM_EDGE (phi_orig, entry_arg_e);
+ add_phi_arg (&phi_new, def, new_loop_entry_e);
+
+ /* step 2. */
+ def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);
+ if (TREE_CODE (def) != SSA_NAME)
+ continue;
+
+ new_name_ptr = SSA_NAME_AUX (def);
+ if (!new_name_ptr)
+ /* Something defined outside of the loop. */
+ continue;
+
+ /* An ordinary ssa name defined in the loop. */
+ new_ssa_name = *new_name_ptr;
+ add_phi_arg (&phi_new, new_ssa_name, loop_latch_edge (new_loop));
+
+ /* step 3 (case 1). */
+ if (!after)
+ {
+ gcc_assert (new_loop_exit_e == orig_entry_e);
+ SET_PHI_ARG_DEF (phi_orig,
+ phi_arg_from_edge (phi_orig, new_loop_exit_e),
+ new_ssa_name);
+ }
+ }
+}
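+
+/* For example (a sketch): given a loop-header phi of ORIG_LOOP
+
+     x_1 = PHI <x_0 (entry), x_2 (latch)>
+
+   and x_2' = the new ssa-name recorded in SSA_NAME_AUX (x_2), the copied
+   phi in NEW_LOOP is completed to
+
+     x_1' = PHI <d (entry), x_2' (latch)>
+
+   where d is x_2 if NEW_LOOP executes after ORIG_LOOP (AFTER is true),
+   and x_0 otherwise. */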
+
+
+/* Update PHI nodes for a guard of the LOOP.
+
+ Input:
+ - LOOP, GUARD_EDGE: LOOP is a loop for which we added guard code that
+ controls whether LOOP is to be executed. GUARD_EDGE is the edge that
+ originates from the guard-bb, skips LOOP and reaches the (unique) exit
+ bb of LOOP. This loop-exit-bb is an empty bb with one successor.
+ We denote this bb NEW_MERGE_BB because it had a single predecessor (the
+ LOOP header) before the guard code was added, and has now become a merge
+ point of two paths - the path that ends with the LOOP exit-edge, and
+ the path that ends with GUARD_EDGE.
+
+ This function creates and updates the relevant phi nodes to account for
+ the new incoming edge (GUARD_EDGE) into NEW_MERGE_BB:
+ 1. Create phi nodes at NEW_MERGE_BB.
+ 2. Update the phi nodes at the successor of NEW_MERGE_BB (denoted
+ UPDATE_BB). UPDATE_BB was the exit-bb of LOOP before NEW_MERGE_BB
+ was added:
+
+ ===> The CFG before the guard-code was added:
+ LOOP_header_bb:
+ if (exit_loop) goto update_bb : LOOP_header_bb
+ update_bb:
+
+ ==> The CFG after the guard-code was added:
+ guard_bb:
+ if (LOOP_guard_condition) goto new_merge_bb : LOOP_header_bb
+ LOOP_header_bb:
+ if (exit_loop_condition) goto new_merge_bb : LOOP_header_bb
+ new_merge_bb:
+ goto update_bb
+ update_bb:
+
+ - ENTRY_PHIS: If ENTRY_PHIS is TRUE, this indicates that the phis in
+ UPDATE_BB are loop entry phis, like the phis in the LOOP header,
+ organized in the same order.
+ If ENTRY_PHIs is FALSE, this indicates that the phis in UPDATE_BB are
+ loop exit phis.
+
+ - IS_NEW_LOOP: TRUE if LOOP is a new loop (a duplicated copy of another
+ "original" loop). FALSE if LOOP is an original loop (not a newly
+ created copy). The SSA_NAME_AUX fields of the defs in the original
+ loop are the corresponding new ssa-names used in the new duplicated
+ loop copy. IS_NEW_LOOP indicates which of the two args of the phi
+ nodes in UPDATE_BB takes the original ssa-name, and which takes the
+ new name: If IS_NEW_LOOP is TRUE, the phi-arg that is associated with
+ the LOOP-exit-edge takes the new-name, and the phi-arg that is
+ associated with GUARD_EDGE takes the original name. If IS_NEW_LOOP is
+ FALSE, it's the other way around.
+ */
+
+static void
+slpeel_update_phi_nodes_for_guard (edge guard_edge,
+ struct loop *loop,
+ bool entry_phis,
+ bool is_new_loop)
+{
+ tree orig_phi, new_phi, update_phi;
+ tree guard_arg, loop_arg;
+ basic_block new_merge_bb = guard_edge->dest;
+ edge e = EDGE_SUCC (new_merge_bb, 0);
+ basic_block update_bb = e->dest;
+ basic_block orig_bb = (entry_phis ? loop->header : update_bb);
+
+ for (orig_phi = phi_nodes (orig_bb), update_phi = phi_nodes (update_bb);
+ orig_phi && update_phi;
+ orig_phi = PHI_CHAIN (orig_phi), update_phi = PHI_CHAIN (update_phi))
+ {
+ /* 1. Generate new phi node in NEW_MERGE_BB: */
+ new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
+ new_merge_bb);
+
+ /* 2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
+ of LOOP. Set the two phi args in NEW_PHI for these edges: */
+ if (entry_phis)
+ {
+ loop_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi,
+ EDGE_SUCC (loop->latch, 0));
+ guard_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, loop->entry_edges[0]);
+ }
+ else /* exit phis */
+ {
+ tree orig_def = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
+ tree *new_name_ptr = SSA_NAME_AUX (orig_def);
+ tree new_name;
+
+ if (new_name_ptr)
+ new_name = *new_name_ptr;
+ else
+ /* Something defined outside of the loop */
+ new_name = orig_def;
+
+ if (is_new_loop)
+ {
+ guard_arg = orig_def;
+ loop_arg = new_name;
+ }
+ else
+ {
+ guard_arg = new_name;
+ loop_arg = orig_def;
+ }
+ }
+ add_phi_arg (&new_phi, loop_arg, loop->exit_edges[0]);
+ add_phi_arg (&new_phi, guard_arg, guard_edge);
+
+ /* 3. Update phi in successor block. */
+ gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == loop_arg
+ || PHI_ARG_DEF_FROM_EDGE (update_phi, e) == guard_arg);
+ SET_PHI_ARG_DEF (update_phi, phi_arg_from_edge (update_phi, e),
+ PHI_RESULT (new_phi));
+ }
+
+ set_phi_nodes (new_merge_bb, phi_reverse (phi_nodes (new_merge_bb)));
+}
+
+
+/* Make the LOOP iterate NITERS times. This is done by adding a new IV
+ that starts at zero, increases by one, and whose limit is NITERS.
+
+ Assumption: the exit-condition of LOOP is the last stmt in the loop. */
+
+static void
+slpeel_make_loop_iterate_ntimes (struct loop *loop, tree niters)
+{
+ tree indx_before_incr, indx_after_incr, cond_stmt, cond;
+ tree orig_cond;
+ edge exit_edge = loop->exit_edges[0];
+ block_stmt_iterator loop_exit_bsi = bsi_last (exit_edge->src);
+ tree begin_label = tree_block_label (loop->latch);
+ tree exit_label = tree_block_label (loop->single_exit->dest);
+
+ orig_cond = get_loop_exit_condition (loop);
+ gcc_assert (orig_cond);
+ create_iv (integer_zero_node, integer_one_node, NULL_TREE, loop,
+ &loop_exit_bsi, false, &indx_before_incr, &indx_after_incr);
+
+ /* CREATE_IV uses BSI_INSERT with TSI_NEW_STMT, so we want to get
+ back to the exit condition statement. */
+ bsi_next (&loop_exit_bsi);
+ gcc_assert (bsi_stmt (loop_exit_bsi) == orig_cond);
+
+ if (exit_edge->flags & EDGE_TRUE_VALUE) /* 'then' edge exits the loop. */
+ cond = build2 (GE_EXPR, boolean_type_node, indx_after_incr, niters);
+ else /* 'then' edge loops back. */
+ cond = build2 (LT_EXPR, boolean_type_node, indx_after_incr, niters);
+
+ begin_label = build1 (GOTO_EXPR, void_type_node, begin_label);
+ exit_label = build1 (GOTO_EXPR, void_type_node, exit_label);
+ cond_stmt = build (COND_EXPR, TREE_TYPE (orig_cond), cond,
+ begin_label, exit_label);
+ bsi_insert_before (&loop_exit_bsi, cond_stmt, BSI_SAME_STMT);
+
+ /* Remove old loop exit test: */
+ bsi_remove (&loop_exit_bsi);
+
+ if (vect_debug_stats (loop) || vect_debug_details (loop))
+ print_generic_expr (dump_file, cond_stmt, TDF_SLIM);
+
+ loop->nb_iterations = niters;
+}
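+
+/* For example (a sketch of the resulting loop, assuming the 'then' edge
+   exits the loop): slpeel_make_loop_iterate_ntimes rewrites the exit
+   test so the loop runs exactly NITERS times:
+
+     i_1 = PHI <0 (preheader), i_2 (latch)>
+     ...
+     i_2 = i_1 + 1;
+     if (i_2 >= niters) exit the loop; else continue;
+*/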
+
+
+/* Given LOOP, this function generates a new copy of it and puts it
+ on edge E, which is either the entry or the exit edge of LOOP. */
+
+static struct loop *
+slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, struct loops *loops,
+ edge e)
+{
+ struct loop *new_loop;
+ basic_block *new_bbs, *bbs;
+ bool at_exit;
+ bool was_imm_dom;
+ basic_block exit_dest;
+ tree phi, phi_arg;
+
+ at_exit = (e == loop->exit_edges[0]);
+ if (!at_exit && e != loop_preheader_edge (loop))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Edge is not an entry nor an exit edge.\n");
+ return NULL;
+ }
+
+ bbs = get_loop_body (loop);
+
+ /* Check whether duplication is possible. */
+ if (!can_copy_bbs_p (bbs, loop->num_nodes))
+ {
+ if (vect_debug_stats (loop) || vect_debug_details (loop))
+ fprintf (dump_file, "Cannot copy basic blocks.\n");
+ free (bbs);
+ return NULL;
+ }
+
+ /* Generate new loop structure. */
+ new_loop = duplicate_loop (loops, loop, loop->outer);
+ if (!new_loop)
+ {
+ if (vect_debug_stats (loop) || vect_debug_details (loop))
+ fprintf (dump_file, "duplicate_loop returns NULL.\n");
+ free (bbs);
+ return NULL;
+ }
+
+ exit_dest = loop->exit_edges[0]->dest;
+ was_imm_dom = (get_immediate_dominator (CDI_DOMINATORS,
+ exit_dest) == loop->header ?
+ true : false);
+
+ new_bbs = xmalloc (sizeof (basic_block) * loop->num_nodes);
+
+ copy_bbs (bbs, loop->num_nodes, new_bbs, NULL, 0, NULL, NULL);
+
+ /* Duplicate the phi args at the exit bbs so that they also come
+ from the exit of the duplicated loop. */
+ for (phi = phi_nodes (exit_dest); phi; phi = PHI_CHAIN (phi))
+ {
+ phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, loop->exit_edges[0]);
+ if (phi_arg)
+ {
+ edge new_loop_exit_edge;
+
+ if (EDGE_SUCC (new_loop->header, 0)->dest == new_loop->latch)
+ new_loop_exit_edge = EDGE_SUCC (new_loop->header, 1);
+ else
+ new_loop_exit_edge = EDGE_SUCC (new_loop->header, 0);
+
+ add_phi_arg (&phi, phi_arg, new_loop_exit_edge);
+ }
+ }
+
+ if (at_exit) /* Add the loop copy at exit. */
+ {
+ redirect_edge_and_branch_force (e, new_loop->header);
+ set_immediate_dominator (CDI_DOMINATORS, new_loop->header, e->src);
+ if (was_imm_dom)
+ set_immediate_dominator (CDI_DOMINATORS, exit_dest, new_loop->header);
+ }
+ else /* Add the copy at entry. */
+ {
+ edge new_exit_e;
+ edge entry_e = loop_preheader_edge (loop);
+ basic_block preheader = entry_e->src;
+
+ if (!flow_bb_inside_loop_p (new_loop,
+ EDGE_SUCC (new_loop->header, 0)->dest))
+ new_exit_e = EDGE_SUCC (new_loop->header, 0);
+ else
+ new_exit_e = EDGE_SUCC (new_loop->header, 1);
+
+ redirect_edge_and_branch_force (new_exit_e, loop->header);
+ set_immediate_dominator (CDI_DOMINATORS, loop->header,
+ new_exit_e->src);
+
+ /* We have to add phi args to the loop->header here as coming
+ from new_exit_e edge. */
+ for (phi = phi_nodes (loop->header); phi; phi = PHI_CHAIN (phi))
+ {
+ phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, entry_e);
+ if (phi_arg)
+ add_phi_arg (&phi, phi_arg, new_exit_e);
+ }
+
+ redirect_edge_and_branch_force (entry_e, new_loop->header);
+ set_immediate_dominator (CDI_DOMINATORS, new_loop->header, preheader);
+ }
+
+ flow_loop_scan (new_loop, LOOP_ALL);
+ flow_loop_scan (loop, LOOP_ALL);
+ free (new_bbs);
+ free (bbs);
+
+ return new_loop;
+}
+
+
+/* Given the condition statement COND, add it as the last statement
+ of GUARD_BB. EXIT_BB is the basic block that is reached when the
+ guard skips the loop; DOM_BB becomes its immediate dominator.
+ Assumes that EXIT_BB is on the single-exit path of the guarded loop.
+ Returns the skip edge. */
+
+static edge
+slpeel_add_loop_guard (basic_block guard_bb, tree cond, basic_block exit_bb,
+ basic_block dom_bb)
+{
+ block_stmt_iterator bsi;
+ edge new_e, enter_e;
+ tree cond_stmt, then_label, else_label;
+
+ enter_e = EDGE_SUCC (guard_bb, 0);
+ enter_e->flags &= ~EDGE_FALLTHRU;
+ enter_e->flags |= EDGE_FALSE_VALUE;
+ bsi = bsi_last (guard_bb);
+
+ then_label = build1 (GOTO_EXPR, void_type_node,
+ tree_block_label (exit_bb));
+ else_label = build1 (GOTO_EXPR, void_type_node,
+ tree_block_label (enter_e->dest));
+ cond_stmt = build (COND_EXPR, void_type_node, cond,
+ then_label, else_label);
+ bsi_insert_after (&bsi, cond_stmt, BSI_NEW_STMT);
+ /* Add new edge to connect entry block to the second loop. */
+ new_e = make_edge (guard_bb, exit_bb, EDGE_TRUE_VALUE);
+ set_immediate_dominator (CDI_DOMINATORS, exit_bb, dom_bb);
+ return new_e;
+}
+
+
+/* This function verifies that the following restrictions apply to LOOP:
+ (1) it is innermost
+ (2) it consists of exactly 2 basic blocks - header, and an empty latch.
+ (3) it is single entry, single exit
+ (4) its exit condition is the last stmt in the header
+ (5) E is the entry/exit edge of LOOP.
+ */
+
+static bool
+slpeel_can_duplicate_loop_p (struct loop *loop, edge e)
+{
+ edge exit_e = loop->exit_edges [0];
+ edge entry_e = loop_preheader_edge (loop);
+ tree orig_cond = get_loop_exit_condition (loop);
+ block_stmt_iterator loop_exit_bsi = bsi_last (exit_e->src);
+
+ if (any_marked_for_rewrite_p ())
+ return false;
+
+ if (loop->inner
+ /* All loops have an outer scope; the only case loop->outer is NULL is for
+ the function itself. */
+ || !loop->outer
+ || loop->num_nodes != 2
+ || !empty_block_p (loop->latch)
+ || loop->num_exits != 1
+ || loop->num_entries != 1
+ /* Verify that new loop exit condition can be trivially modified. */
+ || (!orig_cond || orig_cond != bsi_stmt (loop_exit_bsi))
+ || (e != exit_e && e != entry_e))
+ return false;
+
+ return true;
+}
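+
+/* For example (a sketch), a loop of the supported shape:
+
+     header: i_1 = PHI <0 (preheader), i_2 (latch)>
+             ...
+             i_2 = i_1 + 1;
+             if (i_2 < n) goto latch; else goto exit;
+     latch:  goto header;   (empty latch)
+
+   i.e., an innermost loop of two basic blocks with an empty latch, a
+   single entry and a single exit, whose exit condition is the last stmt
+   in the header. */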
+
+#ifdef ENABLE_CHECKING
+static void
+slpeel_verify_cfg_after_peeling (struct loop *first_loop,
+ struct loop *second_loop)
+{
+ basic_block loop1_exit_bb = first_loop->exit_edges[0]->dest;
+ basic_block loop2_entry_bb = second_loop->pre_header;
+ basic_block loop1_entry_bb = loop_preheader_edge (first_loop)->src;
+
+ /* A guard that controls whether the second_loop is to be executed or
+ skipped is placed in first_loop->exit. first_loop->exit therefore has
+ two successors - one is the preheader of second_loop, and the other is
+ a bb after second_loop. */
+ gcc_assert (EDGE_COUNT (loop1_exit_bb->succs) == 2);
+
+
+ /* 1. Verify that one of the successors of first_loop->exit is the
+ preheader of second_loop. */
+
+ /* The preheader of new_loop is expected to have two predecessors:
+ first_loop->exit and the block that precedes first_loop. */
+
+ gcc_assert (EDGE_COUNT (loop2_entry_bb->preds) == 2
+ && ((EDGE_PRED (loop2_entry_bb, 0)->src == loop1_exit_bb
+ && EDGE_PRED (loop2_entry_bb, 1)->src == loop1_entry_bb)
+ || (EDGE_PRED (loop2_entry_bb, 1)->src == loop1_exit_bb
+ && EDGE_PRED (loop2_entry_bb, 0)->src == loop1_entry_bb)));
+
+ /* Verify that the other successor of first_loop->exit is after the
+ second_loop. */
+ /* TODO */
+}
+#endif
+
+/* Function slpeel_tree_peel_loop_to_edge.
+
+ Peel the first (last) iterations of LOOP into a new prolog (epilog) loop
+ that is placed on the entry (exit) edge E of LOOP. After this transformation
+ we have two loops, one after the other - first-loop iterates FIRST_NITERS
+ times, and second-loop iterates the remainder NITERS - FIRST_NITERS times.
+
+ Input:
+ - LOOP: the loop to be peeled.
+ - E: the exit or entry edge of LOOP.
+ If it is the entry edge, we peel the first iterations of LOOP. In this
+ case first-loop is LOOP, and second-loop is the newly created loop.
+ If it is the exit edge, we peel the last iterations of LOOP. In this
+ case, first-loop is the newly created loop, and second-loop is LOOP.
+ - NITERS: the number of iterations that LOOP iterates.
+ - FIRST_NITERS: the number of iterations that the first-loop should iterate.
+ - UPDATE_FIRST_LOOP_COUNT: specifies whether this function is responsible
+ for updating the loop bound of the first-loop to FIRST_NITERS. If it
+ is false, the caller of this function may want to take care of this
+ (this can be useful if we don't want new stmts added to first-loop).
+
+ Output:
+ The function returns a pointer to the new loop-copy, or NULL if it failed
+ to perform the transformation.
+
+ The function generates two if-then-else guards: one before the first loop,
+ and the other before the second loop:
+ The first guard is:
+ if (FIRST_NITERS == 0) then skip the first loop,
+ and go directly to the second loop.
+ The second guard is:
+ if (FIRST_NITERS == NITERS) then skip the second loop.
+
+ FORNOW only simple loops are supported (see slpeel_can_duplicate_loop_p).
+ FORNOW the resulting code will not be in loop-closed-ssa form.
+*/
+
+struct loop*
+slpeel_tree_peel_loop_to_edge (struct loop *loop, struct loops *loops,
+ edge e, tree first_niters,
+ tree niters, bool update_first_loop_count)
+{
+ struct loop *new_loop = NULL, *first_loop, *second_loop;
+ edge skip_e;
+ tree pre_condition;
+ bitmap definitions;
+ basic_block bb_before_second_loop, bb_after_second_loop;
+ basic_block bb_before_first_loop;
+ basic_block bb_between_loops;
+ edge exit_e = loop->exit_edges [0];
+
+ if (!slpeel_can_duplicate_loop_p (loop, e))
+ return NULL;
+
+ /* We have to initialize cfg_hooks. Then, when calling
+ cfg_hooks->split_edge, the function tree_split_edge
+ is actually called and, when calling cfg_hooks->duplicate_block,
+ the function tree_duplicate_bb is called. */
+ tree_register_cfg_hooks ();
+
+
+ /* 1. Generate a copy of LOOP and put it on E (E is the entry/exit of LOOP).
+ Resulting CFG would be:
+
+ first_loop:
+ do {
+ } while ...
+
+ second_loop:
+ do {
+ } while ...
+
+ orig_exit_bb:
+ */
+
+ if (!(new_loop = slpeel_tree_duplicate_loop_to_edge_cfg (loop, loops, e)))
+ {
+ if (vect_debug_stats (loop) || vect_debug_details (loop))
+ fprintf (dump_file, "tree_duplicate_loop_to_edge_cfg failed.\n");
+ return NULL;
+ }
+
+ if (e == exit_e)
+ {
+ /* NEW_LOOP was placed after LOOP. */
+ first_loop = loop;
+ second_loop = new_loop;
+ }
+ else
+ {
+ /* NEW_LOOP was placed before LOOP. */
+ first_loop = new_loop;
+ second_loop = loop;
+ }
+
+ definitions = marked_ssa_names ();
+ allocate_new_names (definitions);
+ slpeel_update_phis_for_duplicate_loop (loop, new_loop, e == exit_e);
+ rename_variables_in_loop (new_loop);
+
+
+ /* 2. Add the guard that controls whether the first loop is executed.
+ Resulting CFG would be:
+
+ bb_before_first_loop:
+ if (FIRST_NITERS == 0) GOTO bb_before_second_loop
+ GOTO first-loop
+
+ first_loop:
+ do {
+ } while ...
+
+ bb_before_second_loop:
+
+ second_loop:
+ do {
+ } while ...
+
+ orig_exit_bb:
+ */
+
+ bb_before_first_loop = split_edge (loop_preheader_edge (first_loop));
+ add_bb_to_loop (bb_before_first_loop, first_loop->outer);
+ bb_before_second_loop = split_edge (first_loop->exit_edges[0]);
+ add_bb_to_loop (bb_before_second_loop, first_loop->outer);
+ flow_loop_scan (first_loop, LOOP_ALL);
+ flow_loop_scan (second_loop, LOOP_ALL);
+
+ pre_condition =
+ build (LE_EXPR, boolean_type_node, first_niters, integer_zero_node);
+ skip_e = slpeel_add_loop_guard (bb_before_first_loop, pre_condition,
+ bb_before_second_loop, bb_before_first_loop);
+ slpeel_update_phi_nodes_for_guard (skip_e, first_loop, true /* entry-phis */,
+ first_loop == new_loop);
+
+
+ /* 3. Add the guard that controls whether the second loop is executed.
+ Resulting CFG would be:
+
+ bb_before_first_loop:
+ if (FIRST_NITERS == 0) GOTO bb_before_second_loop (skip first loop)
+ GOTO first-loop
+
+ first_loop:
+ do {
+ } while ...
+
+ bb_between_loops:
+ if (FIRST_NITERS == NITERS) GOTO bb_after_second_loop (skip second loop)
+ GOTO bb_before_second_loop
+
+ bb_before_second_loop:
+
+ second_loop:
+ do {
+ } while ...
+
+ bb_after_second_loop:
+
+ orig_exit_bb:
+ */
+
+ bb_between_loops = split_edge (first_loop->exit_edges[0]);
+ add_bb_to_loop (bb_between_loops, first_loop->outer);
+ bb_after_second_loop = split_edge (second_loop->exit_edges[0]);
+ add_bb_to_loop (bb_after_second_loop, second_loop->outer);
+ flow_loop_scan (first_loop, LOOP_ALL);
+ flow_loop_scan (second_loop, LOOP_ALL);
+
+ pre_condition = build (EQ_EXPR, boolean_type_node, first_niters, niters);
+ skip_e = slpeel_add_loop_guard (bb_between_loops, pre_condition,
+ bb_after_second_loop, bb_before_first_loop);
+ slpeel_update_phi_nodes_for_guard (skip_e, second_loop, false /* exit-phis */,
+ second_loop == new_loop);
+
+ /* Flow loop scan does not update loop->single_exit field. */
+ first_loop->single_exit = first_loop->exit_edges[0];
+ second_loop->single_exit = second_loop->exit_edges[0];
+
+ /* 4. Make first-loop iterate FIRST_NITERS times, if requested.
+ */
+ if (update_first_loop_count)
+ slpeel_make_loop_iterate_ntimes (first_loop, first_niters);
+
+ free_new_names (definitions);
+ BITMAP_XFREE (definitions);
+ unmark_all_for_rewrite ();
+
+ return new_loop;
+}
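+
+/* Example use (a sketch; RATIO_X_VF is a hypothetical name for the
+   number of iterations that remain vectorizable). To split off the last
+   NITERS - RATIO_X_VF iterations into an epilog loop:
+
+     new_loop = slpeel_tree_peel_loop_to_edge (loop, loops,
+                                               loop->exit_edges[0],
+                                               ratio_x_vf, niters, false);
+
+   Passing the entry edge of LOOP instead peels a prolog loop off the
+   start (as done when peeling for alignment). */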
+
+\f
+/* Here the vectorizer proper starts. */
+
+/*************************************************************************
+ Vectorization Utilities.
+ *************************************************************************/
/* Function new_stmt_vec_info.
STMT_VINFO_VEC_STMT (res) = NULL;
STMT_VINFO_DATA_REF (res) = NULL;
STMT_VINFO_MEMTAG (res) = NULL;
+ STMT_VINFO_VECT_DR_BASE (res) = NULL;
return res;
}
LOOP_VINFO_LOOP (res) = loop;
LOOP_VINFO_BBS (res) = bbs;
LOOP_VINFO_EXIT_COND (res) = NULL;
- LOOP_VINFO_NITERS (res) = -1;
+ LOOP_VINFO_NITERS (res) = NULL;
LOOP_VINFO_VECTORIZABLE_P (res) = 0;
+ LOOP_DO_PEELING_FOR_ALIGNMENT (res) = false;
LOOP_VINFO_VECT_FACTOR (res) = 0;
VARRAY_GENERIC_PTR_INIT (LOOP_VINFO_DATAREF_WRITES (res), 20,
"loop_write_datarefs");
VARRAY_GENERIC_PTR_INIT (LOOP_VINFO_DATAREF_READS (res), 20,
"loop_read_datarefs");
+ LOOP_VINFO_UNALIGNED_DR (res) = NULL;
+
return res;
}
return false;
}
-/* Function vect_get_base_decl_and_bit_offset
+
+/* Function vect_get_ptr_offset
+
+ Compute, in bits, the OFFSET of pointer REF modulo the vector-type alignment. */
+
+static tree
+vect_get_ptr_offset (tree ref ATTRIBUTE_UNUSED,
+ tree vectype ATTRIBUTE_UNUSED,
+ tree *offset ATTRIBUTE_UNUSED)
+{
+ /* TODO: Use alignment information. */
+ return NULL_TREE;
+}
+
+
+/* Function vect_get_base_and_bit_offset
+
+ Return the BASE of the data reference EXPR.
+ If VECTYPE is given, also compute the OFFSET from BASE in bits.
+ E.g., for EXPR a.b[i] + 4B, BASE is a, and OFFSET is the overall offset in
+ bits of 'a.b[i] + 4B' from a.
+
+ Input:
+ EXPR - the memory reference that is being analyzed
+ DR - the data_reference struct of the _original_ memory reference
+ (Note: DR_REF (DR) is not necessarily EXPR)
+ VECTYPE - the type that defines the alignment (i.e., we compute
+ alignment relative to TYPE_ALIGN(VECTYPE))
- Get the decl from which the data reference REF is based,
- and compute the OFFSET from it in bits on the way.
- FORNOW: Handle only component-refs that consist of
- VAR_DECLs (no ARRAY_REF or INDIRECT_REF). */
+ Output:
+ BASE (returned value) - the base of the data reference EXPR.
+ E.g, if EXPR is a.b[k].c[i][j] the returned
+ base is a.
+ OFFSET - offset of EXPR from BASE in bits
+ BASE_ALIGNED_P - indicates if BASE is aligned
+
+ If something unexpected is encountered (an unsupported form of data-ref),
+ or if VECTYPE is given but OFFSET cannot be determined,
+ then NULL_TREE is returned. */
static tree
-vect_get_base_decl_and_bit_offset (tree ref, tree *offset)
+vect_get_base_and_bit_offset (struct data_reference *dr,
+ tree expr,
+ tree vectype,
+ loop_vec_info loop_vinfo,
+ tree *offset,
+ bool *base_aligned_p)
{
- tree decl;
- if (TREE_CODE (ref) == VAR_DECL)
- return ref;
+ tree this_offset = size_zero_node;
+ tree base = NULL_TREE;
+ tree next_ref;
+ tree oprnd0, oprnd1;
+ struct data_reference *array_dr;
+ enum tree_code code = TREE_CODE (expr);
+
+ *base_aligned_p = false;
- if (TREE_CODE (ref) == COMPONENT_REF)
+ switch (code)
{
- tree this_offset;
- tree oprnd0 = TREE_OPERAND (ref, 0);
- tree oprnd1 = TREE_OPERAND (ref, 1);
+ /* These cases end the recursion: */
+ case VAR_DECL:
+ *offset = size_zero_node;
+ if (vectype && DECL_ALIGN (expr) >= TYPE_ALIGN (vectype))
+ *base_aligned_p = true;
+ return expr;
+
+ case SSA_NAME:
+ if (!vectype)
+ return expr;
+
+ if (TREE_CODE (TREE_TYPE (expr)) != POINTER_TYPE)
+ return NULL_TREE;
+
+ if (TYPE_ALIGN (TREE_TYPE (TREE_TYPE (expr))) < TYPE_ALIGN (vectype))
+ {
+ base = vect_get_ptr_offset (expr, vectype, offset);
+ if (base)
+ *base_aligned_p = true;
+ }
+ else
+ {
+ *base_aligned_p = true;
+ *offset = size_zero_node;
+ base = expr;
+ }
+ return base;
+
+ case INTEGER_CST:
+ *offset = int_const_binop (MULT_EXPR, expr,
+ build_int_cst (NULL_TREE, BITS_PER_UNIT), 1);
+ return expr;
+
+ /* These cases continue the recursion: */
+ case COMPONENT_REF:
+ oprnd0 = TREE_OPERAND (expr, 0);
+ oprnd1 = TREE_OPERAND (expr, 1);
this_offset = bit_position (oprnd1);
- if (!host_integerp (this_offset,1))
+ if (vectype && !host_integerp (this_offset, 1))
+ return NULL_TREE;
+ next_ref = oprnd0;
+ break;
+
+ case ADDR_EXPR:
+ oprnd0 = TREE_OPERAND (expr, 0);
+ next_ref = oprnd0;
+ break;
+
+ case INDIRECT_REF:
+ oprnd0 = TREE_OPERAND (expr, 0);
+ next_ref = oprnd0;
+ break;
+
+ case ARRAY_REF:
+ if (DR_REF (dr) != expr)
+ /* Build array data_reference struct if the existing DR_REF
+ doesn't match EXPR. This happens, for example, when the
+ EXPR is *T and T is initialized to &arr[indx]. The DR struct
+ contains information on the access of T, not of arr. In order
+ to continue the analysis, we create a new DR struct that
+ describes the access of arr.
+ */
+ array_dr = analyze_array (DR_STMT (dr), expr, DR_IS_READ (dr));
+ else
+ array_dr = dr;
+
+ next_ref = vect_compute_array_ref_alignment (array_dr, loop_vinfo,
+ vectype, &this_offset);
+ if (!next_ref)
return NULL_TREE;
-
- decl = vect_get_base_decl_and_bit_offset (oprnd0, offset);
- if (decl)
+ if (vectype
+ && TYPE_ALIGN (TREE_TYPE (TREE_TYPE (next_ref))) >= TYPE_ALIGN (vectype))
{
- *offset = int_const_binop (PLUS_EXPR, *offset, this_offset, 1);
+ *offset = this_offset;
+ *base_aligned_p = true;
+ return next_ref;
+ }
+ break;
- if (!host_integerp (*offset,1) || TREE_OVERFLOW (*offset))
- return NULL_TREE;
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ /* In case we have a PLUS_EXPR of the form
+ (oprnd0 + oprnd1), we assume that only oprnd0 determines the base.
+ This is verified in vect_get_symbl_and_dr. */
+ oprnd0 = TREE_OPERAND (expr, 0);
+ oprnd1 = TREE_OPERAND (expr, 1);
+
+ base = vect_get_base_and_bit_offset
+ (dr, oprnd1, vectype, loop_vinfo, &this_offset, base_aligned_p);
+ if (vectype && !base)
+ return NULL_TREE;
- if (vect_debug_details (NULL))
- {
- print_generic_expr (dump_file, ref, TDF_SLIM);
- fprintf (dump_file, " --> total offset for ref: ");
- print_generic_expr (dump_file, *offset, TDF_SLIM);
- }
- }
+ next_ref = oprnd0;
+ break;
- return decl;
+ default:
+ return NULL_TREE;
}
- /* TODO: extend to handle more cases. */
- return NULL_TREE;
+ base = vect_get_base_and_bit_offset (dr, next_ref, vectype,
+ loop_vinfo, offset, base_aligned_p);
+
+ if (vectype && base)
+ {
+ *offset = int_const_binop (PLUS_EXPR, *offset, this_offset, 1);
+ if (!host_integerp (*offset, 1) || TREE_OVERFLOW (*offset))
+ return NULL_TREE;
+
+ if (vect_debug_details (NULL))
+ {
+ print_generic_expr (dump_file, expr, TDF_SLIM);
+ fprintf (dump_file, " --> total offset for ref: ");
+ print_generic_expr (dump_file, *offset, TDF_SLIM);
+ }
+ }
+ return base;
}
}
-/* Function create_index_for_array_ref.
+/* Function vect_create_index_for_vector_ref.
Create (and return) an index variable, along with its update chain in the
loop. This variable will be used to access a memory location in a vector
operation.
Input:
- STMT: The stmt that contains a memory data-ref.
+ LOOP: The loop being vectorized.
BSI: The block_stmt_iterator of the stmt being vectorized. Any new stmts created by this
function can be added here, or in the loop pre-header.
+ Output:
+ Return an index that will be used to index a vector array. It is expected
+ that a pointer to the first vector will be used as the base address for the
+ indexed reference.
+
+ FORNOW: we are not trying to be efficient, just creating a new index each
+ time from scratch. At this time all vector references could use the same
+ index.
+
+ TODO: create only one index to be used by all vector references. Record
+ the index in the LOOP_VINFO the first time this procedure is called and
+ return it on subsequent calls. The increment of this index must be placed
+ just before the conditional expression that ends the single block loop. */
+
+static tree
+vect_create_index_for_vector_ref (struct loop *loop, block_stmt_iterator *bsi)
+{
+ tree init, step;
+ tree indx_before_incr, indx_after_incr;
+
+ /* It is assumed that the base pointer used for vectorized access contains
+ the address of the first vector. Therefore the index used for vectorized
+ access must be initialized to zero and incremented by 1. */
+
+ init = integer_zero_node;
+ step = integer_one_node;
+
+ /* Assuming that bsi_insert is used with BSI_NEW_STMT. */
+ create_iv (init, step, NULL_TREE, loop, bsi, false,
+ &indx_before_incr, &indx_after_incr);
+
+ return indx_before_incr;
+}
+
+
+/* Function vect_create_addr_base_for_vector_ref.
+
+ Create an expression that computes the address of the first memory location
+ that will be accessed for a data reference.
+
+ Input:
+ STMT: The statement containing the data reference.
+ NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
+ OFFSET: Optional. If supplied, it is added to the initial address.
+
+ Output:
+ 1. Return an SSA_NAME whose value is the address of the memory location of
+ the first vector of the data reference.
+ 2. If NEW_STMT_LIST is not NULL_TREE after return, the caller must insert
+ these statement(s), which define the returned SSA_NAME.
+
FORNOW: We are only handling array accesses with step 1. */
static tree
-vect_create_index_for_array_ref (tree stmt, block_stmt_iterator *bsi)
+vect_create_addr_base_for_vector_ref (tree stmt,
+ tree *new_stmt_list,
+ tree offset)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
struct loop *loop = STMT_VINFO_LOOP (stmt_info);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
- tree expr = DR_REF (dr);
+ tree data_ref_base = unshare_expr (STMT_VINFO_VECT_DR_BASE (stmt_info));
+ tree base_name = unshare_expr (DR_BASE_NAME (dr));
+ tree ref = DR_REF (dr);
+ tree data_ref_base_type = TREE_TYPE (data_ref_base);
+ tree scalar_type = TREE_TYPE (ref);
+ tree scalar_ptr_type = build_pointer_type (scalar_type);
tree access_fn;
- tree init, step;
- loop_vec_info loop_info = loop->aux;
- int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_info);
- tree vf;
- tree array_first_index;
- tree indx_before_incr, indx_after_incr;
- int loopnum = loop->num;
+ tree init_val, step, init_oval;
bool ok;
-#ifdef ENABLE_CHECKING
- varray_type access_fns = DR_ACCESS_FNS (dr);
-
- /* FORNOW: handling only one dimensional arrays. */
- if (VARRAY_ACTIVE_SIZE (access_fns) != 1)
- abort ();
-
- if (!vectorization_factor)
- abort ();
-#endif
+ bool is_ptr_ref, is_array_ref, is_addr_expr;
+ tree array_base;
+ tree vec_stmt;
+ tree new_temp;
+ tree array_ref;
+ tree addr_base, addr_expr;
+ tree dest, new_stmt;
+ /* Only the access function of the last index is relevant (i_n in
+ a[i_1][i_2]...[i_n]); the others correspond to loop invariants. */
access_fn = DR_ACCESS_FN (dr, 0);
- ok = vect_is_simple_iv_evolution (loopnum, access_fn, &init, &step, true)
- && vect_get_first_index (expr, &array_first_index);
-
-#ifdef ENABLE_CHECKING
+ ok = vect_is_simple_iv_evolution (loop->num, access_fn, &init_oval, &step,
+ true);
if (!ok)
- abort ();
+ init_oval = integer_zero_node;
- /* FORNOW: Handling only constant 'init'. */
- if (TREE_CODE (init) != INTEGER_CST)
- abort ();
-#endif
+ is_ptr_ref = TREE_CODE (data_ref_base_type) == POINTER_TYPE
+ && TREE_CODE (data_ref_base) == SSA_NAME;
+ is_array_ref = TREE_CODE (data_ref_base_type) == ARRAY_TYPE;
+ is_addr_expr = TREE_CODE (data_ref_base) == ADDR_EXPR
+ || TREE_CODE (data_ref_base) == PLUS_EXPR
+ || TREE_CODE (data_ref_base) == MINUS_EXPR;
+ gcc_assert (is_ptr_ref || is_array_ref || is_addr_expr);
- vf = build_int_cst (unsigned_type_node, vectorization_factor, 0);
+ /** Create: &(base[init_val])
- if (vect_debug_details (NULL))
+ if data_ref_base is an ARRAY_TYPE:
+ base = data_ref_base
+
+ if data_ref_base is the SSA_NAME of a POINTER_TYPE:
+ base = *((scalar_array *) data_ref_base)
+ **/
+
+ if (is_array_ref)
+ array_base = data_ref_base;
+ else /* is_ptr_ref or is_addr_expr */
{
- fprintf (dump_file, "int vf = %d",vectorization_factor);
- fprintf (dump_file, ", vf:");
- print_generic_expr (dump_file, vf, TDF_SLIM);
- fprintf (dump_file, ", init:");
- print_generic_expr (dump_file, init, TDF_SLIM);
- fprintf (dump_file, ", array_first_index:");
- print_generic_expr (dump_file, array_first_index, TDF_SLIM);
+ /* array_ptr = (scalar_array_type *) data_ref_base; */
+ tree scalar_array_type = build_array_type (scalar_type, 0);
+ tree scalar_array_ptr_type = build_pointer_type (scalar_array_type);
+ tree array_ptr = create_tmp_var (scalar_array_ptr_type, "array_ptr");
+ add_referenced_tmp_var (array_ptr);
+
+ dest = create_tmp_var (TREE_TYPE (data_ref_base), "dataref");
+ add_referenced_tmp_var (dest);
+ data_ref_base =
+ force_gimple_operand (data_ref_base, &new_stmt, false, dest);
+ append_to_statement_list_force (new_stmt, new_stmt_list);
+
+ vec_stmt = fold_convert (scalar_array_ptr_type, data_ref_base);
+ vec_stmt = build2 (MODIFY_EXPR, void_type_node, array_ptr, vec_stmt);
+ new_temp = make_ssa_name (array_ptr, vec_stmt);
+ TREE_OPERAND (vec_stmt, 0) = new_temp;
+ append_to_statement_list_force (vec_stmt, new_stmt_list);
+
+ /* (*array_ptr) */
+ array_base = build_fold_indirect_ref (new_temp);
}
- /* Calculate the 'init' of the new index.
- init = (init - array_first_index) / vectorization_factor */
- init = int_const_binop (TRUNC_DIV_EXPR,
- int_const_binop (MINUS_EXPR, init, array_first_index, 1),
- vf, 1);
+ dest = create_tmp_var (TREE_TYPE (init_oval), "newinit");
+ add_referenced_tmp_var (dest);
+ init_val = force_gimple_operand (init_oval, &new_stmt, false, dest);
+ append_to_statement_list_force (new_stmt, new_stmt_list);
- /* Calculate the 'step' of the new index. FORNOW: always 1. */
- step = size_one_node;
-
- if (vect_debug_details (NULL))
+ if (offset)
{
- fprintf (dump_file, "create iv for (");
- print_generic_expr (dump_file, init, TDF_SLIM);
- fprintf (dump_file, ", + ,");
- print_generic_expr (dump_file, step, TDF_SLIM);
- fprintf (dump_file, ")");
+ tree tmp = create_tmp_var (TREE_TYPE (init_val), "offset");
+ add_referenced_tmp_var (tmp);
+ vec_stmt = build2 (PLUS_EXPR, TREE_TYPE (init_val), init_val, offset);
+ vec_stmt = build2 (MODIFY_EXPR, TREE_TYPE (init_val), tmp, vec_stmt);
+ init_val = make_ssa_name (tmp, vec_stmt);
+ TREE_OPERAND (vec_stmt, 0) = init_val;
+ append_to_statement_list_force (vec_stmt, new_stmt_list);
}
- create_iv (init, step, NULL_TREE, loop, bsi, false,
- &indx_before_incr, &indx_after_incr);
+ array_ref = build4 (ARRAY_REF, scalar_type, array_base, init_val,
+ NULL_TREE, NULL_TREE);
+ addr_base = build_fold_addr_expr (array_ref);
- return indx_before_incr;
+ /* addr_expr = addr_base */
+ addr_expr = vect_get_new_vect_var (scalar_ptr_type, vect_pointer_var,
+ get_name (base_name));
+ add_referenced_tmp_var (addr_expr);
+ vec_stmt = build2 (MODIFY_EXPR, void_type_node, addr_expr, addr_base);
+ new_temp = make_ssa_name (addr_expr, vec_stmt);
+ TREE_OPERAND (vec_stmt, 0) = new_temp;
+ append_to_statement_list_force (vec_stmt, new_stmt_list);
+
+ return new_temp;
}
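+
+/* For example (a sketch): for an array 'a' and no OFFSET, the stmts
+   appended to NEW_STMT_LIST compute roughly
+
+     newinit_1 = init;
+     a_2 = &a[newinit_1];
+
+   and the ssa-name a_2 is returned (the variable names here are only
+   illustrative). */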
enum machine_mode inner_mode = TYPE_MODE (scalar_type);
int nbytes = GET_MODE_SIZE (inner_mode);
int nunits;
+ tree vectype;
if (nbytes == 0)
return NULL_TREE;
is expected. */
nunits = UNITS_PER_SIMD_WORD / nbytes;
- return build_vector_type (scalar_type, nunits);
+ vectype = build_vector_type (scalar_type, nunits);
+ if (vect_debug_details (NULL))
+ {
+ fprintf (dump_file, "get vectype with %d units of type ", nunits);
+ print_generic_expr (dump_file, scalar_type, TDF_SLIM);
+ }
+
+ if (!vectype)
+ return NULL_TREE;
+
+ if (vect_debug_details (NULL))
+ {
+ fprintf (dump_file, "vectype: ");
+ print_generic_expr (dump_file, vectype, TDF_SLIM);
+ }
+
+ if (!VECTOR_MODE_P (TYPE_MODE (vectype)))
+ {
+ /* TODO: tree-complex.c sometimes can parallelize operations
+ on generic vectors. We can vectorize the loop in that case,
+ but then we should re-run the lowering pass. */
+ if (vect_debug_details (NULL))
+ fprintf (dump_file, "mode not supported by target.");
+ return NULL_TREE;
+ }
+
+ return vectype;
}
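+
+/* For example (a sketch): with UNITS_PER_SIMD_WORD == 16 and a 2-byte
+   'short int' scalar type, nbytes is 2, nunits is 8, and the returned
+   vectype is a vector of 8 shorts (V8HI, when the target supports it). */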
/* FORNOW: can't handle misaligned accesses;
all accesses expected to be aligned. */
- if (!aligned_access_p (dr))
- abort ();
+ gcc_assert (aligned_access_p (dr));
}
-/* Function vect_create_data_ref.
+/* Function vect_create_data_ref_ptr.
Create a memory reference expression for vector access, to be used in a
- vector load/store stmt.
+ vector load/store stmt. The reference is based on a new pointer to vector
+ type (vp).
Input:
- STMT: a stmt that references memory. expected to be of the form
- MODIFY_EXPR <name, data-ref> or MODIFY_EXPR <data-ref, name>.
- BSI: block_stmt_iterator where new stmts can be added.
+ 1. STMT: a stmt that references memory. Expected to be of the form
+ MODIFY_EXPR <name, data-ref> or MODIFY_EXPR <data-ref, name>.
+ 2. BSI: block_stmt_iterator where new stmts can be added.
+ 3. OFFSET (optional): an offset to be added to the initial address accessed
+ by the data-ref in STMT.
+ 4. ONLY_INIT: indicates whether vp is to be updated in the loop, or remain
+ pointing to the initial address.
Output:
- 1. Declare a new ptr to vector_type, and have it point to the array base.
- For example, for vector of type V8HI:
- v8hi *p0;
- p0 = (v8hi *)&a;
- 2. Create a data-reference based on the new vector pointer p0, and using
- a new index variable 'idx'. Return the expression '(*p0)[idx]'.
+ 1. Declare a new ptr to vector_type, and have it point to the base of the
+ data reference (the initial address accessed by the data reference).
+ For example, for vector of type V8HI, the following code is generated:
+
+ v8hi *vp;
+ vp = (v8hi *)initial_address;
+
+ if OFFSET is not supplied:
+ initial_address = &a[init];
+ if OFFSET is supplied:
+ initial_address = &a[init + OFFSET];
+
+ Return the initial_address in INITIAL_ADDRESS.
+
+ 2. Create a data-reference in the loop based on the new vector pointer vp,
+ and using a new index variable 'idx' as follows:
+
+ vp' = vp + update
+
+ where if ONLY_INIT is true:
+ update = zero
+ and otherwise
+ update = idx * vector_type_size
+
+ Return the pointer vp'.
+
FORNOW: handle only aligned and consecutive accesses. */
static tree
-vect_create_data_ref (tree stmt, block_stmt_iterator *bsi)
+vect_create_data_ref_ptr (tree stmt, block_stmt_iterator *bsi, tree offset,
+ tree *initial_address, bool only_init)
{
- tree new_base;
- tree data_ref;
- tree idx;
- tree vec_stmt;
- tree new_temp;
+ tree base_name;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
+ struct loop *loop = STMT_VINFO_LOOP (stmt_info);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
tree vect_ptr_type;
tree vect_ptr;
- tree addr_ref;
+ tree tag;
v_may_def_optype v_may_defs = STMT_V_MAY_DEF_OPS (stmt);
v_must_def_optype v_must_defs = STMT_V_MUST_DEF_OPS (stmt);
vuse_optype vuses = STMT_VUSE_OPS (stmt);
int nvuses, nv_may_defs, nv_must_defs;
int i;
- struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
- tree array_type;
- tree base_addr = NULL_TREE;
- struct loop *loop = STMT_VINFO_LOOP (stmt_info);
- edge pe;
- tree tag;
- tree addr_expr;
- tree scalar_ptr_type;
-
- /* FORNOW: make sure the data reference is aligned. */
- vect_align_data_ref (stmt);
-
- addr_ref = DR_BASE_NAME (dr);
-
- array_type = build_array_type (vectype, 0);
- TYPE_ALIGN (array_type) = TYPE_ALIGN (TREE_TYPE (addr_ref));
- vect_ptr_type = build_pointer_type (array_type);
- scalar_ptr_type = build_pointer_type (TREE_TYPE (addr_ref));
-
+ tree new_temp;
+ tree vec_stmt;
+ tree new_stmt_list = NULL_TREE;
+ tree idx;
+ edge pe = loop_preheader_edge (loop);
+ basic_block new_bb;
+ tree vect_ptr_init;
+ tree vectype_size;
+ tree ptr_update;
+ tree data_ref_ptr;
+
+ base_name = unshare_expr (DR_BASE_NAME (dr));
if (vect_debug_details (NULL))
{
+ tree data_ref_base = base_name;
fprintf (dump_file, "create array_ref of type: ");
- print_generic_expr (dump_file, vectype, TDF_SLIM);
- }
-
- /*** create: vectype_array *p; ***/
- vect_ptr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
- get_name (addr_ref));
- add_referenced_tmp_var (vect_ptr);
-
-#ifdef ENABLE_CHECKING
- if (TREE_CODE (addr_ref) != VAR_DECL
- && TREE_CODE (addr_ref) != COMPONENT_REF
- && TREE_CODE (addr_ref) != SSA_NAME)
- abort ();
-#endif
-
- if (vect_debug_details (NULL))
- {
- if (TREE_CODE (addr_ref) == VAR_DECL)
- fprintf (dump_file, "vectorizing an array ref: ");
- else if (TREE_CODE (addr_ref) == SSA_NAME)
- fprintf (dump_file, "vectorizing a pointer ref: ");
- else if (TREE_CODE (addr_ref) == COMPONENT_REF)
- fprintf (dump_file, "vectorizing a record ref: ");
- print_generic_expr (dump_file, addr_ref, TDF_SLIM);
+ print_generic_expr (dump_file, vectype, TDF_SLIM);
+ if (TREE_CODE (data_ref_base) == VAR_DECL)
+ fprintf (dump_file, "vectorizing a one dimensional array ref: ");
+ else if (TREE_CODE (data_ref_base) == ARRAY_REF)
+ fprintf (dump_file, "vectorizing a multidimensional array ref: ");
+ else if (TREE_CODE (data_ref_base) == COMPONENT_REF)
+ fprintf (dump_file, "vectorizing a record based array ref: ");
+ else if (TREE_CODE (data_ref_base) == SSA_NAME)
+ fprintf (dump_file, "vectorizing a pointer ref: ");
+ print_generic_expr (dump_file, base_name, TDF_SLIM);
}
- /* Get base address: */
- if (TREE_CODE (addr_ref) == SSA_NAME)
- base_addr = addr_ref;
- else
- base_addr = build_fold_addr_expr (addr_ref);
+ /** (1) Create the new vector-pointer variable: **/
- /* Handle aliasing: */
+ vect_ptr_type = build_pointer_type (vectype);
+ vect_ptr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
+ get_name (base_name));
+ add_referenced_tmp_var (vect_ptr);
+
+
+ /** (2) Handle aliasing information of the new vector-pointer: **/
+
tag = STMT_VINFO_MEMTAG (stmt_info);
-#ifdef ENABLE_CHECKING
- if (!tag)
- abort ();
-#endif
+ gcc_assert (tag);
get_var_ann (vect_ptr)->type_mem_tag = tag;
/* Mark for renaming all aliased variables
- (i.e, the may-aliases of the type-mem-tag) */
+ (i.e., the may-aliases of the type-mem-tag). */
nvuses = NUM_VUSES (vuses);
nv_may_defs = NUM_V_MAY_DEFS (v_may_defs);
nv_must_defs = NUM_V_MUST_DEFS (v_must_defs);
}
for (i = 0; i < nv_must_defs; i++)
{
- tree def = V_MUST_DEF_OP (v_must_defs, i);
+ tree def = V_MUST_DEF_RESULT (v_must_defs, i);
if (TREE_CODE (def) == SSA_NAME)
bitmap_set_bit (vars_to_rename, var_ann (SSA_NAME_VAR (def))->uid);
}
- pe = loop_preheader_edge (loop);
- /*** create: p = (vectype *)&a; ***/
+ /** (3) Calculate the initial address of the vector-pointer, and set
+ the vector-pointer to point to it before the loop: **/
- /* addr_expr = &a */
- addr_expr = vect_get_new_vect_var (scalar_ptr_type, vect_pointer_var,
- get_name (addr_ref));
- add_referenced_tmp_var (addr_expr);
- vec_stmt = build2 (MODIFY_EXPR, void_type_node, addr_expr, base_addr);
- new_temp = make_ssa_name (addr_expr, vec_stmt);
- TREE_OPERAND (vec_stmt, 0) = new_temp;
- bsi_insert_on_edge (pe, vec_stmt);
+ /* Create: &(base[init_val+offset]) in the loop preheader. */
+ new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
+ offset);
+ pe = loop_preheader_edge (loop);
+ new_bb = bsi_insert_on_edge_immediate (pe, new_stmt_list);
+ gcc_assert (!new_bb);
+ *initial_address = new_temp;
- /* vect_ptr = (vectype_array *)&a; */
- vec_stmt = fold_convert (vect_ptr_type, new_temp);
+ /* Create: p = (vectype *) initial_address */
+ vec_stmt = fold_convert (vect_ptr_type, new_temp);
vec_stmt = build2 (MODIFY_EXPR, void_type_node, vect_ptr, vec_stmt);
new_temp = make_ssa_name (vect_ptr, vec_stmt);
TREE_OPERAND (vec_stmt, 0) = new_temp;
- bsi_insert_on_edge (pe, vec_stmt);
+ new_bb = bsi_insert_on_edge_immediate (pe, vec_stmt);
+ gcc_assert (!new_bb);
+ vect_ptr_init = TREE_OPERAND (vec_stmt, 0);
- /*** create data ref: '(*p)[idx]' ***/
- idx = vect_create_index_for_array_ref (stmt, bsi);
+ /** (4) Handle the updating of the vector-pointer inside the loop: **/
- new_base = build_fold_indirect_ref (new_temp);
- data_ref = build4 (ARRAY_REF, vectype, new_base, idx, NULL_TREE, NULL_TREE);
+ if (only_init) /* No update in loop is required. */
+ return vect_ptr_init;
- if (vect_debug_details (NULL))
- {
- fprintf (dump_file, "created new data-ref: ");
- print_generic_expr (dump_file, data_ref, TDF_SLIM);
- }
+ idx = vect_create_index_for_vector_ref (loop, bsi);
+
+ /* Create: update = idx * vectype_size */
+ ptr_update = create_tmp_var (integer_type_node, "update");
+ add_referenced_tmp_var (ptr_update);
+ vectype_size = build_int_cst (integer_type_node,
+ GET_MODE_SIZE (TYPE_MODE (vectype)));
+ vec_stmt = build2 (MULT_EXPR, integer_type_node, idx, vectype_size);
+ vec_stmt = build2 (MODIFY_EXPR, void_type_node, ptr_update, vec_stmt);
+ new_temp = make_ssa_name (ptr_update, vec_stmt);
+ TREE_OPERAND (vec_stmt, 0) = new_temp;
+ bsi_insert_before (bsi, vec_stmt, BSI_SAME_STMT);
+
+ /* Create: data_ref_ptr = vect_ptr_init + update */
+ vec_stmt = build2 (PLUS_EXPR, vect_ptr_type, vect_ptr_init, new_temp);
+ vec_stmt = build2 (MODIFY_EXPR, void_type_node, vect_ptr, vec_stmt);
+ new_temp = make_ssa_name (vect_ptr, vec_stmt);
+ TREE_OPERAND (vec_stmt, 0) = new_temp;
+ bsi_insert_before (bsi, vec_stmt, BSI_SAME_STMT);
+ data_ref_ptr = TREE_OPERAND (vec_stmt, 0);
- return data_ref;
+ return data_ref_ptr;
}
tree vec_dest;
const char *new_name;
-#ifdef ENABLE_CHECKING
- if (TREE_CODE (scalar_dest) != SSA_NAME)
- abort ();
-#endif
+ gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
new_name = get_name (scalar_dest);
if (!new_name)
tree vec_oprnd;
edge pe;
tree new_temp;
+ basic_block new_bb;
new_var = vect_get_new_vect_var (vectype, vect_simple_var, "cst_");
add_referenced_tmp_var (new_var);
TREE_OPERAND (init_stmt, 0) = new_temp;
pe = loop_preheader_edge (loop);
- bsi_insert_on_edge (pe, init_stmt);
+ new_bb = bsi_insert_on_edge_immediate (pe, init_stmt);
+ gcc_assert (!new_bb);
if (vect_debug_details (NULL))
{
/* Create 'vect_cst_ = {cst,cst,...,cst}' */
tree vec_cst;
- stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
- tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
- int nunits = GET_MODE_NUNITS (TYPE_MODE (vectype));
- tree t = NULL_TREE;
- int i;
/* Build a tree with vector elements. */
if (vect_debug_details (NULL))
return vect_init_vector (stmt, vec_cst);
}
-#ifdef ENABLE_CHECKING
- if (TREE_CODE (op) != SSA_NAME)
- abort ();
-#endif
+ gcc_assert (TREE_CODE (op) == SSA_NAME);
/** ===> Case 2: operand is an SSA_NAME - find the stmt that defines it. **/
/* Get the def from the vectorized stmt. */
vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
-#ifdef ENABLE_CHECKING
- if (!vec_stmt)
- abort ();
-#endif
+ gcc_assert (vec_stmt);
vec_oprnd = TREE_OPERAND (vec_stmt, 0);
return vec_oprnd;
}
{
if (vect_debug_details (NULL))
fprintf (dump_file, "reduction/induction - unsupported.");
- abort (); /* FORNOW no support for reduction/induction. */
+ internal_error ("no support for reduction/induction"); /* FORNOW */
}
break;
case NOP_EXPR:
def = TREE_OPERAND (def_stmt, 0);
-#ifdef ENABLE_CHECKING
- if (!IS_EMPTY_STMT (def_stmt))
- abort ();
-#endif
+ gcc_assert (IS_EMPTY_STMT (def_stmt));
def = op;
break;
default:
fprintf (dump_file, "unsupported defining stmt: ");
print_generic_expr (dump_file, def_stmt, TDF_SLIM);
}
- abort ();
+ internal_error ("unsupported defining stmt");
}
/* Build a tree with vector elements. Create 'vec_inv = {inv,inv,..,inv}' */
/* Make sure bsi points to the stmt that is being vectorized. */
- /* Assumption: any stmts created for the vectorization of smtmt S are
- inserted before S. BSI may point to S or some new stmt before it. */
+ /* Assumption: any stmts created for the vectorization of stmt S were
+ inserted before S. BSI is expected to point to S or some new stmt
+ before S. */
while (stmt != bsi_stmt (*bsi) && !bsi_end_p (*bsi))
bsi_next (bsi);
-#ifdef ENABLE_CHECKING
- if (stmt != bsi_stmt (*bsi))
- abort ();
-#endif
+ gcc_assert (stmt == bsi_stmt (*bsi));
}
return true;
}
- /** Trasform. **/
+ /** Transform. **/
if (vect_debug_details (NULL))
fprintf (dump_file, "transform binary/unary operation.");
tree op;
tree vec_oprnd1;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
struct loop *loop = STMT_VINFO_LOOP (stmt_info);
enum machine_mode vec_mode;
+ tree dummy;
+ enum dr_alignment_support alignment_support_scheme;
/* Is vectorizable store? */
if (!STMT_VINFO_DATA_REF (stmt_info))
return false;
+
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
if (vect_debug_details (NULL))
fprintf (dump_file, "transform store");
+ alignment_support_scheme = vect_supportable_dr_alignment (dr);
+ gcc_assert (alignment_support_scheme);
+ gcc_assert (alignment_support_scheme == dr_aligned); /* FORNOW */
+
/* Handle use - get the vectorized def from the defining stmt. */
vec_oprnd1 = vect_get_vec_def_for_operand (op, stmt);
/* Handle def. */
- data_ref = vect_create_data_ref (stmt, bsi);
+ /* FORNOW: make sure the data reference is aligned. */
+ vect_align_data_ref (stmt);
+ data_ref = vect_create_data_ref_ptr (stmt, bsi, NULL_TREE, &dummy, false);
+ data_ref = build_fold_indirect_ref (data_ref);
/* Arguments are ready. create the new vector stmt. */
*vec_stmt = build2 (MODIFY_EXPR, vectype, data_ref, vec_oprnd1);
tree data_ref = NULL;
tree op;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
tree new_temp;
- enum machine_mode vec_mode;
+ int mode;
+ tree init_addr;
+ tree new_stmt;
+ tree dummy;
+ basic_block new_bb;
+ struct loop *loop = STMT_VINFO_LOOP (stmt_info);
+ edge pe = loop_preheader_edge (loop);
+ enum dr_alignment_support alignment_support_scheme;
/* Is vectorizable load? */
if (TREE_CODE (op) != ARRAY_REF && TREE_CODE (op) != INDIRECT_REF)
return false;
- if (!STMT_VINFO_DATA_REF (stmt_info))
- return false;
+ if (!STMT_VINFO_DATA_REF (stmt_info))
+ return false;
+
+ mode = (int) TYPE_MODE (vectype);
+
+ /* FORNOW. In some cases we can vectorize even if the data-type is not
+ supported (e.g., data copies). */
+ if (mov_optab->handlers[mode].insn_code == CODE_FOR_nothing)
+ {
+ if (vect_debug_details (loop))
+ fprintf (dump_file, "Aligned load, but unsupported type.");
+ return false;
+ }
+
+ if (!vec_stmt) /* transformation not required. */
+ {
+ STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
+ return true;
+ }
+
+ /** Transform. **/
+
+ if (vect_debug_details (NULL))
+ fprintf (dump_file, "transform load.");
+
+ alignment_support_scheme = vect_supportable_dr_alignment (dr);
+ gcc_assert (alignment_support_scheme);
+
+ if (alignment_support_scheme == dr_aligned
+ || alignment_support_scheme == dr_unaligned_supported)
+ {
+ /* Create:
+ p = initial_addr;
+ indx = 0;
+ loop {
+ vec_dest = *(p);
+ indx = indx + 1;
+ }
+ */
+
+ vec_dest = vect_create_destination_var (scalar_dest, vectype);
+ data_ref = vect_create_data_ref_ptr (stmt, bsi, NULL_TREE, &dummy, false);
+ if (aligned_access_p (dr))
+ data_ref = build_fold_indirect_ref (data_ref);
+ else
+ {
+ int mis = DR_MISALIGNMENT (dr);
+ tree tmis = (mis == -1 ?
+ integer_zero_node :
+ build_int_cst (integer_type_node, mis));
+ tmis = int_const_binop (MULT_EXPR, tmis,
+ build_int_cst (integer_type_node, BITS_PER_UNIT), 1);
+ data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype, data_ref, tmis);
+ }
+ new_stmt = build2 (MODIFY_EXPR, vectype, vec_dest, data_ref);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ TREE_OPERAND (new_stmt, 0) = new_temp;
+ vect_finish_stmt_generation (stmt, new_stmt, bsi);
+ }
+ else if (alignment_support_scheme == dr_unaligned_software_pipeline)
+ {
+ /* Create:
+ p1 = initial_addr;
+ msq_init = *(floor(p1))
+ p2 = initial_addr + VS - 1;
+ magic = have_builtin ? builtin_result : initial_address;
+ indx = 0;
+ loop {
+ p2' = p2 + indx * vectype_size
+ lsq = *(floor(p2'))
+ vec_dest = realign_load (msq, lsq, magic)
+ indx = indx + 1;
+ msq = lsq;
+ }
+ */
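+
+ /* Illustrative example (assuming 16-byte vectors, VS = 16): for a
+ load from address 0x1008, msq is loaded from floor(0x1008) = 0x1000
+ and lsq from floor(0x1008 + VS - 1) = 0x1010; realign_load combines
+ the two into the 16 bytes that start at 0x1008. */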
+
+ tree offset;
+ tree magic;
+ tree phi_stmt;
+ tree msq_init;
+ tree msq, lsq;
+ tree dataref_ptr;
+ tree params;
+
+ /* <1> Create msq_init = *(floor(p1)) in the loop preheader */
+ vec_dest = vect_create_destination_var (scalar_dest, vectype);
+ data_ref = vect_create_data_ref_ptr (stmt, bsi, NULL_TREE,
+ &init_addr, true);
+ data_ref = build1 (ALIGN_INDIRECT_REF, vectype, data_ref);
+ new_stmt = build2 (MODIFY_EXPR, vectype, vec_dest, data_ref);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ TREE_OPERAND (new_stmt, 0) = new_temp;
+ new_bb = bsi_insert_on_edge_immediate (pe, new_stmt);
+ gcc_assert (!new_bb);
+ msq_init = TREE_OPERAND (new_stmt, 0);
+
+
+ /* <2> Create lsq = *(floor(p2')) in the loop */
+ offset = build_int_cst (integer_type_node,
+ GET_MODE_NUNITS (TYPE_MODE (vectype)));
+ offset = int_const_binop (MINUS_EXPR, offset, integer_one_node, 1);
+ vec_dest = vect_create_destination_var (scalar_dest, vectype);
+ dataref_ptr = vect_create_data_ref_ptr (stmt, bsi, offset, &dummy, false);
+ data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
+ new_stmt = build2 (MODIFY_EXPR, vectype, vec_dest, data_ref);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ TREE_OPERAND (new_stmt, 0) = new_temp;
+ vect_finish_stmt_generation (stmt, new_stmt, bsi);
+ lsq = TREE_OPERAND (new_stmt, 0);
+
+
+ /* <3> */
+ if (targetm.vectorize.builtin_mask_for_load)
+ {
+ /* Create permutation mask, if required, in loop preheader. */
+ tree builtin_decl;
+ params = build_tree_list (NULL_TREE, init_addr);
+ vec_dest = vect_create_destination_var (scalar_dest, vectype);
+ builtin_decl = targetm.vectorize.builtin_mask_for_load ();
+ new_stmt = build_function_call_expr (builtin_decl, params);
+ new_stmt = build2 (MODIFY_EXPR, vectype, vec_dest, new_stmt);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ TREE_OPERAND (new_stmt, 0) = new_temp;
+ new_bb = bsi_insert_on_edge_immediate (pe, new_stmt);
+ gcc_assert (!new_bb);
+ magic = TREE_OPERAND (new_stmt, 0);
+ }
+ else
+ {
+ /* Use current address instead of init_addr for reduced reg pressure. */
+ magic = dataref_ptr;
+ }
+
+
+ /* <4> Create msq = phi <msq_init, lsq> in loop */
+ vec_dest = vect_create_destination_var (scalar_dest, vectype);
+ msq = make_ssa_name (vec_dest, NULL_TREE);
+ phi_stmt = create_phi_node (msq, loop->header); /* CHECKME */
+ SSA_NAME_DEF_STMT (msq) = phi_stmt;
+ add_phi_arg (&phi_stmt, msq_init, loop_preheader_edge (loop));
+ add_phi_arg (&phi_stmt, lsq, loop_latch_edge (loop));
+
+
+ /* <5> Create <vec_dest = realign_load (msq, lsq, magic)> in loop */
+ vec_dest = vect_create_destination_var (scalar_dest, vectype);
+ new_stmt = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq, magic);
+ new_stmt = build2 (MODIFY_EXPR, vectype, vec_dest, new_stmt);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ TREE_OPERAND (new_stmt, 0) = new_temp;
+ vect_finish_stmt_generation (stmt, new_stmt, bsi);
+ }
+ else
+ gcc_unreachable ();
+
+ *vec_stmt = new_stmt;
+ return true;
+}
+
+
+/* Function vect_supportable_dr_alignment
+
+ Return whether the data reference DR is supported with respect to its
+ alignment. */
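+
+/* Summarizing the checks below: dr_aligned is returned for aligned
+ accesses; for reads, the realign-load scheme is preferred over the
+ target's native misaligned support; misaligned writes are not
+ supported FORNOW. */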
+
+static enum dr_alignment_support
+vect_supportable_dr_alignment (struct data_reference *dr)
+{
+ tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
+ enum machine_mode mode = (int) TYPE_MODE (vectype);
+
+ if (aligned_access_p (dr))
+ return dr_aligned;
+
+ /* Possibly unaligned access. */
+
+ if (DR_IS_READ (dr))
+ {
+ if (vec_realign_load_optab->handlers[mode].insn_code != CODE_FOR_nothing
+ && (!targetm.vectorize.builtin_mask_for_load
+ || targetm.vectorize.builtin_mask_for_load ()))
+ return dr_unaligned_software_pipeline;
+
+ if (targetm.vectorize.misaligned_mem_ok (mode))
+ /* Can't software pipeline the loads. */
+ return dr_unaligned_supported;
+ }
+
+ /* Unsupported. */
+ return dr_unaligned_unsupported;
+}
+
+
+/* Function vect_transform_stmt.
+
+ Create a vectorized stmt to replace STMT, and insert it at BSI. */
+
+static bool
+vect_transform_stmt (tree stmt, block_stmt_iterator *bsi)
+{
+ bool is_store = false;
+ tree vec_stmt = NULL_TREE;
+ stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ bool done;
+
+ switch (STMT_VINFO_TYPE (stmt_info))
+ {
+ case op_vec_info_type:
+ done = vectorizable_operation (stmt, bsi, &vec_stmt);
+ gcc_assert (done);
+ break;
+
+ case assignment_vec_info_type:
+ done = vectorizable_assignment (stmt, bsi, &vec_stmt);
+ gcc_assert (done);
+ break;
+
+ case load_vec_info_type:
+ done = vectorizable_load (stmt, bsi, &vec_stmt);
+ gcc_assert (done);
+ break;
+
+ case store_vec_info_type:
+ done = vectorizable_store (stmt, bsi, &vec_stmt);
+ gcc_assert (done);
+ is_store = true;
+ break;
+ default:
+ if (vect_debug_details (NULL))
+ fprintf (dump_file, "stmt not supported.");
+ gcc_unreachable ();
+ }
+
+ STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
+
+ return is_store;
+}
+
+
+/* This function builds ni_name = the number of iterations the loop
+ executes, and emits the computation on the loop preheader edge. */
+
+static tree
+vect_build_loop_niters (loop_vec_info loop_vinfo)
+{
+ tree ni_name, stmt, var;
+ edge pe;
+ basic_block new_bb = NULL;
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ tree ni = unshare_expr (LOOP_VINFO_NITERS (loop_vinfo));
+
+ var = create_tmp_var (TREE_TYPE (ni), "niters");
+ add_referenced_tmp_var (var);
+ if (TREE_CODE (ni) == INTEGER_CST)
+ {
+ /* This case is generated when treating a known loop bound
+ indivisible by VF. Here we cannot use force_gimple_operand. */
+ stmt = build (MODIFY_EXPR, void_type_node, var, ni);
+ ni_name = make_ssa_name (var, stmt);
+ TREE_OPERAND (stmt, 0) = ni_name;
+ }
+ else
+ ni_name = force_gimple_operand (ni, &stmt, false, var);
+
+ pe = loop_preheader_edge (loop);
+ if (stmt)
+ new_bb = bsi_insert_on_edge_immediate (pe, stmt);
+ if (new_bb)
+ add_bb_to_loop (new_bb, EDGE_PRED (new_bb, 0)->src->loop_father);
+
+ return ni_name;
+}
+
+
+/* This function generates the following statements:
+
+ ni_name = number of iterations loop executes
+ ratio = ni_name / vf
+ ratio_mult_vf_name = ratio * vf
+
+ and places them at the loop preheader edge. */
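+
+/* For example, assuming VF = 4 and a symbolic bound n, the statements
+ placed on the preheader compute, schematically:
+ niters = n; ratio = niters >> 2; ratio_mult_vf = ratio << 2;
+ (the names are illustrative; the actual SSA names are generated). */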
+
+static void
+vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo, tree *ni_name_p,
+ tree *ratio_mult_vf_name_p, tree *ratio_p)
+{
+ edge pe;
+ basic_block new_bb;
+ tree stmt, ni_name;
+ tree ratio;
+ tree ratio_mult_vf_name, ratio_mult_vf;
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ tree ni = LOOP_VINFO_NITERS (loop_vinfo);
+
+ int vf, i;
+
+ /* Generate a temporary variable that contains
+ the number of iterations the loop executes. */
+
+ ni_name = vect_build_loop_niters (loop_vinfo);
+
+ /* ratio = ni / vf.
+ Since vf is a power of 2, ratio = ni >> log2 (vf). */
+ vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ ratio = vect_build_symbol_bound (ni_name, vf, loop);
+
+ /* ratio_mult_vf = ratio * vf;
+ since vf is a power of 2, ratio_mult_vf = ratio << log2 (vf). */
+
+ i = exact_log2 (vf);
+ ratio_mult_vf = create_tmp_var (TREE_TYPE (ni), "ratio_mult_vf");
+ add_referenced_tmp_var (ratio_mult_vf);
+
+ ratio_mult_vf_name = make_ssa_name (ratio_mult_vf, NULL_TREE);
+
+ stmt = build2 (MODIFY_EXPR, void_type_node, ratio_mult_vf_name,
+ build2 (LSHIFT_EXPR, TREE_TYPE (ratio),
+ ratio, build_int_cst (unsigned_type_node,
+ i)));
+
+ SSA_NAME_DEF_STMT (ratio_mult_vf_name) = stmt;
+
+ pe = loop_preheader_edge (loop);
+ new_bb = bsi_insert_on_edge_immediate (pe, stmt);
+ if (new_bb)
+ add_bb_to_loop (new_bb, EDGE_PRED (new_bb, 0)->src->loop_father);
+
+ *ni_name_p = ni_name;
+ *ratio_mult_vf_name_p = ratio_mult_vf_name;
+ *ratio_p = ratio;
+
+ return;
+}
+
+
+/* This function generates stmt
+
+ tmp = n / vf;
+
+ and inserts it on the preheader edge of LOOP. */
+
+static tree
+vect_build_symbol_bound (tree n, int vf, struct loop * loop)
+{
+ tree var, stmt, var_name;
+ edge pe;
+ basic_block new_bb;
+ int i;
+
+ /* Create a temporary variable. */
+ var = create_tmp_var (TREE_TYPE (n), "bnd");
+ add_referenced_tmp_var (var);
+
+ var_name = make_ssa_name (var, NULL_TREE);
+
+ /* vf is a power of 2, so n / vf = n >> log2 (vf). */
+
+ i = exact_log2 (vf);
+ stmt = build2 (MODIFY_EXPR, void_type_node, var_name,
+ build2 (RSHIFT_EXPR, TREE_TYPE (n),
+ n, build_int_cst (unsigned_type_node,i)));
+
+ SSA_NAME_DEF_STMT (var_name) = stmt;
+
+ pe = loop_preheader_edge (loop);
+ new_bb = bsi_insert_on_edge_immediate (pe, stmt);
+ if (new_bb)
+ add_bb_to_loop (new_bb, EDGE_PRED (new_bb, 0)->src->loop_father);
+ else
+ if (vect_debug_details (NULL))
+ fprintf (dump_file, "New bb on preheader edge was not generated.");
+
+ return var_name;
+}
+
+
+/* Function vect_transform_loop_bound.
+
+ Create a new exit condition for the loop. */
+
+static void
+vect_transform_loop_bound (loop_vec_info loop_vinfo, tree niters)
+{
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ tree orig_cond_expr;
+ HOST_WIDE_INT old_N = 0;
+ int vf;
+ tree new_loop_bound;
+ bool symbol_niters;
+ tree lb_type;
+
+ symbol_niters = !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo);
+
+ if (!symbol_niters)
+ old_N = LOOP_VINFO_INT_NITERS (loop_vinfo);
+
+ vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+
+ orig_cond_expr = LOOP_VINFO_EXIT_COND (loop_vinfo);
+ gcc_assert (orig_cond_expr);
+
+ /* new loop exit test: */
+ lb_type = TREE_TYPE (TREE_OPERAND (COND_EXPR_COND (orig_cond_expr), 1));
+ if (!symbol_niters)
+ new_loop_bound =
+ fold_convert (lb_type, build_int_cst (unsigned_type_node, old_N/vf));
+ else
+ new_loop_bound = niters;
+
+ slpeel_make_loop_iterate_ntimes (loop, new_loop_bound);
+}
+
+
+/* Function vect_update_ivs_after_vectorizer.
+
+ "Advance" the induction variables of LOOP to the value they should take
+ after the execution of LOOP. This is currently necessary because the
+ vectorizer does not handle induction variables that are used after the
+ loop. Such a situation occurs when the last iterations of LOOP are
+ peeled, because:
+ 1. We introduced new uses after LOOP for IVs that were not originally used
+ after LOOP: the IVs of LOOP are now used by an epilog loop.
+ 2. LOOP is going to be vectorized; this means that it will iterate N/VF
+ times, whereas the loop IVs should be bumped N times.
+
+ Input:
+ - LOOP - a loop that is going to be vectorized. The last few iterations
+ of LOOP were peeled.
+ - NITERS - the number of iterations that LOOP executes (before it is
+ vectorized), i.e., the number of times the ivs should be bumped.
+ - UPDATE_E - a successor edge of LOOP->exit that is on the (only) path
+ coming out of LOOP on which there are uses of the LOOP ivs
+ (this is the path from LOOP->exit to epilog_loop->preheader).
+
+ The new definitions of the ivs are placed in LOOP->exit.
+ The phi args associated with the edge UPDATE_E in the bb
+ UPDATE_E->dest are updated accordingly.
+
+ Assumption 1: Like the rest of the vectorizer, this function assumes
+ a single loop exit that has a single predecessor.
+
+ Assumption 2: The phi nodes in the LOOP header and in update_bb are
+ organized in the same order.
+
+ Assumption 3: The access function of the ivs is simple enough (see
+ vect_can_advance_ivs_p). This assumption will be relaxed in the future.
+
+ Assumption 4: Exactly one of the successors of LOOP exit-bb is on a path
+ coming out of LOOP on which the ivs of LOOP are used (this is the path
+ that leads to the epilog loop; other paths skip the epilog loop). This
+ path starts with the edge UPDATE_E, and its destination (denoted update_bb)
+ needs to have its phis updated.
+ */
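+
+/* For example (illustrative): for an iv with access function {0, +, 4}
+ and NITERS = n, the value the uses after LOOP should see is 0 + n * 4;
+ the phi argument on UPDATE_E is replaced by an SSA name holding it. */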
+
+static void
+vect_update_ivs_after_vectorizer (struct loop *loop, tree niters, edge update_e)
+{
+ basic_block exit_bb = loop->exit_edges[0]->dest;
+ tree phi, phi1;
+ basic_block update_bb = update_e->dest;
+
+ /* vect_can_advance_ivs_p (loop) was already verified during analysis. */
+
+ /* Make sure there exists a single-predecessor exit bb: */
+ gcc_assert (EDGE_COUNT (exit_bb->preds) == 1);
+
+ for (phi = phi_nodes (loop->header), phi1 = phi_nodes (update_bb);
+ phi && phi1;
+ phi = PHI_CHAIN (phi), phi1 = PHI_CHAIN (phi1))
+ {
+ tree access_fn = NULL;
+ tree evolution_part;
+ tree init_expr;
+ tree step_expr;
+ tree var, stmt, ni, ni_name;
+ block_stmt_iterator last_bsi;
+
+ /* Skip virtual phi's. */
+ if (!is_gimple_reg (SSA_NAME_VAR (PHI_RESULT (phi))))
+ {
+ if (vect_debug_details (NULL))
+ fprintf (dump_file, "virtual phi. skip.");
+ continue;
+ }
+
+ access_fn = analyze_scalar_evolution (loop, PHI_RESULT (phi));
+ gcc_assert (access_fn);
+ evolution_part =
+ unshare_expr (evolution_part_in_loop_num (access_fn, loop->num));
+ gcc_assert (evolution_part != NULL_TREE);
+
+ /* FORNOW: We do not support IVs whose evolution function is a polynomial
+ of degree >= 2 or exponential. */
+ gcc_assert (!tree_is_chrec (evolution_part));
+
+ step_expr = evolution_part;
+ init_expr = unshare_expr (initial_condition (access_fn));
+
+ ni = build2 (PLUS_EXPR, TREE_TYPE (init_expr),
+ build2 (MULT_EXPR, TREE_TYPE (niters),
+ niters, step_expr), init_expr);
+
+ var = create_tmp_var (TREE_TYPE (init_expr), "tmp");
+ add_referenced_tmp_var (var);
+
+ ni_name = force_gimple_operand (ni, &stmt, false, var);
+
+ /* Insert stmt into exit_bb. */
+ last_bsi = bsi_last (exit_bb);
+ if (stmt)
+ bsi_insert_before (&last_bsi, stmt, BSI_SAME_STMT);
+
+ /* Fix phi expressions in the successor bb. */
+ gcc_assert (PHI_ARG_DEF_FROM_EDGE (phi1, update_e) ==
+ PHI_ARG_DEF_FROM_EDGE (phi, EDGE_SUCC (loop->latch, 0)));
+ SET_PHI_ARG_DEF (phi1, phi_arg_from_edge (phi1, update_e), ni_name);
+ }
+}
+
+
+/* Function vect_do_peeling_for_loop_bound
+
+ Peel the last iterations of the loop represented by LOOP_VINFO.
+ The peeled iterations form a new epilog loop. Given that the loop now
+ iterates NITERS times, the new epilog loop iterates
+ NITERS % VECTORIZATION_FACTOR times.
+
+ The original loop will later be made to iterate
+ NITERS / VECTORIZATION_FACTOR times (this value is placed into RATIO). */
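+
+/* For instance, with NITERS = 103 and VF = 4, the vectorized loop would
+ execute 25 iterations (covering 100 elements) and the epilog loop the
+ remaining 3 scalar iterations. */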
+
+static void
+vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
+ struct loops *loops)
+{
+ tree ni_name, ratio_mult_vf_name;
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ struct loop *new_loop;
+ edge update_e;
+#ifdef ENABLE_CHECKING
+ int loop_num;
+#endif
+
+ if (vect_debug_details (NULL))
+ fprintf (dump_file, "\n<<vect_transtorm_for_unknown_loop_bound>>\n");
- vec_mode = TYPE_MODE (vectype);
- /* FORNOW. In some cases can vectorize even if data-type not supported
- (e.g. - data copies). */
- if (mov_optab->handlers[(int)vec_mode].insn_code == CODE_FOR_nothing)
- return false;
+ /* Generate the following variables on the preheader of the original loop:
+
+ ni_name = number of iterations the original loop executes
+ ratio = ni_name / vf
+ ratio_mult_vf_name = ratio * vf */
+ vect_generate_tmps_on_preheader (loop_vinfo, &ni_name,
+ &ratio_mult_vf_name, ratio);
- if (!vec_stmt) /* transformation not required. */
- {
- STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
- return true;
- }
+ /* Update loop info. */
+ loop->pre_header = loop_preheader_edge (loop)->src;
+ loop->pre_header_edges[0] = loop_preheader_edge (loop);
- /** Trasform. **/
+#ifdef ENABLE_CHECKING
+ loop_num = loop->num;
+#endif
+ new_loop = slpeel_tree_peel_loop_to_edge (loop, loops, loop->exit_edges[0],
+ ratio_mult_vf_name, ni_name, false);
+#ifdef ENABLE_CHECKING
+ gcc_assert (new_loop);
+ gcc_assert (loop_num == loop->num);
+ slpeel_verify_cfg_after_peeling (loop, new_loop);
+#endif
- if (vect_debug_details (NULL))
- fprintf (dump_file, "transform load.");
+ /* A guard that controls whether the new_loop is to be executed or skipped
+ is placed in LOOP->exit. LOOP->exit therefore has two successors - one
+ is the preheader of NEW_LOOP, where the IVs from LOOP are used. The other
+ is a bb after NEW_LOOP, where these IVs are not used. Find the edge that
+ is on the path where the LOOP IVs are used and need to be updated. */
- /* Handle def. */
- vec_dest = vect_create_destination_var (scalar_dest, vectype);
+ if (EDGE_PRED (new_loop->pre_header, 0)->src == loop->exit_edges[0]->dest)
+ update_e = EDGE_PRED (new_loop->pre_header, 0);
+ else
+ update_e = EDGE_PRED (new_loop->pre_header, 1);
- /* Handle use. */
- op = TREE_OPERAND (stmt, 1);
- data_ref = vect_create_data_ref (stmt, bsi);
+ /* Update IVs of original loop as if they were advanced
+ by ratio_mult_vf_name steps. */
+ vect_update_ivs_after_vectorizer (loop, ratio_mult_vf_name, update_e);
- /* Arguments are ready. create the new vector stmt. */
- *vec_stmt = build2 (MODIFY_EXPR, vectype, vec_dest, data_ref);
- new_temp = make_ssa_name (vec_dest, *vec_stmt);
- TREE_OPERAND (*vec_stmt, 0) = new_temp;
- vect_finish_stmt_generation (stmt, *vec_stmt, bsi);
+ /* After peeling we have to reset scalar evolution analyzer. */
+ scev_reset ();
- return true;
+ return;
}
-/* Function vect_transform_stmt.
+/* Function vect_gen_niters_for_prolog_loop
- Create a vectorized stmt to replace STMT, and insert it at BSI. */
+ Generate the number of iterations of the prolog loop: the minimum between
+ NITERS (the original iteration count of the loop) and the number of
+ iterations needed to bring DR - the first data reference recorded in
+ LOOP_VINFO_UNALIGNED_DR (LOOP_VINFO) - to an aligned location. As a
+ result, after the execution of this loop, the data reference DR will
+ refer to an aligned location. */
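+
+/* Worked example (illustrative, assuming V4SI: 16-byte vectors, VF = 4,
+ 4-byte elements): if start_addr % 16 == 8, then byte_miss_align = 8,
+ elem_miss_align = 8 / 4 = 2, and (4 - 2) & (4 - 1) = 2 iterations are
+ peeled, after which the access is 16-byte aligned. */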
-static bool
-vect_transform_stmt (tree stmt, block_stmt_iterator *bsi)
+static tree
+vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree niters)
{
- bool is_store = false;
- tree vec_stmt = NULL_TREE;
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
-
- switch (STMT_VINFO_TYPE (stmt_info))
- {
- case op_vec_info_type:
- if (!vectorizable_operation (stmt, bsi, &vec_stmt))
- abort ();
- break;
+ struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
+ int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ tree var, stmt;
+ tree iters, iters_name;
+ edge pe;
+ basic_block new_bb;
+ tree dr_stmt = DR_STMT (dr);
+ stmt_vec_info stmt_info = vinfo_for_stmt (dr_stmt);
+ tree start_addr, byte_miss_align, elem_miss_align;
+ int vec_type_align =
+ GET_MODE_ALIGNMENT (TYPE_MODE (STMT_VINFO_VECTYPE (stmt_info)))
+ / BITS_PER_UNIT;
+ tree tmp1, tmp2;
+ tree new_stmt_list = NULL_TREE;
+
+ start_addr = vect_create_addr_base_for_vector_ref (dr_stmt,
+ &new_stmt_list, NULL_TREE);
+
+ pe = loop_preheader_edge (loop);
+ new_bb = bsi_insert_on_edge_immediate (pe, new_stmt_list);
+ if (new_bb)
+ add_bb_to_loop (new_bb, EDGE_PRED (new_bb, 0)->src->loop_father);
+
+ byte_miss_align =
+ build (BIT_AND_EXPR, integer_type_node, start_addr,
+ build (MINUS_EXPR, integer_type_node,
+ build_int_cst (unsigned_type_node,
+ vec_type_align), integer_one_node));
+ tmp1 = build_int_cst (unsigned_type_node, vec_type_align/vf);
+ elem_miss_align = build (FLOOR_DIV_EXPR, integer_type_node,
+ byte_miss_align, tmp1);
+
+ tmp2 =
+ build (BIT_AND_EXPR, integer_type_node,
+ build (MINUS_EXPR, integer_type_node,
+ build_int_cst (unsigned_type_node, vf), elem_miss_align),
+ build (MINUS_EXPR, integer_type_node,
+ build_int_cst (unsigned_type_node, vf), integer_one_node));
+
+ iters = build2 (MIN_EXPR, TREE_TYPE (tmp2), tmp2, niters);
+ var = create_tmp_var (TREE_TYPE (iters), "iters");
+ add_referenced_tmp_var (var);
+ iters_name = force_gimple_operand (iters, &stmt, false, var);
+
+ /* Insert stmt on loop preheader edge. */
+ pe = loop_preheader_edge (loop);
+ if (stmt)
+ new_bb = bsi_insert_on_edge_immediate (pe, stmt);
+ if (new_bb)
+ add_bb_to_loop (new_bb, EDGE_PRED (new_bb, 0)->src->loop_father);
- case assignment_vec_info_type:
- if (!vectorizable_assignment (stmt, bsi, &vec_stmt))
- abort ();
- break;
+ return iters_name;
+}
- case load_vec_info_type:
- if (!vectorizable_load (stmt, bsi, &vec_stmt))
- abort ();
- break;
- case store_vec_info_type:
- if (!vectorizable_store (stmt, bsi, &vec_stmt))
- abort ();
- is_store = true;
- break;
- default:
- if (vect_debug_details (NULL))
- fprintf (dump_file, "stmt not supported.");
- abort ();
- }
+/* Function vect_update_inits_of_dr
- STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
+ NITERS iterations were peeled from LOOP. DR represents a data reference
+ in LOOP. This function updates the information recorded in DR to
+ account for the fact that the first NITERS iterations had already been
+ executed. Specifically, it updates the initial_condition of the
+ access_function of DR. */
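+
+/* E.g., if the access function of DR is {init, +, step} and NITERS
+ iterations were peeled, the initial condition becomes
+ init + NITERS * step. */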
- return is_store;
+static void
+vect_update_inits_of_dr (struct data_reference *dr, struct loop *loop,
+ tree niters)
+{
+ tree access_fn = DR_ACCESS_FN (dr, 0);
+ tree init, init_new, step;
+
+ step = evolution_part_in_loop_num (access_fn, loop->num);
+ init = initial_condition (access_fn);
+
+ init_new = build (PLUS_EXPR, TREE_TYPE (init),
+ build (MULT_EXPR, TREE_TYPE (niters),
+ niters, step), init);
+ DR_ACCESS_FN (dr, 0) = chrec_replace_initial_condition (access_fn, init_new);
+
+ return;
}
-/* Function vect_transform_loop_bound.
+/* Function vect_update_inits_of_drs
- Create a new exit condition for the loop. */
+ NITERS iterations were peeled from the loop represented by LOOP_VINFO.
+ This function updates the information recorded for the data references in
+ the loop to account for the fact that the first NITERS iterations had
+ already been executed. Specifically, it updates the initial_condition of the
+ access_function of all the data_references in the loop. */
static void
-vect_transform_loop_bound (loop_vec_info loop_vinfo)
+vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters)
{
+ unsigned int i;
+ varray_type loop_write_datarefs = LOOP_VINFO_DATAREF_WRITES (loop_vinfo);
+ varray_type loop_read_datarefs = LOOP_VINFO_DATAREF_READS (loop_vinfo);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- edge exit_edge = loop->single_exit;
- block_stmt_iterator loop_exit_bsi = bsi_last (exit_edge->src);
- tree indx_before_incr, indx_after_incr;
- tree orig_cond_expr;
- HOST_WIDE_INT old_N = 0;
- int vf;
- tree cond_stmt;
- tree new_loop_bound;
- tree cond;
- tree lb_type;
-#ifdef ENABLE_CHECKING
- if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
- abort ();
-#endif
- old_N = LOOP_VINFO_NITERS (loop_vinfo);
- vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "\n<<vect_update_inits_of_dr>>\n");
-#ifdef ENABLE_CHECKING
- /* FORNOW:
- assuming number-of-iterations divides by the vectorization factor. */
- if (old_N % vf)
- abort ();
-#endif
+ for (i = 0; i < VARRAY_ACTIVE_SIZE (loop_write_datarefs); i++)
+ {
+ struct data_reference *dr = VARRAY_GENERIC_PTR (loop_write_datarefs, i);
+ vect_update_inits_of_dr (dr, loop, niters);
+ }
- orig_cond_expr = LOOP_VINFO_EXIT_COND (loop_vinfo);
-#ifdef ENABLE_CHECKING
- if (!orig_cond_expr)
- abort ();
-#endif
- if (orig_cond_expr != bsi_stmt (loop_exit_bsi))
- abort ();
+ for (i = 0; i < VARRAY_ACTIVE_SIZE (loop_read_datarefs); i++)
+ {
+ struct data_reference *dr = VARRAY_GENERIC_PTR (loop_read_datarefs, i);
+ vect_update_inits_of_dr (dr, loop, niters);
+ }
+}
- create_iv (integer_zero_node, integer_one_node, NULL_TREE, loop,
- &loop_exit_bsi, false, &indx_before_incr, &indx_after_incr);
- /* bsi_insert is using BSI_NEW_STMT. We need to bump it back
- to point to the exit condition. */
- bsi_next (&loop_exit_bsi);
- if (bsi_stmt (loop_exit_bsi) != orig_cond_expr)
- abort ();
+/* Function vect_do_peeling_for_alignment
- /* new loop exit test: */
- lb_type = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (orig_cond_expr, 0), 1));
- new_loop_bound = build_int_cst (lb_type, old_N/vf, 0);
+ Peel the first 'niters' iterations of the loop represented by LOOP_VINFO.
+ 'niters' is set to the misalignment of one of the data references in the
+ loop, thereby forcing it to refer to an aligned location at the beginning
+ of the execution of this loop. The data reference for which we are
+ peeling is recorded in LOOP_VINFO_UNALIGNED_DR. */
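+
+/* Schematically: the original loop over [0, n) becomes a scalar prolog
+ loop over [0, k) followed by the loop to be vectorized over [k, n),
+ where k is the value computed by vect_gen_niters_for_prolog_loop. */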
- if (exit_edge->flags & EDGE_TRUE_VALUE) /* 'then' edge exits the loop. */
- cond = build2 (GE_EXPR, boolean_type_node, indx_after_incr, new_loop_bound);
- else /* 'then' edge loops back. */
- cond = build2 (LT_EXPR, boolean_type_node, indx_after_incr, new_loop_bound);
+static void
+vect_do_peeling_for_alignment (loop_vec_info loop_vinfo, struct loops *loops)
+{
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ tree niters_of_prolog_loop, ni_name;
+ tree n_iters;
+ struct loop *new_loop;
+
+ if (vect_debug_details (NULL))
+ fprintf (dump_file, "\n<<vect_do_peeling_for_alignment>>\n");
+
+ ni_name = vect_build_loop_niters (loop_vinfo);
+ niters_of_prolog_loop = vect_gen_niters_for_prolog_loop (loop_vinfo, ni_name);
+
+ /* Peel the prolog loop, making it iterate niters_of_prolog_loop times. */
+ new_loop =
+ slpeel_tree_peel_loop_to_edge (loop, loops, loop_preheader_edge (loop),
+ niters_of_prolog_loop, ni_name, true);
+#ifdef ENABLE_CHECKING
+ gcc_assert (new_loop);
+ slpeel_verify_cfg_after_peeling (new_loop, loop);
+#endif
- cond_stmt = build3 (COND_EXPR, TREE_TYPE (orig_cond_expr), cond,
- TREE_OPERAND (orig_cond_expr, 1), TREE_OPERAND (orig_cond_expr, 2));
+ /* Update number of times loop executes. */
+ n_iters = LOOP_VINFO_NITERS (loop_vinfo);
+ LOOP_VINFO_NITERS (loop_vinfo) =
+ build (MINUS_EXPR, integer_type_node, n_iters, niters_of_prolog_loop);
- bsi_insert_before (&loop_exit_bsi, cond_stmt, BSI_SAME_STMT);
+ /* Update the init conditions of the access functions of all data refs. */
+ vect_update_inits_of_drs (loop_vinfo, niters_of_prolog_loop);
- /* remove old loop exit test: */
- bsi_remove (&loop_exit_bsi);
+ /* After peeling we have to reset scalar evolution analyzer. */
+ scev_reset ();
- if (vect_debug_details (NULL))
- print_generic_expr (dump_file, cond_stmt, TDF_SLIM);
+ return;
}
int nbbs = loop->num_nodes;
block_stmt_iterator si;
int i;
-#ifdef ENABLE_CHECKING
+ tree ratio = NULL;
int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
-#endif
if (vect_debug_details (NULL))
fprintf (dump_file, "\n<<vec_transform_loop>>\n");
+
+ /* Peel the loop if there are data refs with unknown alignment.
+ FORNOW, peeling is applied for at most one such data ref. */
+
+ if (LOOP_DO_PEELING_FOR_ALIGNMENT (loop_vinfo))
+ vect_do_peeling_for_alignment (loop_vinfo, loops);
+
+ /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
+ compile time constant), or it is a constant that doesn't divide by the
+ vectorization factor, then an epilog loop needs to be created.
+ We therefore duplicate the loop: the original loop will be vectorized,
+ and will compute the first (n/VF) iterations. The second copy of the loop
+ will remain scalar and will compute the remaining (n%VF) iterations.
+ (VF is the vectorization factor). */
+
+ if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
+ || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0)
+ vect_do_peeling_for_loop_bound (loop_vinfo, &ratio, loops);
+
/* 1) Make sure the loop header has exactly two entries
2) Make sure we have a preheader basic block. */
- if (!loop->header->pred->pred_next
- || loop->header->pred->pred_next->pred_next)
- abort ();
+ gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
loop_split_edge_with (loop_preheader_edge (loop), NULL);
tree stmt = bsi_stmt (si);
stmt_vec_info stmt_info;
bool is_store;
-#ifdef ENABLE_CHECKING
- tree vectype;
-#endif
if (vect_debug_details (NULL))
{
print_generic_expr (dump_file, stmt, TDF_SLIM);
}
stmt_info = vinfo_for_stmt (stmt);
-#ifdef ENABLE_CHECKING
- if (!stmt_info)
- abort ();
-#endif
+ gcc_assert (stmt_info);
if (!STMT_VINFO_RELEVANT_P (stmt_info))
{
bsi_next (&si);
#ifdef ENABLE_CHECKING
/* FORNOW: Verify that all stmts operate on the same number of
units and no inner unrolling is necessary. */
- vectype = STMT_VINFO_VECTYPE (stmt_info);
- if (GET_MODE_NUNITS (TYPE_MODE (vectype)) != vectorization_factor)
- abort ();
+ gcc_assert
+ (GET_MODE_NUNITS (TYPE_MODE (STMT_VINFO_VECTYPE (stmt_info)))
+ == vectorization_factor);
#endif
/* -------- vectorize statement ------------ */
if (vect_debug_details (NULL))
} /* stmts in BB */
} /* BBs in loop */
- vect_transform_loop_bound (loop_vinfo);
+ vect_transform_loop_bound (loop_vinfo, ratio);
if (vect_debug_details (loop))
fprintf (dump_file,"Success! loop vectorized.");
Returns whether a stmt with OPERAND can be vectorized.
Supportable operands are constants, loop invariants, and operands that are
- defined by the current iteration of the loop. Unsupportable opernads are
+ defined by the current iteration of the loop. Unsupportable operands are
those that are defined by a previous iteration of the loop (as is the case
in reduction/induction computations). */
fprintf (dump_file, "==> examining statement: ");
print_generic_expr (dump_file, stmt, TDF_SLIM);
}
-#ifdef ENABLE_CHECKING
- if (!stmt_info)
- abort ();
-#endif
+
+ gcc_assert (stmt_info);
+
/* skip stmts which do not need to be vectorized.
this is expected to include:
- the COND_EXPR which is the loop exit condition
}
else
vectorization_factor = nunits;
+
+#ifdef ENABLE_CHECKING
+ gcc_assert (GET_MODE_SIZE (TYPE_MODE (scalar_type))
+ * vectorization_factor == UNITS_PER_SIMD_WORD);
+#endif
}
}
/* TODO: Analyze cost. Decide if worth while to vectorize. */
- if (!vectorization_factor)
+
+ if (vectorization_factor <= 1)
{
if (vect_debug_stats (loop) || vect_debug_details (loop))
fprintf (dump_file, "not vectorized: unsupported data-type");
}
LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
- /* FORNOW: handle only cases where the loop bound divides by the
- vectorization factor. */
-
- if (vect_debug_details (NULL))
- fprintf (dump_file,
- "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
- vectorization_factor, LOOP_VINFO_NITERS (loop_vinfo));
-
- if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
- {
- if (vect_debug_stats (loop) || vect_debug_details (loop))
- fprintf (dump_file, "not vectorized: Unknown loop bound.");
- return false;
- }
+ if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && vect_debug_details (NULL))
+ fprintf (dump_file,
+ "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
+ vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo));
- if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
- && LOOP_VINFO_NITERS (loop_vinfo) % vectorization_factor != 0)
+ if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
+ || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0)
{
if (vect_debug_stats (loop) || vect_debug_details (loop))
- fprintf (dump_file, "not vectorized: loop bound doesn't divided by %d.",
- vectorization_factor);
- return false;
+ fprintf (dump_file, "epilog loop required.");
+ if (!vect_can_advance_ivs_p (loop))
+ {
+ if (vect_debug_stats (loop) || vect_debug_details (loop))
+ fprintf (dump_file, "not vectorized: can't create epilog loop 1.");
+ return false;
+ }
+ if (!slpeel_can_duplicate_loop_p (loop, loop->exit_edges[0]))
+ {
+ if (vect_debug_stats (loop) || vect_debug_details (loop))
+ fprintf (dump_file, "not vectorized: can't create epilog loop 2.");
+ return false;
+ }
}
return true;
return false;
step_expr = evolution_part;
- init_expr = initial_condition (access_fn);
+ init_expr = unshare_expr (initial_condition (access_fn));
if (vect_debug_details (NULL))
{
if (vect_debug_details (NULL))
fprintf (dump_file, "\n<<vect_analyze_scalar_cycles>>\n");
- for (phi = phi_nodes (bb); phi; phi = TREE_CHAIN (phi))
+ for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
{
tree access_fn = NULL;
vectorization yet. This property is verified in vect_is_simple_use,
during vect_analyze_operations. */
- access_fn = instantiate_parameters
- (loop,
- analyze_scalar_evolution (loop, PHI_RESULT (phi)));
+ /* FORNOW: instantiate_parameters is not called here. */
+ access_fn = analyze_scalar_evolution (loop, PHI_RESULT (phi));
if (!access_fn)
{
struct data_reference *drb,
struct loop *loop)
{
- bool differ_p;
+ bool differ_p;
struct data_dependence_relation *ddr;
-
+
if (!array_base_name_differ_p (dra, drb, &differ_p))
{
- if (vect_debug_stats (loop) || vect_debug_details (loop))
+ if (vect_debug_stats (loop) || vect_debug_details (loop))
{
- fprintf (dump_file,
- "not vectorized: can't determine dependence between: ");
+ fprintf (dump_file,
+ "not vectorized: can't determine dependence between: ");
print_generic_expr (dump_file, DR_REF (dra), TDF_SLIM);
fprintf (dump_file, " and ");
print_generic_expr (dump_file, DR_REF (drb), TDF_SLIM);
exist any data dependences between them.
TODO: dependences which distance is greater than the vectorization factor
- can be ignored. */
+ can be ignored. */
static bool
vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo)
}
+/* Function vect_compute_array_base_alignment.
+ A utility function of vect_compute_array_ref_alignment.
+
+ Compute the misalignment of ARRAY in bits.
+
+ Input:
+ ARRAY - an array_ref (possibly multidimensional) of type ARRAY_TYPE.
+ VECTYPE - we are interested in the misalignment modulo the size of vectype.
+ if NULL: don't compute misalignment, just return the base of ARRAY.
+ PREV_DIMENSIONS - initialized to one.
+ MISALIGNMENT - the computed misalignment in bits.
+
+ Output:
+ If VECTYPE is not NULL:
+ Return NULL_TREE if the misalignment cannot be computed. Otherwise, return
+ the base of the array, and put the computed misalignment in MISALIGNMENT.
+ If VECTYPE is NULL:
+ Return the base of the array.
+
+ For a[idx_N]...[idx_2][idx_1][idx_0], the address of
+ a[idx_N]...[idx_2][idx_1] is
+ {&a + idx_1 * dim_0 + idx_2 * dim_0 * dim_1 + ...
+ ... + idx_N * dim_0 * ... * dim_N-1}.
+ (The misalignment of &a is not checked here).
+ Note that every term contains dim_0, therefore, if dim_0 is a
+ multiple of NUNITS, the whole sum is a multiple of NUNITS.
+ Otherwise, if idx_1 is constant, and dim_1 is a multiple of
+ NUNITS, the misalignment of the sum is equal to the misalignment
+ of {idx_1 * dim_0}. If idx_1 is not constant, we cannot determine
+ the misalignment of this array, and we return NULL_TREE.
+ We proceed recursively in this manner, accumulating the total
+ misalignment and the product of the previous dimensions for a
+ correct misalignment calculation. */
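+
+/* Example (illustrative): for 'int a[N][3]' accessed as a[i][j], with
+ V4SI vectors (NUNITS = 4), dim_0 = 3 is not a multiple of NUNITS, so
+ the misalignment of &a[i][0] depends on i; it is computable only when
+ i is constant, in which case it is (i * 3 * 32) % 128 bits. */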
+
+static tree
+vect_compute_array_base_alignment (tree array,
+ tree vectype,
+ tree *prev_dimensions,
+ tree *misalignment)
+{
+ tree index;
+ tree domain;
+ tree dimension_size;
+ tree mis;
+ tree bits_per_vectype;
+ tree bits_per_vectype_unit;
+
+ /* The 'stop condition' of the recursion. */
+ if (TREE_CODE (array) != ARRAY_REF)
+ return array;
+
+ if (!vectype)
+ /* Just get the base decl. */
+ return vect_compute_array_base_alignment
+ (TREE_OPERAND (array, 0), NULL, NULL, NULL);
+
+ if (!host_integerp (*misalignment, 1) || TREE_OVERFLOW (*misalignment) ||
+ !host_integerp (*prev_dimensions, 1) || TREE_OVERFLOW (*prev_dimensions))
+ return NULL_TREE;
+
+ domain = TYPE_DOMAIN (TREE_TYPE (array));
+ dimension_size =
+ int_const_binop (PLUS_EXPR,
+ int_const_binop (MINUS_EXPR, TYPE_MAX_VALUE (domain),
+ TYPE_MIN_VALUE (domain), 1),
+ size_one_node, 1);
+
+ /* If the dimension size is a multiple of NUNITS, then the remaining
+ sum is also a multiple of NUNITS. Check:
+
+ dimension_size % GET_MODE_NUNITS (TYPE_MODE (vectype)) == 0 ? */
+ mis = int_const_binop (TRUNC_MOD_EXPR, dimension_size,
+ build_int_cst (NULL_TREE, GET_MODE_NUNITS (TYPE_MODE (vectype))), 1);
+ if (integer_zerop (mis))
+ /* This array is aligned. Continue just in order to get the base decl. */
+ return vect_compute_array_base_alignment
+ (TREE_OPERAND (array, 0), NULL, NULL, NULL);
+
+ index = TREE_OPERAND (array, 1);
+ if (!host_integerp (index, 1))
+ /* The current index is not constant. */
+ return NULL_TREE;
+
+ index = int_const_binop (MINUS_EXPR, index, TYPE_MIN_VALUE (domain), 0);
+
+ bits_per_vectype = fold_convert (unsigned_type_node,
+ build_int_cst (NULL_TREE, BITS_PER_UNIT *
+ GET_MODE_SIZE (TYPE_MODE (vectype))));
+ bits_per_vectype_unit = fold_convert (unsigned_type_node,
+ build_int_cst (NULL_TREE, BITS_PER_UNIT *
+ GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (vectype)))));
+
+ /* Add {idx_i * dim_i-1 * ... * dim_0 } to the misalignment computed
+ earlier:
+
+ *misalignment =
+ (*misalignment + index_val * dimension_size * *prev_dimensions)
+ % vectype_nunits;
+ */
+
+ mis = int_const_binop (MULT_EXPR, index, dimension_size, 1);
+ mis = int_const_binop (MULT_EXPR, mis, *prev_dimensions, 1);
+ mis = int_const_binop (MULT_EXPR, mis, bits_per_vectype_unit, 1);
+ mis = int_const_binop (PLUS_EXPR, *misalignment, mis, 1);
+ *misalignment = int_const_binop (TRUNC_MOD_EXPR, mis, bits_per_vectype, 1);
+
+
+ *prev_dimensions = int_const_binop (MULT_EXPR,
+ *prev_dimensions, dimension_size, 1);
+
+ return vect_compute_array_base_alignment (TREE_OPERAND (array, 0), vectype,
+ prev_dimensions,
+ misalignment);
+}
+
+
/* Function vect_compute_data_ref_alignment
Compute the misalignment of the data reference DR.
+ Output:
+ 1. If during the misalignment computation it is found that the data reference
+ cannot be vectorized then false is returned.
+ 2. DR_MISALIGNMENT (DR) is defined.
+
FOR NOW: No analysis is actually performed. Misalignment is calculated
only for trivial cases. TODO. */
-static void
+static bool
vect_compute_data_ref_alignment (struct data_reference *dr,
- loop_vec_info loop_vinfo ATTRIBUTE_UNUSED)
+ loop_vec_info loop_vinfo)
{
tree stmt = DR_STMT (dr);
+ stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree ref = DR_REF (dr);
tree vectype;
- tree access_fn = DR_ACCESS_FN (dr, 0); /* FORNOW: single access_fn. */
- tree init;
tree scalar_type;
- tree misalign;
- tree array_first_index;
- tree array_base = DR_BASE_NAME (dr);
- tree base_decl = NULL_TREE;
- tree bit_offset = size_zero_node;
tree offset = size_zero_node;
- tree unit_bits = build_int_cst (unsigned_type_node, BITS_PER_UNIT, 0);
- tree nunits;
- tree alignment;
-
+ tree base, bit_offset, alignment;
+ tree unit_bits = fold_convert (unsigned_type_node,
+ build_int_cst (NULL_TREE, BITS_PER_UNIT));
+ tree dr_base;
+ bool base_aligned_p;
+
if (vect_debug_details (NULL))
fprintf (dump_file, "vect_compute_data_ref_alignment:");
{
fprintf (dump_file, "no vectype for stmt: ");
print_generic_expr (dump_file, stmt, TDF_SLIM);
- fprintf (dump_file, "scalar_type: ");
+ fprintf (dump_file, " scalar_type: ");
print_generic_expr (dump_file, scalar_type, TDF_DETAILS);
}
- return;
+ /* It is not possible to vectorize this data reference. */
+ return false;
}
+ STMT_VINFO_VECTYPE (stmt_info) = vectype;
+ gcc_assert (TREE_CODE (ref) == ARRAY_REF || TREE_CODE (ref) == INDIRECT_REF);
+
+ if (TREE_CODE (ref) == ARRAY_REF)
+ dr_base = ref;
+ else
+ dr_base = STMT_VINFO_VECT_DR_BASE (stmt_info);
- if (TYPE_ALIGN (TREE_TYPE (TREE_TYPE (array_base))) < TYPE_ALIGN (vectype))
+ base = vect_get_base_and_bit_offset (dr, dr_base, vectype,
+ loop_vinfo, &bit_offset, &base_aligned_p);
+ if (!base)
{
- base_decl = vect_get_base_decl_and_bit_offset (array_base, &bit_offset);
- if (!base_decl)
+ if (vect_debug_details (NULL))
{
- if (vect_debug_details (NULL))
- fprintf (dump_file, "Unknown alignment for access");
- return;
+ fprintf (dump_file, "Unknown alignment for access: ");
+ print_generic_expr (dump_file,
+ STMT_VINFO_VECT_DR_BASE (stmt_info), TDF_SLIM);
}
+ return true;
+ }
- offset = int_const_binop (TRUNC_DIV_EXPR, bit_offset, unit_bits, 1);
- bit_offset = int_const_binop (TRUNC_MOD_EXPR, bit_offset, unit_bits, 1);
- if (!integer_zerop (bit_offset))
+ if (!base_aligned_p)
+ {
+ if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype)))
{
if (vect_debug_details (NULL))
- {
- fprintf (dump_file, "bit offset alignment: ");
- print_generic_expr (dump_file, bit_offset, TDF_SLIM);
- }
- return;
+ {
+ fprintf (dump_file, "can't force alignment of ref: ");
+ print_generic_expr (dump_file, ref, TDF_SLIM);
+ }
+ return true;
}
+
+ /* Force the alignment of the decl.
+ NOTE: This is the only change to the code we make during
+ the analysis phase, before deciding to vectorize the loop. */
+ if (vect_debug_details (NULL))
+ fprintf (dump_file, "force alignment");
+ DECL_ALIGN (base) = TYPE_ALIGN (vectype);
+ DECL_USER_ALIGN (base) = TYPE_ALIGN (vectype);
+ }
- if (!base_decl ||
- (DECL_ALIGN (base_decl) < TYPE_ALIGN (vectype)
- && !vect_can_force_dr_alignment_p (base_decl, TYPE_ALIGN (vectype))))
+ /* At this point we assume that the base is aligned, and the offset from it
+ (including index, if relevant) has been computed and is in BIT_OFFSET. */
+ gcc_assert (base_aligned_p
+ || (TREE_CODE (base) == VAR_DECL
+ && DECL_ALIGN (base) >= TYPE_ALIGN (vectype)));
+
+ /* Convert into bytes. */
+ offset = int_const_binop (TRUNC_DIV_EXPR, bit_offset, unit_bits, 1);
+ /* Check that there is no remainder in bits. */
+ bit_offset = int_const_binop (TRUNC_MOD_EXPR, bit_offset, unit_bits, 1);
+ if (!integer_zerop (bit_offset))
+ {
+ if (vect_debug_details (NULL))
{
- if (vect_debug_details (NULL))
- {
- fprintf (dump_file, "can't force alignment of ref: ");
- print_generic_expr (dump_file, array_base, TDF_SLIM);
- }
- return;
+ fprintf (dump_file, "bit offset alignment: ");
+ print_generic_expr (dump_file, bit_offset, TDF_SLIM);
}
+ return false;
+ }
+
+ /* Alignment required, in bytes: */
+ alignment = fold_convert (unsigned_type_node,
+ build_int_cst (NULL_TREE, TYPE_ALIGN (vectype)/BITS_PER_UNIT));
- if (DECL_ALIGN (base_decl) < TYPE_ALIGN (vectype))
- {
- /* Force the alignment of the decl.
- NOTE: This is the only change to the code we make during
- the analysis phase, before deciding to vectorize the loop. */
- if (vect_debug_details (NULL))
- fprintf (dump_file, "force alignment");
- DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
- DECL_USER_ALIGN (base_decl) = TYPE_ALIGN (vectype);
- }
+ /* Modulo alignment. */
+ offset = int_const_binop (TRUNC_MOD_EXPR, offset, alignment, 0);
+ if (!host_integerp (offset, 1) || TREE_OVERFLOW (offset))
+ {
+ if (vect_debug_details (NULL))
+ fprintf (dump_file, "unexpected misalign value");
+ return false;
}
- /* The misalignement is:
- (base_alignment + offset + index_access_fn_init) % alignment.
- At this point we already guaranteed that base_alignment == 0,
- and computed the offset.
- It remains to check the first index accessed. */
+ DR_MISALIGNMENT (dr) = tree_low_cst (offset, 1);
+
+ if (vect_debug_details (NULL))
+ fprintf (dump_file, "misalign = %d", DR_MISALIGNMENT (dr));
+
+ return true;
+}
+
+
+/* Function vect_compute_array_ref_alignment
+
+ Compute the misalignment of an array-ref, relative to a
+ TYPE_ALIGN (VECTYPE) boundary.
+
+ Output:
+ OFFSET - the misalignment in bits
+ Return value - the base of the array-ref. E.g.,
+ if the array-ref is a.b[k].c[i][j] the returned
+ base is a.b[k].c. */
+
+static tree
+vect_compute_array_ref_alignment (struct data_reference *dr,
+ loop_vec_info loop_vinfo,
+ tree vectype,
+ tree *offset)
+{
+ tree array_first_index = size_zero_node;
+ tree init;
+ tree ref = DR_REF (dr);
+ tree scalar_type = TREE_TYPE (ref);
+ tree oprnd0 = TREE_OPERAND (ref, 0);
+ tree dims = size_one_node;
+ tree misalign = size_zero_node;
+ tree next_ref, this_offset = size_zero_node;
+ tree nunits;
+ tree nbits;
+
+ if (TREE_CODE (TREE_TYPE (ref)) == ARRAY_TYPE)
+ /* The reference is an array without its last index. */
+ next_ref = vect_compute_array_base_alignment (ref, vectype, &dims,
+ &misalign);
+ else
+ next_ref = vect_compute_array_base_alignment (oprnd0, vectype, &dims,
+ &misalign);
+ if (!vectype)
+ /* Alignment is not requested. Just return the base. */
+ return next_ref;
+
+ /* Compute alignment. */
+ if (!host_integerp (misalign, 1) || TREE_OVERFLOW (misalign) || !next_ref)
+ return NULL_TREE;
+ this_offset = misalign;
+ /* Check the first index accessed. */
if (!vect_get_first_index (ref, &array_first_index))
{
if (vect_debug_details (NULL))
fprintf (dump_file, "no first_index for array.");
- return;
+ return NULL_TREE;
}
-
- /* Check the index of the array_ref. */
- init = initial_condition (access_fn);
+ /* Check the index of the array_ref. */
+ init = initial_condition_in_loop_num (DR_ACCESS_FN (dr, 0),
+ LOOP_VINFO_LOOP (loop_vinfo)->num);
- /* FORNOW: In order to simplify the handling of alignment, we make sure
- that the first location at which the array is accessed ('init') is on an
+ /* FORNOW: In order to simplify the handling of alignment, we make sure
+ that the first location at which the array is accessed ('init') is on an
'NUNITS' boundary, since we are assuming here that 'array base' is aligned.
- This is too conservative, since we require that
- both {'array_base' is a multiple of NUNITS} && {'init' is a multiple of
+ This is too conservative, since we require that
+ both {'array_base' is a multiple of NUNITS} && {'init' is a multiple of
NUNITS}, instead of just {('array_base' + 'init') is a multiple of NUNITS}.
This should be relaxed in the future. */
- if (!init || !host_integerp (init,0))
+ if (!init || !host_integerp (init, 0))
{
if (vect_debug_details (NULL))
- fprintf (dump_file, "init not simple INTEGER_CST.");
- return;
+ fprintf (dump_file, "non constant init. ");
+ return NULL_TREE;
}
- /* alignment required, in bytes: */
- alignment = build_int_cst (unsigned_type_node,
- TYPE_ALIGN (vectype)/BITS_PER_UNIT, 0);
/* bytes per scalar element: */
- nunits = build_int_cst (unsigned_type_node,
- GET_MODE_SIZE (TYPE_MODE (scalar_type)), 0);
-
- /* misalign = (offset + (init-array_first_index)*nunits) % alignment */
- if (vect_debug_details (NULL))
- {
- fprintf (dump_file, "misalign = ( offset <");
- print_generic_expr (dump_file, offset, TDF_SLIM);
- fprintf (dump_file, "> + (init <");
- print_generic_expr (dump_file, init, TDF_SLIM);
- fprintf (dump_file, "> - first_indx <");
- print_generic_expr (dump_file, array_first_index, TDF_SLIM);
- fprintf (dump_file, ">) * nunits <");
- print_generic_expr (dump_file, nunits, TDF_SLIM);
- fprintf (dump_file, ">) mod alignment <");
- print_generic_expr (dump_file, alignment, TDF_SLIM);
- fprintf (dump_file, ">");
- }
+ nunits = fold_convert (unsigned_type_node,
+ build_int_cst (NULL_TREE, GET_MODE_SIZE (TYPE_MODE (scalar_type))));
+ nbits = int_const_binop (MULT_EXPR, nunits,
+ build_int_cst (NULL_TREE, BITS_PER_UNIT), 1);
+ /* misalign = offset + (init-array_first_index)*nunits*bits_in_byte */
misalign = int_const_binop (MINUS_EXPR, init, array_first_index, 0);
- misalign = int_const_binop (MULT_EXPR, misalign, nunits, 0);
- misalign = int_const_binop (PLUS_EXPR, misalign, offset, 0);
- misalign = int_const_binop (TRUNC_MOD_EXPR, misalign, alignment, 0);
-
- if (vect_debug_details (NULL))
- {
- fprintf (dump_file, "misalign = ");
- print_generic_expr (dump_file, misalign, TDF_SLIM);
- }
+ misalign = int_const_binop (MULT_EXPR, misalign, nbits, 0);
+ misalign = int_const_binop (PLUS_EXPR, misalign, this_offset, 0);
- if (!host_integerp (misalign,1) || TREE_OVERFLOW (misalign))
+ /* TODO: allow negative misalign values. */
+ if (!host_integerp (misalign, 1) || TREE_OVERFLOW (misalign))
{
if (vect_debug_details (NULL))
- fprintf (dump_file, "unexpected misalign value");
- return;
+ fprintf (dump_file, "unexpected misalign value");
+ return NULL_TREE;
}
-
- DR_MISALIGNMENT (dr) = tree_low_cst (misalign,1);
-
- if (vect_debug_details (NULL))
- fprintf (dump_file, "misalign = %d",DR_MISALIGNMENT (dr));
+ *offset = misalign;
+ return next_ref;
}
FOR NOW: No analysis is actually performed. Misalignment is calculated
only for trivial cases. TODO. */
-static void
+static bool
vect_compute_data_refs_alignment (loop_vec_info loop_vinfo)
{
varray_type loop_write_datarefs = LOOP_VINFO_DATAREF_WRITES (loop_vinfo);
varray_type loop_read_datarefs = LOOP_VINFO_DATAREF_READS (loop_vinfo);
unsigned int i;
-
+
for (i = 0; i < VARRAY_ACTIVE_SIZE (loop_write_datarefs); i++)
{
struct data_reference *dr = VARRAY_GENERIC_PTR (loop_write_datarefs, i);
- vect_compute_data_ref_alignment (dr, loop_vinfo);
+ if (!vect_compute_data_ref_alignment (dr, loop_vinfo))
+ return false;
}
for (i = 0; i < VARRAY_ACTIVE_SIZE (loop_read_datarefs); i++)
{
struct data_reference *dr = VARRAY_GENERIC_PTR (loop_read_datarefs, i);
- vect_compute_data_ref_alignment (dr, loop_vinfo);
+ if (!vect_compute_data_ref_alignment (dr, loop_vinfo))
+ return false;
}
+
+ return true;
}
FOR NOW: we assume that whatever versioning/peeling takes place, only the
original loop is to be vectorized; Any other loops that are created by
the transformations performed in this pass - are not supposed to be
- vectorized. This restriction will be relaxed.
-
- FOR NOW: No transformation is actually performed. TODO. */
+ vectorized. This restriction will be relaxed. */
static void
-vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo ATTRIBUTE_UNUSED)
+vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
{
+ varray_type loop_read_datarefs = LOOP_VINFO_DATAREF_READS (loop_vinfo);
+ varray_type loop_write_datarefs = LOOP_VINFO_DATAREF_WRITES (loop_vinfo);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ unsigned int i;
+
/*
This pass will require a cost model to guide it whether to apply peeling
or versioning or a combination of the two. For example, the scheme that
(whether to generate regular loads/stores, or with special handling for
misalignment).
*/
+
+ /* (1) Peeling to force alignment. */
+
+ /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
+ Considerations:
+ + How many accesses will become aligned due to the peeling
+ - How many accesses will become unaligned due to the peeling,
+ and the cost of misaligned accesses.
+ - The cost of peeling (the extra runtime checks, the increase
+ in code size).
+
+ The scheme we use FORNOW: peel to force the alignment of the first
+ misaligned store in the loop.
+ Rationale: misaligned stores are not yet supported.
+
+ TODO: Use a better cost model. */
+
+ for (i = 0; i < VARRAY_ACTIVE_SIZE (loop_write_datarefs); i++)
+ {
+ struct data_reference *dr = VARRAY_GENERIC_PTR (loop_write_datarefs, i);
+ if (!aligned_access_p (dr))
+ {
+ LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr;
+ LOOP_DO_PEELING_FOR_ALIGNMENT (loop_vinfo) = true;
+ break;
+ }
+ }
+
+ if (!LOOP_VINFO_UNALIGNED_DR (loop_vinfo))
+ {
+ if (vect_debug_details (loop))
+ fprintf (dump_file, "Peeling for alignment will not be applied.");
+ return;
+ }
+  else if (vect_debug_details (loop))
+    fprintf (dump_file, "Peeling for alignment will be applied.");
+
+
+ /* (1.2) Update the alignment info according to the peeling factor.
+ If the misalignment of the DR we peel for is M, then the
+ peeling factor is VF - M, and the misalignment of each access DR_i
+ in the loop is DR_MISALIGNMENT (DR_i) + VF - M.
+ If the misalignment of the DR we peel for is unknown, then the
+ misalignment of each access DR_i in the loop is also unknown.
+
+ FORNOW: set the misalignment of the accesses to unknown even
+ if the peeling factor is known at compile time.
+
+ TODO: - if the peeling factor is known at compile time, use that
+ when updating the misalignment info of the loop DRs.
+ - consider accesses that are known to have the same
+ alignment, even if that alignment is unknown. */
+
+ for (i = 0; i < VARRAY_ACTIVE_SIZE (loop_write_datarefs); i++)
+ {
+ struct data_reference *dr = VARRAY_GENERIC_PTR (loop_write_datarefs, i);
+ if (dr == LOOP_VINFO_UNALIGNED_DR (loop_vinfo))
+ DR_MISALIGNMENT (dr) = 0;
+ else
+ DR_MISALIGNMENT (dr) = -1;
+ }
+ for (i = 0; i < VARRAY_ACTIVE_SIZE (loop_read_datarefs); i++)
+ {
+ struct data_reference *dr = VARRAY_GENERIC_PTR (loop_read_datarefs, i);
+ if (dr == LOOP_VINFO_UNALIGNED_DR (loop_vinfo))
+ DR_MISALIGNMENT (dr) = 0;
+ else
+ DR_MISALIGNMENT (dr) = -1;
+ }
}
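
The bookkeeping of steps (1.1) and (1.2) can be sketched in standalone C with
hypothetical numbers: peeling VF - M iterations aligns the chosen store and
shifts the misalignment of every other access by the same amount, modulo VF
(the FORNOW code above conservatively records -1, i.e. unknown, instead).

  #include <stdio.h>

  int
  main (void)
  {
    int vf = 4;              /* vectorization factor, in elements */
    int m = 1;               /* misalignment of the peeled-for store */
    int npeel = vf - m;      /* iterations peeled off the loop start */
    int dr_i = 3;            /* misalignment of some other access DR_i */

    printf ("peel %d iterations; DR_i misalignment becomes %d\n",
            npeel, (dr_i + npeel) % vf);
    return 0;
  }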
static bool
vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo)
{
- varray_type loop_write_datarefs = LOOP_VINFO_DATAREF_WRITES (loop_vinfo);
varray_type loop_read_datarefs = LOOP_VINFO_DATAREF_READS (loop_vinfo);
+ varray_type loop_write_datarefs = LOOP_VINFO_DATAREF_WRITES (loop_vinfo);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ enum dr_alignment_support supportable_dr_alignment;
unsigned int i;
if (vect_debug_details (NULL))
/* This pass may take place at function granularity instead of at loop
granularity. */
- vect_compute_data_refs_alignment (loop_vinfo);
+ if (!vect_compute_data_refs_alignment (loop_vinfo))
+ {
+ if (vect_debug_details (loop) || vect_debug_stats (loop))
+ fprintf (dump_file,
+ "not vectorized: can't calculate alignment for data ref.");
+ return false;
+ }
- /* This pass will use loop versioning and loop peeling in order to enhance
- the alignment of data references in the loop.
- FOR NOW: we assume that whatever versioning/peeling took place, the
- original loop is to be vectorized. Any other loops that were created by
- the transformations performed in this pass - are not supposed to be
- vectorized. This restriction will be relaxed. */
+ /* This pass will decide on using loop versioning and/or loop peeling in
+ order to enhance the alignment of data references in the loop. */
vect_enhance_data_refs_alignment (loop_vinfo);
- /* Finally, check that loop can be vectorized.
- FOR NOW: Until support for misaligned accesses is in place, only if all
- accesses are aligned can the loop be vectorized. This restriction will be
- relaxed. */
+ /* Finally, check that all the data references in the loop can be
+ handled with respect to their alignment. */
- for (i = 0; i < VARRAY_ACTIVE_SIZE (loop_write_datarefs); i++)
+ for (i = 0; i < VARRAY_ACTIVE_SIZE (loop_read_datarefs); i++)
{
- struct data_reference *dr = VARRAY_GENERIC_PTR (loop_write_datarefs, i);
- if (!aligned_access_p (dr))
+ struct data_reference *dr = VARRAY_GENERIC_PTR (loop_read_datarefs, i);
+ supportable_dr_alignment = vect_supportable_dr_alignment (dr);
+ if (!supportable_dr_alignment)
{
- if (vect_debug_stats (LOOP_VINFO_LOOP (loop_vinfo))
- || vect_debug_details (LOOP_VINFO_LOOP (loop_vinfo)))
- fprintf (dump_file, "not vectorized: unaligned store.");
+ if (vect_debug_details (loop) || vect_debug_stats (loop))
+ fprintf (dump_file, "not vectorized: unsupported unaligned load.");
return false;
}
}
-
- for (i = 0; i < VARRAY_ACTIVE_SIZE (loop_read_datarefs); i++)
+ for (i = 0; i < VARRAY_ACTIVE_SIZE (loop_write_datarefs); i++)
{
- struct data_reference *dr = VARRAY_GENERIC_PTR (loop_read_datarefs, i);
- if (!aligned_access_p (dr))
+ struct data_reference *dr = VARRAY_GENERIC_PTR (loop_write_datarefs, i);
+ supportable_dr_alignment = vect_supportable_dr_alignment (dr);
+ if (!supportable_dr_alignment)
{
- if (vect_debug_stats (LOOP_VINFO_LOOP (loop_vinfo))
- || vect_debug_details (LOOP_VINFO_LOOP (loop_vinfo)))
- fprintf (dump_file, "not vectorized: unaligned load.");
+ if (vect_debug_details (loop) || vect_debug_stats (loop))
+ fprintf (dump_file, "not vectorized: unsupported unaligned store.");
return false;
}
}
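
The asymmetry between the two checks above follows from the current scheme:
a misaligned load may still be supportable, while misaligned stores are not.
A hypothetical source-level loop that stays vectorizable on a target whose
vect_supportable_dr_alignment accepts unaligned loads:

  #define N 256
  float a[N], b[N];

  void
  shift (void)
  {
    int i;
    /* The store to b[i] can be made aligned (e.g. by peeling); the load
       from a[i + 1] is then misaligned but may still be supported.  */
    for (i = 0; i < N - 1; i++)
      b[i] = a[i + 1];
  }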
varray_type access_fns = DR_ACCESS_FNS (dr);
tree access_fn;
tree init, step;
+ unsigned int dimensions, i;
- /* FORNOW: handle only one dimensional arrays.
- This restriction will be relaxed in the future. */
- if (VARRAY_ACTIVE_SIZE (access_fns) != 1)
+  /* Check that, in the case of a multidimensional array ref A[i1][i2]..[iN],
+     i1, i2, ..., iN-1 are loop invariant (to make sure that the memory
+     access is contiguous).  */
+ dimensions = VARRAY_ACTIVE_SIZE (access_fns);
+
+ for (i = 1; i < dimensions; i++) /* Not including the last dimension. */
{
- if (vect_debug_details (NULL))
- fprintf (dump_file, "multi dimensional array reference.");
- return false;
- }
- access_fn = DR_ACCESS_FN (dr, 0);
+ access_fn = DR_ACCESS_FN (dr, i);
- if (!vect_is_simple_iv_evolution (loop_containing_stmt (DR_STMT (dr))->num,
- access_fn, &init, &step, true))
+ if (evolution_part_in_loop_num (access_fn,
+ loop_containing_stmt (DR_STMT (dr))->num))
+ {
+ /* Evolution part is not NULL in this loop (it is neither constant
+ nor invariant). */
+ if (vect_debug_details (NULL))
+ {
+ fprintf (dump_file,
+ "not vectorized: complicated multidim. array access.");
+ print_generic_expr (dump_file, access_fn, TDF_SLIM);
+ }
+ return false;
+ }
+ }
+
+ access_fn = DR_ACCESS_FN (dr, 0); /* The last dimension access function. */
+ if (!evolution_function_is_constant_p (access_fn)
+ && !vect_is_simple_iv_evolution (loop_containing_stmt (DR_STMT (dr))->num,
+ access_fn, &init, &step, true))
{
if (vect_debug_details (NULL))
{
- fprintf (dump_file, "too complicated access function.");
+ fprintf (dump_file, "not vectorized: complicated access function.");
print_generic_expr (dump_file, access_fn, TDF_SLIM);
}
return false;
}
-
+
return true;
}
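
Source-level illustration of what the dimension check above accepts and
rejects (hypothetical examples):

  #define N 16
  float a[N][N];

  /* Accepted: the outer subscript I is invariant in the loop, so only the
     last-dimension access function evolves and memory is accessed
     consecutively.  */
  void
  contiguous (int i)
  {
    int j;
    for (j = 0; j < N; j++)
      a[i][j] = 0.0f;
  }

  /* Rejected ("complicated multidim. array access"): the outer subscript
     evolves in the loop, so successive iterations stride over whole rows.  */
  void
  strided (void)
  {
    int j;
    for (j = 0; j < N; j++)
      a[j][5] = 0.0f;
  }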
FORNOW: the only access pattern that is considered vectorizable is a
simple step 1 (consecutive) access.
- FORNOW: handle only one dimensional arrays, and pointer accesses. */
+ FORNOW: handle only arrays and pointer accesses. */
static bool
vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo)
MEMREF - a data-ref in STMT, which is an INDIRECT_REF.
If the data-ref access is vectorizable, return a data_reference structure
- that represents it (DR). Otherwise - return NULL. */
+ that represents it (DR). Otherwise - return NULL. */
static struct data_reference *
vect_analyze_pointer_ref_access (tree memref, tree stmt, bool is_read)
return NULL;
}
- if (TREE_CODE (init) != SSA_NAME /* FORNOW */
- || !host_integerp (step,0))
+ STRIP_NOPS (init);
+
+  if (!host_integerp (step, 0))
{
if (vect_debug_stats (loop) || vect_debug_details (loop))
fprintf (dump_file,
- "not vectorized: non constant init/step for pointer access.");
+ "not vectorized: non constant step for pointer access.");
return NULL;
}
}
+/* Function vect_get_symbl_and_dr.
+
+   Analyze MEMREF and, if it is of a supported form, create a data_reference
+   struct for it and determine the variable that is relevant for its memory
+   tag (for aliasing purposes).
+
+   Input:
+   MEMREF - a data reference in STMT
+   IS_READ - TRUE if STMT reads from MEMREF, FALSE if it writes to MEMREF
+
+   Output:
+   DR - the data_reference struct for MEMREF
+   return value - the relevant variable for the memory tag, or NULL_TREE
+   if MEMREF is not of a supported form.  */
+
+static tree
+vect_get_symbl_and_dr (tree memref, tree stmt, bool is_read,
+ loop_vec_info loop_vinfo, struct data_reference **dr)
+{
+ tree symbl, oprnd0, oprnd1;
+ stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ tree offset;
+ tree array_base, base;
+ struct data_reference *new_dr;
+ bool base_aligned_p;
+
+ *dr = NULL;
+ switch (TREE_CODE (memref))
+ {
+ case INDIRECT_REF:
+ new_dr = vect_analyze_pointer_ref_access (memref, stmt, is_read);
+ if (! new_dr)
+ return NULL_TREE;
+ *dr = new_dr;
+ symbl = DR_BASE_NAME (new_dr);
+ STMT_VINFO_VECT_DR_BASE (stmt_info) = symbl;
+
+ switch (TREE_CODE (symbl))
+ {
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ oprnd0 = TREE_OPERAND (symbl, 0);
+ oprnd1 = TREE_OPERAND (symbl, 1);
+
+	  STRIP_NOPS (oprnd1);
+ /* Only {address_base + offset} expressions are supported,
+ where address_base can be POINTER_TYPE or ARRAY_TYPE and
+ offset can be anything but POINTER_TYPE or ARRAY_TYPE.
+ TODO: swap operands if {offset + address_base}. */
+ if ((TREE_CODE (TREE_TYPE (oprnd1)) == POINTER_TYPE
+ && TREE_CODE (oprnd1) != INTEGER_CST)
+ || TREE_CODE (TREE_TYPE (oprnd1)) == ARRAY_TYPE)
+ return NULL_TREE;
+
+ if (TREE_CODE (TREE_TYPE (oprnd0)) == POINTER_TYPE)
+ symbl = oprnd0;
+ else
+ symbl = vect_get_symbl_and_dr (oprnd0, stmt, is_read,
+ loop_vinfo, &new_dr);
+
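+	  /* Fall through: SYMBL now holds the relevant address base.  */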
+ case SSA_NAME:
+ case ADDR_EXPR:
+ /* symbl remains unchanged. */
+ break;
+
+ default:
+ if (vect_debug_details (NULL))
+ {
+ fprintf (dump_file, "unhandled data ref: ");
+ print_generic_expr (dump_file, memref, TDF_SLIM);
+ fprintf (dump_file, " (symbl ");
+ print_generic_expr (dump_file, symbl, TDF_SLIM);
+ fprintf (dump_file, ") in stmt ");
+ print_generic_expr (dump_file, stmt, TDF_SLIM);
+ }
+ return NULL_TREE;
+ }
+ break;
+
+ case ARRAY_REF:
+ offset = size_zero_node;
+
+ /* Store the array base in the stmt info.
+ For one dimensional array ref a[i], the base is a,
+ for multidimensional a[i1][i2]..[iN], the base is
+ a[i1][i2]..[iN-1]. */
+ array_base = TREE_OPERAND (memref, 0);
+ STMT_VINFO_VECT_DR_BASE (stmt_info) = array_base;
+
+ new_dr = analyze_array (stmt, memref, is_read);
+ *dr = new_dr;
+
+ /* Find the relevant symbol for aliasing purposes. */
+ base = DR_BASE_NAME (new_dr);
+ switch (TREE_CODE (base))
+ {
+ case VAR_DECL:
+ symbl = base;
+ break;
+
+ case INDIRECT_REF:
+ symbl = TREE_OPERAND (base, 0);
+ break;
+
+ case COMPONENT_REF:
+	  /* Could have recorded more accurate information,
+	     i.e., the actual FIELD_DECL that is being referenced,
+	     but later passes expect a VAR_DECL as the nmt (name memory tag).  */
+ symbl = vect_get_base_and_bit_offset (new_dr, base, NULL_TREE,
+ loop_vinfo, &offset, &base_aligned_p);
+ if (symbl)
+ break;
+ /* fall through */
+ default:
+ if (vect_debug_details (NULL))
+ {
+ fprintf (dump_file, "unhandled struct/class field access ");
+ print_generic_expr (dump_file, stmt, TDF_SLIM);
+ }
+ return NULL_TREE;
+ }
+ break;
+
+ default:
+ if (vect_debug_details (NULL))
+ {
+ fprintf (dump_file, "unhandled data ref: ");
+ print_generic_expr (dump_file, memref, TDF_SLIM);
+ fprintf (dump_file, " in stmt ");
+ print_generic_expr (dump_file, stmt, TDF_SLIM);
+ }
+ return NULL_TREE;
+ }
+ return symbl;
+}
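
Hypothetical source-level examples of the two MEMREF forms handled above, an
INDIRECT_REF through a pointer and an ARRAY_REF whose base is an array DECL:

  #define N 64
  float x[N];

  void
  through_pointer (float *p)
  {
    int i;
    for (i = 0; i < N; i++)
      p[i] = 1.0f;      /* gimplified to an INDIRECT_REF *(p + offset) */
  }

  void
  through_array (void)
  {
    int i;
    for (i = 0; i < N; i++)
      x[i] = 1.0f;      /* an ARRAY_REF whose DR_BASE_NAME is the VAR_DECL x */
  }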
+
+
/* Function vect_analyze_data_refs.
Find all the data references in the loop.
- FORNOW: Handle aligned INDIRECT_REFs and one dimensional ARRAY_REFs
+ FORNOW: Handle aligned INDIRECT_REFs and ARRAY_REFs
whose base is really an array (not a pointer) and whose alignment
- can be forced. This restriction will be relaxed. */
+ can be forced. This restriction will be relaxed. */
static bool
vect_analyze_data_refs (loop_vec_info loop_vinfo)
block_stmt_iterator si;
int j;
struct data_reference *dr;
+ tree tag;
+ tree address_base;
+ bool base_aligned_p;
+ tree offset;
if (vect_debug_details (NULL))
fprintf (dump_file, "\n<<vect_analyze_data_refs>>\n");
varray_type *datarefs = NULL;
int nvuses, nv_may_defs, nv_must_defs;
tree memref = NULL;
- tree array_base;
tree symbl;
/* Assumption: there exists a data-ref in stmt, if and only if
is_read = false;
}
- if (TREE_CODE (memref) == INDIRECT_REF)
- {
- dr = vect_analyze_pointer_ref_access (memref, stmt, is_read);
- if (! dr)
- return false;
- symbl = DR_BASE_NAME (dr);
- }
- else if (TREE_CODE (memref) == ARRAY_REF)
- {
- tree base;
- tree offset = size_zero_node;
- array_base = TREE_OPERAND (memref, 0);
-
- /* FORNOW: make sure that the array is one dimensional.
- This restriction will be relaxed in the future. */
- if (TREE_CODE (array_base) == ARRAY_REF)
- {
- if (vect_debug_stats (loop) || vect_debug_details (loop))
- {
- fprintf (dump_file,
- "not vectorized: multi-dimensional array.");
- print_generic_expr (dump_file, stmt, TDF_SLIM);
- }
- return false;
- }
-
- dr = analyze_array (stmt, memref, is_read);
-
- /* Find the relevant symbol for aliasing purposes. */
- base = DR_BASE_NAME (dr);
- switch (TREE_CODE (base))
- {
- case VAR_DECL:
- symbl = base;
- break;
- /* FORNOW: Disabled.
- case INDIRECT_REF:
- symbl = TREE_OPERAND (base, 0);
- break;
- */
- case COMPONENT_REF:
- /* CHECKME: could have recorded more accurate information -
- i.e, the actual FIELD_DECL that is being referenced -
- but later passes expect VAR_DECL as the nmt. */
- symbl = vect_get_base_decl_and_bit_offset (base, &offset);
- if (symbl)
- break;
- /* fall through */
- default:
- if (vect_debug_stats (loop) || vect_debug_details (loop))
- {
- fprintf (dump_file,
- "not vectorized: unhandled struct/class field access ");
- print_generic_expr (dump_file, stmt, TDF_SLIM);
- }
- return false;
- } /* switch */
- }
- else
+ /* Analyze MEMREF. If it is of a supported form, build data_reference
+ struct for it (DR) and find the relevant symbol for aliasing
+ purposes. */
+ symbl = vect_get_symbl_and_dr (memref, stmt, is_read, loop_vinfo,
+ &dr);
+ if (!symbl)
{
if (vect_debug_stats (loop) || vect_debug_details (loop))
{
- fprintf (dump_file, "not vectorized: unhandled data ref: ");
+ fprintf (dump_file, "not vectorized: unhandled data ref: ");
print_generic_expr (dump_file, stmt, TDF_SLIM);
}
return false;
}
-
+
/* Find and record the memtag assigned to this data-ref. */
- if (TREE_CODE (symbl) == VAR_DECL)
- STMT_VINFO_MEMTAG (stmt_info) = symbl;
- else if (TREE_CODE (symbl) == SSA_NAME)
+ switch (TREE_CODE (symbl))
{
- tree tag;
+ case VAR_DECL:
+ STMT_VINFO_MEMTAG (stmt_info) = symbl;
+ break;
+
+ case SSA_NAME:
symbl = SSA_NAME_VAR (symbl);
tag = get_var_ann (symbl)->type_mem_tag;
if (!tag)
return false;
}
STMT_VINFO_MEMTAG (stmt_info) = tag;
- }
- else
- {
+ break;
+
+ case ADDR_EXPR:
+ address_base = TREE_OPERAND (symbl, 0);
+
+ switch (TREE_CODE (address_base))
+ {
+ case ARRAY_REF:
+ dr = analyze_array (stmt, TREE_OPERAND (symbl, 0),
+			      DR_IS_READ (dr));
+ STMT_VINFO_MEMTAG (stmt_info) =
+ vect_get_base_and_bit_offset (dr, DR_BASE_NAME (dr), NULL_TREE,
+ loop_vinfo, &offset,
+ &base_aligned_p);
+ break;
+
+ case VAR_DECL:
+ STMT_VINFO_MEMTAG (stmt_info) = address_base;
+ break;
+
+ default:
+ if (vect_debug_stats (loop) || vect_debug_details (loop))
+ {
+ fprintf (dump_file,
+ "not vectorized: unhandled address expr: ");
+ print_generic_expr (dump_file, stmt, TDF_SLIM);
+ }
+ return false;
+ }
+ break;
+
+ default:
if (vect_debug_stats (loop) || vect_debug_details (loop))
{
fprintf (dump_file, "not vectorized: unsupported data-ref: ");
print_generic_expr (dump_file, memref, TDF_SLIM);
}
return false;
- }
+ }
VARRAY_PUSH_GENERIC_PTR (*datarefs, dr);
STMT_VINFO_DATA_REF (stmt_info) = dr;
}
-/* Utility functions used by vect_mark_stmts_to_be_vectorized. */
+/* Utility functions used by vect_mark_stmts_to_be_vectorized. */
/* Function vect_mark_relevant.
}
+/* Function vect_can_advance_ivs_p
+
+   In case the number of iterations of LOOP is unknown at compile
+ time, an epilog loop will be generated, and the loop induction variables
+ (IVs) will be "advanced" to the value they are supposed to take just before
+ the epilog loop. Here we check that the access function of the loop IVs
+ and the expression that represents the loop bound are simple enough.
+ These restrictions will be relaxed in the future. */
+
+static bool
+vect_can_advance_ivs_p (struct loop *loop)
+{
+ basic_block bb = loop->header;
+ tree phi;
+
+ /* Analyze phi functions of the loop header. */
+
+ for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
+ {
+ tree access_fn = NULL;
+ tree evolution_part;
+
+ if (vect_debug_details (NULL))
+ {
+ fprintf (dump_file, "Analyze phi: ");
+ print_generic_expr (dump_file, phi, TDF_SLIM);
+ }
+
+      /* Skip virtual phis.  The data dependences that are associated with
+ virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
+
+ if (!is_gimple_reg (SSA_NAME_VAR (PHI_RESULT (phi))))
+ {
+ if (vect_debug_details (NULL))
+ fprintf (dump_file, "virtual phi. skip.");
+ continue;
+ }
+
+ /* Analyze the evolution function. */
+
+ access_fn = instantiate_parameters
+ (loop, analyze_scalar_evolution (loop, PHI_RESULT (phi)));
+
+ if (!access_fn)
+ {
+ if (vect_debug_details (NULL))
+	  fprintf (dump_file, "No access function.");
+ return false;
+ }
+
+ if (vect_debug_details (NULL))
+ {
+ fprintf (dump_file, "Access function of PHI: ");
+ print_generic_expr (dump_file, access_fn, TDF_SLIM);
+ }
+
+ evolution_part = evolution_part_in_loop_num (access_fn, loop->num);
+
+ if (evolution_part == NULL_TREE)
+ return false;
+
+      /* FORNOW: We do not transform initial conditions of IVs
+	 whose evolution functions are a polynomial of degree >= 2.  */
+
+ if (tree_is_chrec (evolution_part))
+ return false;
+ }
+
+ return true;
+}
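
What "advancing" amounts to, sketched with hypothetical numbers: if the
vector loop executes (n / vf) * vf of the scalar iterations, an IV
{init, +, step} must enter the epilog loop with the value
init + (n / vf) * vf * step.

  #include <stdio.h>

  int
  main (void)
  {
    int n = 10, vf = 4;
    int init = 0, step = 2;
    int niters_vector = (n / vf) * vf;   /* 8 iterations run vectorized */

    printf ("epilog starts with IV = %d and runs %d scalar iterations\n",
            init + niters_vector * step, n - niters_vector);
    return 0;
  }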
+
+
/* Function vect_get_loop_niters.
- Determine how many iterations the loop is executed. */
+ Determine how many iterations the loop is executed.
+ If an expression that represents the number of iterations
+ can be constructed, place it in NUMBER_OF_ITERATIONS.
+ Return the loop exit condition. */
static tree
-vect_get_loop_niters (struct loop *loop, HOST_WIDE_INT *number_of_iterations)
+vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
{
tree niters;
niters = number_of_iterations_in_loop (loop);
if (niters != NULL_TREE
- && niters != chrec_dont_know
- && host_integerp (niters,0))
+ && niters != chrec_dont_know)
{
- *number_of_iterations = TREE_INT_CST_LOW (niters);
+ *number_of_iterations = niters;
if (vect_debug_details (NULL))
- fprintf (dump_file, "==> get_loop_niters:" HOST_WIDE_INT_PRINT_DEC,
- *number_of_iterations);
+ {
+	  fprintf (dump_file, "==> get_loop_niters:");
+ print_generic_expr (dump_file, *number_of_iterations, TDF_SLIM);
+ }
}
return get_loop_exit_condition (loop);
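
A hypothetical example of what the tree-valued NITERS enables: for the loop
below the recorded expression is (the SSA form of) the parameter n rather
than a compile-time constant, which previously failed the host_integerp test.

  void
  fill (float *p, int n)
  {
    int i;
    for (i = 0; i < n; i++)
      p[i] = 0.0f;
  }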
{
loop_vec_info loop_vinfo;
tree loop_cond;
- HOST_WIDE_INT number_of_iterations = -1;
+ tree number_of_iterations = NULL;
+ bool rescan = false;
if (vect_debug_details (loop))
fprintf (dump_file, "\n<<vect_analyze_loop_form>>\n");
if (loop->inner
|| !loop->single_exit
- || loop->num_nodes != 2)
+ || loop->num_nodes != 2
+ || EDGE_COUNT (loop->header->preds) != 2
+ || loop->num_entries != 1)
{
if (vect_debug_stats (loop) || vect_debug_details (loop))
{
fprintf (dump_file, "multiple exits.");
else if (loop->num_nodes != 2)
fprintf (dump_file, "too many BBs in loop.");
+ else if (EDGE_COUNT (loop->header->preds) != 2)
+ fprintf (dump_file, "too many incoming edges.");
+ else if (loop->num_entries != 1)
+ fprintf (dump_file, "too many entries.");
}
return NULL;
return NULL;
}
+ /* Make sure we have a preheader basic block. */
+ if (!loop->pre_header)
+ {
+ rescan = true;
+ loop_split_edge_with (loop_preheader_edge (loop), NULL);
+ }
+
+ /* Make sure there exists a single-predecessor exit bb: */
+ if (EDGE_COUNT (loop->exit_edges[0]->dest->preds) != 1)
+ {
+ rescan = true;
+ loop_split_edge_with (loop->exit_edges[0], NULL);
+ }
+
+ if (rescan)
+ {
+ flow_loop_scan (loop, LOOP_ALL);
+ /* Flow loop scan does not update loop->single_exit field. */
+ loop->single_exit = loop->exit_edges[0];
+ }
+
if (empty_block_p (loop->header))
{
if (vect_debug_stats (loop) || vect_debug_details (loop))
fprintf (dump_file, "not vectorized: complicated exit condition.");
return NULL;
}
-
- if (number_of_iterations < 0)
+
+ if (!number_of_iterations)
{
if (vect_debug_stats (loop) || vect_debug_details (loop))
- fprintf (dump_file, "not vectorized: unknown loop bound.");
+ fprintf (dump_file,
+ "not vectorized: number of iterations cannot be computed.");
return NULL;
}
- if (number_of_iterations == 0) /* CHECKME: can this happen? */
+ if (chrec_contains_undetermined (number_of_iterations))
+ {
+ if (vect_debug_details (NULL))
+ fprintf (dump_file, "Infinite number of iterations.");
+      return NULL;
+ }
+
+ loop_vinfo = new_loop_vec_info (loop);
+ LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
+
+ if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
+ {
+ if (vect_debug_details (loop))
+ {
+ fprintf (dump_file, "loop bound unknown.\n");
+ fprintf (dump_file, "Symbolic number of iterations is ");
+ print_generic_expr (dump_file, number_of_iterations, TDF_DETAILS);
+ }
+ }
+  else if (LOOP_VINFO_INT_NITERS (loop_vinfo) == 0)
{
if (vect_debug_stats (loop) || vect_debug_details (loop))
fprintf (dump_file, "not vectorized: number of iterations = 0.");
return NULL;
}
- loop_vinfo = new_loop_vec_info (loop);
LOOP_VINFO_EXIT_COND (loop_vinfo) = loop_cond;
- LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
return loop_vinfo;
}
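
Hypothetical illustrations of the loop-form requirements checked above:

  /* Accepted: exactly two basic blocks (header plus latch body), a single
     entry and a single exit.  */
  void
  saxpy (float *x, float *y, float a, int n)
  {
    int i;
    for (i = 0; i < n; i++)
      y[i] = a * x[i] + y[i];
  }

  /* Rejected ("too many BBs in loop."): the conditional splits the loop
     body into several basic blocks.  */
  void
  clamp (float *x, int n)
  {
    int i;
    for (i = 0; i < n; i++)
      if (x[i] < 0.0f)
        x[i] = 0.0f;
  }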
/* Find all data references in the loop (which correspond to vdefs/vuses)
and analyze their evolution in the loop.
- FORNOW: Handle only simple, one-dimensional, array references, which
+     FORNOW: Handle only simple array references, whose
alignment can be forced, and aligned pointer-references. */
ok = vect_analyze_data_refs (loop_vinfo);
return NULL;
}
-
/* Data-flow analysis to detect stmts that do not need to be vectorized. */
ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
return NULL;
}
-
/* Check that all cross-iteration scalar data-flow cycles are OK.
Cross-iteration cycles caused by virtual phis are analyzed separately. */
return NULL;
}
-
/* Analyze data dependences between the data-refs in the loop.
FORNOW: fail at the first data dependence that we encounter. */
return NULL;
}
-
/* Analyze the access patterns of the data-refs in the loop (consecutive,
complex, etc.). FORNOW: Only handle consecutive access pattern. */
return NULL;
}
-
/* Analyze the alignment of the data-refs in the loop.
FORNOW: Only aligned accesses are handled. */
return NULL;
}
-
/* Scan all the operations in the loop and make sure they are
vectorizable. */
return;
}
+#ifdef ENABLE_CHECKING
+ verify_loop_closed_ssa ();
+#endif
+
compute_immediate_uses (TDFA_USE_OPS, need_imm_uses_for);
/* ----------- Analyze loops. ----------- */
for (i = 1; i < loops_num; i++)
{
struct loop *loop = loops->parray[i];
- loop_vec_info loop_vinfo = loop->aux;
+ loop_vec_info loop_vinfo;
+
if (!loop)
- continue;
+ continue;
+ loop_vinfo = loop->aux;
destroy_loop_vec_info (loop_vinfo);
loop->aux = NULL;
}
- loop_commit_inserts ();
rewrite_into_ssa (false);
- if (bitmap_first_set_bit (vars_to_rename) >= 0)
- {
- /* The rewrite of ssa names may cause violation of loop closed ssa
- form invariants. TODO -- avoid these rewrites completely.
- Information in virtual phi nodes is sufficient for it. */
- rewrite_into_loop_closed_ssa ();
- }
+ rewrite_into_loop_closed_ssa (); /* FORNOW */
bitmap_clear (vars_to_rename);
}