/* Gimple Represented as Polyhedra.
- Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
+ Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
Contributed by Sebastian Pop <sebastian.pop@inria.fr>.
This file is part of GCC.
static VEC (scop_p, heap) *current_scops;
-/* Print GMP value V on stderr. */
+/* Converts a GMP constant V to a tree and returns it. */
+
+static tree
+gmp_cst_to_tree (tree type, Value v)
+{
+ return build_int_cst (type, value_get_si (v));
+}
+
+/* Returns true when BB is in REGION. */
+
+static bool
+bb_in_sese_p (basic_block bb, sese region)
+{
+ return pointer_set_contains (SESE_REGION_BBS (region), bb);
+}
+
+/* Returns true when LOOP is in the SESE region R. */
+
+static inline bool
+loop_in_sese_p (struct loop *loop, sese r)
+{
+ return (bb_in_sese_p (loop->header, r)
+ && bb_in_sese_p (loop->latch, r));
+}
+
+/* For a USE in BB, if BB is outside REGION, mark the USE in the
+ SESE_LIVEIN and SESE_LIVEOUT sets. */
+
+static void
+sese_build_livein_liveouts_use (sese region, basic_block bb, tree use)
+{
+ unsigned ver;
+ basic_block def_bb;
+
+ if (TREE_CODE (use) != SSA_NAME)
+ return;
+
+ ver = SSA_NAME_VERSION (use);
+ def_bb = gimple_bb (SSA_NAME_DEF_STMT (use));
+ if (!def_bb
+ || !bb_in_sese_p (def_bb, region)
+ || bb_in_sese_p (bb, region))
+ return;
+
+ if (!SESE_LIVEIN_VER (region, ver))
+ SESE_LIVEIN_VER (region, ver) = BITMAP_ALLOC (NULL);
+
+ bitmap_set_bit (SESE_LIVEIN_VER (region, ver), bb->index);
+ bitmap_set_bit (SESE_LIVEOUT (region), ver);
+}
+
+/* Marks for rewrite all the SSA_NAMEs defined in REGION that are
+ used in BB when BB is outside of the REGION. */
+
+static void
+sese_build_livein_liveouts_bb (sese region, basic_block bb)
+{
+ gimple_stmt_iterator bsi;
+ edge e;
+ edge_iterator ei;
+ ssa_op_iter iter;
+ tree var;
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ for (bsi = gsi_start_phis (e->dest); !gsi_end_p (bsi); gsi_next (&bsi))
+ sese_build_livein_liveouts_use (region, bb,
+ PHI_ARG_DEF_FROM_EDGE (gsi_stmt (bsi), e));
+
+ for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
+ FOR_EACH_SSA_TREE_OPERAND (var, gsi_stmt (bsi), iter, SSA_OP_ALL_USES)
+ sese_build_livein_liveouts_use (region, bb, var);
+}
+
+/* Build the SESE_LIVEIN and SESE_LIVEOUT for REGION. */
void
-debug_value (Value v)
+sese_build_livein_liveouts (sese region)
+{
+ basic_block bb;
+
+ SESE_LIVEOUT (region) = BITMAP_ALLOC (NULL);
+ SESE_NUM_VER (region) = num_ssa_names;
+ SESE_LIVEIN (region) = XCNEWVEC (bitmap, SESE_NUM_VER (region));
+
+ FOR_EACH_BB (bb)
+ sese_build_livein_liveouts_bb (region, bb);
+}
+
+/* Register basic blocks belonging to a region in a pointer set. */
+
+static void
+register_bb_in_sese (basic_block entry_bb, basic_block exit_bb, sese region)
{
- value_print (stderr, "%4s\n", v);
+ edge_iterator ei;
+ edge e;
+ basic_block bb = entry_bb;
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ {
+ if (!pointer_set_contains (SESE_REGION_BBS (region), e->dest)
+ && e->dest->index != exit_bb->index)
+ {
+ pointer_set_insert (SESE_REGION_BBS (region), e->dest);
+ register_bb_in_sese (e->dest, exit_bb, region);
+ }
+ }
}
-/* Converts a GMP constant V to a tree and returns it. */
+/* Builds a new SESE region from edges ENTRY and EXIT. */
-static tree
-gmp_cst_to_tree (tree type, Value v)
+sese
+new_sese (edge entry, edge exit)
{
- return build_int_cst (type, value_get_si (v));
+ sese res = XNEW (struct sese);
+
+ SESE_ENTRY (res) = entry;
+ SESE_EXIT (res) = exit;
+ SESE_REGION_BBS (res) = pointer_set_create ();
+ register_bb_in_sese (entry->dest, exit->dest, res);
+
+ SESE_LIVEOUT (res) = NULL;
+ SESE_NUM_VER (res) = 0;
+ SESE_LIVEIN (res) = NULL;
+
+ return res;
}
+/* Deletes REGION. */
+
+void
+free_sese (sese region)
+{
+ int i;
+
+ for (i = 0; i < SESE_NUM_VER (region); i++)
+ BITMAP_FREE (SESE_LIVEIN_VER (region, i));
+
+ if (SESE_LIVEIN (region))
+ free (SESE_LIVEIN (region));
+
+ if (SESE_LIVEOUT (region))
+ BITMAP_FREE (SESE_LIVEOUT (region));
+
+ pointer_set_destroy (SESE_REGION_BBS (region));
+ XDELETE (region);
+}
+
+\f
+
/* Debug the list of old induction variables for this SCOP. */
void
if (bb == exit)
fprintf (file, "%d [shape=box];\n", bb->index);
- if (bb_in_scop_p (bb, scop))
+ if (bb_in_sese_p (bb, SCOP_REGION (scop)))
fprintf (file, "%d [color=red];\n", bb->index);
FOR_EACH_EDGE (e, ei, bb->succs)
/* Select color for SCoP. */
for (i = 0; VEC_iterate (scop_p, current_scops, i, scop); i++)
- if (bb_in_scop_p (bb, scop)
+ if (bb_in_sese_p (bb, SCOP_REGION (scop))
|| (SCOP_EXIT (scop) == bb)
|| (SCOP_ENTRY (scop) == bb))
{
fprintf (file, " <TR><TD WIDTH=\"50\" BGCOLOR=\"%s\">", color);
- if (!bb_in_scop_p (bb, scop))
+ if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
fprintf (file, " (");
if (bb == SCOP_ENTRY (scop)
else
fprintf (file, " %d ", bb->index);
- if (!bb_in_scop_p (bb, scop))
+ if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
fprintf (file, ")");
fprintf (file, "</TD></TR>\n");
struct loop *nest;
nest = bb->loop_father;
- while (loop_outer (nest) && loop_in_scop_p (loop_outer (nest), scop))
+ while (loop_outer (nest)
+ && loop_in_sese_p (loop_outer (nest), SCOP_REGION (scop)))
nest = loop_outer (nest);
return nest;
|| evolution_function_is_affine_multivariate_p (scev, n));
}
-/* Return false if the tree_code of the operand OP or any of its operands
- is component_ref. */
+/* Return true if REF or any of its subtrees contains a
+ component_ref. */
static bool
-exclude_component_ref (tree op)
+contains_component_ref_p (tree ref)
{
- int i;
- int len;
+ if (!ref)
+ return false;
- if (op)
+ while (handled_component_p (ref))
{
- if (TREE_CODE (op) == COMPONENT_REF)
- return false;
- else
- {
- len = TREE_OPERAND_LENGTH (op);
- for (i = 0; i < len; ++i)
- {
- if (!exclude_component_ref (TREE_OPERAND (op, i)))
- return false;
- }
- }
+ if (TREE_CODE (ref) == COMPONENT_REF)
+ return true;
+
+ ref = TREE_OPERAND (ref, 0);
}
- return true;
+ return false;
}
/* Return true if the operand OP is simple. */
if (DECL_P (op)
/* or a structure, */
|| AGGREGATE_TYPE_P (TREE_TYPE (op))
+ /* or a COMPONENT_REF, */
+ || contains_component_ref_p (op)
/* or a memory access that cannot be analyzed by the data
reference analysis. */
|| ((handled_component_p (op) || INDIRECT_REF_P (op))
&& !stmt_simple_memref_p (loop, stmt, op)))
return false;
- return exclude_component_ref (op);
+ return true;
}
/* Return true only when STMT is simple enough for being handled by
size_t n = gimple_call_num_args (stmt);
tree lhs = gimple_call_lhs (stmt);
- for (i = 0; i < n; i++)
- {
- tree arg = gimple_call_arg (stmt, i);
+ if (lhs && !is_simple_operand (loop, stmt, lhs))
+ return false;
- if (!(is_simple_operand (loop, stmt, lhs)
- && is_simple_operand (loop, stmt, arg)))
- return false;
- }
+ for (i = 0; i < n; i++)
+ if (!is_simple_operand (loop, stmt, gimple_call_arg (stmt, i)))
+ return false;
return true;
}
harmful_stmt_in_bb (basic_block scop_entry, basic_block bb)
{
gimple_stmt_iterator gsi;
+ gimple stmt;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
if (!stmt_simple_for_scop_p (scop_entry, gsi_stmt (gsi)))
return gsi_stmt (gsi);
+ stmt = last_stmt (bb);
+ if (stmt && gimple_code (stmt) == GIMPLE_COND)
+ {
+ tree lhs = gimple_cond_lhs (stmt);
+ tree rhs = gimple_cond_rhs (stmt);
+
+ if (TREE_CODE (TREE_TYPE (lhs)) == REAL_TYPE
+ || TREE_CODE (TREE_TYPE (rhs)) == REAL_TYPE)
+ return stmt;
+ }
+
return NULL;
}
struct loop *nest = outermost_loop_in_scop (scop, bb);
gimple_stmt_iterator gsi;
- bitmap_set_bit (SCOP_BBS_B (scop), bb->index);
-
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
find_data_references_in_stmt (nest, gsi_stmt (gsi), &drs);
eq_rename_map_elts (const void *e1, const void *e2)
{
const struct rename_map_elt *elt1 = (const struct rename_map_elt *) e1;
- const struct rename_map_elt *elt2 = (const struct rename_map_elt *) e2;
-
- return (elt1->old_name == elt2->old_name);
-}
-
-/* Returns the new name associated to OLD_NAME in MAP. */
-
-static tree
-get_new_name_from_old_name (htab_t map, tree old_name)
-{
- struct rename_map_elt tmp;
- PTR *slot;
-
- tmp.old_name = old_name;
- slot = htab_find_slot (map, &tmp, NO_INSERT);
-
- if (slot && *slot)
- return ((rename_map_elt) *slot)->new_name;
-
- return old_name;
-}
-
-\f
-
-/* Returns true when BB is in REGION. */
-
-static bool
-bb_in_sese_p (basic_block bb, sese region)
-{
- return pointer_set_contains (SESE_REGION_BBS (region), bb);
-}
-
-/* For a USE in BB, if BB is outside REGION, mark the USE in the
- SESE_LIVEIN and SESE_LIVEOUT sets. */
-
-static void
-sese_build_livein_liveouts_use (sese region, basic_block bb, tree use)
-{
- unsigned ver;
- basic_block def_bb;
-
- if (TREE_CODE (use) != SSA_NAME)
- return;
-
- ver = SSA_NAME_VERSION (use);
- def_bb = gimple_bb (SSA_NAME_DEF_STMT (use));
- if (!def_bb
- || !bb_in_sese_p (def_bb, region)
- || bb_in_sese_p (bb, region))
- return;
-
- if (!SESE_LIVEIN_VER (region, ver))
- SESE_LIVEIN_VER (region, ver) = BITMAP_ALLOC (NULL);
-
- bitmap_set_bit (SESE_LIVEIN_VER (region, ver), bb->index);
- bitmap_set_bit (SESE_LIVEOUT (region), ver);
-}
-
-/* Marks for rewrite all the SSA_NAMES defined in REGION and that are
- used in BB that is outside of the REGION. */
-
-static void
-sese_build_livein_liveouts_bb (sese region, basic_block bb)
-{
- gimple_stmt_iterator bsi;
- edge e;
- edge_iterator ei;
- ssa_op_iter iter;
- tree var;
-
- FOR_EACH_EDGE (e, ei, bb->succs)
- for (bsi = gsi_start_phis (e->dest); !gsi_end_p (bsi); gsi_next (&bsi))
- sese_build_livein_liveouts_use (region, bb,
- PHI_ARG_DEF_FROM_EDGE (gsi_stmt (bsi), e));
-
- for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
- FOR_EACH_SSA_TREE_OPERAND (var, gsi_stmt (bsi), iter, SSA_OP_ALL_USES)
- sese_build_livein_liveouts_use (region, bb, var);
-}
-
-/* Build the SESE_LIVEIN and SESE_LIVEOUT for REGION. */
-
-void
-sese_build_livein_liveouts (sese region)
-{
- basic_block bb;
-
- SESE_LIVEOUT (region) = BITMAP_ALLOC (NULL);
- SESE_NUM_VER (region) = num_ssa_names;
- SESE_LIVEIN (region) = XCNEWVEC (bitmap, SESE_NUM_VER (region));
-
- FOR_EACH_BB (bb)
- sese_build_livein_liveouts_bb (region, bb);
-}
-
-/* Register basic blocks belonging to a region in a pointer set. */
-
-static void
-register_bb_in_sese (basic_block entry_bb, basic_block exit_bb, sese region)
-{
- edge_iterator ei;
- edge e;
- basic_block bb = entry_bb;
-
- FOR_EACH_EDGE (e, ei, bb->succs)
- {
- if (!pointer_set_contains (SESE_REGION_BBS (region), e->dest) &&
- e->dest->index != exit_bb->index)
- {
- pointer_set_insert (SESE_REGION_BBS (region), e->dest);
- register_bb_in_sese (e->dest, exit_bb, region);
- }
- }
-}
-
-/* Builds a new SESE region from edges ENTRY and EXIT. */
-
-sese
-new_sese (edge entry, edge exit)
-{
- sese res = XNEW (struct sese);
-
- SESE_ENTRY (res) = entry;
- SESE_EXIT (res) = exit;
- SESE_REGION_BBS (res) = pointer_set_create ();
- register_bb_in_sese (entry->dest, exit->dest, res);
-
- SESE_LIVEOUT (res) = NULL;
- SESE_NUM_VER (res) = 0;
- SESE_LIVEIN (res) = NULL;
+ const struct rename_map_elt *elt2 = (const struct rename_map_elt *) e2;
- return res;
+ return (elt1->old_name == elt2->old_name);
}
-/* Deletes REGION. */
+/* Returns the new name associated to OLD_NAME in MAP. */
-void
-free_sese (sese region)
+static tree
+get_new_name_from_old_name (htab_t map, tree old_name)
{
- int i;
-
- for (i = 0; i < SESE_NUM_VER (region); i++)
- BITMAP_FREE (SESE_LIVEIN_VER (region, i));
+ struct rename_map_elt tmp;
+ PTR *slot;
- if (SESE_LIVEIN (region))
- free (SESE_LIVEIN (region));
+ tmp.old_name = old_name;
+ slot = htab_find_slot (map, &tmp, NO_INSERT);
- if (SESE_LIVEOUT (region))
- BITMAP_FREE (SESE_LIVEOUT (region));
+ if (slot && *slot)
+ return ((rename_map_elt) *slot)->new_name;
- pointer_set_destroy (SESE_REGION_BBS (region));
- XDELETE (region);
+ return old_name;
}
\f
SCOP_REGION (scop) = new_sese (entry, exit);
SCOP_BBS (scop) = VEC_alloc (graphite_bb_p, heap, 3);
SCOP_OLDIVS (scop) = VEC_alloc (name_tree, heap, 3);
- SCOP_BBS_B (scop) = BITMAP_ALLOC (NULL);
SCOP_LOOPS (scop) = BITMAP_ALLOC (NULL);
SCOP_LOOP_NEST (scop) = VEC_alloc (loop_p, heap, 3);
SCOP_ADD_PARAMS (scop) = true;
free_graphite_bb (gb);
VEC_free (graphite_bb_p, heap, SCOP_BBS (scop));
- BITMAP_FREE (SCOP_BBS_B (scop));
BITMAP_FREE (SCOP_LOOPS (scop));
VEC_free (loop_p, heap, SCOP_LOOP_NEST (scop));
result.next = NULL;
result.exits = false;
result.last = bb;
+
+ /* Mark bbs terminating a SESE region as difficult if they start
+ a condition. */
+ if (VEC_length (edge, bb->succs) > 1)
+ result.difficult = true;
+
break;
case GBB_SIMPLE:
verify_dominators (CDI_DOMINATORS);
verify_dominators (CDI_POST_DOMINATORS);
verify_ssa (false);
+ verify_loop_closed_ssa ();
#endif
}
scev = analyze_scalar_evolution (loop, PHI_RESULT (phi));
scev = instantiate_parameters (loop, scev);
- if (!simple_iv (loop, phi, PHI_RESULT (phi), &iv, true))
+ if (!simple_iv (loop, loop, PHI_RESULT (phi), &iv, true))
res++;
}
tree nit;
gimple_seq stmts;
edge exit = single_dom_exit (loop);
+ bool known_niter = number_of_iterations_exit (loop, exit, &niter, false);
+
+ gcc_assert (known_niter);
- gcc_assert (number_of_iterations_exit (loop, exit, &niter, false));
nit = force_gimple_operand (unshare_expr (niter.niter), &stmts, true,
NULL_TREE);
if (stmts)
if (nb_reductions_in_loop (loop) > 0)
return NULL_TREE;
- return canonicalize_loop_ivs (loop, NULL, nit);
+ return canonicalize_loop_ivs (loop, NULL, &nit);
}
/* Record LOOP as occuring in SCOP. Returns true when the operation
struct loop *loop0, *loop1;
FOR_EACH_BB (bb)
- if (bb_in_scop_p (bb, scop))
+ if (bb_in_sese_p (bb, SCOP_REGION (scop)))
{
struct loop *loop = bb->loop_father;
/* Only add loops if they are completely contained in the SCoP. */
if (loop->header == bb
- && bb_in_scop_p (loop->latch, scop))
+ && bb_in_sese_p (loop->latch, SCOP_REGION (scop)))
{
if (!scop_record_loop (scop, loop))
return false;
return true;
}
+/* Calculate the number of loops around LOOP in the SCOP. */
+
+static inline int
+nb_loops_around_loop_in_scop (struct loop *l, scop_p scop)
+{
+ int d = 0;
+
+ for (; loop_in_sese_p (l, SCOP_REGION (scop)); d++, l = loop_outer (l));
+
+ return d;
+}
+
+/* Calculate the number of loops around GB in the current SCOP. */
+
+int
+nb_loops_around_gb (graphite_bb_p gb)
+{
+ return nb_loops_around_loop_in_scop (gbb_loop (gb), GBB_SCOP (gb));
+}
+
+/* Returns the dimensionality of an enclosing loop iteration domain
+ with respect to enclosing SCoP for a given data reference REF. The
+ returned dimensionality is homogeneous (depth of loop nest + number
+ of SCoP parameters + const). */
+
+int
+ref_nb_loops (data_reference_p ref)
+{
+ loop_p loop = loop_containing_stmt (DR_STMT (ref));
+ scop_p scop = DR_SCOP (ref);
+
+ return nb_loops_around_loop_in_scop (loop, scop) + scop_nb_params (scop) + 2;
+}
+
/* Build dynamic schedules for all the BBs. */
static void
}
}
+/* Returns the number of loops that are identical at the beginning of
+ the vectors A and B. */
+
+static int
+compare_prefix_loops (VEC (loop_p, heap) *a, VEC (loop_p, heap) *b)
+{
+ int i;
+ loop_p ea;
+ int lb;
+
+ if (!a || !b)
+ return 0;
+
+ lb = VEC_length (loop_p, b);
+
+ for (i = 0; VEC_iterate (loop_p, a, i, ea); i++)
+ if (i >= lb
+ || ea != VEC_index (loop_p, b, i))
+ return i;
+
+ return 0;
+}
+
/* Build for BB the static schedule.
The STATIC_SCHEDULE is defined like this:
static void
build_scop_canonical_schedules (scop_p scop)
{
- int i, j;
+ int i;
graphite_bb_p gb;
- int nb = scop_nb_loops (scop) + 1;
+ int nb_loops = scop_nb_loops (scop);
+ lambda_vector static_schedule = lambda_vector_new (nb_loops + 1);
+ VEC (loop_p, heap) *loops_previous = NULL;
- SCOP_STATIC_SCHEDULE (scop) = lambda_vector_new (nb);
+ /* Schedules have to start at 0 on the first component. Since
+ there is no previous loop vector to compare_prefix_loops against
+ on the first iteration, the prefix will be zero and that index
+ is incremented before copying. */
+ static_schedule[0] = -1;
for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
{
- int offset = nb_loops_around_gb (gb);
-
- /* After leaving a loop, it is possible that the schedule is not
- set at zero. This loop reinitializes components located
- after OFFSET. */
-
- for (j = offset + 1; j < nb; j++)
- if (SCOP_STATIC_SCHEDULE (scop)[j])
- {
- memset (&(SCOP_STATIC_SCHEDULE (scop)[j]), 0,
- sizeof (int) * (nb - j));
- ++SCOP_STATIC_SCHEDULE (scop)[offset];
- break;
- }
-
- GBB_STATIC_SCHEDULE (gb) = lambda_vector_new (offset + 1);
- lambda_vector_copy (SCOP_STATIC_SCHEDULE (scop),
- GBB_STATIC_SCHEDULE (gb), offset + 1);
-
- ++SCOP_STATIC_SCHEDULE (scop)[offset];
+ int prefix = compare_prefix_loops (loops_previous, GBB_LOOPS (gb));
+ int nb = gbb_nb_loops (gb);
+
+ loops_previous = GBB_LOOPS (gb);
+ memset (&(static_schedule[prefix + 1]), 0,
+ sizeof (int) * (nb_loops - prefix));
+ ++static_schedule[prefix];
+ GBB_STATIC_SCHEDULE (gb) = lambda_vector_new (nb + 1);
+ lambda_vector_copy (static_schedule,
+ GBB_STATIC_SCHEDULE (gb), nb + 1);
}
}
case MINUS_EXPR:
scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
- value_oppose (k, k);
- scan_tree_for_params (s, TREE_OPERAND (e, 1), c, r, k, subtract);
+ scan_tree_for_params (s, TREE_OPERAND (e, 1), c, r, k, !subtract);
break;
case NEGATE_EXPR:
- value_oppose (k, k);
- scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
+ scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, !subtract);
break;
case SSA_NAME:
else
gcc_unreachable ();
- if (loop->inner && loop_in_scop_p (loop->inner, scop))
+ if (loop->inner && loop_in_sese_p (loop->inner, SCOP_REGION (scop)))
build_loop_iteration_domains (scop, loop->inner, cstr, nb_outer_loops + 1);
/* Only go to the next loops, if we are not at the outermost layer. These
have to be handled seperately, as we can be sure, that the chain at this
layer will be connected. */
- if (nb_outer_loops != 0 && loop->next && loop_in_scop_p (loop->next, scop))
+ if (nb_outer_loops != 0 && loop->next && loop_in_sese_p (loop->next,
+ SCOP_REGION (scop)))
build_loop_iteration_domains (scop, loop->next, outer_cstr, nb_outer_loops);
for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
else
{
nb_rows = 0;
- nb_cols = scop_nb_params (scop) + 2;
+ nb_cols = nb_loops_around_gb (gb) + scop_nb_params (scop) + 2;
}
/* Count number of necessary new rows to add the conditions to the
CloogMatrix *new_domain;
new_domain = cloog_matrix_alloc (nb_rows + nb_new_rows, nb_cols);
- for (i = 0; i < nb_rows; i++)
- for (j = 0; j < nb_cols; j++)
- value_assign (new_domain->p[i][j], domain->p[i][j]);
+ if (domain)
+ {
+ for (i = 0; i < nb_rows; i++)
+ for (j = 0; j < nb_cols; j++)
+ value_assign (new_domain->p[i][j], domain->p[i][j]);
+
+ cloog_matrix_free (domain);
+ }
- cloog_matrix_free (domain);
domain = new_domain;
GBB_DOMAIN (gb) = new_domain;
- }
+ }
/* Add the conditions to the new enlarged domain matrix. */
row = nb_rows;
bool res = true;
int i, j;
graphite_bb_p gbb;
- gimple_stmt_iterator gsi;
basic_block bb_child, bb_iter;
VEC (basic_block, heap) *dom;
+ gimple stmt;
/* Make sure we are in the SCoP. */
- if (!bb_in_scop_p (bb, scop))
+ if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
return true;
if (bb_contains_non_iv_scalar_phi_nodes (bb))
{
GBB_CONDITIONS (gbb) = VEC_copy (gimple, heap, *conditions);
GBB_CONDITION_CASES (gbb) = VEC_copy (gimple, heap, *cases);
- add_conditions_to_domain (gbb);
}
dom = get_dominated_by (CDI_DOMINATORS, bb);
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ stmt = last_stmt (bb);
+ if (stmt)
{
- gimple stmt = gsi_stmt (gsi);
VEC (edge, gc) *edges;
edge e;
return res;
}
+/* Traverses all the GBBs of the SCOP and add their constraints to the
+ iteration domains. */
+
+static void
+add_conditions_to_constraints (scop_p scop)
+{
+ int i;
+ graphite_bb_p gbb;
+
+ for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gbb); i++)
+ add_conditions_to_domain (gbb);
+}
+
/* Build the current domain matrix: the loops belonging to the current
SCOP, and that vary for the execution of the current basic block.
Returns false if there is no loop in SCOP. */
/* Build cloog loop for all loops, that are in the uppermost loop layer of
this SCoP. */
for (i = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), i, loop); i++)
- if (!loop_in_scop_p (loop_outer (loop), scop))
+ if (!loop_in_sese_p (loop_outer (loop), SCOP_REGION (scop)))
{
/* The outermost constraints is a matrix that has:
-first column: eq/ineq boolean
int i, ndim = DR_NUM_DIMENSIONS (ref);
struct access_matrix *am = GGC_NEW (struct access_matrix);
- AM_MATRIX (am) = VEC_alloc (lambda_vector, heap, ndim);
+ AM_MATRIX (am) = VEC_alloc (lambda_vector, gc, ndim);
DR_SCOP (ref) = GBB_SCOP (gb);
for (i = 0; i < ndim; i++)
if (!build_access_matrix_with_af (af, v, scop, ref_nb_loops (ref)))
return false;
- VEC_safe_push (lambda_vector, heap, AM_MATRIX (am), v);
+ VEC_quick_push (lambda_vector, AM_MATRIX (am), v);
}
DR_ACCESS_MATRIX (ref) = am;
return TYPE_PRECISION (type1) > TYPE_PRECISION (type2) ? type1 : type2;
}
-/* Converts a Cloog AST expression E back to a GCC expression tree
- of type TYPE. */
+static tree
+clast_to_gcc_expression (tree, struct clast_expr *, VEC (name_tree, heap) *,
+ loop_iv_stack);
+
+/* Converts a Cloog reduction expression R with reduction operation OP
+ to a GCC expression tree of type TYPE. PARAMS is a vector of
+ parameters of the scop, and IVSTACK contains the stack of induction
+ variables. */
+
+static tree
+clast_to_gcc_expression_red (tree type, enum tree_code op,
+ struct clast_reduction *r,
+ VEC (name_tree, heap) *params,
+ loop_iv_stack ivstack)
+{
+ int i;
+ tree res = clast_to_gcc_expression (type, r->elts[0], params, ivstack);
+
+ for (i = 1; i < r->n; i++)
+ {
+ tree t = clast_to_gcc_expression (type, r->elts[i], params, ivstack);
+ res = fold_build2 (op, type, res, t);
+ }
+ return res;
+}
+
+/* Converts a Cloog AST expression E back to a GCC expression tree of
+ type TYPE. PARAMS is a vector of parameters of the scop, and
+ IVSTACK contains the stack of induction variables. */
static tree
clast_to_gcc_expression (tree type, struct clast_expr *e,
switch (r->type)
{
case clast_red_sum:
- if (r->n == 1)
- return clast_to_gcc_expression (type, r->elts[0], params, ivstack);
-
- else
- {
- tree tl = clast_to_gcc_expression (type, r->elts[0], params, ivstack);
- tree tr = clast_to_gcc_expression (type, r->elts[1], params, ivstack);
-
- gcc_assert (r->n >= 1
- && r->elts[0]->type == expr_term
- && r->elts[1]->type == expr_term);
-
- return fold_build2 (PLUS_EXPR, type, tl, tr);
- }
-
- break;
+ return clast_to_gcc_expression_red (type, PLUS_EXPR, r, params, ivstack);
case clast_red_min:
- if (r->n == 1)
- return clast_to_gcc_expression (type, r->elts[0], params, ivstack);
-
- else if (r->n == 2)
- {
- tree tl = clast_to_gcc_expression (type, r->elts[0], params, ivstack);
- tree tr = clast_to_gcc_expression (type, r->elts[1], params, ivstack);
- return fold_build2 (MIN_EXPR, type, tl, tr);
- }
-
- else
- gcc_unreachable();
-
- break;
+ return clast_to_gcc_expression_red (type, MIN_EXPR, r, params, ivstack);
case clast_red_max:
- if (r->n == 1)
- return clast_to_gcc_expression (type, r->elts[0], params, ivstack);
-
- else if (r->n == 2)
- {
- tree tl = clast_to_gcc_expression (type, r->elts[0], params, ivstack);
- tree tr = clast_to_gcc_expression (type, r->elts[1], params, ivstack);
- return fold_build2 (MAX_EXPR, type, tl, tr);
- }
-
- else
- gcc_unreachable();
-
- break;
+ return clast_to_gcc_expression_red (type, MAX_EXPR, r, params, ivstack);
default:
gcc_unreachable ();
ssa_op_iter iter;
use_operand_p use_p;
- FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
+ FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
{
tree use = USE_FROM_PTR (use_p);
tree new_name = get_new_name_from_old_name (map, use);
}
static void expand_scalar_variables_stmt (gimple, basic_block, scop_p,
- loop_p, htab_t);
+ htab_t);
+static tree
+expand_scalar_variables_expr (tree, tree, enum tree_code, tree, basic_block,
+ scop_p, htab_t, gimple_stmt_iterator *);
+
+/* Copies at GSI all the scalar computations on which the ssa_name OP0
+ depends in the SCOP: these are all the scalar variables used in
+ the definition of OP0, that are defined outside BB and still in the
+ SCOP, i.e. not a parameter of the SCOP. The expression that is
+ returned contains only induction variables from the generated code:
+ MAP contains the induction variables renaming mapping, and is used
+ to translate the names of induction variables. */
+
+static tree
+expand_scalar_variables_ssa_name (tree op0, basic_block bb,
+ scop_p scop, htab_t map,
+ gimple_stmt_iterator *gsi)
+{
+ tree var0, var1, type;
+ gimple def_stmt;
+ enum tree_code subcode;
+
+ if (is_parameter (scop, op0)
+ || is_iv (op0))
+ return get_new_name_from_old_name (map, op0);
+
+ def_stmt = SSA_NAME_DEF_STMT (op0);
+
+ if (gimple_bb (def_stmt) == bb)
+ {
+ /* If the defining statement is in the basic block already
+ we do not need to create a new expression for it, we
+ only need to ensure its operands are expanded. */
+ expand_scalar_variables_stmt (def_stmt, bb, scop, map);
+ return get_new_name_from_old_name (map, op0);
+ }
+ else
+ {
+ if (gimple_code (def_stmt) != GIMPLE_ASSIGN
+ || !bb_in_sese_p (gimple_bb (def_stmt), SCOP_REGION (scop)))
+ return get_new_name_from_old_name (map, op0);
+
+ var0 = gimple_assign_rhs1 (def_stmt);
+ subcode = gimple_assign_rhs_code (def_stmt);
+ var1 = gimple_assign_rhs2 (def_stmt);
+ type = gimple_expr_type (def_stmt);
+
+ return expand_scalar_variables_expr (type, var0, subcode, var1, bb, scop,
+ map, gsi);
+ }
+}
-/* Constructs a tree which only contains old_ivs and parameters. Any
- other variables that are defined outside BB will be eliminated by
- using their definitions in the constructed tree. OLD_LOOP_FATHER
- is the original loop that contained BB. */
+/* Copies at GSI all the scalar computations on which the expression
+ OP0 CODE OP1 depends in the SCOP: these are all the scalar
+ variables used in OP0 and OP1, defined outside BB and still defined
+ in the SCOP, i.e. not a parameter of the SCOP. The expression that
+ is returned contains only induction variables from the generated
+ code: MAP contains the induction variables renaming mapping, and is
+ used to translate the names of induction variables. */
static tree
expand_scalar_variables_expr (tree type, tree op0, enum tree_code code,
tree op1, basic_block bb, scop_p scop,
- loop_p old_loop_father, htab_t map)
+ htab_t map, gimple_stmt_iterator *gsi)
{
- if ((TREE_CODE_CLASS (code) == tcc_constant
- && code == INTEGER_CST)
- || TREE_CODE_CLASS (code) == tcc_reference)
+ if (TREE_CODE_CLASS (code) == tcc_constant
+ || TREE_CODE_CLASS (code) == tcc_declaration)
return op0;
+ /* For data references we have to duplicate also its memory
+ indexing. */
+ if (TREE_CODE_CLASS (code) == tcc_reference)
+ {
+ switch (code)
+ {
+ case INDIRECT_REF:
+ {
+ tree old_name = TREE_OPERAND (op0, 0);
+ tree expr = expand_scalar_variables_ssa_name
+ (old_name, bb, scop, map, gsi);
+ tree new_name = force_gimple_operand_gsi (gsi, expr, true, NULL,
+ true, GSI_SAME_STMT);
+
+ return fold_build1 (code, type, new_name);
+ }
+
+ case ARRAY_REF:
+ {
+ tree op00 = TREE_OPERAND (op0, 0);
+ tree op01 = TREE_OPERAND (op0, 1);
+ tree op02 = TREE_OPERAND (op0, 2);
+ tree op03 = TREE_OPERAND (op0, 3);
+ tree base = expand_scalar_variables_expr
+ (TREE_TYPE (op00), op00, TREE_CODE (op00), NULL, bb, scop,
+ map, gsi);
+ tree subscript = expand_scalar_variables_expr
+ (TREE_TYPE (op01), op01, TREE_CODE (op01), NULL, bb, scop,
+ map, gsi);
+
+ return build4 (ARRAY_REF, type, base, subscript, op02, op03);
+ }
+
+ default:
+ /* The above cases should catch everything. */
+ gcc_unreachable ();
+ }
+ }
+
if (TREE_CODE_CLASS (code) == tcc_unary)
{
tree op0_type = TREE_TYPE (op0);
enum tree_code op0_code = TREE_CODE (op0);
- tree op0_expr =
- expand_scalar_variables_expr (op0_type, op0, op0_code,
- NULL, bb, scop, old_loop_father, map);
-
+ tree op0_expr = expand_scalar_variables_expr (op0_type, op0, op0_code,
+ NULL, bb, scop, map, gsi);
+
return fold_build1 (code, type, op0_expr);
}
{
tree op0_type = TREE_TYPE (op0);
enum tree_code op0_code = TREE_CODE (op0);
- tree op0_expr =
- expand_scalar_variables_expr (op0_type, op0, op0_code,
- NULL, bb, scop, old_loop_father, map);
+ tree op0_expr = expand_scalar_variables_expr (op0_type, op0, op0_code,
+ NULL, bb, scop, map, gsi);
tree op1_type = TREE_TYPE (op1);
enum tree_code op1_code = TREE_CODE (op1);
- tree op1_expr =
- expand_scalar_variables_expr (op1_type, op1, op1_code,
- NULL, bb, scop, old_loop_father, map);
+ tree op1_expr = expand_scalar_variables_expr (op1_type, op1, op1_code,
+ NULL, bb, scop, map, gsi);
return fold_build2 (code, type, op0_expr, op1_expr);
}
if (code == SSA_NAME)
- {
- tree var0, var1;
- gimple def_stmt;
- enum tree_code subcode;
-
- if (is_parameter (scop, op0)
- || is_iv (op0))
- return get_new_name_from_old_name (map, op0);
-
- def_stmt = SSA_NAME_DEF_STMT (op0);
-
- if (gimple_bb (def_stmt) == bb)
- {
- /* If the defining statement is in the basic block already
- we do not need to create a new expression for it, we
- only need to ensure its operands are expanded. */
- expand_scalar_variables_stmt (def_stmt, bb, scop,
- old_loop_father, map);
- return get_new_name_from_old_name (map, op0);
- }
- else
- {
- if (gimple_code (def_stmt) != GIMPLE_ASSIGN
- || !bb_in_scop_p (gimple_bb (def_stmt), scop))
- return get_new_name_from_old_name (map, op0);
-
- var0 = gimple_assign_rhs1 (def_stmt);
- subcode = gimple_assign_rhs_code (def_stmt);
- var1 = gimple_assign_rhs2 (def_stmt);
-
- return expand_scalar_variables_expr (type, var0, subcode, var1,
- bb, scop, old_loop_father, map);
- }
- }
+ return expand_scalar_variables_ssa_name (op0, bb, scop, map, gsi);
gcc_unreachable ();
return NULL;
}
-/* Replicates any uses of non-parameters and non-old-ivs variablesthat
- are defind outside BB with code that is inserted in BB.
- OLD_LOOP_FATHER is the original loop that contained STMT. */
+/* Copies at the beginning of BB all the scalar computations on which
+ STMT depends in the SCOP: these are all the scalar variables used
+ in STMT, defined outside BB and still defined in the SCOP, i.e. not a
+ parameter of the SCOP. MAP contains the induction variables renaming
+ mapping, and is used to translate the names of induction variables
+ in the copied computations. */
static void
expand_scalar_variables_stmt (gimple stmt, basic_block bb, scop_p scop,
- loop_p old_loop_father, htab_t map)
+ htab_t map)
{
ssa_op_iter iter;
use_operand_p use_p;
+ gimple_stmt_iterator gsi = gsi_after_labels (bb);
FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
{
tree type = TREE_TYPE (use);
enum tree_code code = TREE_CODE (use);
tree use_expr = expand_scalar_variables_expr (type, use, code, NULL, bb,
- scop, old_loop_father, map);
+ scop, map, &gsi);
if (use_expr != use)
{
- gimple_stmt_iterator gsi = gsi_after_labels (bb);
tree new_use =
force_gimple_operand_gsi (&gsi, use_expr, true, NULL,
true, GSI_NEW_STMT);
update_stmt (stmt);
}
-/* Copies the definitions outside of BB of variables that are not
- induction variables nor parameters. BB must only contain
- "external" references to these types of variables. OLD_LOOP_FATHER
- is the original loop that contained BB. */
+/* Copies at the beginning of BB all the scalar computations on which
+ BB depends in the SCOP: these are all the scalar variables used
+ in BB, defined outside BB and still defined in the SCOP, i.e. not a
+ parameter of the SCOP. MAP contains the induction variables renaming
+ mapping, and is used to translate the names of induction variables
+ in the copied computations. */
static void
-expand_scalar_variables (basic_block bb, scop_p scop,
- loop_p old_loop_father, htab_t map)
+expand_scalar_variables (basic_block bb, scop_p scop, htab_t map)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
{
gimple stmt = gsi_stmt (gsi);
- expand_scalar_variables_stmt (stmt, bb, scop, old_loop_father, map);
+ expand_scalar_variables_stmt (stmt, bb, scop, map);
gsi_next (&gsi);
}
}
operands. */
copy = gimple_copy (stmt);
gsi_insert_after (&gsi_tgt, copy, GSI_NEW_STMT);
- mark_symbols_for_renaming (copy);
+ mark_sym_for_renaming (gimple_vop (cfun));
region = lookup_stmt_eh_region (stmt);
if (region >= 0)
/* Create new names for all the definitions created by COPY and
add replacement mappings for each new name. */
- FOR_EACH_SSA_DEF_OPERAND (def_p, copy, op_iter, SSA_OP_DEF)
+ FOR_EACH_SSA_DEF_OPERAND (def_p, copy, op_iter, SSA_OP_ALL_DEFS)
{
tree old_name = DEF_FROM_PTR (def_p);
tree new_name = create_new_def_for (old_name, copy, def_p);
static edge
copy_bb_and_scalar_dependences (basic_block bb, scop_p scop,
- loop_p context_loop,
edge next_e, htab_t map)
{
basic_block new_bb = split_edge (next_e);
remove_condition (new_bb);
rename_variables (new_bb, map);
remove_phi_nodes (new_bb);
- expand_scalar_variables (new_bb, scop, context_loop, map);
+ expand_scalar_variables (new_bb, scop, map);
register_scop_liveout_renames (scop, map);
return next_e;
loop_iv_stack_patch_for_consts (ivstack, (struct clast_user_stmt *) stmt);
build_iv_mapping (ivstack, map, gbb, scop);
next_e = copy_bb_and_scalar_dependences (GBB_BB (gbb), scop,
- context_loop, next_e, map);
+ next_e, map);
htab_delete (map);
loop_iv_stack_remove_constants (ivstack);
- update_ssa (TODO_update_ssa);
recompute_all_dominators ();
+ update_ssa (TODO_update_ssa);
graphite_verify ();
return translate_clast (scop, context_loop, stmt->next, next_e, ivstack);
}
return stmt;
}
-/* Returns true when it is possible to generate code for this STMT.
- For the moment we cannot generate code when Cloog decides to
- duplicate a statement, as we do not do a copy, but a move.
- USED_BASIC_BLOCKS records the blocks that have already been seen.
- We return false if we have to generate code twice for the same
- block. */
-
-static bool
-can_generate_code_stmt (struct clast_stmt *stmt,
- struct pointer_set_t *used_basic_blocks)
-{
- if (!stmt)
- return true;
-
- if (CLAST_STMT_IS_A (stmt, stmt_root))
- return can_generate_code_stmt (stmt->next, used_basic_blocks);
-
- if (CLAST_STMT_IS_A (stmt, stmt_user))
- {
- CloogStatement *cs = ((struct clast_user_stmt *) stmt)->statement;
- graphite_bb_p gbb = (graphite_bb_p) cloog_statement_usr (cs);
-
- if (pointer_set_contains (used_basic_blocks, gbb))
- return false;
- pointer_set_insert (used_basic_blocks, gbb);
- return can_generate_code_stmt (stmt->next, used_basic_blocks);
- }
-
- if (CLAST_STMT_IS_A (stmt, stmt_for))
- return can_generate_code_stmt (((struct clast_for *) stmt)->body,
- used_basic_blocks)
- && can_generate_code_stmt (stmt->next, used_basic_blocks);
-
- if (CLAST_STMT_IS_A (stmt, stmt_guard))
- return can_generate_code_stmt (((struct clast_guard *) stmt)->then,
- used_basic_blocks);
-
- if (CLAST_STMT_IS_A (stmt, stmt_block))
- return can_generate_code_stmt (((struct clast_block *) stmt)->body,
- used_basic_blocks)
- && can_generate_code_stmt (stmt->next, used_basic_blocks);
-
- return false;
-}
-
-/* Returns true when it is possible to generate code for this STMT. */
-
-static bool
-can_generate_code (struct clast_stmt *stmt)
-{
- bool result;
- struct pointer_set_t *used_basic_blocks = pointer_set_create ();
-
- result = can_generate_code_stmt (stmt, used_basic_blocks);
- pointer_set_destroy (used_basic_blocks);
- return result;
-}
-
/* Remove from the CFG the REGION. */
static inline void
{
basic_block condition = if_region_get_condition_block (if_region);
edge false_edge = get_false_edge_from_guard_bb (condition);
+ basic_block dummy = false_edge->dest;
edge entry_region = SESE_ENTRY (region);
edge exit_region = SESE_EXIT (region);
basic_block before_region = entry_region->src;
redirect_edge_pred (entry_region, condition);
redirect_edge_pred (exit_region, before_region);
redirect_edge_pred (false_edge, last_in_region);
+ redirect_edge_succ (false_edge, single_succ (dummy));
+ delete_basic_block (dummy);
exit_region->flags = EDGE_FALLTHRU;
recompute_all_dominators ();
- SESE_EXIT (region) = single_succ_edge (false_edge->dest);
+ SESE_EXIT (region) = false_edge;
if_region->false_region = region;
if (slot)
update_ssa (TODO_update_ssa);
}
+/* Get the definition of NAME before the SCOP. Keep track of the
+ basic blocks that have been VISITED in a bitmap. */
+
+static tree
+get_vdef_before_scop (scop_p scop, tree name, sbitmap visited)
+{
+ unsigned i;
+ gimple def_stmt = SSA_NAME_DEF_STMT (name);
+ basic_block def_bb = gimple_bb (def_stmt);
+
+ if (!def_bb
+ || !bb_in_sese_p (def_bb, SCOP_REGION (scop)))
+ return name;
+
+ if (TEST_BIT (visited, def_bb->index))
+ return NULL_TREE;
+
+ SET_BIT (visited, def_bb->index);
+
+ switch (gimple_code (def_stmt))
+ {
+ case GIMPLE_PHI:
+ for (i = 0; i < gimple_phi_num_args (def_stmt); i++)
+ {
+ tree arg = gimple_phi_arg_def (def_stmt, i);
+ tree res = get_vdef_before_scop (scop, arg, visited);
+ if (res)
+ return res;
+ }
+ return NULL_TREE;
+
+ default:
+ return NULL_TREE;
+ }
+}
+
+/* Adjust a virtual phi node PHI that is placed at the end of the
+ generated code for SCOP:
+
+ | if (1)
+ | generated code from REGION;
+ | else
+ | REGION;
+
+ The FALSE_E edge comes from the original code, TRUE_E edge comes
+ from the code generated for the SCOP. */
+
+static void
+scop_adjust_vphi (scop_p scop, gimple phi, edge true_e)
+{
+ unsigned i;
+
+ gcc_assert (gimple_phi_num_args (phi) == 2);
+
+ for (i = 0; i < gimple_phi_num_args (phi); i++)
+ if (gimple_phi_arg_edge (phi, i) == true_e)
+ {
+ tree true_arg, false_arg, before_scop_arg;
+ sbitmap visited;
+
+ true_arg = gimple_phi_arg_def (phi, i);
+ if (!SSA_NAME_IS_DEFAULT_DEF (true_arg))
+ return;
+
+ false_arg = gimple_phi_arg_def (phi, i == 0 ? 1 : 0);
+ if (SSA_NAME_IS_DEFAULT_DEF (false_arg))
+ return;
+
+ visited = sbitmap_alloc (last_basic_block);
+ sbitmap_zero (visited);
+ before_scop_arg = get_vdef_before_scop (scop, false_arg, visited);
+ gcc_assert (before_scop_arg != NULL_TREE);
+ SET_PHI_ARG_DEF (phi, i, before_scop_arg);
+ sbitmap_free (visited);
+ }
+}
+
/* Adjusts the phi nodes in the block BB for variables defined in
SCOP_REGION and used outside the SCOP_REGION. The code generation
moves SCOP_REGION in the else clause of an "if (1)" and generates
for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
{
- unsigned i, false_i;
+ unsigned i;
+ unsigned false_i = 0;
gimple phi = gsi_stmt (si);
if (!is_gimple_reg (PHI_RESULT (phi)))
- continue;
+ {
+ scop_adjust_vphi (scop, phi, true_e);
+ continue;
+ }
for (i = 0; i < gimple_phi_num_args (phi); i++)
if (gimple_phi_arg_edge (phi, i) == false_e)
}
/* GIMPLE Loop Generator: generates loops from STMT in GIMPLE form for
- the given SCOP. */
+ the given SCOP. Return true if code generation succeeded. */
-static void
+static bool
gloog (scop_p scop, struct clast_stmt *stmt)
{
edge new_scop_exit_edge = NULL;
loop_p context_loop;
ifsese if_region = NULL;
- if (!can_generate_code (stmt))
- {
- cloog_clast_free (stmt);
- return;
- }
-
+ recompute_all_dominators ();
+ graphite_verify ();
if_region = move_sese_in_condition (SCOP_REGION (scop));
sese_build_livein_liveouts (SCOP_REGION (scop));
scop_insert_phis_for_liveouts (SCOP_REGION (scop),
recompute_all_dominators ();
graphite_verify ();
- cleanup_tree_cfg ();
- recompute_all_dominators ();
- graphite_verify ();
+ return true;
}
/* Returns the number of data references in SCOP. */
bool transform_done = false;
/* TODO: - Calculate the stride size automatically. */
- int stride_size = 64;
+ int stride_size = 51;
for (i = 0; VEC_iterate (graphite_bb_p, bbs, i, gb); i++)
transform_done |= graphite_trans_bb_block (gb, stride_size, loops);
j++;
/* Found perfect loop nest. */
- if (last_nb_loops - j > 0)
+ if (last_nb_loops - j >= 2)
transform_done |= graphite_trans_loop_block (bbs, last_nb_loops - j);
VEC_free (graphite_bb_p, heap, bbs);
continue;
for (j = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), j, loop); j++)
- if (!loop_in_scop_p (loop_outer (loop), scop))
+ if (!loop_in_sese_p (loop_outer (loop), SCOP_REGION (scop)))
{
sd_region open_scop;
open_scop.entry = loop->header;
{
int i;
scop_p scop;
+ bool transform_done = false;
if (number_of_loops () <= 1)
return;
if (!build_scop_loop_nests (scop))
continue;
- build_scop_canonical_schedules (scop);
build_bb_loops (scop);
+
if (!build_scop_conditions (scop))
continue;
+
find_scop_parameters (scop);
build_scop_context (scop);
if (!build_scop_iteration_domain (scop))
continue;
+ add_conditions_to_constraints (scop);
+ build_scop_canonical_schedules (scop);
+
build_scop_data_accesses (scop);
build_scop_dynamic_schedules (scop);
}
if (graphite_apply_transformations (scop))
- gloog (scop, find_transform (scop));
+ transform_done = gloog (scop, find_transform (scop));
#ifdef ENABLE_CHECKING
else
{
}
/* Cleanup. */
+ if (transform_done)
+ cleanup_tree_cfg ();
+
free_scops (current_scops);
cloog_finalize ();
free_original_copy_tables ();