/* Gimple Represented as Polyhedra.
   Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Sebastian Pop <sebastian.pop@inria.fr>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This pass converts GIMPLE to GRAPHITE, performs some loop
   transformations and then converts the resulting representation back
   to GIMPLE.

   An early description of this pass can be found in the GCC Summit'06
   paper "GRAPHITE: Polyhedral Analyses and Optimizations for GCC".
   The wiki page http://gcc.gnu.org/wiki/Graphite contains pointers to
   the related work.

   One important document to read is CLooG's internal manual:
   http://repo.or.cz/w/cloog-ppl.git?a=blob_plain;f=doc/cloog.texi;hb=HEAD
   that describes the data structure of loops used in this file, and
   the functions that are used for transforming the code.  */
37 #include "coretypes.h"
42 #include "basic-block.h"
43 #include "diagnostic.h"
44 #include "tree-flow.h"
46 #include "tree-dump.h"
49 #include "tree-chrec.h"
50 #include "tree-data-ref.h"
51 #include "tree-scalar-evolution.h"
52 #include "tree-pass.h"
54 #include "value-prof.h"
55 #include "pointer-set.h"
60 /* The CLooG header file is not -Wc++-compat ready as of 2009-05-11.
61 This #pragma should be removed when it is ready. */
62 #if GCC_VERSION >= 4003
63 #pragma GCC diagnostic warning "-Wc++-compat"
66 #include "cloog/cloog.h"
69 static VEC (scop_p, heap) *current_scops;
71 /* Converts a GMP constant V to a tree and returns it. */
74 gmp_cst_to_tree (tree type, Value v)
76 return build_int_cst (type, value_get_si (v));
79 /* Returns true when BB is in REGION. */
82 bb_in_sese_p (basic_block bb, sese region)
84 return pointer_set_contains (SESE_REGION_BBS (region), bb);
87 /* Returns true when LOOP is in the SESE region R. */
90 loop_in_sese_p (struct loop *loop, sese r)
92 return (bb_in_sese_p (loop->header, r)
93 && bb_in_sese_p (loop->latch, r));
96 /* For a USE in BB, if BB is outside REGION, mark the USE in the
97 SESE_LIVEIN and SESE_LIVEOUT sets. */
100 sese_build_livein_liveouts_use (sese region, basic_block bb, tree use)
105 if (TREE_CODE (use) != SSA_NAME)
108 ver = SSA_NAME_VERSION (use);
109 def_bb = gimple_bb (SSA_NAME_DEF_STMT (use));
111 || !bb_in_sese_p (def_bb, region)
112 || bb_in_sese_p (bb, region))
115 if (!SESE_LIVEIN_VER (region, ver))
116 SESE_LIVEIN_VER (region, ver) = BITMAP_ALLOC (NULL);
118 bitmap_set_bit (SESE_LIVEIN_VER (region, ver), bb->index);
119 bitmap_set_bit (SESE_LIVEOUT (region), ver);
122 /* Marks for rewrite all the SSA_NAMES defined in REGION and that are
123 used in BB that is outside of the REGION. */
126 sese_build_livein_liveouts_bb (sese region, basic_block bb)
128 gimple_stmt_iterator bsi;
134 FOR_EACH_EDGE (e, ei, bb->succs)
135 for (bsi = gsi_start_phis (e->dest); !gsi_end_p (bsi); gsi_next (&bsi))
136 sese_build_livein_liveouts_use (region, bb,
137 PHI_ARG_DEF_FROM_EDGE (gsi_stmt (bsi), e));
139 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
140 FOR_EACH_SSA_TREE_OPERAND (var, gsi_stmt (bsi), iter, SSA_OP_ALL_USES)
141 sese_build_livein_liveouts_use (region, bb, var);
144 /* Build the SESE_LIVEIN and SESE_LIVEOUT for REGION. */
147 sese_build_livein_liveouts (sese region)
151 SESE_LIVEOUT (region) = BITMAP_ALLOC (NULL);
152 SESE_NUM_VER (region) = num_ssa_names;
153 SESE_LIVEIN (region) = XCNEWVEC (bitmap, SESE_NUM_VER (region));
156 sese_build_livein_liveouts_bb (region, bb);
159 /* Register basic blocks belonging to a region in a pointer set. */
162 register_bb_in_sese (basic_block entry_bb, basic_block exit_bb, sese region)
166 basic_block bb = entry_bb;
168 FOR_EACH_EDGE (e, ei, bb->succs)
170 if (!pointer_set_contains (SESE_REGION_BBS (region), e->dest) &&
171 e->dest->index != exit_bb->index)
173 pointer_set_insert (SESE_REGION_BBS (region), e->dest);
174 register_bb_in_sese (e->dest, exit_bb, region);
179 /* Builds a new SESE region from edges ENTRY and EXIT. */
182 new_sese (edge entry, edge exit)
184 sese res = XNEW (struct sese_d);
186 SESE_ENTRY (res) = entry;
187 SESE_EXIT (res) = exit;
188 SESE_REGION_BBS (res) = pointer_set_create ();
189 register_bb_in_sese (entry->dest, exit->dest, res);
191 SESE_LIVEOUT (res) = NULL;
192 SESE_NUM_VER (res) = 0;
193 SESE_LIVEIN (res) = NULL;
198 /* Deletes REGION. */
201 free_sese (sese region)
205 for (i = 0; i < SESE_NUM_VER (region); i++)
206 BITMAP_FREE (SESE_LIVEIN_VER (region, i));
208 if (SESE_LIVEIN (region))
209 free (SESE_LIVEIN (region));
211 if (SESE_LIVEOUT (region))
212 BITMAP_FREE (SESE_LIVEOUT (region));
214 pointer_set_destroy (SESE_REGION_BBS (region));
220 /* Debug the list of old induction variables for this SCOP. */
223 debug_oldivs (scop_p scop)
228 fprintf (stderr, "Old IVs:");
230 for (i = 0; VEC_iterate (name_tree, SCOP_OLDIVS (scop), i, oldiv); i++)
232 fprintf (stderr, "(");
233 print_generic_expr (stderr, oldiv->t, 0);
234 fprintf (stderr, ", %s, %d)\n", oldiv->name, oldiv->loop->num);
236 fprintf (stderr, "\n");
239 /* Debug the loops around basic block GB. */
242 debug_loop_vec (graphite_bb_p gb)
247 fprintf (stderr, "Loop Vec:");
249 for (i = 0; VEC_iterate (loop_p, GBB_LOOPS (gb), i, loop); i++)
250 fprintf (stderr, "%d: %d, ", i, loop ? loop->num : -1);
252 fprintf (stderr, "\n");
255 /* Returns true if stack ENTRY is a constant. */
258 iv_stack_entry_is_constant (iv_stack_entry *entry)
260 return entry->kind == iv_stack_entry_const;
263 /* Returns true if stack ENTRY is an induction variable. */
266 iv_stack_entry_is_iv (iv_stack_entry *entry)
268 return entry->kind == iv_stack_entry_iv;
271 /* Push (IV, NAME) on STACK. */
274 loop_iv_stack_push_iv (loop_iv_stack stack, tree iv, const char *name)
276 iv_stack_entry *entry = XNEW (iv_stack_entry);
277 name_tree named_iv = XNEW (struct name_tree_d);
280 named_iv->name = name;
282 entry->kind = iv_stack_entry_iv;
283 entry->data.iv = named_iv;
285 VEC_safe_push (iv_stack_entry_p, heap, *stack, entry);
288 /* Inserts a CONSTANT in STACK at INDEX. */
291 loop_iv_stack_insert_constant (loop_iv_stack stack, int index,
294 iv_stack_entry *entry = XNEW (iv_stack_entry);
296 entry->kind = iv_stack_entry_const;
297 entry->data.constant = constant;
299 VEC_safe_insert (iv_stack_entry_p, heap, *stack, index, entry);
302 /* Pops and frees an element out of STACK. */
305 loop_iv_stack_pop (loop_iv_stack stack)
307 iv_stack_entry_p entry = VEC_pop (iv_stack_entry_p, *stack);
309 free (entry->data.iv);
313 /* Get the IV at INDEX in STACK. */
316 loop_iv_stack_get_iv (loop_iv_stack stack, int index)
318 iv_stack_entry_p entry = VEC_index (iv_stack_entry_p, *stack, index);
319 iv_stack_entry_data data = entry->data;
321 return iv_stack_entry_is_iv (entry) ? data.iv->t : data.constant;
324 /* Get the IV from its NAME in STACK. */
327 loop_iv_stack_get_iv_from_name (loop_iv_stack stack, const char* name)
330 iv_stack_entry_p entry;
332 for (i = 0; VEC_iterate (iv_stack_entry_p, *stack, i, entry); i++)
334 name_tree iv = entry->data.iv;
335 if (!strcmp (name, iv->name))
342 /* Prints on stderr the contents of STACK. */
345 debug_loop_iv_stack (loop_iv_stack stack)
348 iv_stack_entry_p entry;
351 fprintf (stderr, "(");
353 for (i = 0; VEC_iterate (iv_stack_entry_p, *stack, i, entry); i++)
358 fprintf (stderr, " ");
360 if (iv_stack_entry_is_iv (entry))
362 name_tree iv = entry->data.iv;
363 fprintf (stderr, "%s:", iv->name);
364 print_generic_expr (stderr, iv->t, 0);
368 tree constant = entry->data.constant;
369 print_generic_expr (stderr, constant, 0);
370 fprintf (stderr, ":");
371 print_generic_expr (stderr, constant, 0);
375 fprintf (stderr, ")\n");
381 free_loop_iv_stack (loop_iv_stack stack)
384 iv_stack_entry_p entry;
386 for (i = 0; VEC_iterate (iv_stack_entry_p, *stack, i, entry); i++)
388 free (entry->data.iv);
392 VEC_free (iv_stack_entry_p, heap, *stack);
397 /* Structure containing the mapping between the CLooG's induction
398 variable and the type of the old induction variable. */
399 typedef struct ivtype_map_elt_d
402 const char *cloog_iv;
405 /* Print to stderr the element ELT. */
408 debug_ivtype_elt (ivtype_map_elt elt)
410 fprintf (stderr, "(%s, ", elt->cloog_iv);
411 print_generic_expr (stderr, elt->type, 0);
412 fprintf (stderr, ")\n");
415 /* Helper function for debug_ivtype_map. */
418 debug_ivtype_map_1 (void **slot, void *s ATTRIBUTE_UNUSED)
420 struct ivtype_map_elt_d *entry = (struct ivtype_map_elt_d *) *slot;
421 debug_ivtype_elt (entry);
425 /* Print to stderr all the elements of MAP. */
428 debug_ivtype_map (htab_t map)
430 htab_traverse (map, debug_ivtype_map_1, NULL);
433 /* Constructs a new SCEV_INFO_STR structure for VAR and INSTANTIATED_BELOW. */
435 static inline ivtype_map_elt
436 new_ivtype_map_elt (const char *cloog_iv, tree type)
440 res = XNEW (struct ivtype_map_elt_d);
441 res->cloog_iv = cloog_iv;
447 /* Computes a hash function for database element ELT. */
450 ivtype_map_elt_info (const void *elt)
452 return htab_hash_pointer (((const struct ivtype_map_elt_d *) elt)->cloog_iv);
455 /* Compares database elements E1 and E2. */
458 eq_ivtype_map_elts (const void *e1, const void *e2)
460 const struct ivtype_map_elt_d *elt1 = (const struct ivtype_map_elt_d *) e1;
461 const struct ivtype_map_elt_d *elt2 = (const struct ivtype_map_elt_d *) e2;
463 return (elt1->cloog_iv == elt2->cloog_iv);
468 /* Given a CLOOG_IV, returns the type that it should have in GCC land.
469 If the information is not available, i.e. in the case one of the
470 transforms created the loop, just return integer_type_node. */
473 gcc_type_for_cloog_iv (const char *cloog_iv, graphite_bb_p gbb)
475 struct ivtype_map_elt_d tmp;
478 tmp.cloog_iv = cloog_iv;
479 slot = htab_find_slot (GBB_CLOOG_IV_TYPES (gbb), &tmp, NO_INSERT);
482 return ((ivtype_map_elt) *slot)->type;
484 return integer_type_node;
487 /* Inserts constants derived from the USER_STMT argument list into the
488 STACK. This is needed to map old ivs to constants when loops have
492 loop_iv_stack_patch_for_consts (loop_iv_stack stack,
493 struct clast_user_stmt *user_stmt)
495 struct clast_stmt *t;
497 CloogStatement *cs = user_stmt->statement;
498 graphite_bb_p gbb = (graphite_bb_p) cloog_statement_usr (cs);
500 for (t = user_stmt->substitutions; t; t = t->next)
502 struct clast_expr *expr = (struct clast_expr *)
503 ((struct clast_assignment *)t)->RHS;
504 struct clast_term *term = (struct clast_term *) expr;
506 /* FIXME: What should be done with expr_bin, expr_red? */
507 if (expr->type == expr_term
510 loop_p loop = gbb_loop_at_index (gbb, index);
511 tree oldiv = oldiv_for_loop (GBB_SCOP (gbb), loop);
512 tree type = oldiv ? TREE_TYPE (oldiv) : integer_type_node;
513 tree value = gmp_cst_to_tree (type, term->val);
514 loop_iv_stack_insert_constant (stack, index, value);
520 /* Removes all constants in the iv STACK. */
523 loop_iv_stack_remove_constants (loop_iv_stack stack)
526 iv_stack_entry *entry;
528 for (i = 0; VEC_iterate (iv_stack_entry_p, *stack, i, entry);)
530 if (iv_stack_entry_is_constant (entry))
532 free (VEC_index (iv_stack_entry_p, *stack, i));
533 VEC_ordered_remove (iv_stack_entry_p, *stack, i);
540 /* Returns a new loop_to_cloog_loop_str structure. */
542 static inline struct loop_to_cloog_loop_str *
543 new_loop_to_cloog_loop_str (int loop_num,
545 CloogLoop *cloog_loop)
547 struct loop_to_cloog_loop_str *result;
549 result = XNEW (struct loop_to_cloog_loop_str);
550 result->loop_num = loop_num;
551 result->cloog_loop = cloog_loop;
552 result->loop_position = loop_position;
557 /* Hash function for SCOP_LOOP2CLOOG_LOOP hash table. */
560 hash_loop_to_cloog_loop (const void *elt)
562 return ((const struct loop_to_cloog_loop_str *) elt)->loop_num;
565 /* Equality function for SCOP_LOOP2CLOOG_LOOP hash table. */
568 eq_loop_to_cloog_loop (const void *el1, const void *el2)
570 const struct loop_to_cloog_loop_str *elt1, *elt2;
572 elt1 = (const struct loop_to_cloog_loop_str *) el1;
573 elt2 = (const struct loop_to_cloog_loop_str *) el2;
574 return elt1->loop_num == elt2->loop_num;
/* Compares two graphite bbs and returns an integer less than, equal to, or
   greater than zero if the first argument is considered to be respectively
   less than, equal to, or greater than the second.
   We compare using the lexicographic order of the static schedules.  */

static int
gbb_compare (const void *p_1, const void *p_2)
{
  const struct graphite_bb *const gbb_1
    = *(const struct graphite_bb *const *) p_1;
  const struct graphite_bb *const gbb_2
    = *(const struct graphite_bb *const *) p_2;

  return lambda_vector_compare (GBB_STATIC_SCHEDULE (gbb_1),
				gbb_nb_loops (gbb_1) + 1,
				GBB_STATIC_SCHEDULE (gbb_2),
				gbb_nb_loops (gbb_2) + 1);
}
596 /* Sort graphite bbs in SCOP. */
599 graphite_sort_gbbs (scop_p scop)
601 VEC (graphite_bb_p, heap) *bbs = SCOP_BBS (scop);
603 qsort (VEC_address (graphite_bb_p, bbs),
604 VEC_length (graphite_bb_p, bbs),
605 sizeof (graphite_bb_p), gbb_compare);
608 /* Dump conditions of a graphite basic block GBB on FILE. */
611 dump_gbb_conditions (FILE *file, graphite_bb_p gbb)
615 VEC (gimple, heap) *conditions = GBB_CONDITIONS (gbb);
617 if (VEC_empty (gimple, conditions))
620 fprintf (file, "\tbb %d\t: cond = {", GBB_BB (gbb)->index);
622 for (i = 0; VEC_iterate (gimple, conditions, i, stmt); i++)
623 print_gimple_stmt (file, stmt, 0, 0);
625 fprintf (file, "}\n");
628 /* Converts the graphite scheduling function into a cloog scattering
629 matrix. This scattering matrix is used to limit the possible cloog
630 output to valid programs in respect to the scheduling function.
632 SCATTERING_DIMENSIONS specifies the dimensionality of the scattering
633 matrix. CLooG 0.14.0 and previous versions require, that all scattering
634 functions of one CloogProgram have the same dimensionality, therefore we
635 allow to specify it. (Should be removed in future versions) */
638 schedule_to_scattering (graphite_bb_p gb, int scattering_dimensions)
641 scop_p scop = GBB_SCOP (gb);
643 int nb_iterators = gbb_nb_loops (gb);
645 /* The cloog scattering matrix consists of these colums:
647 scattering_dimensions cols = Scattering dimensions,
648 nb_iterators cols = bb's iterators,
649 scop_nb_params cols = Parameters,
654 scattering_dimensions = 5
664 s1 s2 s3 s4 s5 i p1 p2 1
665 1 0 0 0 0 0 0 0 -4 = 0
666 0 1 0 0 0 -1 0 0 0 = 0
667 0 0 1 0 0 0 0 0 -5 = 0 */
668 int nb_params = scop_nb_params (scop);
669 int nb_cols = 1 + scattering_dimensions + nb_iterators + nb_params + 1;
670 int col_const = nb_cols - 1;
671 int col_iter_offset = 1 + scattering_dimensions;
673 CloogMatrix *scat = cloog_matrix_alloc (scattering_dimensions, nb_cols);
675 gcc_assert (scattering_dimensions >= nb_iterators * 2 + 1);
677 /* Initialize the identity matrix. */
678 for (i = 0; i < scattering_dimensions; i++)
679 value_set_si (scat->p[i][i + 1], 1);
681 /* Textual order outside the first loop */
682 value_set_si (scat->p[0][col_const], -GBB_STATIC_SCHEDULE (gb)[0]);
684 /* For all surrounding loops. */
685 for (i = 0; i < nb_iterators; i++)
687 int schedule = GBB_STATIC_SCHEDULE (gb)[i + 1];
689 /* Iterations of this loop. */
690 value_set_si (scat->p[2 * i + 1][col_iter_offset + i], -1);
692 /* Textual order inside this loop. */
693 value_set_si (scat->p[2 * i + 2][col_const], -schedule);
699 /* Print the schedules of GB to FILE with INDENT white spaces before.
700 VERBOSITY determines how verbose the code pretty printers are. */
703 print_graphite_bb (FILE *file, graphite_bb_p gb, int indent, int verbosity)
705 CloogMatrix *scattering;
708 fprintf (file, "\nGBB (\n");
710 print_loops_bb (file, GBB_BB (gb), indent+2, verbosity);
714 fprintf (file, " (domain: \n");
715 cloog_matrix_print (file, GBB_DOMAIN (gb));
716 fprintf (file, " )\n");
719 if (GBB_STATIC_SCHEDULE (gb))
721 fprintf (file, " (static schedule: ");
722 print_lambda_vector (file, GBB_STATIC_SCHEDULE (gb),
723 gbb_nb_loops (gb) + 1);
724 fprintf (file, " )\n");
729 fprintf (file, " (contained loops: \n");
730 for (i = 0; VEC_iterate (loop_p, GBB_LOOPS (gb), i, loop); i++)
732 fprintf (file, " iterator %d => NULL \n", i);
734 fprintf (file, " iterator %d => loop %d \n", i,
736 fprintf (file, " )\n");
739 if (GBB_DATA_REFS (gb))
740 dump_data_references (file, GBB_DATA_REFS (gb));
742 if (GBB_CONDITIONS (gb))
744 fprintf (file, " (conditions: \n");
745 dump_gbb_conditions (file, gb);
746 fprintf (file, " )\n");
750 && GBB_STATIC_SCHEDULE (gb))
752 fprintf (file, " (scattering: \n");
753 scattering = schedule_to_scattering (gb, 2 * gbb_nb_loops (gb) + 1);
754 cloog_matrix_print (file, scattering);
755 cloog_matrix_free (scattering);
756 fprintf (file, " )\n");
759 fprintf (file, ")\n");
762 /* Print to STDERR the schedules of GB with VERBOSITY level. */
765 debug_gbb (graphite_bb_p gb, int verbosity)
767 print_graphite_bb (stderr, gb, 0, verbosity);
771 /* Print SCOP to FILE. VERBOSITY determines how verbose the pretty
775 print_scop (FILE *file, scop_p scop, int verbosity)
780 fprintf (file, "\nSCoP_%d_%d (\n",
781 SCOP_ENTRY (scop)->index, SCOP_EXIT (scop)->index);
783 fprintf (file, " (cloog: \n");
784 cloog_program_print (file, SCOP_PROG (scop));
785 fprintf (file, " )\n");
792 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
793 print_graphite_bb (file, gb, 0, verbosity);
796 fprintf (file, ")\n");
799 /* Print all the SCOPs to FILE. VERBOSITY determines how verbose the
800 code pretty printers are. */
803 print_scops (FILE *file, int verbosity)
808 for (i = 0; VEC_iterate (scop_p, current_scops, i, scop); i++)
809 print_scop (file, scop, verbosity);
812 /* Debug SCOP. VERBOSITY determines how verbose the code pretty
816 debug_scop (scop_p scop, int verbosity)
818 print_scop (stderr, scop, verbosity);
/* Debug all SCoPs from CURRENT_SCOPS.  VERBOSITY determines how
   verbose the code pretty printers are.  */

void
debug_scops (int verbosity)
{
  print_scops (stderr, verbosity);
}
830 /* Pretty print to FILE the SCOP in DOT format. */
833 dot_scop_1 (FILE *file, scop_p scop)
838 basic_block entry = SCOP_ENTRY (scop);
839 basic_block exit = SCOP_EXIT (scop);
841 fprintf (file, "digraph SCoP_%d_%d {\n", entry->index,
847 fprintf (file, "%d [shape=triangle];\n", bb->index);
850 fprintf (file, "%d [shape=box];\n", bb->index);
852 if (bb_in_sese_p (bb, SCOP_REGION (scop)))
853 fprintf (file, "%d [color=red];\n", bb->index);
855 FOR_EACH_EDGE (e, ei, bb->succs)
856 fprintf (file, "%d -> %d;\n", bb->index, e->dest->index);
859 fputs ("}\n\n", file);
862 /* Display SCOP using dotty. */
865 dot_scop (scop_p scop)
867 dot_scop_1 (stderr, scop);
870 /* Pretty print all SCoPs in DOT format and mark them with different colors.
871 If there are not enough colors, paint later SCoPs gray.
873 - "*" after the node number: entry of a SCoP,
874 - "#" after the node number: exit of a SCoP,
875 - "()" entry or exit not part of SCoP. */
878 dot_all_scops_1 (FILE *file)
887 /* Disable debugging while printing graph. */
888 int tmp_dump_flags = dump_flags;
891 fprintf (file, "digraph all {\n");
895 int part_of_scop = false;
897 /* Use HTML for every bb label. So we are able to print bbs
898 which are part of two different SCoPs, with two different
899 background colors. */
900 fprintf (file, "%d [label=<\n <TABLE BORDER=\"0\" CELLBORDER=\"1\" ",
902 fprintf (file, "CELLSPACING=\"0\">\n");
904 /* Select color for SCoP. */
905 for (i = 0; VEC_iterate (scop_p, current_scops, i, scop); i++)
906 if (bb_in_sese_p (bb, SCOP_REGION (scop))
907 || (SCOP_EXIT (scop) == bb)
908 || (SCOP_ENTRY (scop) == bb))
967 fprintf (file, " <TR><TD WIDTH=\"50\" BGCOLOR=\"%s\">", color);
969 if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
970 fprintf (file, " (");
972 if (bb == SCOP_ENTRY (scop)
973 && bb == SCOP_EXIT (scop))
974 fprintf (file, " %d*# ", bb->index);
975 else if (bb == SCOP_ENTRY (scop))
976 fprintf (file, " %d* ", bb->index);
977 else if (bb == SCOP_EXIT (scop))
978 fprintf (file, " %d# ", bb->index);
980 fprintf (file, " %d ", bb->index);
982 if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
985 fprintf (file, "</TD></TR>\n");
991 fprintf (file, " <TR><TD WIDTH=\"50\" BGCOLOR=\"#ffffff\">");
992 fprintf (file, " %d </TD></TR>\n", bb->index);
995 fprintf (file, " </TABLE>>, shape=box, style=\"setlinewidth(0)\"]\n");
1000 FOR_EACH_EDGE (e, ei, bb->succs)
1001 fprintf (file, "%d -> %d;\n", bb->index, e->dest->index);
1004 fputs ("}\n\n", file);
1006 /* Enable debugging again. */
1007 dump_flags = tmp_dump_flags;
/* Display all SCoPs using dotty.  */

void
dot_all_scops (void)
{
  /* When debugging, enable the following code.  This cannot be used
     in production compilers because it calls "system".  */
#if 0
  FILE *stream = fopen ("/tmp/allscops.dot", "w");
  gcc_assert (stream);

  dot_all_scops_1 (stream);
  fclose (stream);

  system ("dotty /tmp/allscops.dot");
#else
  dot_all_scops_1 (stderr);
#endif
}
1030 /* Returns the outermost loop in SCOP that contains BB. */
1032 static struct loop *
1033 outermost_loop_in_scop (scop_p scop, basic_block bb)
1037 nest = bb->loop_father;
1038 while (loop_outer (nest)
1039 && loop_in_sese_p (loop_outer (nest), SCOP_REGION (scop)))
1040 nest = loop_outer (nest);
1045 /* Returns the block preceding the entry of SCOP. */
1048 block_before_scop (scop_p scop)
1050 return SESE_ENTRY (SCOP_REGION (scop))->src;
1053 /* Return true when EXPR is an affine function in LOOP with parameters
1054 instantiated relative to SCOP_ENTRY. */
1057 loop_affine_expr (basic_block scop_entry, struct loop *loop, tree expr)
1060 tree scev = analyze_scalar_evolution (loop, expr);
1062 scev = instantiate_scev (scop_entry, loop, scev);
1064 return (evolution_function_is_invariant_p (scev, n)
1065 || evolution_function_is_affine_multivariate_p (scev, n));
1068 /* Return true if REF or any of its subtrees contains a
1072 contains_component_ref_p (tree ref)
1077 while (handled_component_p (ref))
1079 if (TREE_CODE (ref) == COMPONENT_REF)
1082 ref = TREE_OPERAND (ref, 0);
1088 /* Return true if the operand OP is simple. */
1091 is_simple_operand (loop_p loop, gimple stmt, tree op)
1093 /* It is not a simple operand when it is a declaration, */
1095 /* or a structure, */
1096 || AGGREGATE_TYPE_P (TREE_TYPE (op))
1097 /* or a COMPONENT_REF, */
1098 || contains_component_ref_p (op)
1099 /* or a memory access that cannot be analyzed by the data
1100 reference analysis. */
1101 || ((handled_component_p (op) || INDIRECT_REF_P (op))
1102 && !stmt_simple_memref_p (loop, stmt, op)))
1108 /* Return true only when STMT is simple enough for being handled by
1109 Graphite. This depends on SCOP_ENTRY, as the parametetrs are
1110 initialized relatively to this basic block. */
1113 stmt_simple_for_scop_p (basic_block scop_entry, gimple stmt)
1115 basic_block bb = gimple_bb (stmt);
1116 struct loop *loop = bb->loop_father;
1118 /* GIMPLE_ASM and GIMPLE_CALL may embed arbitrary side effects.
1119 Calls have side-effects, except those to const or pure
1121 if (gimple_has_volatile_ops (stmt)
1122 || (gimple_code (stmt) == GIMPLE_CALL
1123 && !(gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE)))
1124 || (gimple_code (stmt) == GIMPLE_ASM))
1127 switch (gimple_code (stmt))
1136 ssa_op_iter op_iter;
1137 enum tree_code code = gimple_cond_code (stmt);
1139 /* We can only handle this kind of conditional expressions.
1140 For inequalities like "if (i != 3 * k)" we need unions of
1141 polyhedrons. Expressions like "if (a)" or "if (a == 15)" need
1142 them for the else branch. */
1143 if (!(code == LT_EXPR
1146 || code == GE_EXPR))
1152 FOR_EACH_SSA_TREE_OPERAND (op, stmt, op_iter, SSA_OP_ALL_USES)
1153 if (!loop_affine_expr (scop_entry, loop, op))
1161 enum tree_code code = gimple_assign_rhs_code (stmt);
1163 switch (get_gimple_rhs_class (code))
1165 case GIMPLE_UNARY_RHS:
1166 case GIMPLE_SINGLE_RHS:
1167 return (is_simple_operand (loop, stmt, gimple_assign_lhs (stmt))
1168 && is_simple_operand (loop, stmt, gimple_assign_rhs1 (stmt)));
1170 case GIMPLE_BINARY_RHS:
1171 return (is_simple_operand (loop, stmt, gimple_assign_lhs (stmt))
1172 && is_simple_operand (loop, stmt, gimple_assign_rhs1 (stmt))
1173 && is_simple_operand (loop, stmt, gimple_assign_rhs2 (stmt)));
1175 case GIMPLE_INVALID_RHS:
1184 size_t n = gimple_call_num_args (stmt);
1185 tree lhs = gimple_call_lhs (stmt);
1187 if (lhs && !is_simple_operand (loop, stmt, lhs))
1190 for (i = 0; i < n; i++)
1191 if (!is_simple_operand (loop, stmt, gimple_call_arg (stmt, i)))
1198 /* These nodes cut a new scope. */
1205 /* Returns the statement of BB that contains a harmful operation: that
1206 can be a function call with side effects, the induction variables
1207 are not linear with respect to SCOP_ENTRY, etc. The current open
1208 scop should end before this statement. */
1211 harmful_stmt_in_bb (basic_block scop_entry, basic_block bb)
1213 gimple_stmt_iterator gsi;
1216 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1217 if (!stmt_simple_for_scop_p (scop_entry, gsi_stmt (gsi)))
1218 return gsi_stmt (gsi);
1220 stmt = last_stmt (bb);
1221 if (stmt && gimple_code (stmt) == GIMPLE_COND)
1223 tree lhs = gimple_cond_lhs (stmt);
1224 tree rhs = gimple_cond_rhs (stmt);
1226 if (TREE_CODE (TREE_TYPE (lhs)) == REAL_TYPE
1227 || TREE_CODE (TREE_TYPE (rhs)) == REAL_TYPE)
1234 /* Returns true when BB will be represented in graphite. Return false
1235 for the basic blocks that contain code eliminated in the code
1236 generation pass: i.e. induction variables and exit conditions. */
1239 graphite_stmt_p (scop_p scop, basic_block bb,
1240 VEC (data_reference_p, heap) *drs)
1242 gimple_stmt_iterator gsi;
1243 loop_p loop = bb->loop_father;
1245 if (VEC_length (data_reference_p, drs) > 0)
1248 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1250 gimple stmt = gsi_stmt (gsi);
1252 switch (gimple_code (stmt))
1254 /* Control flow expressions can be ignored, as they are
1255 represented in the iteration domains and will be
1256 regenerated by graphite. */
1264 tree var = gimple_assign_lhs (stmt);
1265 var = analyze_scalar_evolution (loop, var);
1266 var = instantiate_scev (block_before_scop (scop), loop, var);
1268 if (chrec_contains_undetermined (var))
1282 /* Store the GRAPHITE representation of BB. */
1285 new_graphite_bb (scop_p scop, basic_block bb)
1287 struct graphite_bb *gbb;
1288 VEC (data_reference_p, heap) *drs = VEC_alloc (data_reference_p, heap, 5);
1289 struct loop *nest = outermost_loop_in_scop (scop, bb);
1290 gimple_stmt_iterator gsi;
1292 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1293 find_data_references_in_stmt (nest, gsi_stmt (gsi), &drs);
1295 if (!graphite_stmt_p (scop, bb, drs))
1297 free_data_refs (drs);
1301 gbb = XNEW (struct graphite_bb);
1304 GBB_SCOP (gbb) = scop;
1305 GBB_DATA_REFS (gbb) = drs;
1306 GBB_DOMAIN (gbb) = NULL;
1307 GBB_CONDITIONS (gbb) = NULL;
1308 GBB_CONDITION_CASES (gbb) = NULL;
1309 GBB_LOOPS (gbb) = NULL;
1310 GBB_STATIC_SCHEDULE (gbb) = NULL;
1311 GBB_CLOOG_IV_TYPES (gbb) = NULL;
1312 VEC_safe_push (graphite_bb_p, heap, SCOP_BBS (scop), gbb);
1318 free_graphite_bb (struct graphite_bb *gbb)
1320 if (GBB_DOMAIN (gbb))
1321 cloog_matrix_free (GBB_DOMAIN (gbb));
1323 if (GBB_CLOOG_IV_TYPES (gbb))
1324 htab_delete (GBB_CLOOG_IV_TYPES (gbb));
1326 /* FIXME: free_data_refs is disabled for the moment, but should be
1329 free_data_refs (GBB_DATA_REFS (gbb)); */
1331 VEC_free (gimple, heap, GBB_CONDITIONS (gbb));
1332 VEC_free (gimple, heap, GBB_CONDITION_CASES (gbb));
1333 VEC_free (loop_p, heap, GBB_LOOPS (gbb));
1334 GBB_BB (gbb)->aux = 0;
1340 /* Structure containing the mapping between the old names and the new
1341 names used after block copy in the new loop context. */
1342 typedef struct rename_map_elt_d
1344 tree old_name, new_name;
1348 /* Print to stderr the element ELT. */
1351 debug_rename_elt (rename_map_elt elt)
1353 fprintf (stderr, "(");
1354 print_generic_expr (stderr, elt->old_name, 0);
1355 fprintf (stderr, ", ");
1356 print_generic_expr (stderr, elt->new_name, 0);
1357 fprintf (stderr, ")\n");
1360 /* Helper function for debug_rename_map. */
1363 debug_rename_map_1 (void **slot, void *s ATTRIBUTE_UNUSED)
1365 struct rename_map_elt_d *entry = (struct rename_map_elt_d *) *slot;
1366 debug_rename_elt (entry);
1370 /* Print to stderr all the elements of MAP. */
1373 debug_rename_map (htab_t map)
1375 htab_traverse (map, debug_rename_map_1, NULL);
1378 /* Constructs a new SCEV_INFO_STR structure for VAR and INSTANTIATED_BELOW. */
1380 static inline rename_map_elt
1381 new_rename_map_elt (tree old_name, tree new_name)
1385 res = XNEW (struct rename_map_elt_d);
1386 res->old_name = old_name;
1387 res->new_name = new_name;
1392 /* Computes a hash function for database element ELT. */
1395 rename_map_elt_info (const void *elt)
1397 return htab_hash_pointer (((const struct rename_map_elt_d *) elt)->old_name);
1400 /* Compares database elements E1 and E2. */
1403 eq_rename_map_elts (const void *e1, const void *e2)
1405 const struct rename_map_elt_d *elt1 = (const struct rename_map_elt_d *) e1;
1406 const struct rename_map_elt_d *elt2 = (const struct rename_map_elt_d *) e2;
1408 return (elt1->old_name == elt2->old_name);
1411 /* Returns the new name associated to OLD_NAME in MAP. */
1414 get_new_name_from_old_name (htab_t map, tree old_name)
1416 struct rename_map_elt_d tmp;
1419 tmp.old_name = old_name;
1420 slot = htab_find_slot (map, &tmp, NO_INSERT);
1423 return ((rename_map_elt) *slot)->new_name;
1430 /* Creates a new scop starting with ENTRY. */
1433 new_scop (edge entry, edge exit)
1435 scop_p scop = XNEW (struct scop);
1437 gcc_assert (entry && exit);
1439 SCOP_REGION (scop) = new_sese (entry, exit);
1440 SCOP_BBS (scop) = VEC_alloc (graphite_bb_p, heap, 3);
1441 SCOP_OLDIVS (scop) = VEC_alloc (name_tree, heap, 3);
1442 SCOP_LOOPS (scop) = BITMAP_ALLOC (NULL);
1443 SCOP_LOOP_NEST (scop) = VEC_alloc (loop_p, heap, 3);
1444 SCOP_ADD_PARAMS (scop) = true;
1445 SCOP_PARAMS (scop) = VEC_alloc (name_tree, heap, 3);
1446 SCOP_PROG (scop) = cloog_program_malloc ();
1447 cloog_program_set_names (SCOP_PROG (scop), cloog_names_malloc ());
1448 SCOP_LOOP2CLOOG_LOOP (scop) = htab_create (10, hash_loop_to_cloog_loop,
1449 eq_loop_to_cloog_loop,
1451 SCOP_LIVEOUT_RENAMES (scop) = htab_create (10, rename_map_elt_info,
1452 eq_rename_map_elts, free);
1459 free_scop (scop_p scop)
1463 struct graphite_bb *gb;
1466 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
1467 free_graphite_bb (gb);
1469 VEC_free (graphite_bb_p, heap, SCOP_BBS (scop));
1470 BITMAP_FREE (SCOP_LOOPS (scop));
1471 VEC_free (loop_p, heap, SCOP_LOOP_NEST (scop));
1473 for (i = 0; VEC_iterate (name_tree, SCOP_OLDIVS (scop), i, iv); i++)
1475 VEC_free (name_tree, heap, SCOP_OLDIVS (scop));
1477 for (i = 0; VEC_iterate (name_tree, SCOP_PARAMS (scop), i, p); i++)
1480 VEC_free (name_tree, heap, SCOP_PARAMS (scop));
1481 cloog_program_free (SCOP_PROG (scop));
1482 htab_delete (SCOP_LOOP2CLOOG_LOOP (scop));
1483 htab_delete (SCOP_LIVEOUT_RENAMES (scop));
1484 free_sese (SCOP_REGION (scop));
1488 /* Deletes all scops in SCOPS. */
1491 free_scops (VEC (scop_p, heap) *scops)
1496 for (i = 0; VEC_iterate (scop_p, scops, i, scop); i++)
1499 VEC_free (scop_p, heap, scops);
/* Classification of basic blocks used by the SCoP detection.
   NOTE(review): only three enumerators are visible in this copy of
   the file; the remaining ones are reconstructed from the upstream
   sources — confirm against them.  */
typedef enum gbb_type {
  GBB_UNKNOWN,
  GBB_LOOP_SING_EXIT_HEADER,
  GBB_LOOP_MULT_EXIT_HEADER,
  GBB_LOOP_EXIT,
  GBB_COND_HEADER,
  GBB_SIMPLE,
  GBB_LAST
} gbb_type;
1512 /* Detect the type of BB. Loop headers are only marked if they are
1513 new. This means their loop_father is different to LAST_LOOP.
1514 Otherwise they are treated like any other bb and their type can be
1518 get_bb_type (basic_block bb, struct loop *last_loop)
1520 VEC (basic_block, heap) *dom;
1522 struct loop *loop = bb->loop_father;
1524 /* Check if we enter a new loop.  */
1525 if (loop != last_loop)
1527 if (single_exit (loop) != NULL)
1528 return GBB_LOOP_SING_EXIT_HEADER;
1529 else if (loop->num != 0)
1530 return GBB_LOOP_MULT_EXIT_HEADER;
1532 return GBB_COND_HEADER;
1535 dom = get_dominated_by (CDI_DOMINATORS, bb);
1536 nb_dom = VEC_length (basic_block, dom);
1537 VEC_free (basic_block, heap, dom);
/* NOTE(review): a bb dominating exactly one block with a single
   successor presumably gets the simple type (return elided here);
   anything else falls through to GBB_COND_HEADER.  */
1542 nb_suc = VEC_length (edge, bb->succs);
1544 if (nb_dom == 1 && nb_suc == 1)
1547 return GBB_COND_HEADER;
1550 /* A SCoP detection region, defined using bbs as borders.
1551 All control flow touching this region comes in passing basic_block ENTRY and
1552 leaves passing basic_block EXIT. By using bbs instead of edges for the
1553 borders we are able to represent also regions that do not have a single
1555 But as they have a single entry basic_block and a single exit basic_block, we
1556 are able to generate for every sd_region a single entry and exit edge.
1563 / \ This region contains: {3, 4, 5, 6, 7, 8}
1571 typedef struct sd_region_p
1573 /* The entry bb dominates all bbs in the sd_region. It is part of the
1577 /* The exit bb postdominates all bbs in the sd_region, but is not
1578 part of the region. */
1582 DEF_VEC_O(sd_region);
1583 DEF_VEC_ALLOC_O(sd_region, heap);
1586 /* Moves the scops from SOURCE to TARGET and cleans up SOURCE.  */
1589 move_sd_regions (VEC (sd_region, heap) **source, VEC (sd_region, heap) **target)
1594 for (i = 0; VEC_iterate (sd_region, *source, i, s); i++)
1595 VEC_safe_push (sd_region, heap, *target, s);
1597 VEC_free (sd_region, heap, *source);
1600 /* Return true when it is not possible to represent the upper bound of
1601 LOOP in the polyhedral representation. */
1604 graphite_cannot_represent_loop_niter (loop_p loop)
1606 tree niter = number_of_latch_executions (loop);
/* Unrepresentable when scev cannot determine the latch count, or when
   the count is not a linear (affine) expression.  */
1608 return chrec_contains_undetermined (niter)
1609 || !scev_is_linear_expression (niter);
1611 /* Store information needed by scopdet_* functions. */
1615 /* Where the last open scop would stop if the current BB is harmful. */
1618 /* Where the next scop would start if the current BB is harmful. */
1621 /* The bb or one of its children contains open loop exits. That means
1622 loop exit nodes that are not surrounded by a loop dominated by bb. */
1625 /* The bb or one of its children contains only structures we can handle. */
/* Forward declaration: build_scops_1 and scopdet_basic_block_info are
   mutually recursive.  */
1630 static struct scopdet_info build_scops_1 (basic_block, VEC (sd_region, heap) **,
1633 /* Calculates BB infos. If bb is difficult we add valid SCoPs dominated by BB
1634 to SCOPS. TYPE is the gbb_type of BB. */
/* NOTE(review): the body is a switch on TYPE with one case per
   gbb_type value; case labels for the simple and last-in-SESE kinds,
   several braces and the final return are elided in this excerpt.  */
1636 static struct scopdet_info
1637 scopdet_basic_block_info (basic_block bb, VEC (sd_region, heap) **scops,
1640 struct loop *loop = bb->loop_father;
1641 struct scopdet_info result;
1644 /* XXX: ENTRY_BLOCK_PTR could be optimized in later steps. */
1645 stmt = harmful_stmt_in_bb (ENTRY_BLOCK_PTR, bb);
1646 result.difficult = (stmt != NULL);
1653 result.exits = false;
1656 /* Mark bbs terminating a SESE region difficult, if they start
1658 if (VEC_length (edge, bb->succs) > 1)
1659 result.difficult = true;
1664 result.next = single_succ (bb);
1665 result.exits = false;
1669 case GBB_LOOP_SING_EXIT_HEADER:
1671 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap,3);
1672 struct scopdet_info sinfo;
1674 sinfo = build_scops_1 (bb, &tmp_scops, loop);
1676 result.last = single_exit (bb->loop_father)->src;
1677 result.next = single_exit (bb->loop_father)->dest;
1679 /* If we do not dominate result.next, remove it. It's either
1680 the EXIT_BLOCK_PTR, or another bb dominates it and will
1681 call the scop detection for this bb. */
1682 if (!dominated_by_p (CDI_DOMINATORS, result.next, bb))
1685 if (result.last->loop_father != loop)
1688 if (graphite_cannot_represent_loop_niter (loop))
1689 result.difficult = true;
1691 if (sinfo.difficult)
1692 move_sd_regions (&tmp_scops, scops);
1694 VEC_free (sd_region, heap, tmp_scops);
1696 result.exits = false;
1697 result.difficult |= sinfo.difficult;
1701 case GBB_LOOP_MULT_EXIT_HEADER:
1703 /* XXX: For now we just do not join loops with multiple exits. If the
1704 exits lead to the same bb it may be possible to join the loop. */
1705 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap, 3);
1706 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1709 build_scops_1 (bb, &tmp_scops, loop);
1711 /* Scan the code dominated by this loop. This means all bbs, that are
1712 dominated by a bb in this loop, but are not part of this loop.
1715 - The loop exit destination is dominated by the exit sources.
1717 TODO: We miss here the more complex cases:
1718 - The exit destinations are dominated by another bb inside the
1720 - The loop dominates bbs, that are not exit destinations. */
1721 for (i = 0; VEC_iterate (edge, exits, i, e); i++)
1722 if (e->src->loop_father == loop
1723 && dominated_by_p (CDI_DOMINATORS, e->dest, e->src))
1725 /* Pass loop_outer to recognize e->dest as loop header in
1727 if (e->dest->loop_father->header == e->dest)
1728 build_scops_1 (e->dest, &tmp_scops,
1729 loop_outer (e->dest->loop_father));
1731 build_scops_1 (e->dest, &tmp_scops, e->dest->loop_father);
1736 result.difficult = true;
1737 result.exits = false;
1738 move_sd_regions (&tmp_scops, scops);
1739 VEC_free (edge, heap, exits);
1742 case GBB_COND_HEADER:
1744 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap, 3);
1745 struct scopdet_info sinfo;
1746 VEC (basic_block, heap) *dominated;
1749 basic_block last_bb = NULL;
1751 result.exits = false;
1753 /* First check the successors of BB, and check if it is possible to join
1754 the different branches. */
1755 for (i = 0; VEC_iterate (edge, bb->succs, i, e); i++)
1757 /* Ignore loop exits. They will be handled after the loop body. */
1758 if (is_loop_exit (loop, e->dest))
1760 result.exits = true;
1764 /* Do not follow edges that lead to the end of the
1765 conditions block. For example, in
1775 the edge from 0 => 6. Only check if all paths lead to
1778 if (!single_pred_p (e->dest))
1780 /* Check if edge leads directly to the end of this
1787 if (e->dest != last_bb)
1788 result.difficult = true;
1793 if (!dominated_by_p (CDI_DOMINATORS, e->dest, bb))
1795 result.difficult = true;
1799 sinfo = build_scops_1 (e->dest, &tmp_scops, loop);
1801 result.exits |= sinfo.exits;
1802 result.last = sinfo.last;
1803 result.difficult |= sinfo.difficult;
1805 /* Check if all branches end at the same point.
1806 If that is true, the condition stays joinable.
1807 Have a look at the example above. */
1808 if (sinfo.last && single_succ_p (sinfo.last))
1810 basic_block next_tmp = single_succ (sinfo.last);
1815 if (next_tmp != last_bb)
1816 result.difficult = true;
1819 result.difficult = true;
1822 /* If the condition is joinable. */
1823 if (!result.exits && !result.difficult)
1825 /* Only return a next pointer if we dominate this pointer.
1826 Otherwise it will be handled by the bb dominating it. */
1827 if (dominated_by_p (CDI_DOMINATORS, last_bb, bb) && last_bb != bb)
1828 result.next = last_bb;
1832 VEC_free (sd_region, heap, tmp_scops);
1836 /* Scan remaining bbs dominated by BB. */
1837 dominated = get_dominated_by (CDI_DOMINATORS, bb);
1839 for (i = 0; VEC_iterate (basic_block, dominated, i, dom_bb); i++)
1841 /* Ignore loop exits: they will be handled after the loop body. */
1842 if (loop_depth (find_common_loop (loop, dom_bb->loop_father))
1843 < loop_depth (loop))
1845 result.exits = true;
1849 /* Ignore the bbs processed above. */
1850 if (single_pred_p (dom_bb) && single_pred (dom_bb) == bb)
1853 if (loop_depth (loop) > loop_depth (dom_bb->loop_father))
1854 sinfo = build_scops_1 (dom_bb, &tmp_scops, loop_outer (loop));
1856 sinfo = build_scops_1 (dom_bb, &tmp_scops, loop);
1859 result.exits |= sinfo.exits;
1860 result.difficult = true;
1864 VEC_free (basic_block, heap, dominated);
1867 move_sd_regions (&tmp_scops, scops);
1879 /* Creates the SCoPs and writes entry and exit points for every SCoP. */
1881 static struct scopdet_info
1882 build_scops_1 (basic_block current, VEC (sd_region, heap) **scops, loop_p loop)
1884 bool in_scop = false;
1885 sd_region open_scop;
1886 struct scopdet_info sinfo;
1888 /* Initialize result. */
1889 struct scopdet_info result;
1890 result.exits = false;
1891 result.difficult = false;
1894 open_scop.entry = NULL;
1895 open_scop.exit = NULL;
1898 /* Loop over the dominance tree. If we meet a difficult bb, close
1899 the current SCoP. Loop and condition headers start a new layer,
1900 and can only be added if all bbs in deeper layers are simple. */
1901 while (current != NULL)
1903 sinfo = scopdet_basic_block_info (current, scops, get_bb_type (current,
1906 if (!in_scop && !(sinfo.exits || sinfo.difficult))
1908 open_scop.entry = current;
1909 open_scop.exit = NULL;
1912 else if (in_scop && (sinfo.exits || sinfo.difficult))
1914 open_scop.exit = current;
1915 VEC_safe_push (sd_region, heap, *scops, &open_scop);
1919 result.difficult |= sinfo.difficult;
1920 result.exits |= sinfo.exits;
1922 current = sinfo.next;
1925 /* Try to close open_scop, if we are still in an open SCoP. */
/* The exit is the first successor post-dominated by the last visited
   bb; failing that, the last bb itself (unless it is the entry).  */
1931 for (i = 0; VEC_iterate (edge, sinfo.last->succs, i, e); i++)
1932 if (dominated_by_p (CDI_POST_DOMINATORS, sinfo.last, e->dest))
1933 open_scop.exit = e->dest;
1935 if (!open_scop.exit && open_scop.entry != sinfo.last)
1936 open_scop.exit = sinfo.last;
1939 VEC_safe_push (sd_region, heap, *scops, &open_scop);
1943 result.last = sinfo.last;
1947 /* Checks if a bb is contained in REGION.  BB must be dominated by the
   region's entry and must not lie beyond the region's exit.  */
1950 bb_in_sd_region (basic_block bb, sd_region *region)
1952 return dominated_by_p (CDI_DOMINATORS, bb, region->entry)
1953 && !(dominated_by_p (CDI_DOMINATORS, bb, region->exit)
1954 && !dominated_by_p (CDI_DOMINATORS, region->entry,
1958 /* Returns the single entry edge of REGION, or NULL if it does not exist.  */
1961 find_single_entry_edge (sd_region *region)
1967 FOR_EACH_EDGE (e, ei, region->entry->preds)
1968 if (!bb_in_sd_region (e->src, region))
1983 /* Returns the single exit edge of REGION, or NULL if it does not exist.  */
1986 find_single_exit_edge (sd_region *region)
1992 FOR_EACH_EDGE (e, ei, region->exit->preds)
1993 if (bb_in_sd_region (e->src, region))
2008 /* Create a single entry edge for REGION. */
2011 create_single_entry_edge (sd_region *region)
2013 if (find_single_entry_edge (region))
2016 /* There are multiple predecessors for bb_3
2029 There are two edges (1->3, 2->3), that point from outside into the region,
2030 and another one (5->3), a loop latch, leads to bb_3.
2038 | |\ (3.0 -> 3.1) = single entry edge
2047 If the loop is part of the SCoP, we have to redirect the loop latches.
2053 | | (3.0 -> 3.1) = entry edge
/* Split after labels when the entry is not a loop header, or when the
   latch lies inside the region (latch need not be redirected).  */
2062 if (region->entry->loop_father->header != region->entry
2063 || dominated_by_p (CDI_DOMINATORS,
2064 loop_latch_edge (region->entry->loop_father)->src,
2067 edge forwarder = split_block_after_labels (region->entry);
2068 region->entry = forwarder->dest;
2071 /* This case is never executed, as the loop headers seem always to have a
2072 single edge pointing from outside into the loop. */
2075 #ifdef ENABLE_CHECKING
2076 gcc_assert (find_single_entry_edge (region));
2080 /* Check if the sd_region mentioned in EDGE has no exit bb.  Used as
   the predicate for make_forwarder_block.  */
2083 sd_region_without_exit (edge e)
2085 sd_region *r = (sd_region *) e->aux;
2088 return r->exit == NULL;
2093 /* Create a single exit edge for REGION. */
2096 create_single_exit_edge (sd_region *region)
2100 edge forwarder = NULL;
2103 if (find_single_exit_edge (region))
2106 /* We create a forwarder bb (5) for all edges leaving this region
2107 (3->5, 4->5). All other edges leading to the same bb, are moved
2108 to a new bb (6). If these edges were part of another region (2->5)
2109 we update the region->exit pointer, of this region.
2111 To identify which edge belongs to which region we depend on the e->aux
2112 pointer in every edge. It points to the region of the edge or to NULL,
2113 if the edge is not part of any region.
2115 1 2 3 4 1->5 no region, 2->5 region->exit = 5,
2116 \| |/ 3->5 region->exit = NULL, 4->5 region->exit = NULL
2121 1 2 3 4 1->6 no region, 2->6 region->exit = 6,
2122 | | \/ 3->5 no region, 4->5 no region,
2124 \| / 5->6 region->exit = 6
2127 Now there is only a single exit edge (5->6). */
2128 exit = region->exit;
2129 region->exit = NULL;
2130 forwarder = make_forwarder_block (exit, &sd_region_without_exit, NULL);
2132 /* Unmark the edges, that are no longer exit edges. */
2133 FOR_EACH_EDGE (e, ei, forwarder->src->preds)
2137 /* Mark the new exit edge. */
2138 single_succ_edge (forwarder->src)->aux = region;
2140 /* Update the exit bb of all regions, where exit edges lead to
2142 FOR_EACH_EDGE (e, ei, forwarder->dest->preds)
2144 ((sd_region *) e->aux)->exit = forwarder->dest;
2146 #ifdef ENABLE_CHECKING
2147 gcc_assert (find_single_exit_edge (region));
2151 /* Unmark the exit edges of all REGIONS.
2152 See comment in "create_single_exit_edge". */
2155 unmark_exit_edges (VEC (sd_region, heap) *regions)
/* For every region, walk the predecessors of its exit bb and clear
   the e->aux region marker (clearing statement elided here).  */
2162 for (i = 0; VEC_iterate (sd_region, regions, i, s); i++)
2163 FOR_EACH_EDGE (e, ei, s->exit->preds)
2168 /* Mark the exit edges of all REGIONS.
2169 See comment in "create_single_exit_edge". */
2172 mark_exit_edges (VEC (sd_region, heap) *regions)
/* An edge into the exit bb is an exit edge of region S when its
   source lies inside S; such edges get e->aux pointed at S.  */
2179 for (i = 0; VEC_iterate (sd_region, regions, i, s); i++)
2180 FOR_EACH_EDGE (e, ei, s->exit->preds)
2181 if (bb_in_sd_region (e->src, s))
2185 /* Free and compute again all the dominators information. */
2188 recompute_all_dominators (void)
2190 mark_irreducible_loops ();
2191 free_dominance_info (CDI_DOMINATORS);
2192 free_dominance_info (CDI_POST_DOMINATORS);
2193 calculate_dominance_info (CDI_DOMINATORS);
2194 calculate_dominance_info (CDI_POST_DOMINATORS);
2197 /* Verifies properties that GRAPHITE should maintain during translation. */
2200 graphite_verify (void)
2202 #ifdef ENABLE_CHECKING
/* Loop structure, both dominance directions and loop-closed SSA must
   all stay consistent; these are no-ops in non-checking builds.  */
2203 verify_loop_structure ();
2204 verify_dominators (CDI_DOMINATORS);
2205 verify_dominators (CDI_POST_DOMINATORS);
2207 verify_loop_closed_ssa ();
2211 /* Create for all scop regions a single entry and a single exit edge. */
2214 create_sese_edges (VEC (sd_region, heap) *regions)
2219 for (i = 0; VEC_iterate (sd_region, regions, i, s); i++)
2220 create_single_entry_edge (s);
/* Exit edges need the e->aux markers set before the forwarder blocks
   are created, and cleared afterwards.  */
2222 mark_exit_edges (regions);
2224 for (i = 0; VEC_iterate (sd_region, regions, i, s); i++)
2225 create_single_exit_edge (s);
2227 unmark_exit_edges (regions);
2229 fix_loop_structure (NULL);
2231 #ifdef ENABLE_CHECKING
2232 verify_loop_structure ();
2233 verify_dominators (CDI_DOMINATORS);
2238 /* Create graphite SCoPs from an array of scop detection regions. */
2241 build_graphite_scops (VEC (sd_region, heap) *scop_regions)
2246 for (i = 0; VEC_iterate (sd_region, scop_regions, i, s); i++)
2248 edge entry = find_single_entry_edge (s);
2249 edge exit = find_single_exit_edge (s);
2250 scop_p scop = new_scop (entry, exit);
2251 VEC_safe_push (scop_p, heap, current_scops, scop);
2253 /* Are there overlapping SCoPs? */
2254 #ifdef ENABLE_CHECKING
/* No region's entry may fall inside another region.  */
2259 for (j = 0; VEC_iterate (sd_region, scop_regions, j, s2); j++)
2261 gcc_assert (!bb_in_sd_region (s->entry, s2));
2267 /* Find static control parts: detect regions starting from the block
   after ENTRY, give each a single entry/exit edge, and turn them into
   graphite scops.  */
2272 struct loop *loop = current_loops->tree_root;
2273 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap, 3);
2275 build_scops_1 (single_succ (ENTRY_BLOCK_PTR), &tmp_scops, loop);
2276 create_sese_edges (tmp_scops);
2277 build_graphite_scops (tmp_scops);
2278 VEC_free (sd_region, heap, tmp_scops);
2281 /* Gather the basic blocks belonging to the SCOP.  Iterative DFS from
   the scop entry; the push order below fixes the visit order.  */
2284 build_scop_bbs (scop_p scop)
2286 basic_block *stack = XNEWVEC (basic_block, n_basic_blocks + 1);
2287 sbitmap visited = sbitmap_alloc (last_basic_block);
2290 sbitmap_zero (visited);
2291 stack[sp++] = SCOP_ENTRY (scop);
2295 basic_block bb = stack[--sp];
2296 int depth = loop_depth (bb->loop_father);
2297 int num = bb->loop_father->num;
2301 /* Scop's exit is not in the scop. Exclude also bbs, which are
2302 dominated by the SCoP exit. These are e.g. loop latches. */
2303 if (TEST_BIT (visited, bb->index)
2304 || dominated_by_p (CDI_DOMINATORS, bb, SCOP_EXIT (scop))
2305 /* Every block in the scop is dominated by scop's entry. */
2306 || !dominated_by_p (CDI_DOMINATORS, bb, SCOP_ENTRY (scop)))
2309 new_graphite_bb (scop, bb);
2310 SET_BIT (visited, bb->index);
2312 /* First push the blocks that have to be processed last. Note
2313 that this means that the order in which the code is organized
2314 below is important: do not reorder the following code. */
2315 FOR_EACH_EDGE (e, ei, bb->succs)
2316 if (! TEST_BIT (visited, e->dest->index)
2317 && (int) loop_depth (e->dest->loop_father) < depth)
2318 stack[sp++] = e->dest;
2320 FOR_EACH_EDGE (e, ei, bb->succs)
2321 if (! TEST_BIT (visited, e->dest->index)
2322 && (int) loop_depth (e->dest->loop_father) == depth
2323 && e->dest->loop_father->num != num)
2324 stack[sp++] = e->dest;
2326 FOR_EACH_EDGE (e, ei, bb->succs)
2327 if (! TEST_BIT (visited, e->dest->index)
2328 && (int) loop_depth (e->dest->loop_father) == depth
2329 && e->dest->loop_father->num == num
2330 && EDGE_COUNT (e->dest->preds) > 1)
2331 stack[sp++] = e->dest;
2333 FOR_EACH_EDGE (e, ei, bb->succs)
2334 if (! TEST_BIT (visited, e->dest->index)
2335 && (int) loop_depth (e->dest->loop_father) == depth
2336 && e->dest->loop_father->num == num
2337 && EDGE_COUNT (e->dest->preds) == 1)
2338 stack[sp++] = e->dest;
2340 FOR_EACH_EDGE (e, ei, bb->succs)
2341 if (! TEST_BIT (visited, e->dest->index)
2342 && (int) loop_depth (e->dest->loop_father) > depth)
2343 stack[sp++] = e->dest;
2347 sbitmap_free (visited);
2350 /* Returns the number of reduction phi nodes in LOOP.  A scalar phi
   whose scev does not form a simple induction variable counts as a
   reduction.  */
2353 nb_reductions_in_loop (loop_p loop)
2356 gimple_stmt_iterator gsi;
2358 for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
2360 gimple phi = gsi_stmt (gsi);
/* Skip non-register (virtual) phi results.  */
2364 if (!is_gimple_reg (PHI_RESULT (phi)))
2367 scev = analyze_scalar_evolution (loop, PHI_RESULT (phi));
2368 scev = instantiate_parameters (loop, scev);
2369 if (!simple_iv (loop, loop, PHI_RESULT (phi), &iv, true))
2376 /* A LOOP is in normal form when it contains only one scalar phi node
2377 that defines the main induction variable of the loop, only one
2378 increment of the IV, and only one exit condition. */
2381 graphite_loop_normal_form (loop_p loop)
2383 struct tree_niter_desc niter;
2386 edge exit = single_dom_exit (loop);
2387 bool known_niter = number_of_iterations_exit (loop, exit, &niter, false);
2389 gcc_assert (known_niter);
2391 nit = force_gimple_operand (unshare_expr (niter.niter), &stmts, true,
/* Materialize the iteration-count computation on the preheader.  */
2394 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
2396 /* One IV per loop. */
2397 if (nb_reductions_in_loop (loop) > 0)
2400 return canonicalize_loop_ivs (loop, NULL, &nit);
2403 /* Record LOOP as occurring in SCOP. Returns true when the operation
2407 scop_record_loop (scop_p scop, loop_p loop)
/* Already recorded: nothing to do.  */
2412 if (bitmap_bit_p (SCOP_LOOPS (scop), loop->num))
2415 bitmap_set_bit (SCOP_LOOPS (scop), loop->num);
2416 VEC_safe_push (loop_p, heap, SCOP_LOOP_NEST (scop), loop);
2418 induction_var = graphite_loop_normal_form (loop);
/* Remember the loop's old induction variable and its source name.  */
2422 oldiv = XNEW (struct name_tree_d);
2423 oldiv->t = induction_var;
2424 oldiv->name = get_name (SSA_NAME_VAR (oldiv->t));
2426 VEC_safe_push (name_tree, heap, SCOP_OLDIVS (scop), oldiv);
2430 /* Build the loop nests contained in SCOP. Returns true when the
2431 operation was successful. */
2434 build_scop_loop_nests (scop_p scop)
2438 struct loop *loop0, *loop1;
2441 if (bb_in_sese_p (bb, SCOP_REGION (scop)))
2443 struct loop *loop = bb->loop_father;
2445 /* Only add loops if they are completely contained in the SCoP. */
2446 if (loop->header == bb
2447 && bb_in_sese_p (loop->latch, SCOP_REGION (scop)))
2449 if (!scop_record_loop (scop, loop))
2454 /* Make sure that the loops in the SCOP_LOOP_NEST are ordered. It
2455 can be the case that an inner loop is inserted before an outer
2456 loop. To avoid this, semi-sort once. */
2457 for (i = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), i, loop0); i++)
2459 if (VEC_length (loop_p, SCOP_LOOP_NEST (scop)) == i + 1)
2462 loop1 = VEC_index (loop_p, SCOP_LOOP_NEST (scop), i + 1);
/* Single bubble pass: swap adjacent entries that are out of order.  */
2463 if (loop0->num > loop1->num)
2465 VEC_replace (loop_p, SCOP_LOOP_NEST (scop), i, loop1);
2466 VEC_replace (loop_p, SCOP_LOOP_NEST (scop), i + 1, loop0);
2473 /* Calculate the number of loops around LOOP in the SCOP: walk outward
   while the loop is still inside the scop's SESE region.  */
2476 nb_loops_around_loop_in_scop (struct loop *l, scop_p scop)
2480 for (; loop_in_sese_p (l, SCOP_REGION (scop)); d++, l = loop_outer (l));
2485 /* Calculate the number of loops around GB in the current SCOP. */
2488 nb_loops_around_gb (graphite_bb_p gb)
2490 return nb_loops_around_loop_in_scop (gbb_loop (gb), GBB_SCOP (gb));
2493 /* Returns the dimensionality of an enclosing loop iteration domain
2494 with respect to enclosing SCoP for a given data reference REF. The
2495 returned dimensionality is homogeneous (depth of loop nest + number
2496 of SCoP parameters + const). */
2499 ref_nb_loops (data_reference_p ref)
2501 loop_p loop = loop_containing_stmt (DR_STMT (ref));
2502 scop_p scop = DR_SCOP (ref);
/* "+ 2" accounts for the eq/ineq column and the constant column.  */
2504 return nb_loops_around_loop_in_scop (loop, scop) + scop_nb_params (scop) + 2;
2507 /* Build dynamic schedules for all the BBs. */
2510 build_scop_dynamic_schedules (scop_p scop)
2512 int i, dim, loop_num, row, col;
2515 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
2517 loop_num = GBB_BB (gb)->loop_father->num;
/* A square identity matrix of size = number of surrounding loops.  */
2521 dim = nb_loops_around_gb (gb);
2522 GBB_DYNAMIC_SCHEDULE (gb) = cloog_matrix_alloc (dim, dim);
2524 for (row = 0; row < GBB_DYNAMIC_SCHEDULE (gb)->NbRows; row++)
2525 for (col = 0; col < GBB_DYNAMIC_SCHEDULE (gb)->NbColumns; col++)
2527 value_set_si (GBB_DYNAMIC_SCHEDULE (gb)->p[row][col], 1);
2529 value_set_si (GBB_DYNAMIC_SCHEDULE (gb)->p[row][col], 0);
2532 GBB_DYNAMIC_SCHEDULE (gb) = NULL;
2536 /* Returns the number of loops that are identical at the beginning of
2537 the vectors A and B. */
2540 compare_prefix_loops (VEC (loop_p, heap) *a, VEC (loop_p, heap) *b)
2549 lb = VEC_length (loop_p, b);
/* Stop at the first position where the loops differ (or B ends).  */
2551 for (i = 0; VEC_iterate (loop_p, a, i, ea); i++)
2553 || ea != VEC_index (loop_p, b, i))
2559 /* Build for BB the static schedule.
2561 The STATIC_SCHEDULE is defined like this:
2580 Static schedules for A to F:
2593 build_scop_canonical_schedules (scop_p scop)
2597 int nb_loops = scop_nb_loops (scop);
2598 lambda_vector static_schedule = lambda_vector_new (nb_loops + 1);
2599 VEC (loop_p, heap) *loops_previous = NULL;
2601 /* We have to start schedules at 0 on the first component and
2602 because we cannot compare_prefix_loops against a previous loop,
2603 prefix will be equal to zero, and that index will be
2604 incremented before copying. */
2605 static_schedule[0] = -1;
2607 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
2609 int prefix = compare_prefix_loops (loops_previous, GBB_LOOPS (gb));
2610 int nb = gbb_nb_loops (gb);
2612 loops_previous = GBB_LOOPS (gb);
/* Reset components below the shared prefix, then bump the component
   right at the prefix boundary.  */
2613 memset (&(static_schedule[prefix + 1]), 0, sizeof (int) * (nb_loops - prefix));
2614 ++static_schedule[prefix];
2615 GBB_STATIC_SCHEDULE (gb) = lambda_vector_new (nb + 1);
2616 lambda_vector_copy (static_schedule,
2617 GBB_STATIC_SCHEDULE (gb), nb + 1);
2621 /* Build the LOOPS vector for all bbs in SCOP. */
2624 build_bb_loops (scop_p scop)
2629 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
/* Innermost loop goes at the highest index; walk outward, filling the
   vector from the back.  */
2634 depth = nb_loops_around_gb (gb) - 1;
2636 GBB_LOOPS (gb) = VEC_alloc (loop_p, heap, 3);
2637 VEC_safe_grow_cleared (loop_p, heap, GBB_LOOPS (gb), depth + 1);
2639 loop = GBB_BB (gb)->loop_father;
2641 while (scop_contains_loop (scop, loop))
2643 VEC_replace (loop_p, GBB_LOOPS (gb), depth, loop);
2644 loop = loop_outer (loop);
2650 /* Get the index for parameter VAR in SCOP. */
2653 param_index (tree var, scop_p scop)
2659 gcc_assert (TREE_CODE (var) == SSA_NAME);
2661 for (i = 0; VEC_iterate (name_tree, SCOP_PARAMS (scop), i, p); i++)
2665 gcc_assert (SCOP_ADD_PARAMS (scop));
2667 nvar = XNEW (struct name_tree_d);
2670 VEC_safe_push (name_tree, heap, SCOP_PARAMS (scop), nvar);
2671 return VEC_length (name_tree, SCOP_PARAMS (scop)) - 1;
2674 /* Scan EXPR and translate it to an inequality vector INEQ that will
2675 be added, or subtracted, in the constraint domain matrix C at row
2676 R. K is the number of columns for loop iterators in C. */
/* NOTE(review): when C is NULL the walk only records parameters (see
   the callers passing NULL); the matrix updates are guarded by checks
   elided in this excerpt -- confirm against the full source.  */
2679 scan_tree_for_params (scop_p s, tree e, CloogMatrix *c, int r, Value k,
2682 int cst_col, param_col;
2684 if (e == chrec_dont_know)
2687 switch (TREE_CODE (e))
2689 case POLYNOMIAL_CHREC:
2691 tree left = CHREC_LEFT (e);
2692 tree right = CHREC_RIGHT (e);
2693 int var = CHREC_VARIABLE (e);
2695 if (TREE_CODE (right) != INTEGER_CST)
2700 int loop_col = scop_gimple_loop_depth (s, get_loop (var)) + 1;
2703 value_sub_int (c->p[r][loop_col], c->p[r][loop_col],
2704 int_cst_value (right));
2706 value_add_int (c->p[r][loop_col], c->p[r][loop_col],
2707 int_cst_value (right));
2710 switch (TREE_CODE (left))
2712 case POLYNOMIAL_CHREC:
2713 scan_tree_for_params (s, left, c, r, k, subtract);
2717 /* Constant part. */
2720 int v = int_cst_value (left);
2721 cst_col = c->NbColumns - 1;
/* Negative constants are folded in with the opposite sign.  */
2726 subtract = subtract ? false : true;
2730 value_sub_int (c->p[r][cst_col], c->p[r][cst_col], v);
2732 value_add_int (c->p[r][cst_col], c->p[r][cst_col], v);
2737 scan_tree_for_params (s, left, c, r, k, subtract);
/* MULT_EXPR: scale K by the constant operand, then recurse into the
   symbolic operand.  */
2744 if (chrec_contains_symbols (TREE_OPERAND (e, 0)))
2749 gcc_assert (host_integerp (TREE_OPERAND (e, 1), 0));
2751 value_set_si (val, int_cst_value (TREE_OPERAND (e, 1)));
2752 value_multiply (k, k, val);
2755 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
2762 gcc_assert (host_integerp (TREE_OPERAND (e, 0), 0));
2764 value_set_si (val, int_cst_value (TREE_OPERAND (e, 0)));
2765 value_multiply (k, k, val);
2768 scan_tree_for_params (s, TREE_OPERAND (e, 1), c, r, k, subtract);
2773 case POINTER_PLUS_EXPR:
2774 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
2775 scan_tree_for_params (s, TREE_OPERAND (e, 1), c, r, k, subtract);
/* MINUS_EXPR: the second operand flips the subtract flag.  */
2779 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
2780 scan_tree_for_params (s, TREE_OPERAND (e, 1), c, r, k, !subtract);
2784 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, !subtract);
/* SSA_NAME: a scop parameter; add/sub K in its column.  */
2788 param_col = param_index (e, s);
2792 param_col += c->NbColumns - scop_nb_params (s) - 1;
2795 value_subtract (c->p[r][param_col], c->p[r][param_col], k);
2797 value_addto (c->p[r][param_col], c->p[r][param_col], k);
/* INTEGER_CST: accumulate into the constant column.  */
2804 int v = int_cst_value (e);
2805 cst_col = c->NbColumns - 1;
2810 subtract = subtract ? false : true;
2814 value_sub_int (c->p[r][cst_col], c->p[r][cst_col], v);
2816 value_add_int (c->p[r][cst_col], c->p[r][cst_col], v);
2821 case NON_LVALUE_EXPR:
2822 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
2831 /* Data structure for idx_record_params. */
2839 /* For a data reference with an ARRAY_REF as its BASE, record the
2840 parameters occurring in IDX. DTA is passed in as complementary
2841 information, and is used by the automatic walker function. This
2842 function is a callback for for_each_index. */
2845 idx_record_params (tree base, tree *idx, void *dta)
2847 struct irp_data *data = (struct irp_data *) dta;
2849 if (TREE_CODE (base) != ARRAY_REF)
2852 if (TREE_CODE (*idx) == SSA_NAME)
2855 scop_p scop = data->scop;
2856 struct loop *loop = data->loop;
2859 scev = analyze_scalar_evolution (loop, *idx);
2860 scev = instantiate_scev (block_before_scop (scop), loop, scev);
/* C == NULL: only collect parameters, no matrix row is filled.  */
2863 value_set_si (one, 1);
2864 scan_tree_for_params (scop, scev, NULL, 0, one, false);
2871 /* Find parameters with respect to SCOP in BB. We are looking in memory
2872 access functions, conditions and loop bounds. */
2875 find_params_in_bb (scop_p scop, graphite_bb_p gb)
2878 data_reference_p dr;
2880 loop_p father = GBB_BB (gb)->loop_father;
2882 for (i = 0; VEC_iterate (data_reference_p, GBB_DATA_REFS (gb), i, dr); i++)
2884 struct irp_data irp;
2888 for_each_index (&dr->ref, idx_record_params, &irp);
2891 /* Find parameters in conditional statements. */
2892 for (i = 0; VEC_iterate (gimple, GBB_CONDITIONS (gb), i, stmt); i++)
2895 loop_p loop = father;
/* Both sides of the condition are analyzed as scalar evolutions
   instantiated relative to the block before the scop.  */
2899 lhs = gimple_cond_lhs (stmt);
2900 lhs = analyze_scalar_evolution (loop, lhs);
2901 lhs = instantiate_scev (block_before_scop (scop), loop, lhs);
2903 rhs = gimple_cond_rhs (stmt);
2904 rhs = analyze_scalar_evolution (loop, rhs);
2905 rhs = instantiate_scev (block_before_scop (scop), loop, rhs);
2908 scan_tree_for_params (scop, lhs, NULL, 0, one, false);
2909 value_set_si (one, 1);
2910 scan_tree_for_params (scop, rhs, NULL, 0, one, false);
2915 /* Saves in NV the name of variable P->T.  Falls back to "T_<version>"
   when the SSA name has no user-visible identifier.  */
2918 save_var_name (char **nv, int i, name_tree p)
2920 const char *name = get_name (SSA_NAME_VAR (p->t));
2924 int len = strlen (name) + 16;
2925 nv[i] = XNEWVEC (char, len);
2926 snprintf (nv[i], len, "%s_%d", name, SSA_NAME_VERSION (p->t));
2930 nv[i] = XNEWVEC (char, 16);
2931 snprintf (nv[i], 2 + 16, "T_%d", SSA_NAME_VERSION (p->t));
2937 /* Return the maximal loop depth in SCOP. */
2940 scop_max_loop_depth (scop_p scop)
2944 int max_nb_loops = 0;
2946 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gbb); i++)
2948 int nb_loops = gbb_nb_loops (gbb);
2949 if (max_nb_loops < nb_loops)
2950 max_nb_loops = nb_loops;
2953 return max_nb_loops;
2956 /* Initialize Cloog's parameter names from the names used in GIMPLE.
2957 Initialize Cloog's iterator names, using 'graphite_iterator_%d'
2958 from 0 to scop_nb_loops (scop). */
2961 initialize_cloog_names (scop_p scop)
2963 int i, nb_params = VEC_length (name_tree, SCOP_PARAMS (scop));
2964 char **params = XNEWVEC (char *, nb_params);
2965 int nb_iterators = scop_max_loop_depth (scop);
2966 int nb_scattering= cloog_program_nb_scattdims (SCOP_PROG (scop));
2967 char **iterators = XNEWVEC (char *, nb_iterators * 2);
2968 char **scattering = XNEWVEC (char *, nb_scattering);
2971 for (i = 0; VEC_iterate (name_tree, SCOP_PARAMS (scop), i, p); i++)
2972 save_var_name (params, i, p);
2974 cloog_names_set_nb_parameters (cloog_program_names (SCOP_PROG (scop)),
2976 cloog_names_set_parameters (cloog_program_names (SCOP_PROG (scop)),
2979 for (i = 0; i < nb_iterators; i++)
2982 iterators[i] = XNEWVEC (char, len);
2983 snprintf (iterators[i], len, "graphite_iterator_%d", i);
2986 cloog_names_set_nb_iterators (cloog_program_names (SCOP_PROG (scop)),
2988 cloog_names_set_iterators (cloog_program_names (SCOP_PROG (scop)),
/* Scattering dimensions are named "s_<i>".  */
2991 for (i = 0; i < nb_scattering; i++)
2994 scattering[i] = XNEWVEC (char, len);
2995 snprintf (scattering[i], len, "s_%d", i);
2998 cloog_names_set_nb_scattering (cloog_program_names (SCOP_PROG (scop)),
3000 cloog_names_set_scattering (cloog_program_names (SCOP_PROG (scop)),
3004 /* Record the parameters used in the SCOP. A variable is a parameter
3005 in a scop if it does not vary during the execution of that scop. */
3008 find_scop_parameters (scop_p scop)
3016 value_set_si (one, 1);
3018 /* Find the parameters used in the loop bounds. */
3019 for (i = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), i, loop); i++)
3021 tree nb_iters = number_of_latch_executions (loop);
/* Purely constant bounds contribute no parameters.  */
3023 if (!chrec_contains_symbols (nb_iters))
3026 nb_iters = analyze_scalar_evolution (loop, nb_iters);
3027 nb_iters = instantiate_scev (block_before_scop (scop), loop, nb_iters);
3028 scan_tree_for_params (scop, nb_iters, NULL, 0, one, false);
3033 /* Find the parameters used in data accesses. */
3034 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
3035 find_params_in_bb (scop, gb);
/* From here on the parameter set is frozen; param_index asserts.  */
3037 SCOP_ADD_PARAMS (scop) = false;
3040 /* Build the context constraints for SCOP: constraints and relations
3044 build_scop_context (scop_p scop)
3046 int nb_params = scop_nb_params (scop);
3047 CloogMatrix *matrix = cloog_matrix_alloc (1, nb_params + 2);
3049 /* Insert '0 >= 0' in the context matrix, as it is not allowed to be
3052 value_set_si (matrix->p[0][0], 1);
3054 value_set_si (matrix->p[0][nb_params + 1], 0);
3056 cloog_program_set_context (SCOP_PROG (scop),
3057 cloog_domain_matrix2domain (matrix));
3058 cloog_matrix_free (matrix);
3061 /* Returns a graphite_bb from BB. */
3063 static inline graphite_bb_p
3064 gbb_from_bb (basic_block bb)
/* The graphite_bb is stashed in the basic block's AUX field by the scop
   construction code; this just casts it back.  */
3066 return (graphite_bb_p) bb->aux;
3069 /* Builds the constraint matrix for LOOP in SCOP. NB_OUTER_LOOPS is the
3070 number of loops surrounding LOOP in SCOP. OUTER_CSTR gives the
3071 constraints matrix for the surrounding loops. */
3074 build_loop_iteration_domains (scop_p scop, struct loop *loop,
3075 CloogMatrix *outer_cstr, int nb_outer_loops)
/* NOTE(review): local declarations (I, J, ROW, CSTR, GB, ONE, ...) are
   missing from this view of the function.  */
/* Grow the outer matrix by one row (for this loop's 0 <= i constraint)
   and one column (for this loop's iterator).  */
3081 int nb_rows = outer_cstr->NbRows + 1;
3082 int nb_cols = outer_cstr->NbColumns + 1;
3084 /* Last column of CSTR is the column of constants. */
3085 int cst_col = nb_cols - 1;
3087 /* The column for the current loop is just after the columns of
3088 other outer loops. */
3089 int loop_col = nb_outer_loops + 1;
3091 tree nb_iters = number_of_latch_executions (loop);
3093 /* When the number of iterations is a constant or a parameter, we
3094 add a constraint for the upper bound of the loop. So add a row
3095 to the constraint matrix before allocating it. */
3096 if (TREE_CODE (nb_iters) == INTEGER_CST
3097 || !chrec_contains_undetermined (nb_iters))
3100 cstr = cloog_matrix_alloc (nb_rows, nb_cols);
3102 /* Copy the outer constraints. */
3103 for (i = 0; i < outer_cstr->NbRows; i++)
3105 /* Copy the eq/ineq and loops columns. */
3106 for (j = 0; j < loop_col; j++)
3107 value_assign (cstr->p[i][j], outer_cstr->p[i][j]);
3109 /* Leave an empty column in CSTR for the current loop, and then
3110 copy the parameter columns. */
3111 for (j = loop_col; j < outer_cstr->NbColumns; j++)
3112 value_assign (cstr->p[i][j + 1], outer_cstr->p[i][j]);
/* Lower bound: the row '1 * loop_i >= 0' (first column 1 marks an
   inequality).  */
3116 row = outer_cstr->NbRows;
3117 value_set_si (cstr->p[row][0], 1);
3118 value_set_si (cstr->p[row][loop_col], 1);
3120 /* loop_i <= nb_iters */
3121 if (TREE_CODE (nb_iters) == INTEGER_CST)
3124 value_set_si (cstr->p[row][0], 1);
3125 value_set_si (cstr->p[row][loop_col], -1);
3127 value_set_si (cstr->p[row][cst_col],
3128 int_cst_value (nb_iters));
3130 else if (!chrec_contains_undetermined (nb_iters))
3132 /* Otherwise nb_iters contains parameters: scan the nb_iters
3133 expression and build its matrix representation. */
3137 value_set_si (cstr->p[row][0], 1);
3138 value_set_si (cstr->p[row][loop_col], -1);
3140 nb_iters = analyze_scalar_evolution (loop, nb_iters);
3141 nb_iters = instantiate_scev (block_before_scop (scop), loop, nb_iters);
3144 value_set_si (one, 1);
3145 scan_tree_for_params (scop, nb_iters, cstr, row, one, false);
/* Recurse into nested loops still inside the scop, with this loop's
   matrix as their outer constraints.  */
3151 if (loop->inner && loop_in_sese_p (loop->inner, SCOP_REGION (scop)))
3152 build_loop_iteration_domains (scop, loop->inner, cstr, nb_outer_loops + 1);
3154 /* Only go to the next loops, if we are not at the outermost layer. These
3155 have to be handled seperately, as we can be sure, that the chain at this
3156 layer will be connected. */
3157 if (nb_outer_loops != 0 && loop->next && loop_in_sese_p (loop->next,
3158 SCOP_REGION (scop)))
3159 build_loop_iteration_domains (scop, loop->next, outer_cstr, nb_outer_loops);
/* Each basic block whose innermost containing loop is LOOP gets its own
   copy of the domain; CSTR itself is freed below.  */
3161 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
3162 if (gbb_loop (gb) == loop)
3163 GBB_DOMAIN (gb) = cloog_matrix_copy (cstr);
3165 cloog_matrix_free (cstr);
3168 /* Add conditions to the domain of GB. */
3171 add_conditions_to_domain (graphite_bb_p gb)
/* NOTE(review): several declarations and switch arms are missing from
   this view (I, J, ROW, NB_ROWS, NB_COLS, ONE, the unsupported-code
   early returns); confirm against the complete file.  */
3175 VEC (gimple, heap) *conditions = GBB_CONDITIONS (gb);
3176 CloogMatrix *domain = GBB_DOMAIN (gb);
3177 scop_p scop = GBB_SCOP (gb);
3181 unsigned nb_new_rows = 0;
/* No conditions dominate GB: nothing to add.  */
3184 if (VEC_empty (gimple, conditions))
3189 nb_rows = domain->NbRows;
3190 nb_cols = domain->NbColumns;
/* When GB has no domain yet, size the matrix from scratch: one column
   per surrounding loop, one per parameter, plus eq/ineq and constant.  */
3195 nb_cols = nb_loops_around_gb (gb) + scop_nb_params (scop) + 2;
3198 /* Count number of necessary new rows to add the conditions to the
3200 for (i = 0; VEC_iterate (gimple, conditions, i, stmt); i++)
3202 switch (gimple_code (stmt))
3206 enum tree_code code = gimple_cond_code (stmt);
3212 /* NE and EQ statements are not supported right know. */
3228 /* Switch statements are not supported right know. */
3239 /* Enlarge the matrix. */
3241 CloogMatrix *new_domain;
3242 new_domain = cloog_matrix_alloc (nb_rows + nb_new_rows, nb_cols);
/* Copy the pre-existing iteration-domain rows into the enlarged
   matrix; the condition rows are appended after them.  */
3246 for (i = 0; i < nb_rows; i++)
3247 for (j = 0; j < nb_cols; j++)
3248 value_assign (new_domain->p[i][j], domain->p[i][j]);
3250 cloog_matrix_free (domain);
3253 domain = new_domain;
3254 GBB_DOMAIN (gb) = new_domain;
3257 /* Add the conditions to the new enlarged domain matrix. */
3259 for (i = 0; VEC_iterate (gimple, conditions, i, stmt); i++)
3261 switch (gimple_code (stmt))
3266 enum tree_code code;
3269 loop_p loop = GBB_BB (gb)->loop_father;
3271 left = gimple_cond_lhs (stmt);
3272 right = gimple_cond_rhs (stmt);
/* Rewrite both operands as scalar evolutions instantiated at the
   scop entry, so they are expressed in loop ivs and parameters.  */
3274 left = analyze_scalar_evolution (loop, left);
3275 right = analyze_scalar_evolution (loop, right);
3277 left = instantiate_scev (block_before_scop (scop), loop, left);
3278 right = instantiate_scev (block_before_scop (scop), loop, right);
3280 code = gimple_cond_code (stmt);
3282 /* The conditions for ELSE-branches are inverted. */
3283 if (VEC_index (gimple, gb->condition_cases, i) == NULL)
3284 code = invert_tree_comparison (code, false);
3289 /* NE statements are not supported right know. */
/* Each comparison becomes one inequality row: the SUBTRACT flag of
   scan_tree_for_params (last argument) picks which side is negated,
   and strict comparisons subtract 1 from the constant column to turn
   '<' into '<=' on integers.  */
3293 value_set_si (domain->p[row][0], 1);
3295 value_set_si (one, 1);
3296 scan_tree_for_params (scop, left, domain, row, one, true);
3297 value_set_si (one, 1);
3298 scan_tree_for_params (scop, right, domain, row, one, false);
3300 value_set_si (domain->p[row][0], 1);
3301 value_set_si (one, 1);
3302 scan_tree_for_params (scop, left, domain, row, one, false);
3303 value_set_si (one, 1);
3304 scan_tree_for_params (scop, right, domain, row, one, true);
3309 value_set_si (domain->p[row][0], 1);
3311 value_set_si (one, 1);
3312 scan_tree_for_params (scop, left, domain, row, one, true);
3313 value_set_si (one, 1);
3314 scan_tree_for_params (scop, right, domain, row, one, false);
3315 value_sub_int (domain->p[row][nb_cols - 1],
3316 domain->p[row][nb_cols - 1], 1);
3321 value_set_si (domain->p[row][0], 1);
3323 value_set_si (one, 1);
3324 scan_tree_for_params (scop, left, domain, row, one, false);
3325 value_set_si (one, 1);
3326 scan_tree_for_params (scop, right, domain, row, one, true);
3327 value_sub_int (domain->p[row][nb_cols - 1],
3328 domain->p[row][nb_cols - 1], 1);
3333 value_set_si (domain->p[row][0], 1);
3335 value_set_si (one, 1);
3336 scan_tree_for_params (scop, left, domain, row, one, true);
3337 value_set_si (one, 1);
3338 scan_tree_for_params (scop, right, domain, row, one, false);
3343 value_set_si (domain->p[row][0], 1);
3345 value_set_si (one, 1);
3346 scan_tree_for_params (scop, left, domain, row, one, false);
3347 value_set_si (one, 1);
3348 scan_tree_for_params (scop, right, domain, row, one, true);
3359 /* Switch statements are not supported right know. */
3370 /* Returns true when PHI defines an induction variable in the loop
3371 containing the PHI node. */
3374 phi_node_is_iv (gimple phi)
3376 loop_p loop = gimple_bb (phi)->loop_father;
3377 tree scev = analyze_scalar_evolution (loop, gimple_phi_result (phi));
/* If the scalar evolution of the phi result is expressible as a chrec,
   the phi is an induction variable of LOOP.  */
3379 return tree_contains_chrecs (scev, NULL);
3382 /* Returns true when BB contains scalar phi nodes that are not an
3383 induction variable of a loop. */
3386 bb_contains_non_iv_scalar_phi_nodes (basic_block bb)
/* NOTE(review): the declaration/initialization of PHI and parts of the
   loop body are missing from this view.  */
3389 gimple_stmt_iterator si;
3391 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
/* Only register (scalar) phis matter; virtual phis are ignored.  */
3392 if (is_gimple_reg (gimple_phi_result (gsi_stmt (si))))
3394 /* Store the unique scalar PHI node: at this point, loops
3395 should be in cannonical form, so we expect to see at most
3396 one scalar phi node in the loop header. */
3398 || bb != bb->loop_father->header)
3401 phi = gsi_stmt (si);
3405 || phi_node_is_iv (phi))
3411 /* Helper recursive function. Record in CONDITIONS and CASES all
3412 conditions from 'if's and 'switch'es occurring in BB from SCOP.
3414 Returns false when the conditions contain scalar computations that
3415 depend on the condition, i.e. when there are scalar phi nodes on
3416 the junction after the condition. Only the computations occurring
3417 on memory can be handled in the polyhedral model: operations that
3418 define scalar evolutions in conditions, that can potentially be
3419 used to index memory, can't be handled by the polyhedral model. */
3422 build_scop_conditions_1 (VEC (gimple, heap) **conditions,
3423 VEC (gimple, heap) **cases, basic_block bb,
/* NOTE(review): the scop parameter line and several declarations/early
   returns are missing from this view of the function.  */
3429 basic_block bb_child, bb_iter;
3430 VEC (basic_block, heap) *dom;
3433 /* Make sure we are in the SCoP. */
3434 if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
/* Bail out: non-iv scalar phis after a condition cannot be represented
   in the polyhedral model (see the function comment).  */
3437 if (bb_contains_non_iv_scalar_phi_nodes (bb))
3440 gbb = gbb_from_bb (bb);
/* Snapshot the currently open conditions/cases: they dominate BB.  */
3443 GBB_CONDITIONS (gbb) = VEC_copy (gimple, heap, *conditions);
3444 GBB_CONDITION_CASES (gbb) = VEC_copy (gimple, heap, *cases);
3447 dom = get_dominated_by (CDI_DOMINATORS, bb);
3449 stmt = last_stmt (bb);
3452 VEC (edge, gc) *edges;
3455 switch (gimple_code (stmt))
3459 for (i = 0; VEC_iterate (edge, edges, i, e); i++)
3460 if ((dominated_by_p (CDI_DOMINATORS, e->dest, bb))
3461 && VEC_length (edge, e->dest->preds) == 1)
3463 /* Remove the scanned block from the dominator successors. */
3464 for (j = 0; VEC_iterate (basic_block, dom, j, bb_iter); j++)
3465 if (bb_iter == e->dest)
3467 VEC_unordered_remove (basic_block, dom, j);
3471 /* Recursively scan the then or else part. */
/* A non-NULL entry in CASES marks the THEN branch; NULL marks the
   ELSE branch (its condition is inverted by the consumer).  */
3472 if (e->flags & EDGE_TRUE_VALUE)
3473 VEC_safe_push (gimple, heap, *cases, stmt);
3476 gcc_assert (e->flags & EDGE_FALSE_VALUE);
3477 VEC_safe_push (gimple, heap, *cases, NULL);
3480 VEC_safe_push (gimple, heap, *conditions, stmt);
3481 if (!build_scop_conditions_1 (conditions, cases, e->dest, scop))
/* Pop the condition/case pushed above before moving to the next
   successor: CONDITIONS/CASES behave as a DFS stack.  */
3486 VEC_pop (gimple, *conditions);
3487 VEC_pop (gimple, *cases);
3494 gimple_stmt_iterator gsi_search_gimple_label;
3496 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
3498 basic_block bb_iter;
3500 size_t n_cases = VEC_length (gimple, *conditions);
3501 unsigned n = gimple_switch_num_labels (stmt);
3503 bb_child = label_to_block
3504 (CASE_LABEL (gimple_switch_label (stmt, i)));
3506 for (k = 0; k < n; k++)
3509 (CASE_LABEL (gimple_switch_label (stmt, k))) == bb_child)
3512 /* Switches with multiple case values for the same
3513 block are not handled. */
3515 /* Switch cases with more than one predecessor are
3517 || VEC_length (edge, bb_child->preds) != 1)
3523 /* Recursively scan the corresponding 'case' block. */
3524 for (gsi_search_gimple_label = gsi_start_bb (bb_child);
3525 !gsi_end_p (gsi_search_gimple_label);
3526 gsi_next (&gsi_search_gimple_label))
3528 gimple label = gsi_stmt (gsi_search_gimple_label);
3530 if (gimple_code (label) == GIMPLE_LABEL)
3532 tree t = gimple_label_label (label);
/* Record the case label itself in CASES at position N_CASES so the
   consumer can tell which case guarded this block.  */
3534 gcc_assert (t == gimple_switch_label (stmt, i));
3535 VEC_replace (gimple, *cases, n_cases, label);
3540 if (!build_scop_conditions_1 (conditions, cases, bb_child, scop))
3546 /* Remove the scanned block from the dominator successors. */
3547 for (j = 0; VEC_iterate (basic_block, dom, j, bb_iter); j++)
3548 if (bb_iter == bb_child)
3550 VEC_unordered_remove (basic_block, dom, j);
3555 VEC_pop (gimple, *conditions);
3556 VEC_pop (gimple, *cases);
3565 /* Scan all immediate dominated successors. */
3566 for (i = 0; VEC_iterate (basic_block, dom, i, bb_child); i++)
3567 if (!build_scop_conditions_1 (conditions, cases, bb_child, scop))
3574 VEC_free (basic_block, heap, dom);
3578 /* Record all conditions from SCOP.
3580 Returns false when the conditions contain scalar computations that
3581 depend on the condition, i.e. when there are scalar phi nodes on
3582 the junction after the condition. Only the computations occurring
3583 on memory can be handled in the polyhedral model: operations that
3584 define scalar evolutions in conditions, that can potentially be
3585 used to index memory, can't be handled by the polyhedral model. */
3588 build_scop_conditions (scop_p scop)
/* The two vectors act as the DFS stacks of the recursive walker; they
   start empty and are freed once the walk from the scop entry ends.  */
3591 VEC (gimple, heap) *conditions = NULL;
3592 VEC (gimple, heap) *cases = NULL;
3594 res = build_scop_conditions_1 (&conditions, &cases, SCOP_ENTRY (scop), scop);
3596 VEC_free (gimple, heap, conditions);
3597 VEC_free (gimple, heap, cases);
3601 /* Traverses all the GBBs of the SCOP and add their constraints to the
3602 iteration domains. */
3605 add_conditions_to_constraints (scop_p scop)
/* Simple driver: delegate the per-block work to add_conditions_to_domain.  */
3610 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gbb); i++)
3611 add_conditions_to_domain (gbb);
3614 /* Build the current domain matrix: the loops belonging to the current
3615 SCOP, and that vary for the execution of the current basic block.
3616 Returns false if there is no loop in SCOP. */
3619 build_scop_iteration_domain (scop_p scop)
3622 CloogMatrix *outer_cstr;
3625 /* Build cloog loop for all loops, that are in the uppermost loop layer of
/* Only loops whose parent loop lies outside the scop region are
   outermost in the scop: recursion inside
   build_loop_iteration_domains handles their children.  */
3627 for (i = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), i, loop); i++)
3628 if (!loop_in_sese_p (loop_outer (loop), SCOP_REGION (scop)))
3630 /* The outermost constraints is a matrix that has:
3631 -first column: eq/ineq boolean
3632 -last column: a constant
3633 -scop_nb_params columns for the parameters used in the scop. */
3634 outer_cstr = cloog_matrix_alloc (0, scop_nb_params (scop) + 2);
3635 build_loop_iteration_domains (scop, loop, outer_cstr, 0);
3636 cloog_matrix_free (outer_cstr);
3642 /* Initializes an equation CY of the access matrix using the
3643 information for a subscript from AF, relatively to the loop
3644 indexes from LOOP_NEST and parameter indexes from PARAMS. NDIM is
3645 the dimension of the array access, i.e. the number of
3646 subscripts. Returns true when the operation succeeds. */
3649 build_access_matrix_with_af (tree af, lambda_vector cy,
3650 scop_p scop, int ndim)
/* NOTE(review): several return statements and case labels are missing
   from this view of the switch.  */
3654 switch (TREE_CODE (af))
3656 case POLYNOMIAL_CHREC:
3658 struct loop *outer_loop;
3659 tree left = CHREC_LEFT (af);
3660 tree right = CHREC_RIGHT (af);
/* Non-constant strides cannot be encoded as a row coefficient.  */
3663 if (TREE_CODE (right) != INTEGER_CST)
/* The chrec's stride becomes the coefficient of the column of the
   loop the chrec varies in.  */
3666 outer_loop = get_loop (CHREC_VARIABLE (af));
3667 var = nb_loops_around_loop_in_scop (outer_loop, scop);
3668 cy[var] = int_cst_value (right);
3670 switch (TREE_CODE (left))
3672 case POLYNOMIAL_CHREC:
3673 return build_access_matrix_with_af (left, cy, scop, ndim);
/* A constant initial value goes into the last (constant) slot.  */
3676 cy[ndim - 1] = int_cst_value (left);
3680 return build_access_matrix_with_af (left, cy, scop, ndim);
/* Plus/minus: accumulate both operands into the same equation.  */
3685 build_access_matrix_with_af (TREE_OPERAND (af, 0), cy, scop, ndim);
3686 build_access_matrix_with_af (TREE_OPERAND (af, 1), cy, scop, ndim);
3690 build_access_matrix_with_af (TREE_OPERAND (af, 0), cy, scop, ndim);
3691 build_access_matrix_with_af (TREE_OPERAND (af, 1), cy, scop, ndim);
3695 cy[ndim - 1] = int_cst_value (af);
/* An SSA name used as a subscript is a scop parameter: set its
   column coefficient to 1.  */
3699 param_col = param_index (af, scop);
3700 cy [ndim - scop_nb_params (scop) + param_col - 1] = 1;
3704 /* FIXME: access_fn can have parameters. */
3709 /* Initialize the access matrix in the data reference REF with respect
3710 to the loop nesting LOOP_NEST. Return true when the operation
3714 build_access_matrix (data_reference_p ref, graphite_bb_p gb)
3716 int i, ndim = DR_NUM_DIMENSIONS (ref);
/* The matrix is GC-allocated because it is reachable from the data
   reference, which lives in GC memory.  */
3717 struct access_matrix *am = GGC_NEW (struct access_matrix);
3719 AM_MATRIX (am) = VEC_alloc (lambda_vector, gc, ndim);
3720 DR_SCOP (ref) = GBB_SCOP (gb);
/* One equation (lambda vector) per array subscript.  */
3722 for (i = 0; i < ndim; i++)
3724 lambda_vector v = lambda_vector_new (ref_nb_loops (ref));
3725 scop_p scop = GBB_SCOP (gb);
3726 tree af = DR_ACCESS_FN (ref, i);
/* Fail as a whole if any single subscript is not affine.  */
3728 if (!build_access_matrix_with_af (af, v, scop, ref_nb_loops (ref)))
3731 VEC_quick_push (lambda_vector, AM_MATRIX (am), v);
3734 DR_ACCESS_MATRIX (ref) = am;
3738 /* Build the access matrices for the data references in the SCOP. */
3741 build_scop_data_accesses (scop_p scop)
3746 /* FIXME: Construction of access matrix is disabled until some
3747 pass, like the data dependence analysis, is using it. */
/* Dead code below this early return -- kept intentionally, see FIXME.  */
3750 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
3753 data_reference_p dr;
3755 /* Construct the access matrix for each data ref, with respect to
3756 the loop nest of the current BB in the considered SCOP. */
3758 VEC_iterate (data_reference_p, GBB_DATA_REFS (gb), j, dr);
3761 bool res = build_access_matrix (dr, gb);
3763 /* FIXME: At this point the DRs should always have an affine
3764 form. For the moment this fails as build_access_matrix
3765 does not build matrices with parameters. */
3771 /* Returns the tree variable from the name NAME that was given in
3772 Cloog representation. All the parameters are stored in PARAMS, and
3773 all the loop induction variables are stored in IVSTACK.
3775 FIXME: This is a hack, and Cloog should be fixed to not work with
3776 variable names represented as "char *string", but with void
3777 pointers that could be casted back to a tree. The only problem in
3778 doing that is that Cloog's pretty printer still assumes that
3779 variable names are char *strings. The solution would be to have a
3780 function pointer for pretty-printing that can be redirected to be
3781 print_generic_stmt in our case, or fprintf by default.
3782 ??? Too ugly to live. */
3785 clast_name_to_gcc (const char *name, VEC (name_tree, heap) *params,
3786 loop_iv_stack ivstack)
/* First try to resolve NAME as a scop parameter by string comparison;
   otherwise fall back to the induction-variable stack.  */
3793 for (i = 0; VEC_iterate (name_tree, params, i, t); i++)
3794 if (!strcmp (name, t->name))
3797 iv = loop_iv_stack_get_iv_from_name (ivstack, name);
3804 /* Returns the maximal precision type for expressions E1 and E2. */
3807 max_precision_type (tree e1, tree e2)
3809 tree type1 = TREE_TYPE (e1);
3810 tree type2 = TREE_TYPE (e2);
/* Ties go to TYPE2; only a strictly larger TYPE1 wins.  */
3811 return TYPE_PRECISION (type1) > TYPE_PRECISION (type2) ? type1 : type2;
3815 clast_to_gcc_expression (tree, struct clast_expr *, VEC (name_tree, heap) *,
3818 /* Converts a Cloog reduction expression R with reduction operation OP
3819 to a GCC expression tree of type TYPE. PARAMS is a vector of
3820 parameters of the scop, and IVSTACK contains the stack of induction
3824 clast_to_gcc_expression_red (tree type, enum tree_code op,
3825 struct clast_reduction *r,
3826 VEC (name_tree, heap) *params,
3827 loop_iv_stack ivstack)
/* Left-fold the reduction: start from the first element and combine
   each following element with OP (PLUS/MIN/MAX, chosen by caller).  */
3830 tree res = clast_to_gcc_expression (type, r->elts[0], params, ivstack);
3832 for (i = 1; i < r->n; i++)
3834 tree t = clast_to_gcc_expression (type, r->elts[i], params, ivstack);
3835 res = fold_build2 (op, type, res, t);
3840 /* Converts a Cloog AST expression E back to a GCC expression tree of
3841 type TYPE. PARAMS is a vector of parameters of the scop, and
3842 IVSTACK contains the stack of induction variables. */
3845 clast_to_gcc_expression (tree type, struct clast_expr *e,
3846 VEC (name_tree, heap) *params,
3847 loop_iv_stack ivstack)
/* NOTE(review): the outer switch on E->type and some case labels are
   missing from this view.  */
3853 struct clast_term *t = (struct clast_term *) e;
/* A term is VAL * VAR (or just VAL when VAR is NULL).  Special-case
   coefficients 1 and -1 to build cleaner trees.  */
3857 if (value_one_p (t->val))
3859 tree name = clast_name_to_gcc (t->var, params, ivstack);
3860 return fold_convert (type, name);
3863 else if (value_mone_p (t->val))
3865 tree name = clast_name_to_gcc (t->var, params, ivstack);
3866 name = fold_convert (type, name);
3867 return fold_build1 (NEGATE_EXPR, type, name);
3871 tree name = clast_name_to_gcc (t->var, params, ivstack);
3872 tree cst = gmp_cst_to_tree (type, t->val);
3873 name = fold_convert (type, name);
3874 return fold_build2 (MULT_EXPR, type, cst, name);
/* Pure constant term: convert the GMP value directly.  */
3878 return gmp_cst_to_tree (type, t->val);
3883 struct clast_reduction *r = (struct clast_reduction *) e;
/* Reductions map onto the matching GCC n-ary fold.  */
3888 return clast_to_gcc_expression_red (type, PLUS_EXPR, r, params, ivstack);
3891 return clast_to_gcc_expression_red (type, MIN_EXPR, r, params, ivstack);
3894 return clast_to_gcc_expression_red (type, MAX_EXPR, r, params, ivstack);
3904 struct clast_binary *b = (struct clast_binary *) e;
3905 struct clast_expr *lhs = (struct clast_expr *) b->LHS;
3906 tree tl = clast_to_gcc_expression (type, lhs, params, ivstack);
/* In a clast binary node the RHS is always a GMP constant.  */
3907 tree tr = gmp_cst_to_tree (type, b->RHS);
3911 case clast_bin_fdiv:
3912 return fold_build2 (FLOOR_DIV_EXPR, type, tl, tr);
3914 case clast_bin_cdiv:
3915 return fold_build2 (CEIL_DIV_EXPR, type, tl, tr);
3918 return fold_build2 (EXACT_DIV_EXPR, type, tl, tr);
3921 return fold_build2 (TRUNC_MOD_EXPR, type, tl, tr);
3935 /* Returns the type for the expression E. */
3938 gcc_type_for_clast_expr (struct clast_expr *e,
3939 VEC (name_tree, heap) *params,
3940 loop_iv_stack ivstack)
/* NOTE(review): the switch on E->type and several branches are missing
   from this view of the function.  */
3946 struct clast_term *t = (struct clast_term *) e;
/* A term with a variable takes that variable's type.  */
3949 return TREE_TYPE (clast_name_to_gcc (t->var, params, ivstack));
3956 struct clast_reduction *r = (struct clast_reduction *) e;
/* Single-element reduction: just use the element's type.  */
3959 return gcc_type_for_clast_expr (r->elts[0], params, ivstack);
3963 for (i = 0; i < r->n; i++)
3965 tree type = gcc_type_for_clast_expr (r->elts[i], params, ivstack);
3975 struct clast_binary *b = (struct clast_binary *) e;
3976 struct clast_expr *lhs = (struct clast_expr *) b->LHS;
/* Binary nodes have a constant RHS, so the LHS dictates the type.  */
3977 return gcc_type_for_clast_expr (lhs, params, ivstack);
3987 /* Returns the type for the equation CLEQ. */
3990 gcc_type_for_clast_eq (struct clast_equation *cleq,
3991 VEC (name_tree, heap) *params,
3992 loop_iv_stack ivstack)
/* Prefer the LHS type; fall back to the RHS when the LHS yields none
   (the intervening check is not visible in this view).  */
3994 tree type = gcc_type_for_clast_expr (cleq->LHS, params, ivstack);
3998 return gcc_type_for_clast_expr (cleq->RHS, params, ivstack);
4001 /* Translates a clast equation CLEQ to a tree. */
4004 graphite_translate_clast_equation (scop_p scop,
4005 struct clast_equation *cleq,
4006 loop_iv_stack ivstack)
4008 enum tree_code comp;
4009 tree type = gcc_type_for_clast_eq (cleq, SCOP_PARAMS (scop), ivstack);
4010 tree lhs = clast_to_gcc_expression (type, cleq->LHS, SCOP_PARAMS (scop), ivstack);
4011 tree rhs = clast_to_gcc_expression (type, cleq->RHS, SCOP_PARAMS (scop), ivstack);
/* CLooG's sign encodes the comparison: 0 is equality, positive is >=,
   and (not visible here) negative is <=.  */
4013 if (cleq->sign == 0)
4016 else if (cleq->sign > 0)
4022 return fold_build2 (comp, type, lhs, rhs);
4025 /* Creates the test for the condition in STMT. */
4028 graphite_create_guard_cond_expr (scop_p scop, struct clast_guard *stmt,
4029 loop_iv_stack ivstack)
/* AND together all equations of the guard into one condition tree.  */
4034 for (i = 0; i < stmt->n; i++)
4036 tree eq = graphite_translate_clast_equation (scop, &stmt->eq[i], ivstack);
4039 cond = fold_build2 (TRUTH_AND_EXPR, TREE_TYPE (eq), cond, eq);
4047 /* Creates a new if region corresponding to Cloog's guard. */
4050 graphite_create_new_guard (scop_p scop, edge entry_edge,
4051 struct clast_guard *stmt,
4052 loop_iv_stack ivstack)
/* Materialize the guard as an empty if-region on ENTRY_EDGE; the body
   is filled in later by the caller.  */
4054 tree cond_expr = graphite_create_guard_cond_expr (scop, stmt, ivstack);
4055 edge exit_edge = create_empty_if_region_on_edge (entry_edge, cond_expr);
4059 /* Walks a CLAST and returns the first statement in the body of a
4062 static struct clast_user_stmt *
4063 clast_get_body_of_loop (struct clast_stmt *stmt)
/* Base case: a user statement is the body we are looking for.  */
4066 || CLAST_STMT_IS_A (stmt, stmt_user))
4067 return (struct clast_user_stmt *) stmt;
/* Otherwise descend through for/guard/block wrappers.  */
4069 if (CLAST_STMT_IS_A (stmt, stmt_for))
4070 return clast_get_body_of_loop (((struct clast_for *) stmt)->body);
4072 if (CLAST_STMT_IS_A (stmt, stmt_guard))
4073 return clast_get_body_of_loop (((struct clast_guard *) stmt)->then);
4075 if (CLAST_STMT_IS_A (stmt, stmt_block))
4076 return clast_get_body_of_loop (((struct clast_block *) stmt)->body);
4081 /* Returns the induction variable for the loop that gets translated to
4085 gcc_type_for_iv_of_clast_loop (struct clast_for *stmt_for)
/* Find the first user statement in the loop body; its CloogStatement
   carries the graphite_bb, from which the iv type is recovered.  */
4087 struct clast_user_stmt *stmt = clast_get_body_of_loop ((struct clast_stmt *) stmt_for);
4088 const char *cloog_iv = stmt_for->iterator;
4089 CloogStatement *cs = stmt->statement;
4090 graphite_bb_p gbb = (graphite_bb_p) cloog_statement_usr (cs);
4092 return gcc_type_for_cloog_iv (cloog_iv, gbb);
4095 /* Creates a new LOOP corresponding to Cloog's STMT. Inserts an induction
4096 variable for the new LOOP. New LOOP is attached to CFG starting at
4097 ENTRY_EDGE. LOOP is inserted into the loop tree and becomes the child
4098 loop of the OUTER_LOOP. */
4100 static struct loop *
4101 graphite_create_new_loop (scop_p scop, edge entry_edge,
4102 struct clast_for *stmt, loop_iv_stack ivstack,
/* Translate CLooG's lower/upper bounds and stride into trees of the
   iv's type before building the CFG loop.  */
4105 tree type = gcc_type_for_iv_of_clast_loop (stmt);
4106 VEC (name_tree, heap) *params = SCOP_PARAMS (scop);
4107 tree lb = clast_to_gcc_expression (type, stmt->LB, params, ivstack);
4108 tree ub = clast_to_gcc_expression (type, stmt->UB, params, ivstack);
4109 tree stride = gmp_cst_to_tree (type, stmt->stride);
4110 tree ivvar = create_tmp_var (type, "graphiteIV");
/* When no OUTER loop is given, nest under the loop containing the
   entry edge's source block.  */
4112 loop_p loop = create_empty_loop_on_edge
4113 (entry_edge, lb, stride, ub, ivvar, &iv_before,
4114 outer ? outer : entry_edge->src->loop_father);
/* Push the fresh iv under CLooG's iterator name so nested statements
   can resolve it through the iv stack.  */
4116 add_referenced_var (ivvar);
4117 loop_iv_stack_push_iv (ivstack, iv_before, stmt->iterator);
4121 /* Rename the SSA_NAMEs used in STMT and that appear in IVSTACK. */
4124 rename_variables_in_stmt (gimple stmt, htab_t map)
4127 use_operand_p use_p;
/* Walk every SSA use of STMT and substitute the replacement recorded
   in MAP; names without an entry presumably come back unchanged from
   get_new_name_from_old_name -- verify against that helper.  */
4129 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
4131 tree use = USE_FROM_PTR (use_p);
4132 tree new_name = get_new_name_from_old_name (map, use);
4134 replace_exp (use_p, new_name);
4140 /* Returns true if SSA_NAME is a parameter of SCOP. */
4143 is_parameter (scop_p scop, tree ssa_name)
4146 VEC (name_tree, heap) *params = SCOP_PARAMS (scop);
/* Linear search by tree identity over the scop's parameter vector.  */
4149 for (i = 0; VEC_iterate (name_tree, params, i, param); i++)
4150 if (param->t == ssa_name)
4156 /* Returns true if NAME is an induction variable. */
/* Heuristic: any SSA name defined by a PHI node is treated as an iv
   here (no chrec analysis, unlike phi_node_is_iv above).  */
4161 return gimple_code (SSA_NAME_DEF_STMT (name)) == GIMPLE_PHI;
4164 static void expand_scalar_variables_stmt (gimple, basic_block, scop_p,
4167 expand_scalar_variables_expr (tree, tree, enum tree_code, tree, basic_block,
4168 scop_p, htab_t, gimple_stmt_iterator *);
4170 /* Copies at GSI all the scalar computations on which the ssa_name OP0
4171 depends on in the SCOP: these are all the scalar variables used in
4172 the definition of OP0, that are defined outside BB and still in the
4173 SCOP, i.e. not a parameter of the SCOP. The expression that is
4174 returned contains only induction variables from the generated code:
4175 MAP contains the induction variables renaming mapping, and is used
4176 to translate the names of induction variables. */
4179 expand_scalar_variables_ssa_name (tree op0, basic_block bb,
4180 scop_p scop, htab_t map,
4181 gimple_stmt_iterator *gsi)
4183 tree var0, var1, type;
4185 enum tree_code subcode;
/* Parameters (and, per the missing condition, presumably ivs) need no
   expansion: just translate the name through MAP.  */
4187 if (is_parameter (scop, op0)
4189 return get_new_name_from_old_name (map, op0);
4191 def_stmt = SSA_NAME_DEF_STMT (op0);
4193 if (gimple_bb (def_stmt) == bb)
4195 /* If the defining statement is in the basic block already
4196 we do not need to create a new expression for it, we
4197 only need to ensure its operands are expanded. */
4198 expand_scalar_variables_stmt (def_stmt, bb, scop, map);
4199 return get_new_name_from_old_name (map, op0);
/* Definitions outside the scop, or that are not plain assignments,
   are left as-is (renamed only).  */
4203 if (gimple_code (def_stmt) != GIMPLE_ASSIGN
4204 || !bb_in_sese_p (gimple_bb (def_stmt), SCOP_REGION (scop)))
4205 return get_new_name_from_old_name (map, op0);
/* Recurse on the defining assignment's RHS to rebuild the value from
   expanded operands.  */
4207 var0 = gimple_assign_rhs1 (def_stmt);
4208 subcode = gimple_assign_rhs_code (def_stmt);
4209 var1 = gimple_assign_rhs2 (def_stmt);
4210 type = gimple_expr_type (def_stmt);
4212 return expand_scalar_variables_expr (type, var0, subcode, var1, bb, scop,
4217 /* Copies at GSI all the scalar computations on which the expression
4218 OP0 CODE OP1 depends on in the SCOP: these are all the scalar
4219 variables used in OP0 and OP1, defined outside BB and still defined
4220 in the SCOP, i.e. not a parameter of the SCOP. The expression that
4221 is returned contains only induction variables from the generated
4222 code: MAP contains the induction variables renaming mapping, and is
4223 used to translate the names of induction variables. */
4226 expand_scalar_variables_expr (tree type, tree op0, enum tree_code code,
4227 tree op1, basic_block bb, scop_p scop,
4228 htab_t map, gimple_stmt_iterator *gsi)
/* Constants and declarations are self-contained: nothing to expand
   (the actual return is missing from this view).  */
4230 if (TREE_CODE_CLASS (code) == tcc_constant
4231 || TREE_CODE_CLASS (code) == tcc_declaration)
4234 /* For data references we have to duplicate also its memory
4236 if (TREE_CODE_CLASS (code) == tcc_reference)
/* Unary reference (presumably INDIRECT_REF, given the single operand
   and fold_build1): expand the pointer SSA name and gimplify it.  */
4242 tree old_name = TREE_OPERAND (op0, 0);
4243 tree expr = expand_scalar_variables_ssa_name
4244 (old_name, bb, scop, map, gsi);
4245 tree new_name = force_gimple_operand_gsi (gsi, expr, true, NULL,
4246 true, GSI_SAME_STMT);
4248 return fold_build1 (code, type, new_name);
/* ARRAY_REF: rebuild base and subscript, keeping the original
   element-size/offset operands op02/op03 untouched.  */
4253 tree op00 = TREE_OPERAND (op0, 0);
4254 tree op01 = TREE_OPERAND (op0, 1);
4255 tree op02 = TREE_OPERAND (op0, 2);
4256 tree op03 = TREE_OPERAND (op0, 3);
4257 tree base = expand_scalar_variables_expr
4258 (TREE_TYPE (op00), op00, TREE_CODE (op00), NULL, bb, scop,
4260 tree subscript = expand_scalar_variables_expr
4261 (TREE_TYPE (op01), op01, TREE_CODE (op01), NULL, bb, scop,
4264 return build4 (ARRAY_REF, type, base, subscript, op02, op03);
4268 /* The above cases should catch everything. */
4273 if (TREE_CODE_CLASS (code) == tcc_unary)
4275 tree op0_type = TREE_TYPE (op0);
4276 enum tree_code op0_code = TREE_CODE (op0);
4277 tree op0_expr = expand_scalar_variables_expr (op0_type, op0, op0_code,
4278 NULL, bb, scop, map, gsi);
4280 return fold_build1 (code, type, op0_expr);
4283 if (TREE_CODE_CLASS (code) == tcc_binary)
/* Binary: expand both operands independently, then refold.  */
4285 tree op0_type = TREE_TYPE (op0);
4286 enum tree_code op0_code = TREE_CODE (op0);
4287 tree op0_expr = expand_scalar_variables_expr (op0_type, op0, op0_code,
4288 NULL, bb, scop, map, gsi);
4289 tree op1_type = TREE_TYPE (op1);
4290 enum tree_code op1_code = TREE_CODE (op1);
4291 tree op1_expr = expand_scalar_variables_expr (op1_type, op1, op1_code,
4292 NULL, bb, scop, map, gsi);
4294 return fold_build2 (code, type, op0_expr, op1_expr);
4297 if (code == SSA_NAME)
4298 return expand_scalar_variables_ssa_name (op0, bb, scop, map, gsi);
4304 /* Copies at the beginning of BB all the scalar computations on which
4305 STMT depends on in the SCOP: these are all the scalar variables used
4306 in STMT, defined outside BB and still defined in the SCOP, i.e. not a
4307 parameter of the SCOP. The expression that is returned contains
4308 only induction variables from the generated code: MAP contains the
4309 induction variables renaming mapping, and is used to translate the
4310 names of induction variables. */
4313 expand_scalar_variables_stmt (gimple stmt, basic_block bb, scop_p scop,
4317 use_operand_p use_p;
/* New statements are inserted right after BB's labels so they dominate
   every use in the block.  */
4318 gimple_stmt_iterator gsi = gsi_after_labels (bb);
4320 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
4322 tree use = USE_FROM_PTR (use_p);
4323 tree type = TREE_TYPE (use);
4324 enum tree_code code = TREE_CODE (use);
4325 tree use_expr = expand_scalar_variables_expr (type, use, code, NULL, bb,
/* Only rewrite the operand when the expansion produced something
   different from the original use.  */
4327 if (use_expr != use)
4330 force_gimple_operand_gsi (&gsi, use_expr, true, NULL,
4331 true, GSI_NEW_STMT);
4332 replace_exp (use_p, new_use);
4339 /* Copies at the beginning of BB all the scalar computations on which
4340 BB depends on in the SCOP: these are all the scalar variables used
4341 in BB, defined outside BB and still defined in the SCOP, i.e. not a
4342 parameter of the SCOP. The expression that is returned contains
4343 only induction variables from the generated code: MAP contains the
4344 induction variables renaming mapping, and is used to translate the
4345 names of induction variables. */
4348 expand_scalar_variables (basic_block bb, scop_p scop, htab_t map)
4350 gimple_stmt_iterator gsi;
/* The iterator is advanced inside the loop body (advance not visible
   in this view), since expansion inserts statements before GSI.  */
4352 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
4354 gimple stmt = gsi_stmt (gsi);
4355 expand_scalar_variables_stmt (stmt, bb, scop, map);
4360 /* Rename all the SSA_NAMEs from block BB according to the MAP. */
4363 rename_variables (basic_block bb, htab_t map)
4365 gimple_stmt_iterator gsi;
/* Labels carry no SSA operands, so start after them.  */
4367 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4368 rename_variables_in_stmt (gsi_stmt (gsi), map);
4371 /* Remove condition from BB. */
4374 remove_condition (basic_block bb)
4376 gimple last = last_stmt (bb);
/* Only a trailing GIMPLE_COND is removed; any other terminator is
   left untouched.  */
4378 if (last && gimple_code (last) == GIMPLE_COND)
4380 gimple_stmt_iterator gsi = gsi_last_bb (bb);
4381 gsi_remove (&gsi, true);
4385 /* Returns the first successor edge of BB with EDGE_TRUE_VALUE flag set. */
4388 get_true_edge_from_guard_bb (basic_block bb)
/* Scan all outgoing edges; the return of the match (and the fallback)
   are not visible in this view.  */
4393 FOR_EACH_EDGE (e, ei, bb->succs)
4394 if (e->flags & EDGE_TRUE_VALUE)
4401 /* Returns the first successor edge of BB with EDGE_TRUE_VALUE flag cleared. */
4404 get_false_edge_from_guard_bb (basic_block bb)
/* Mirror of get_true_edge_from_guard_bb with the flag test negated.  */
4409 FOR_EACH_EDGE (e, ei, bb->succs)
4410 if (!(e->flags & EDGE_TRUE_VALUE))
4417 /* Inserts in MAP a tuple (OLD_NAME, NEW_NAME) for the induction
4418 variables of the loops around GBB in SCOP, i.e. GBB_LOOPS.
4419 NEW_NAME is obtained from IVSTACK. IVSTACK has the same stack
4420 ordering as GBB_LOOPS. */
4423 build_iv_mapping (loop_iv_stack ivstack, htab_t map, gbb_p gbb, scop_p scop)
4429 for (i = 0; VEC_iterate (name_tree, SCOP_OLDIVS (scop), i, iv); i++)
4431 struct rename_map_elt_d tmp;
/* Skip ivs of loops that do not contain GBB: only surrounding loops'
   ivs are live in this block.  */
4433 if (!flow_bb_inside_loop_p (iv->loop, GBB_BB (gbb)))
4436 tmp.old_name = iv->t;
4437 slot = htab_find_slot (map, &tmp, INSERT);
/* Fill the slot only when empty (the guard is not visible here):
   depth in GBB's loop nest indexes the matching iv on IVSTACK.  */
4441 tree new_name = loop_iv_stack_get_iv (ivstack,
4442 gbb_loop_index (gbb, iv->loop));
4443 *slot = new_rename_map_elt (iv->t, new_name);
4448 /* Register in MAP the tuple (old_name, new_name). */
4451 register_old_and_new_names (htab_t map, tree old_name, tree new_name)
4453 struct rename_map_elt_d tmp;
4456 tmp.old_name = old_name;
4457 slot = htab_find_slot (map, &tmp, INSERT);
/* NOTE(review): confirm behavior when OLD_NAME is already present --
   presumably the new element is only stored in an empty slot.  */
4460 *slot = new_rename_map_elt (old_name, new_name);
4463 /* Create a duplicate of the basic block BB. NOTE: This does not
4464 preserve SSA form: new names are created for every definition and
recorded in MAP so uses can be renamed later. */
4467 graphite_copy_stmts_from_block (basic_block bb, basic_block new_bb, htab_t map)
4469 gimple_stmt_iterator gsi, gsi_tgt;
4471 gsi_tgt = gsi_start_bb (new_bb);
4472 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4474 def_operand_p def_p;
4475 ssa_op_iter op_iter;
4477 gimple stmt = gsi_stmt (gsi);
/* Labels must stay unique per block, so they are not copied.  */
4480 if (gimple_code (stmt) == GIMPLE_LABEL)
4483 /* Create a new copy of STMT and duplicate STMT's virtual
4485 copy = gimple_copy (stmt);
4486 gsi_insert_after (&gsi_tgt, copy, GSI_NEW_STMT);
/* The virtual operand needs renaming after duplication.  */
4487 mark_sym_for_renaming (gimple_vop (cfun));
/* Keep the copy in the same EH region as the original statement.  */
4489 region = lookup_stmt_eh_region (stmt);
4491 add_stmt_to_eh_region (copy, region);
4492 gimple_duplicate_stmt_histograms (cfun, copy, cfun, stmt);
4494 /* Create new names for all the definitions created by COPY and
4495 add replacement mappings for each new name. */
4496 FOR_EACH_SSA_DEF_OPERAND (def_p, copy, op_iter, SSA_OP_ALL_DEFS)
4498 tree old_name = DEF_FROM_PTR (def_p);
4499 tree new_name = create_new_def_for (old_name, copy, def_p);
4500 register_old_and_new_names (map, old_name, new_name);
4505 /* Records in SCOP_LIVEOUT_RENAMES the names that are live out of
4506 the SCOP and that appear in the RENAME_MAP. */
4509 register_scop_liveout_renames (scop_p scop, htab_t rename_map)
4512 sese region = SCOP_REGION (scop);
/* Walk all SSA version numbers of the region; only liveout
   non-virtual (is_gimple_reg) names are recorded.  */
4514 for (i = 0; i < SESE_NUM_VER (region); i++)
4515 if (bitmap_bit_p (SESE_LIVEOUT (region), i)
4516 && is_gimple_reg (ssa_name (i)))
4518 tree old_name = ssa_name (i);
4519 tree new_name = get_new_name_from_old_name (rename_map, old_name);
4521 register_old_and_new_names (SCOP_LIVEOUT_RENAMES (scop),
4522 old_name, new_name);
4526 /* Copies BB and includes in the copied BB all the statements that can
4527 be reached following the use-def chains from the memory accesses,
4528 and returns the next edge following this new block. */
4531 copy_bb_and_scalar_dependences (basic_block bb, scop_p scop,
4532 edge next_e, htab_t map)
/* Splitting NEXT_E yields a fresh empty block to copy into.  */
4534 basic_block new_bb = split_edge (next_e);
4536 next_e = single_succ_edge (new_bb);
4537 graphite_copy_stmts_from_block (bb, new_bb, map);
/* The original control flow (condition, phis) is dropped: control is
   regenerated from the CLAST, only the computation is kept.  */
4538 remove_condition (new_bb);
4539 rename_variables (new_bb, map);
4540 remove_phi_nodes (new_bb);
4541 expand_scalar_variables (new_bb, scop, map);
4542 register_scop_liveout_renames (scop, map);
4547 /* Helper function for htab_traverse in insert_loop_close_phis.
Creates in block S a loop-close phi for ENTRY->NEW_NAME and updates
the entry to the phi result. */
4550 add_loop_exit_phis (void **slot, void *s)
4552 struct rename_map_elt_d *entry = (struct rename_map_elt_d *) *slot;
4553 tree new_name = entry->new_name;
4554 basic_block bb = (basic_block) s;
4555 gimple phi = create_phi_node (new_name, bb);
4556 tree res = create_new_def_for (gimple_phi_result (phi), phi,
4557 gimple_phi_result_ptr (phi));
/* BB is expected to have a single predecessor (a loop-close block).  */
4559 add_phi_arg (phi, new_name, single_pred_edge (bb));
/* Downstream consumers of the rename map now see the phi result.  */
4561 entry->new_name = res;
4566 /* Iterate over the SCOP_LIVEOUT_RENAMES (SCOP) and get tuples of the
4567 form (OLD_NAME, NEW_NAME). Insert in BB "RES = phi (NEW_NAME)",
4568 and finally register in SCOP_LIVEOUT_RENAMES (scop) the tuple
4572 insert_loop_close_phis (scop_p scop, basic_block bb)
/* SSA must be consistent both before creating the phis and after
   rewriting the liveout renames.  */
4574 update_ssa (TODO_update_ssa);
4575 htab_traverse (SCOP_LIVEOUT_RENAMES (scop), add_loop_exit_phis, bb);
4576 update_ssa (TODO_update_ssa);
4579 /* Helper structure for htab_traverse in insert_guard_phis. */
/* Edges of the guard condition, plus the liveout renames snapshot
   taken before the guarded code was generated.  */
4583 edge true_edge, false_edge;
4584 htab_t liveout_before_guard;
4587 /* Return the default name that is before the guard. */
4590 default_liveout_before_guard (htab_t liveout_before_guard, tree old_name)
4592 tree res = get_new_name_from_old_name (liveout_before_guard, old_name);
/* No rename recorded: fall back to a neutral value.  */
4594 if (res == old_name)
/* NOTE(review): a zero of the name's type for registers, the default
   definition otherwise -- confirm the zero is never observable.  */
4596 if (is_gimple_reg (res))
4597 return fold_convert (TREE_TYPE (res), integer_zero_node);
4598 return gimple_default_def (cfun, res);
4604 /* Helper function for htab_traverse in insert_guard_phis.
Merges, for one rename-map entry, the name generated in the guarded
code (true edge) with the pre-guard name (false edge).  */
4607 add_guard_exit_phis (void **slot, void *s)
4609 struct rename_map_elt_d *entry = (struct rename_map_elt_d *) *slot;
4610 struct igp *i = (struct igp *) s;
4611 basic_block bb = i->bb;
4612 edge true_edge = i->true_edge;
4613 edge false_edge = i->false_edge;
4614 tree name1 = entry->new_name;
4615 tree name2 = default_liveout_before_guard (i->liveout_before_guard,
4617 gimple phi = create_phi_node (name1, bb);
4618 tree res = create_new_def_for (gimple_phi_result (phi), phi,
4619 gimple_phi_result_ptr (phi));
/* NAME1 flows in when the guard was taken, NAME2 otherwise.  */
4621 add_phi_arg (phi, name1, true_edge);
4622 add_phi_arg (phi, name2, false_edge);
/* The phi result becomes the current rename for this entry.  */
4624 entry->new_name = res;
4629 /* Iterate over the SCOP_LIVEOUT_RENAMES (SCOP) and get tuples of the
4630 form (OLD_NAME, NAME1). If there is a correspondent tuple of
4631 OLD_NAME in LIVEOUT_BEFORE_GUARD, i.e. (OLD_NAME, NAME2) then
4634 | RES = phi (NAME1 (on TRUE_EDGE), NAME2 (on FALSE_EDGE))"
4636 if there is no tuple for OLD_NAME in LIVEOUT_BEFORE_GUARD, insert
4638 | RES = phi (NAME1 (on TRUE_EDGE),
4639 | DEFAULT_DEFINITION of NAME1 (on FALSE_EDGE))".
4641 Finally register in SCOP_LIVEOUT_RENAMES (scop) the tuple
4645 insert_guard_phis (scop_p scop, basic_block bb, edge true_edge,
4646 edge false_edge, htab_t liveout_before_guard)
/* Package the context for the hash-table traversal callback.  */
4650 i.true_edge = true_edge;
4651 i.false_edge = false_edge;
4652 i.liveout_before_guard = liveout_before_guard;
4654 update_ssa (TODO_update_ssa);
4655 htab_traverse (SCOP_LIVEOUT_RENAMES (scop), add_guard_exit_phis, &i);
4656 update_ssa (TODO_update_ssa);
4659 /* Helper function for htab_traverse.  Copies one rename-map entry
into the destination table S. */
4662 copy_renames (void **slot, void *s)
4664 struct rename_map_elt_d *entry = (struct rename_map_elt_d *) *slot;
4665 htab_t res = (htab_t) s;
4666 tree old_name = entry->old_name;
4667 tree new_name = entry->new_name;
4668 struct rename_map_elt_d tmp;
4671 tmp.old_name = old_name;
4672 x = htab_find_slot (res, &tmp, INSERT);
/* NOTE(review): confirm the store only happens for an empty slot, so
   an existing element is not leaked.  */
4675 *x = new_rename_map_elt (old_name, new_name);
4680 /* Translates a CLAST statement STMT to GCC representation in the
4683 - NEXT_E is the edge where new generated code should be attached.
4684 - CONTEXT_LOOP is the loop in which the generated code will be placed
4686 - IVSTACK contains the surrounding loops around the statement to be
4691 translate_clast (scop_p scop, struct loop *context_loop,
4692 struct clast_stmt *stmt, edge next_e, loop_iv_stack ivstack)
/* The root node carries no code of its own; translate its chain.  */
4697 if (CLAST_STMT_IS_A (stmt, stmt_root))
4698 return translate_clast (scop, context_loop, stmt->next, next_e, ivstack);
/* A user statement corresponds to one black-box GRAPHITE bb: copy its
   statements at NEXT_E with induction variables renamed.  */
4700 if (CLAST_STMT_IS_A (stmt, stmt_user))
4703 CloogStatement *cs = ((struct clast_user_stmt *) stmt)->statement;
4704 graphite_bb_p gbb = (graphite_bb_p) cloog_statement_usr (cs);
4706 if (GBB_BB (gbb) == ENTRY_BLOCK_PTR)
4709 map = htab_create (10, rename_map_elt_info, eq_rename_map_elts, free);
4710 loop_iv_stack_patch_for_consts (ivstack, (struct clast_user_stmt *) stmt);
4711 build_iv_mapping (ivstack, map, gbb, scop);
4712 next_e = copy_bb_and_scalar_dependences (GBB_BB (gbb), scop,
4715 loop_iv_stack_remove_constants (ivstack);
4716 recompute_all_dominators ();
4717 update_ssa (TODO_update_ssa);
4719 return translate_clast (scop, context_loop, stmt->next, next_e, ivstack);
/* A for node: build the loop skeleton, generate its body on the edge
   to the latch, then close the loop with loop-close phis.  */
4722 if (CLAST_STMT_IS_A (stmt, stmt_for))
4725 = graphite_create_new_loop (scop, next_e, (struct clast_for *) stmt,
4726 ivstack, context_loop ? context_loop
4728 edge last_e = single_exit (loop);
4730 next_e = translate_clast (scop, loop, ((struct clast_for *) stmt)->body,
4731 single_pred_edge (loop->latch), ivstack);
4732 redirect_edge_succ_nodup (next_e, loop->latch);
4734 set_immediate_dominator (CDI_DOMINATORS, next_e->dest, next_e->src);
4735 loop_iv_stack_pop (ivstack);
/* Split the exit so the close-phi block has a single predecessor.  */
4736 last_e = single_succ_edge (split_edge (last_e));
4737 insert_loop_close_phis (scop, last_e->src);
4739 recompute_all_dominators ();
4741 return translate_clast (scop, context_loop, stmt->next, last_e, ivstack);
/* A guard node: snapshot the liveout renames before generating the
   "then" branch, then merge both versions with guard phis.  */
4744 if (CLAST_STMT_IS_A (stmt, stmt_guard))
4746 htab_t liveout_before_guard = htab_create (10, rename_map_elt_info,
4747 eq_rename_map_elts, free);
4748 edge last_e = graphite_create_new_guard (scop, next_e,
4749 ((struct clast_guard *) stmt),
4751 edge true_e = get_true_edge_from_guard_bb (next_e->dest);
4752 edge false_e = get_false_edge_from_guard_bb (next_e->dest);
4753 edge exit_true_e = single_succ_edge (true_e->dest);
4754 edge exit_false_e = single_succ_edge (false_e->dest);
4756 htab_traverse (SCOP_LIVEOUT_RENAMES (scop), copy_renames,
4757 liveout_before_guard);
4759 next_e = translate_clast (scop, context_loop,
4760 ((struct clast_guard *) stmt)->then,
4762 insert_guard_phis (scop, last_e->src, exit_true_e, exit_false_e,
4763 liveout_before_guard);
4764 htab_delete (liveout_before_guard);
4765 recompute_all_dominators ();
4768 return translate_clast (scop, context_loop, stmt->next, last_e, ivstack);
/* A block node only groups statements: translate its body, then
   continue with the chain.  */
4771 if (CLAST_STMT_IS_A (stmt, stmt_block))
4773 next_e = translate_clast (scop, context_loop,
4774 ((struct clast_block *) stmt)->body,
4776 recompute_all_dominators ();
4778 return translate_clast (scop, context_loop, stmt->next, next_e, ivstack);
4784 /* Free the SCATTERING domain list: each node's domain is released,
then the node itself. */
4787 free_scattering (CloogDomainList *scattering)
4791 CloogDomain *dom = cloog_domain (scattering);
/* Grab the successor before freeing the current node.  */
4792 CloogDomainList *next = cloog_next_domain (scattering);
4794 cloog_domain_free (dom);
4800 /* Build cloog program for SCoP: creates the CloogLoop list, the
block list and the scattering function from the GRAPHITE bbs, and
hands them to the CloogProgram in SCOP. */
4803 build_cloog_prog (scop_p scop)
4806 int max_nb_loops = scop_max_loop_depth (scop);
4808 CloogLoop *loop_list = NULL;
4809 CloogBlockList *block_list = NULL;
4810 CloogDomainList *scattering = NULL;
4811 CloogProgram *prog = SCOP_PROG (scop);
/* 2 * d + 1 scattering dimensions: textual order interleaved with the
   d loop dimensions (2d+1 encoding).  */
4812 int nbs = 2 * max_nb_loops + 1;
4813 int *scaldims = (int *) xmalloc (nbs * (sizeof (int)));
4815 cloog_program_set_nb_scattdims (prog, nbs);
4816 initialize_cloog_names (scop);
4818 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gbb); i++)
4820 /* Build new block. */
4821 CloogMatrix *domain = GBB_DOMAIN (gbb);
4822 CloogStatement *stmt = cloog_statement_alloc (GBB_BB (gbb)->index);
4823 CloogBlock *block = cloog_block_alloc (stmt, 0, NULL,
4824 nb_loops_around_gb (gbb));
/* Link the CLooG statement back to the GRAPHITE bb, so that
   translate_clast can recover it via cloog_statement_usr.  */
4825 cloog_statement_set_usr (stmt, gbb);
4827 /* Add empty domain to all bbs, which do not yet have a domain, as they
4828 are not part of any loop. */
4831 domain = cloog_matrix_alloc (0, scop_nb_params (scop) + 2);
4832 GBB_DOMAIN (gbb) = domain;
4835 /* Build loop list. */
4837 CloogLoop *new_loop_list = cloog_loop_malloc ();
4838 cloog_loop_set_next (new_loop_list, loop_list);
4839 cloog_loop_set_domain (new_loop_list,
4840 cloog_domain_matrix2domain (domain));
4841 cloog_loop_set_block (new_loop_list, block);
4842 loop_list = new_loop_list;
4845 /* Build block list. */
4847 CloogBlockList *new_block_list = cloog_block_list_malloc ();
4849 cloog_block_list_set_next (new_block_list, block_list);
4850 cloog_block_list_set_block (new_block_list, block);
4851 block_list = new_block_list;
4854 /* Build scattering list. */
4856 /* XXX: Replace with cloog_domain_list_alloc(), when available. */
4857 CloogDomainList *new_scattering
4858 = (CloogDomainList *) xmalloc (sizeof (CloogDomainList));
4859 CloogMatrix *scat_mat = schedule_to_scattering (gbb, nbs);
4861 cloog_set_next_domain (new_scattering, scattering);
4862 cloog_set_domain (new_scattering,
4863 cloog_domain_matrix2domain (scat_mat));
4864 scattering = new_scattering;
/* The matrix was converted into a domain; the matrix itself can go.  */
4865 cloog_matrix_free (scat_mat);
4869 cloog_program_set_loop (prog, loop_list);
4870 cloog_program_set_blocklist (prog, block_list);
/* NOTE(review): the loop body (scaldims initialization) is presumably
   zero-filling each entry -- confirm.  */
4872 for (i = 0; i < nbs; i++)
4875 cloog_program_set_scaldims (prog, scaldims);
4877 /* Extract scalar dimensions to simplify the code generation problem. */
4878 cloog_program_extract_scalars (prog, scattering);
4880 /* Apply scattering. */
4881 cloog_program_scatter (prog, scattering);
4882 free_scattering (scattering);
4884 /* Iterators corresponding to scalar dimensions have to be extracted. */
4885 cloog_names_scalarize (cloog_program_names (prog), nbs,
4886 cloog_program_scaldims (prog));
4888 /* Free blocklist. */
4890 CloogBlockList *next = cloog_program_blocklist (prog);
4894 CloogBlockList *toDelete = next;
4895 next = cloog_block_list_next (next);
/* Detach the node before freeing so the rest of the list and the
   blocks (still referenced from the loop list) survive.  */
4896 cloog_block_list_set_next (toDelete, NULL);
4897 cloog_block_list_set_block (toDelete, NULL);
4898 cloog_block_list_free (toDelete);
4900 cloog_program_set_blocklist (prog, NULL);
4904 /* Return the options that will be used in GLOOG.  Caller owns the
result and frees it with cloog_options_free. */
4906 static CloogOptions *
4907 set_cloog_options (void)
4909 CloogOptions *options = cloog_options_malloc ();
4911 /* Change cloog output language to C. If we do use FORTRAN instead, cloog
4912 will stop e.g. with "ERROR: unbounded loops not allowed in FORTRAN.", if
4913 we pass an incomplete program to cloog. */
4914 options->language = LANGUAGE_C;
4916 /* Enable complex equality spreading: removes dummy statements
4917 (assignments) in the generated code which repeats the
4918 substitution equations for statements. This is useless for
4922 /* Enable C pretty-printing mode: normalizes the substitution
4923 equations for statements. */
4926 /* Allow cloog to build strides with a stride width different to one.
4927 This example has stride = 4:
4929 for (i = 0; i < 20; i += 4)
4931 options->strides = 1;
4933 /* Disable optimizations and make cloog generate source code closer to the
4934 input. This is useful for debugging, but later we want the optimized
4937 XXX: We can not disable optimizations, as loop blocking is not working
4942 options->l = INT_MAX;
4948 /* Prints STMT to STDERR. */
4951 debug_clast_stmt (struct clast_stmt *stmt)
4953 CloogOptions *options = set_cloog_options ();
/* NOTE(review): OPTIONS is not visibly freed in this debug helper --
   confirm whether the leak is intentional.  */
4955 pprint (stderr, stmt, 0, options);
4958 /* Find the right transform for the SCOP, and return a Cloog AST
4959 representing the new form of the program. */
4961 static struct clast_stmt *
4962 find_transform (scop_p scop)
4964 struct clast_stmt *stmt;
4965 CloogOptions *options = set_cloog_options ();
4967 /* Connect new cloog prog generation to graphite. */
4968 build_cloog_prog (scop);
4970 if (dump_file && (dump_flags & TDF_DETAILS))
4972 fprintf (dump_file, "Cloog Input [\n");
4973 cloog_program_print (dump_file, SCOP_PROG(scop));
4974 fprintf (dump_file, "]\n");
/* Run CLooG's code generation, then build the CLAST (the AST we
   translate back to GIMPLE in gloog/translate_clast).  */
4977 SCOP_PROG (scop) = cloog_program_generate (SCOP_PROG (scop), options);
4978 stmt = cloog_clast_create (SCOP_PROG (scop), options);
4980 if (dump_file && (dump_flags & TDF_DETAILS))
4982 fprintf (dump_file, "Cloog Output[\n");
4983 pprint (dump_file, stmt, 0, options);
4984 cloog_program_dump_cloog (dump_file, SCOP_PROG (scop));
4985 fprintf (dump_file, "]\n");
4988 cloog_options_free (options);
4992 /* Remove from the CFG the REGION: deletes every basic block strictly
inside the single-entry single-exit region. */
4995 remove_sese_region (sese region)
4997 VEC (basic_block, heap) *bbs = NULL;
4998 basic_block entry_bb = SESE_ENTRY (region)->dest;
4999 basic_block exit_bb = SESE_EXIT (region)->dest;
/* Collect the blocks first; deleting while walking the region would
   invalidate the traversal.  */
5003 VEC_safe_push (basic_block, heap, bbs, entry_bb);
5004 gather_blocks_in_sese_region (entry_bb, exit_bb, &bbs);
5006 for (i = 0; VEC_iterate (basic_block, bbs, i, bb); i++)
5007 delete_basic_block (bb);
5009 VEC_free (basic_block, heap, bbs);
/* An if-region: a condition block with a then- and an else-SESE
   region.  See if_region_* accessors below.  */
5012 typedef struct ifsese_d
/* Returns the entry edge of the region guarded by IF_REGION.  */
5020 if_region_entry (ifsese if_region)
5022 return SESE_ENTRY (if_region->region);
/* Returns the exit edge of the region guarded by IF_REGION.  */
5026 if_region_exit (ifsese if_region)
5028 return SESE_EXIT (if_region->region);
/* Returns the block holding the condition of IF_REGION.  */
5031 static inline basic_block
5032 if_region_get_condition_block (ifsese if_region)
5034 return if_region_entry (if_region)->dest;
/* Moves REGION into the false branch of IF_REGION, replacing the
   dummy block that currently sits there, and updates the loop-exit
   hash table for the redirected exit edge.  */
5038 if_region_set_false_region (ifsese if_region, sese region)
5040 basic_block condition = if_region_get_condition_block (if_region);
5041 edge false_edge = get_false_edge_from_guard_bb (condition);
5042 basic_block dummy = false_edge->dest;
5043 edge entry_region = SESE_ENTRY (region);
5044 edge exit_region = SESE_EXIT (region);
5045 basic_block before_region = entry_region->src;
5046 basic_block last_in_region = exit_region->src;
/* Look up the old exit edge's slot now: its key changes below.  */
5047 void **slot = htab_find_slot_with_hash (current_loops->exits, exit_region,
5048 htab_hash_pointer (exit_region),
/* Swap the edge flags so the region entry takes the false role.  */
5051 entry_region->flags = false_edge->flags;
5052 false_edge->flags = exit_region->flags;
/* Rewire: condition -> region ... region-end -> join, dropping the
   dummy block that used to be the false branch.  */
5054 redirect_edge_pred (entry_region, condition);
5055 redirect_edge_pred (exit_region, before_region);
5056 redirect_edge_pred (false_edge, last_in_region);
5057 redirect_edge_succ (false_edge, single_succ (dummy));
5058 delete_basic_block (dummy);
5060 exit_region->flags = EDGE_FALLTHRU;
5061 recompute_all_dominators ();
5063 SESE_EXIT (region) = false_edge;
5064 if_region->false_region = region;
/* The loop-exit hash table is keyed by edge pointer: re-register the
   recorded exit under FALSE_EDGE, which is the region's exit now.  */
5068 struct loop_exit *loop_exit = GGC_CNEW (struct loop_exit);
5070 memcpy (loop_exit, *((struct loop_exit **) slot), sizeof (struct loop_exit));
5071 htab_clear_slot (current_loops->exits, slot);
5073 slot = htab_find_slot_with_hash (current_loops->exits, false_edge,
5074 htab_hash_pointer (false_edge),
5076 loop_exit->e = false_edge;
5078 false_edge->src->loop_father->exits->next = loop_exit;
/* Creates an empty if-region on edge ENTRY guarded by CONDITION and
   returns the ifsese describing it.  Structures are GC-allocated.  */
5083 create_if_region_on_edge (edge entry, tree condition)
5087 sese sese_region = GGC_NEW (struct sese_d);
5088 sese true_region = GGC_NEW (struct sese_d);
5089 sese false_region = GGC_NEW (struct sese_d);
5090 ifsese if_region = GGC_NEW (struct ifsese_d);
5091 edge exit = create_empty_if_region_on_edge (entry, condition);
5093 if_region->region = sese_region;
5094 if_region->region->entry = entry;
5095 if_region->region->exit = exit;
/* Classify the condition's successors into the then- and
   else-subregions by their edge flags.  */
5097 FOR_EACH_EDGE (e, ei, entry->dest->succs)
5099 if (e->flags & EDGE_TRUE_VALUE)
5101 true_region->entry = e;
5102 true_region->exit = single_succ_edge (e->dest);
5103 if_region->true_region = true_region;
5105 else if (e->flags & EDGE_FALSE_VALUE)
5107 false_region->entry = e;
5108 false_region->exit = single_succ_edge (e->dest);
5109 if_region->false_region = false_region;
5116 /* Moves REGION in a condition expression:
5124 move_sese_in_condition (sese region)
/* Split the entry so there is a clean edge to hang the "if" on.  */
5126 basic_block pred_block = split_edge (SESE_ENTRY (region));
5127 ifsese if_region = NULL;
5129 SESE_ENTRY (region) = single_succ_edge (pred_block);
/* The guard is "if (1)"; the generated code later replaces the
   condition or one of the branches.  */
5130 if_region = create_if_region_on_edge (single_pred_edge (pred_block), integer_one_node);
5131 if_region_set_false_region (if_region, region);
5136 /* Add exit phis for USE on EXIT: creates "RES = phi (USE, USE)" with
one argument per incoming branch. */
5139 scop_add_exit_phis_edge (basic_block exit, tree use, edge false_e, edge true_e)
5141 gimple phi = create_phi_node (use, exit);
5143 create_new_def_for (gimple_phi_result (phi), phi,
5144 gimple_phi_result_ptr (phi));
/* Both arguments start as USE; the true-edge one is fixed up later in
   scop_adjust_phis_for_liveouts.  */
5145 add_phi_arg (phi, use, false_e);
5146 add_phi_arg (phi, use, true_e);
5149 /* Add phi nodes for VAR that is used in LIVEIN. Phi nodes are
5150 inserted in block BB. */
5153 scop_add_exit_phis_var (basic_block bb, tree var, bitmap livein,
5154 edge false_e, edge true_e)
5157 basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
/* The defining block itself is not a livein block for a register; it
   is forced in for non-registers -- NOTE(review): confirm the
   intended asymmetry between the two branches.  */
5159 if (is_gimple_reg (var))
5160 bitmap_clear_bit (livein, def_bb->index);
5162 bitmap_set_bit (livein, def_bb->index);
/* Propagate liveness from the definition over the CFG.  */
5164 def = BITMAP_ALLOC (NULL);
5165 bitmap_set_bit (def, def_bb->index);
5166 compute_global_livein (livein, def);
5169 scop_add_exit_phis_edge (bb, var, false_e, true_e);
5172 /* Insert in the block BB phi nodes for variables defined in REGION
5173 and used outside the REGION. The code generation moves REGION in
5174 the else clause of an "if (1)" and generates code in the then
5175 clause that is at this point empty:
5184 scop_insert_phis_for_liveouts (sese region, basic_block bb,
5185 edge false_e, edge true_e)
5190 update_ssa (TODO_update_ssa);
/* One exit phi per liveout SSA version of the region.  */
5192 EXECUTE_IF_SET_IN_BITMAP (SESE_LIVEOUT (region), 0, i, bi)
5193 scop_add_exit_phis_var (bb, ssa_name (i), SESE_LIVEIN_VER (region, i),
5196 update_ssa (TODO_update_ssa);
5199 /* Get the definition of NAME before the SCOP. Keep track of the
5200 basic blocks that have been VISITED in a bitmap. */
5203 get_vdef_before_scop (scop_p scop, tree name, sbitmap visited)
5206 gimple def_stmt = SSA_NAME_DEF_STMT (name);
5207 basic_block def_bb = gimple_bb (def_stmt);
/* A definition outside the SCOP is the answer.  */
5210 || !bb_in_sese_p (def_bb, SCOP_REGION (scop)))
/* VISITED prevents cycling through phi nodes in loops.  */
5213 if (TEST_BIT (visited, def_bb->index))
5216 SET_BIT (visited, def_bb->index);
5218 switch (gimple_code (def_stmt))
/* Recurse through each phi argument until a pre-SCOP def is found.  */
5221 for (i = 0; i < gimple_phi_num_args (def_stmt); i++)
5223 tree arg = gimple_phi_arg_def (def_stmt, i);
5224 tree res = get_vdef_before_scop (scop, arg, visited);
5235 /* Adjust a virtual phi node PHI that is placed at the end of the
5236 generated code for SCOP:
5239 | generated code from REGION;
5243 The FALSE_E edge comes from the original code, TRUE_E edge comes
5244 from the code generated for the SCOP. */
5247 scop_adjust_vphi (scop_p scop, gimple phi, edge true_e)
/* This merge phi must have exactly the two branch arguments.  */
5251 gcc_assert (gimple_phi_num_args (phi) == 2);
5253 for (i = 0; i < gimple_phi_num_args (phi); i++)
5254 if (gimple_phi_arg_edge (phi, i) == true_e)
5256 tree true_arg, false_arg, before_scop_arg;
5259 true_arg = gimple_phi_arg_def (phi, i);
/* Nothing to do when the generated side already uses the default
   definition.  */
5260 if (!SSA_NAME_IS_DEFAULT_DEF (true_arg))
5263 false_arg = gimple_phi_arg_def (phi, i == 0 ? 1 : 0);
5264 if (SSA_NAME_IS_DEFAULT_DEF (false_arg))
/* Replace the true-edge argument with the virtual definition that
   was live before the SCOP.  */
5267 visited = sbitmap_alloc (last_basic_block);
5268 sbitmap_zero (visited);
5269 before_scop_arg = get_vdef_before_scop (scop, false_arg, visited);
5270 gcc_assert (before_scop_arg != NULL_TREE);
5271 SET_PHI_ARG_DEF (phi, i, before_scop_arg);
5272 sbitmap_free (visited);
5276 /* Adjusts the phi nodes in the block BB for variables defined in
5277 SCOP_REGION and used outside the SCOP_REGION. The code generation
5278 moves SCOP_REGION in the else clause of an "if (1)" and generates
5279 code in the then clause:
5282 | generated code from REGION;
5286 To adjust the phi nodes after the condition, SCOP_LIVEOUT_RENAMES
5287 hash table is used: this stores for a name that is part of the
5288 LIVEOUT of SCOP_REGION its new name in the generated code. */
5291 scop_adjust_phis_for_liveouts (scop_p scop, basic_block bb, edge false_e,
5294 gimple_stmt_iterator si;
5296 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5299 unsigned false_i = 0;
5300 gimple phi = gsi_stmt (si);
/* Virtual phis take the dedicated adjustment path.  */
5302 if (!is_gimple_reg (PHI_RESULT (phi)))
5304 scop_adjust_vphi (scop, phi, true_e);
/* Find the argument coming from the original (false) side...  */
5308 for (i = 0; i < gimple_phi_num_args (phi); i++)
5309 if (gimple_phi_arg_edge (phi, i) == false_e)
/* ...and replace the generated (true) side's argument with the
   renamed liveout version of that name.  */
5315 for (i = 0; i < gimple_phi_num_args (phi); i++)
5316 if (gimple_phi_arg_edge (phi, i) == true_e)
5318 tree old_name = gimple_phi_arg_def (phi, false_i);
5319 tree new_name = get_new_name_from_old_name
5320 (SCOP_LIVEOUT_RENAMES (scop), old_name);
5322 gcc_assert (old_name != new_name);
5323 SET_PHI_ARG_DEF (phi, i, new_name);
5328 /* Returns the first cloog name used in EXPR, or NULL when EXPR
contains none. */
5331 find_cloog_iv_in_expr (struct clast_expr *expr)
5333 struct clast_term *term = (struct clast_term *) expr;
/* A term with a variable directly names the iv.  */
5335 if (expr->type == expr_term
5339 if (expr->type == expr_term)
/* For a reduction, recurse into each element until one yields a
   name.  */
5342 if (expr->type == expr_red)
5345 struct clast_reduction *red = (struct clast_reduction *) expr;
5347 for (i = 0; i < red->n; i++)
5349 const char *res = find_cloog_iv_in_expr ((red)->elts[i]);
5359 /* Build for a clast_user_stmt USER_STMT a map between the CLAST
5360 induction variables and the corresponding GCC old induction
5361 variables. This information is stored on each GRAPHITE_BB. */
5364 compute_cloog_iv_types_1 (graphite_bb_p gbb,
5365 struct clast_user_stmt *user_stmt)
5367 struct clast_stmt *t;
/* Each substitution corresponds, in order, to one loop level of
   GBB.  */
5370 for (t = user_stmt->substitutions; t; t = t->next, index++)
5373 struct ivtype_map_elt_d tmp;
5374 struct clast_expr *expr = (struct clast_expr *)
5375 ((struct clast_assignment *)t)->RHS;
5377 /* Create an entry (clast_var, type). */
5378 tmp.cloog_iv = find_cloog_iv_in_expr (expr);
5382 slot = htab_find_slot (GBB_CLOOG_IV_TYPES (gbb), &tmp, INSERT);
/* The new iv takes the type of the old iv at the same depth; fall
   back to integer_type_node when there is no old iv.  */
5386 loop_p loop = gbb_loop_at_index (gbb, index);
5387 tree oldiv = oldiv_for_loop (GBB_SCOP (gbb), loop);
5388 tree type = oldiv ? TREE_TYPE (oldiv) : integer_type_node;
5389 *slot = new_ivtype_map_elt (tmp.cloog_iv, type);
5394 /* Walk the CLAST tree starting from STMT and build for each
5395 clast_user_stmt a map between the CLAST induction variables and the
5396 corresponding GCC old induction variables. This information is
5397 stored on each GRAPHITE_BB. */
5400 compute_cloog_iv_types (struct clast_stmt *stmt)
5405 if (CLAST_STMT_IS_A (stmt, stmt_root))
/* User statements are the leaves that actually carry iv maps.  */
5408 if (CLAST_STMT_IS_A (stmt, stmt_user))
5410 CloogStatement *cs = ((struct clast_user_stmt *) stmt)->statement;
5411 graphite_bb_p gbb = (graphite_bb_p) cloog_statement_usr (cs);
5412 GBB_CLOOG_IV_TYPES (gbb) = htab_create (10, ivtype_map_elt_info,
5413 eq_ivtype_map_elts, free);
5414 compute_cloog_iv_types_1 (gbb, (struct clast_user_stmt *) stmt);
/* For the structured nodes, recurse into the nested statements.  */
5418 if (CLAST_STMT_IS_A (stmt, stmt_for))
5420 struct clast_stmt *s = ((struct clast_for *) stmt)->body;
5421 compute_cloog_iv_types (s);
5425 if (CLAST_STMT_IS_A (stmt, stmt_guard))
5427 struct clast_stmt *s = ((struct clast_guard *) stmt)->then;
5428 compute_cloog_iv_types (s);
5432 if (CLAST_STMT_IS_A (stmt, stmt_block))
5434 struct clast_stmt *s = ((struct clast_block *) stmt)->body;
5435 compute_cloog_iv_types (s);
/* Continue along the sibling chain.  */
5442 compute_cloog_iv_types (stmt->next);
5445 /* GIMPLE Loop Generator: generates loops from STMT in GIMPLE form for
5446 the given SCOP. Return true if code generation succeeded. */
5449 gloog (scop_p scop, struct clast_stmt *stmt)
5451 edge new_scop_exit_edge = NULL;
5452 VEC (iv_stack_entry_p, heap) *ivstack = VEC_alloc (iv_stack_entry_p, heap,
5454 loop_p context_loop;
5455 ifsese if_region = NULL;
5457 recompute_all_dominators ();
/* Put the SCOP into the else branch of an "if (1)", so the new code
   can be generated into the then branch while the old code stays
   reachable; exit phis merge both versions.  */
5459 if_region = move_sese_in_condition (SCOP_REGION (scop));
5460 sese_build_livein_liveouts (SCOP_REGION (scop));
5461 scop_insert_phis_for_liveouts (SCOP_REGION (scop),
5462 if_region->region->exit->src,
5463 if_region->false_region->exit,
5464 if_region->true_region->exit);
5465 recompute_all_dominators ();
5467 context_loop = SESE_ENTRY (SCOP_REGION (scop))->src->loop_father;
5468 compute_cloog_iv_types (stmt);
/* Generate the new code on the then-branch entry edge.  */
5470 new_scop_exit_edge = translate_clast (scop, context_loop, stmt,
5471 if_region->true_region->entry,
5473 free_loop_iv_stack (&ivstack);
5474 cloog_clast_free (stmt);
/* Fix up the merge phis so the generated side feeds the renamed
   liveout values.  */
5477 scop_adjust_phis_for_liveouts (scop,
5478 if_region->region->exit->src,
5479 if_region->false_region->exit,
5480 if_region->true_region->exit);
5482 recompute_all_dominators ();
5487 /* Returns the number of data references in SCOP: the sum of the data
references of all its GRAPHITE bbs. */
5490 nb_data_refs_in_scop (scop_p scop)
5496 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gbb); i++)
5497 res += VEC_length (data_reference_p, GBB_DATA_REFS (gbb));
5502 /* Move the loop at index LOOP and insert it before index NEW_LOOP_POS.
5503 This transformation is only valid, if the loop nest between i and k is
5504 perfectly nested. Therefore we do not need to change the static schedule.
5508 for (i = 0; i < 50; i++)
5510 for (k = 5; k < 100; k++)
5513 To move k before i use:
5515 graphite_trans_bb_move_loop (A, 2, 0)
5517 for (k = 5; k < 100; k++)
5518 for (i = 0; i < 50; i++)
5524 graphite_trans_bb_move_loop (A, 0, 2)
5526 This function does not check the validity of interchanging loops.
5527 This should be checked before calling this function. */
5530 graphite_trans_bb_move_loop (graphite_bb_p gb, int loop,
5533 CloogMatrix *domain = GBB_DOMAIN (gb);
5537 gcc_assert (loop < gbb_nb_loops (gb)
5538 && new_loop_pos < gbb_nb_loops (gb));
5540 /* Update LOOPS vector. */
5541 tmp_loop_p = VEC_index (loop_p, GBB_LOOPS (gb), loop);
5542 VEC_ordered_remove (loop_p, GBB_LOOPS (gb), loop);
5543 VEC_safe_insert (loop_p, heap, GBB_LOOPS (gb), new_loop_pos, tmp_loop_p);
5545 /* Move the domain columns.  Column LOOP + 1 is rotated to its new
position; the +1 offsets skip the eq/ineq indicator column.  */
5546 if (loop < new_loop_pos)
5547 for (row = 0; row < domain->NbRows; row++)
5551 value_assign (tmp, domain->p[row][loop + 1]);
/* Shift the intervening columns one position to the left.  */
5553 for (j = loop ; j < new_loop_pos - 1; j++)
5554 value_assign (domain->p[row][j + 1], domain->p[row][j + 2]);
5556 value_assign (domain->p[row][new_loop_pos], tmp);
5560 for (row = 0; row < domain->NbRows; row++)
5564 value_assign (tmp, domain->p[row][loop + 1]);
/* Shift the intervening columns one position to the right.  */
5566 for (j = loop ; j > new_loop_pos; j--)
5567 value_assign (domain->p[row][j + 1], domain->p[row][j]);
5569 value_assign (domain->p[row][new_loop_pos + 1], tmp);
5574 /* Get the index of the column representing constants in the DOMAIN
matrix: always the last column. */
5578 const_column_index (CloogMatrix *domain)
5580 return domain->NbColumns - 1;
5584 /* Get the first index that is positive or negative, determined
5585 following the value of POSITIVE, in matrix DOMAIN in COLUMN. */
5588 get_first_matching_sign_row_index (CloogMatrix *domain, int column,
5593 for (row = 0; row < domain->NbRows; row++)
5595 int val = value_get_si (domain->p[row][column]);
/* Positive coefficient -> lower bound, negative -> upper bound.  */
5597 if (val > 0 && positive)
5600 else if (val < 0 && !positive)
5607 /* Get the lower bound of COLUMN in matrix DOMAIN: the first row with
a positive coefficient in COLUMN. */
5610 get_lower_bound_row (CloogMatrix *domain, int column)
5612 return get_first_matching_sign_row_index (domain, column, true);
5615 /* Get the upper bound of COLUMN in matrix DOMAIN: the first row with
a negative coefficient in COLUMN. */
5618 get_upper_bound_row (CloogMatrix *domain, int column)
5620 return get_first_matching_sign_row_index (domain, column, false);
5623 /* Copies the OLD_ROW constraint from OLD_DOMAIN to the NEW_DOMAIN at
row NEW_ROW.  Both matrices must have the same number of columns. */
5627 copy_constraint (CloogMatrix *old_domain, CloogMatrix *new_domain,
5628 int old_row, int new_row)
5632 gcc_assert (old_domain->NbColumns == new_domain->NbColumns
5633 && old_row < old_domain->NbRows
5634 && new_row < new_domain->NbRows);
5636 for (i = 0; i < old_domain->NbColumns; i++)
5637 value_assign (new_domain->p[new_row][i], old_domain->p[old_row][i]);
5640 /* Swap coefficients of variables X and Y on row R. */
5643 swap_constraint_variables (CloogMatrix *domain,
5644 int r, int x, int y)
5646 value_swap (domain->p[r][x], domain->p[r][y]);
5649 /* Scale by X the coefficient C of constraint at row R in DOMAIN. */
5652 scale_constraint_variable (CloogMatrix *domain,
5653 int r, int c, int x)
/* GMP multiplication needs X as an initialized Value.  */
5655 Value strip_size_value;
5656 value_init (strip_size_value);
5657 value_set_si (strip_size_value, x);
5658 value_multiply (domain->p[r][c], domain->p[r][c], strip_size_value);
5659 value_clear (strip_size_value);
5662 /* Strip mines the loop of BB at the position LOOP_DEPTH with STRIDE.
5663 Always valid, but not always a performance improvement.
Introduces a new (outer) loop dimension before LOOP_DEPTH and adds
the lower/upper bound constraints that tie a tile of size STRIDE to
the new outer iterator. */
5666 graphite_trans_bb_strip_mine (graphite_bb_p gb, int loop_depth, int stride)
/* New domain: one extra column for the new iterator, three extra rows
   for its bound constraints.  */
5670 CloogMatrix *domain = GBB_DOMAIN (gb);
5671 CloogMatrix *new_domain = cloog_matrix_alloc (domain->NbRows + 3,
5672 domain->NbColumns + 1);
5674 int col_loop_old = loop_depth + 2;
5675 int col_loop_strip = col_loop_old - 1;
5677 gcc_assert (loop_depth <= gbb_nb_loops (gb) - 1);
/* Placeholder for the new strip loop in the loop vector.  */
5679 VEC_safe_insert (loop_p, heap, GBB_LOOPS (gb), loop_depth, NULL);
5681 GBB_DOMAIN (gb) = new_domain;
/* Copy the old domain, shifting columns after LOOP_DEPTH right by one
   to make room for the strip iterator column.  */
5683 for (row = 0; row < domain->NbRows; row++)
5684 for (col = 0; col < domain->NbColumns; col++)
5685 if (col <= loop_depth)
5686 value_assign (new_domain->p[row][col], domain->p[row][col])
5688 value_assign (new_domain->p[row][col + 1], domain->p[row][col]);
5690 row = domain->NbRows;
5692 /* Lower bound of the outer stripped loop. */
5693 copy_constraint (new_domain, new_domain,
5694 get_lower_bound_row (new_domain, col_loop_old), row);
5695 swap_constraint_variables (new_domain, row, col_loop_old, col_loop_strip);
5698 /* Upper bound of the outer stripped loop. */
5699 copy_constraint (new_domain, new_domain,
5700 get_upper_bound_row (new_domain, col_loop_old), row);
5701 swap_constraint_variables (new_domain, row, col_loop_old, col_loop_strip);
5702 scale_constraint_variable (new_domain, row, col_loop_strip, stride);
5705 /* Lower bound of a tile starts at "stride * outer_iv". */
5706 row = get_lower_bound_row (new_domain, col_loop_old);
5707 value_set_si (new_domain->p[row][0], 1);
5708 value_set_si (new_domain->p[row][const_column_index (new_domain)], 0);
5709 value_set_si (new_domain->p[row][col_loop_old], 1);
5710 value_set_si (new_domain->p[row][col_loop_strip], -1 * stride);
5712 /* Upper bound of a tile stops at "stride * outer_iv + stride - 1",
5713 or at the old upper bound that is not modified. */
5714 row = new_domain->NbRows - 1;
5715 value_set_si (new_domain->p[row][0], 1);
5716 value_set_si (new_domain->p[row][col_loop_old], -1);
5717 value_set_si (new_domain->p[row][col_loop_strip], stride);
5718 value_set_si (new_domain->p[row][const_column_index (new_domain)],
5721 cloog_matrix_free (domain);
5723 /* Update static schedule. */
5726 int nb_loops = gbb_nb_loops (gb);
5727 lambda_vector new_schedule = lambda_vector_new (nb_loops + 1);
/* Entries up to LOOP_DEPTH keep their place; entries after it shift
   to make room for the new dimension.  */
5729 for (i = 0; i <= loop_depth; i++)
5730 new_schedule[i] = GBB_STATIC_SCHEDULE (gb)[i];
5732 for (i = loop_depth + 1; i <= nb_loops - 2; i++)
5733 new_schedule[i + 2] = GBB_STATIC_SCHEDULE (gb)[i];
5735 GBB_STATIC_SCHEDULE (gb) = new_schedule;
5739 /* Returns true when the strip mining of LOOP_INDEX by STRIDE is
5740 profitable or undecidable. GB is the statement around which the
5741 loops will be strip mined. */
5744 strip_mine_profitable_p (graphite_bb_p gb, int stride,
5753 loop = VEC_index (loop_p, GBB_LOOPS (gb), loop_index);
5754 exit = single_exit (loop);
5756 niter = find_loop_niter (loop, &exit);
5757 if (niter == chrec_dont_know
5758 || TREE_CODE (niter) != INTEGER_CST)
5761 niter_val = int_cst_value (niter);
5763 if (niter_val < stride)
5766 if (dump_file && (dump_flags & TDF_DETAILS))
5768 fprintf (dump_file, "\nStrip Mining is not profitable for loop %d:",
5770 fprintf (dump_file, "number of iterations is too low.\n");
5777 /* Determines when the interchange of LOOP_A and LOOP_B belonging to
5778 SCOP is legal. DEPTH is the number of loops around. */
5781 is_interchange_valid (scop_p scop, int loop_a, int loop_b, int depth)
5784 VEC (ddr_p, heap) *dependence_relations;
5785 VEC (data_reference_p, heap) *datarefs;
5787 struct loop *nest = VEC_index (loop_p, SCOP_LOOP_NEST (scop), loop_a);
5788 lambda_trans_matrix trans;
5790 gcc_assert (loop_a < loop_b);
5792 dependence_relations = VEC_alloc (ddr_p, heap, 10 * 10);
5793 datarefs = VEC_alloc (data_reference_p, heap, 10);
5795 if (!compute_data_dependences_for_loop (nest, true, &datarefs,
5796 &dependence_relations))
5799 if (dump_file && (dump_flags & TDF_DETAILS))
5800 dump_ddrs (dump_file, dependence_relations);
5802 trans = lambda_trans_matrix_new (depth, depth);
5803 lambda_matrix_id (LTM_MATRIX (trans), depth);
5805 lambda_matrix_row_exchange (LTM_MATRIX (trans), 0, loop_b - loop_a);
5807 if (!lambda_transform_legal_p (trans, depth, dependence_relations))
5809 lambda_matrix_row_exchange (LTM_MATRIX (trans), 0, loop_b - loop_a);
5815 free_dependence_relations (dependence_relations);
5816 free_data_refs (datarefs);
5820 /* Loop block the LOOPS innermost loops of GB with stride size STRIDE.
5824 for (i = 0; i <= 50; i++=4)
5825 for (k = 0; k <= 100; k++=4)
5826 for (l = 0; l <= 200; l++=4)
5829 To strip mine the two inner most loops with stride = 4 call:
5831 graphite_trans_bb_block (A, 4, 2)
5833 for (i = 0; i <= 50; i++)
5834 for (kk = 0; kk <= 100; kk+=4)
5835 for (ll = 0; ll <= 200; ll+=4)
5836 for (k = kk; k <= min (100, kk + 3); k++)
5837 for (l = ll; l <= min (200, ll + 3); l++)
5842 graphite_trans_bb_block (graphite_bb_p gb, int stride, int loops)
5845 int nb_loops = gbb_nb_loops (gb);
5846 int start = nb_loops - loops;
5847 scop_p scop = GBB_SCOP (gb);
5849 gcc_assert (scop_contains_loop (scop, gbb_loop (gb)));
5851 for (i = start ; i < nb_loops; i++)
5852 for (j = i + 1; j < nb_loops; j++)
5853 if (!is_interchange_valid (scop, i, j, nb_loops))
5855 if (dump_file && (dump_flags & TDF_DETAILS))
5857 "\nInterchange not valid for loops %d and %d:\n", i, j);
5860 else if (dump_file && (dump_flags & TDF_DETAILS))
5862 "\nInterchange valid for loops %d and %d:\n", i, j);
5864 /* Check if strip mining is profitable for every loop. */
5865 for (i = 0; i < nb_loops - start; i++)
5866 if (!strip_mine_profitable_p (gb, stride, start + i))
5869 /* Strip mine loops. */
5870 for (i = 0; i < nb_loops - start; i++)
5871 graphite_trans_bb_strip_mine (gb, start + 2 * i, stride);
5873 /* Interchange loops. */
5874 for (i = 1; i < nb_loops - start; i++)
5875 graphite_trans_bb_move_loop (gb, start + 2 * i, start + i);
5877 if (dump_file && (dump_flags & TDF_DETAILS))
5878 fprintf (dump_file, "\nLoops containing BB %d will be loop blocked.\n",
5879 GBB_BB (gb)->index);
5884 /* Loop block LOOPS innermost loops of a loop nest. BBS represent the
5885 basic blocks that belong to the loop nest to be blocked. */
5888 graphite_trans_loop_block (VEC (graphite_bb_p, heap) *bbs, int loops)
5892 bool transform_done = false;
5894 /* TODO: - Calculate the stride size automatically. */
5895 int stride_size = 51;
5897 for (i = 0; VEC_iterate (graphite_bb_p, bbs, i, gb); i++)
5898 transform_done |= graphite_trans_bb_block (gb, stride_size, loops);
5900 return transform_done;
5903 /* Loop block all basic blocks of SCOP. Return false when the
5904 transform is not performed. */
5907 graphite_trans_scop_block (scop_p scop)
5913 bool perfect = true;
5914 bool transform_done = false;
5916 VEC (graphite_bb_p, heap) *bbs = VEC_alloc (graphite_bb_p, heap, 3);
5917 int max_schedule = scop_max_loop_depth (scop) + 1;
5918 lambda_vector last_schedule = lambda_vector_new (max_schedule);
5920 if (VEC_length (graphite_bb_p, SCOP_BBS (scop)) == 0)
5923 /* Get the data of the first bb. */
5924 gb = VEC_index (graphite_bb_p, SCOP_BBS (scop), 0);
5925 last_nb_loops = gbb_nb_loops (gb);
5926 lambda_vector_copy (GBB_STATIC_SCHEDULE (gb), last_schedule,
5928 VEC_safe_push (graphite_bb_p, heap, bbs, gb);
5930 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
5932 /* We did the first bb before. */
5936 nb_loops = gbb_nb_loops (gb);
5938 /* If the number of loops is unchanged and only the last element of the
5939 schedule changes, we stay in the loop nest. */
5940 if (nb_loops == last_nb_loops
5941 && (last_schedule [nb_loops + 1]
5942 != GBB_STATIC_SCHEDULE (gb)[nb_loops + 1]))
5944 VEC_safe_push (graphite_bb_p, heap, bbs, gb);
5948 /* Otherwise, we left the innermost loop. So check, if the last bb was in
5949 a perfect loop nest and how many loops are contained in this perfect
5952 Count the number of zeros from the end of the schedule. They are the
5953 number of surrounding loops.
5956 last_bb 2 3 2 0 0 0 0 3
5960 last_bb 2 3 2 0 0 0 0 3
5964 If there is no zero, there were other bbs in outer loops and the loop
5965 nest is not perfect. */
5966 for (j = last_nb_loops - 1; j >= 0; j--)
5968 if (last_schedule [j] != 0
5969 || (j <= nb_loops && GBB_STATIC_SCHEDULE (gb)[j] == 1))
5978 /* Found perfect loop nest. */
5979 if (perfect && last_nb_loops - j >= 2)
5980 transform_done |= graphite_trans_loop_block (bbs, last_nb_loops - j);
5982 /* Check if we start with a new loop.
5986 last_bb 2 3 2 0 0 0 0 3
5989 Here we start with the loop "2 3 2 0 0 1"
5991 last_bb 2 3 2 0 0 0 0 3
5994 But here not, so the loop nest can never be perfect. */
5996 perfect = (GBB_STATIC_SCHEDULE (gb)[nb_loops] == 0);
5998 /* Update the last_bb infos. We do not do that for the bbs in the same
5999 loop, as the data we use is not changed. */
6000 last_nb_loops = nb_loops;
6001 lambda_vector_copy (GBB_STATIC_SCHEDULE (gb), last_schedule,
6003 VEC_truncate (graphite_bb_p, bbs, 0);
6004 VEC_safe_push (graphite_bb_p, heap, bbs, gb);
6007 /* Check if the last loop nest was perfect. It is the same check as above,
6008 but the comparison with the next bb is missing. */
6009 for (j = last_nb_loops - 1; j >= 0; j--)
6010 if (last_schedule [j] != 0)
6018 /* Found perfect loop nest. */
6019 if (last_nb_loops - j >= 2)
6020 transform_done |= graphite_trans_loop_block (bbs, last_nb_loops - j);
6021 VEC_free (graphite_bb_p, heap, bbs);
6023 return transform_done;
6026 /* Apply graphite transformations to all the basic blocks of SCOP. */
6029 graphite_apply_transformations (scop_p scop)
6031 bool transform_done = false;
6033 /* Sort the list of bbs. Keep them always sorted. */
6034 graphite_sort_gbbs (scop);
6036 if (flag_loop_block)
6037 transform_done = graphite_trans_scop_block (scop);
6039 /* Generate code even if we did not apply any real transformation.
6040 This also allows to check the performance for the identity
6041 transformation: GIMPLE -> GRAPHITE -> GIMPLE
6042 Keep in mind that CLooG optimizes in control, so the loop structure
6043 may change, even if we only use -fgraphite-identity. */
6044 if (flag_graphite_identity)
6045 transform_done = true;
6047 return transform_done;
6050 /* We limit all SCoPs to SCoPs, that are completely surrounded by a loop.
6060 * SCoP frontier, as this line is not surrounded by any loop. *
6064 This is necessary as scalar evolution and parameter detection need a
6065 outermost loop to initialize parameters correctly.
6067 TODO: FIX scalar evolution and parameter detection to allow more flexible
6073 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap, 3);
6078 for (i = 0; VEC_iterate (scop_p, current_scops, i, scop); i++)
6082 build_scop_bbs (scop);
6084 if (!build_scop_loop_nests (scop))
6087 for (j = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), j, loop); j++)
6088 if (!loop_in_sese_p (loop_outer (loop), SCOP_REGION (scop)))
6090 sd_region open_scop;
6091 open_scop.entry = loop->header;
6092 open_scop.exit = single_exit (loop)->dest;
6093 VEC_safe_push (sd_region, heap, tmp_scops, &open_scop);
6097 free_scops (current_scops);
6098 current_scops = VEC_alloc (scop_p, heap, 3);
6100 create_sese_edges (tmp_scops);
6101 build_graphite_scops (tmp_scops);
6102 VEC_free (sd_region, heap, tmp_scops);
6105 /* Perform a set of linear transforms on the loops of the current
6109 graphite_transform_loops (void)
6113 bool transform_done = false;
6115 if (number_of_loops () <= 1)
6118 current_scops = VEC_alloc (scop_p, heap, 3);
6119 recompute_all_dominators ();
6121 if (dump_file && (dump_flags & TDF_DETAILS))
6122 fprintf (dump_file, "Graphite loop transformations \n");
6124 initialize_original_copy_tables ();
6125 cloog_initialize ();
6129 if (dump_file && (dump_flags & TDF_DETAILS))
6130 fprintf (dump_file, "\nnumber of SCoPs: %d\n",
6131 VEC_length (scop_p, current_scops));
6133 for (i = 0; VEC_iterate (scop_p, current_scops, i, scop); i++)
6135 build_scop_bbs (scop);
6136 if (!build_scop_loop_nests (scop))
6139 build_bb_loops (scop);
6141 if (!build_scop_conditions (scop))
6144 find_scop_parameters (scop);
6145 build_scop_context (scop);
6147 if (dump_file && (dump_flags & TDF_DETAILS))
6149 fprintf (dump_file, "\n(In SCoP %d:\n", i);
6150 fprintf (dump_file, "\nnumber of bbs: %d\n",
6151 VEC_length (graphite_bb_p, SCOP_BBS (scop)));
6152 fprintf (dump_file, "\nnumber of loops: %d)\n",
6153 VEC_length (loop_p, SCOP_LOOP_NEST (scop)));
6156 if (!build_scop_iteration_domain (scop))
6159 add_conditions_to_constraints (scop);
6160 build_scop_canonical_schedules (scop);
6162 build_scop_data_accesses (scop);
6163 build_scop_dynamic_schedules (scop);
6165 if (dump_file && (dump_flags & TDF_DETAILS))
6167 int nbrefs = nb_data_refs_in_scop (scop);
6168 fprintf (dump_file, "\nnumber of data refs: %d\n", nbrefs);
6171 if (graphite_apply_transformations (scop))
6172 transform_done = gloog (scop, find_transform (scop));
6173 #ifdef ENABLE_CHECKING
6176 struct clast_stmt *stmt = find_transform (scop);
6177 cloog_clast_free (stmt);
6184 cleanup_tree_cfg ();
6186 free_scops (current_scops);
6188 free_original_copy_tables ();
6191 #else /* If Cloog is not available: #ifndef HAVE_cloog. */
6194 graphite_transform_loops (void)
6196 sorry ("Graphite loop optimizations cannot be used");