1 /* Gimple Represented as Polyhedra.
2 Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
3 Contributed by Sebastian Pop <sebastian.pop@inria.fr>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* This pass converts GIMPLE to GRAPHITE, performs some loop
22 transformations and then converts the resulting representation back
23 to GIMPLE.
25 An early description of this pass can be found in the GCC Summit'06
26 paper "GRAPHITE: Polyhedral Analyses and Optimizations for GCC".
27 The wiki page http://gcc.gnu.org/wiki/Graphite contains pointers to
30 One important document to read is CLooG's internal manual:
31 http://repo.or.cz/w/cloog-ppl.git?a=blob_plain;f=doc/cloog.texi;hb=HEAD
32 that describes the data structure of loops used in this file, and
33 the functions that are used for transforming the code. */
37 #include "coretypes.h"
42 #include "basic-block.h"
43 #include "diagnostic.h"
44 #include "tree-flow.h"
46 #include "tree-dump.h"
49 #include "tree-chrec.h"
50 #include "tree-data-ref.h"
51 #include "tree-scalar-evolution.h"
52 #include "tree-pass.h"
54 #include "value-prof.h"
55 #include "pointer-set.h"
59 #include "cloog/cloog.h"
62 static VEC (scop_p, heap) *current_scops;
64 /* Converts a GMP constant V to a tree and returns it. */
67 gmp_cst_to_tree (tree type, Value v)
69 return build_int_cst (type, value_get_si (v));
72 /* Returns true when BB is in REGION. */
75 bb_in_sese_p (basic_block bb, sese region)
77 return pointer_set_contains (SESE_REGION_BBS (region), bb);
80 /* Returns true when LOOP is in the SESE region R. */
83 loop_in_sese_p (struct loop *loop, sese r)
85 return (bb_in_sese_p (loop->header, r)
86 && bb_in_sese_p (loop->latch, r));
89 /* For a USE in BB, if BB is outside REGION, mark the USE in the
90 SESE_LIVEIN and SESE_LIVEOUT sets. */
93 sese_build_livein_liveouts_use (sese region, basic_block bb, tree use)
98 if (TREE_CODE (use) != SSA_NAME)
101 ver = SSA_NAME_VERSION (use);
102 def_bb = gimple_bb (SSA_NAME_DEF_STMT (use));
104 || !bb_in_sese_p (def_bb, region)
105 || bb_in_sese_p (bb, region))
108 if (!SESE_LIVEIN_VER (region, ver))
109 SESE_LIVEIN_VER (region, ver) = BITMAP_ALLOC (NULL);
111 bitmap_set_bit (SESE_LIVEIN_VER (region, ver), bb->index);
112 bitmap_set_bit (SESE_LIVEOUT (region), ver);
115 /* Marks for rewrite all the SSA_NAMES defined in REGION and that are
116 used in BB that is outside of the REGION. */
119 sese_build_livein_liveouts_bb (sese region, basic_block bb)
121 gimple_stmt_iterator bsi;
127 FOR_EACH_EDGE (e, ei, bb->succs)
128 for (bsi = gsi_start_phis (e->dest); !gsi_end_p (bsi); gsi_next (&bsi))
129 sese_build_livein_liveouts_use (region, bb,
130 PHI_ARG_DEF_FROM_EDGE (gsi_stmt (bsi), e));
132 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
133 FOR_EACH_SSA_TREE_OPERAND (var, gsi_stmt (bsi), iter, SSA_OP_ALL_USES)
134 sese_build_livein_liveouts_use (region, bb, var);
137 /* Build the SESE_LIVEIN and SESE_LIVEOUT for REGION. */
140 sese_build_livein_liveouts (sese region)
144 SESE_LIVEOUT (region) = BITMAP_ALLOC (NULL);
145 SESE_NUM_VER (region) = num_ssa_names;
146 SESE_LIVEIN (region) = XCNEWVEC (bitmap, SESE_NUM_VER (region));
149 sese_build_livein_liveouts_bb (region, bb);
152 /* Register basic blocks belonging to a region in a pointer set. */
155 register_bb_in_sese (basic_block entry_bb, basic_block exit_bb, sese region)
159 basic_block bb = entry_bb;
161 FOR_EACH_EDGE (e, ei, bb->succs)
163 if (!pointer_set_contains (SESE_REGION_BBS (region), e->dest) &&
164 e->dest->index != exit_bb->index)
166 pointer_set_insert (SESE_REGION_BBS (region), e->dest);
167 register_bb_in_sese (e->dest, exit_bb, region);
172 /* Builds a new SESE region from edges ENTRY and EXIT. */
175 new_sese (edge entry, edge exit)
177 sese res = XNEW (struct sese);
179 SESE_ENTRY (res) = entry;
180 SESE_EXIT (res) = exit;
181 SESE_REGION_BBS (res) = pointer_set_create ();
182 register_bb_in_sese (entry->dest, exit->dest, res);
184 SESE_LIVEOUT (res) = NULL;
185 SESE_NUM_VER (res) = 0;
186 SESE_LIVEIN (res) = NULL;
191 /* Deletes REGION. */
194 free_sese (sese region)
198 for (i = 0; i < SESE_NUM_VER (region); i++)
199 BITMAP_FREE (SESE_LIVEIN_VER (region, i));
201 if (SESE_LIVEIN (region))
202 free (SESE_LIVEIN (region));
204 if (SESE_LIVEOUT (region))
205 BITMAP_FREE (SESE_LIVEOUT (region));
207 pointer_set_destroy (SESE_REGION_BBS (region));
213 /* Debug the list of old induction variables for this SCOP. */
216 debug_oldivs (scop_p scop)
221 fprintf (stderr, "Old IVs:");
223 for (i = 0; VEC_iterate (name_tree, SCOP_OLDIVS (scop), i, oldiv); i++)
225 fprintf (stderr, "(");
226 print_generic_expr (stderr, oldiv->t, 0);
227 fprintf (stderr, ", %s, %d)\n", oldiv->name, oldiv->loop->num);
229 fprintf (stderr, "\n");
232 /* Debug the loops around basic block GB. */
235 debug_loop_vec (graphite_bb_p gb)
240 fprintf (stderr, "Loop Vec:");
242 for (i = 0; VEC_iterate (loop_p, GBB_LOOPS (gb), i, loop); i++)
243 fprintf (stderr, "%d: %d, ", i, loop ? loop->num : -1);
245 fprintf (stderr, "\n");
248 /* Returns true if stack ENTRY is a constant. */
251 iv_stack_entry_is_constant (iv_stack_entry *entry)
253 return entry->kind == iv_stack_entry_const;
256 /* Returns true if stack ENTRY is an induction variable. */
259 iv_stack_entry_is_iv (iv_stack_entry *entry)
261 return entry->kind == iv_stack_entry_iv;
264 /* Push (IV, NAME) on STACK. */
267 loop_iv_stack_push_iv (loop_iv_stack stack, tree iv, const char *name)
269 iv_stack_entry *entry = XNEW (iv_stack_entry);
270 name_tree named_iv = XNEW (struct name_tree);
273 named_iv->name = name;
275 entry->kind = iv_stack_entry_iv;
276 entry->data.iv = named_iv;
278 VEC_safe_push (iv_stack_entry_p, heap, *stack, entry);
281 /* Inserts a CONSTANT in STACK at INDEX. */
284 loop_iv_stack_insert_constant (loop_iv_stack stack, int index,
287 iv_stack_entry *entry = XNEW (iv_stack_entry);
289 entry->kind = iv_stack_entry_const;
290 entry->data.constant = constant;
292 VEC_safe_insert (iv_stack_entry_p, heap, *stack, index, entry);
295 /* Pops and frees an element out of STACK. */
298 loop_iv_stack_pop (loop_iv_stack stack)
300 iv_stack_entry_p entry = VEC_pop (iv_stack_entry_p, *stack);
302 free (entry->data.iv);
306 /* Get the IV at INDEX in STACK. */
309 loop_iv_stack_get_iv (loop_iv_stack stack, int index)
311 iv_stack_entry_p entry = VEC_index (iv_stack_entry_p, *stack, index);
312 iv_stack_entry_data data = entry->data;
314 return iv_stack_entry_is_iv (entry) ? data.iv->t : data.constant;
317 /* Get the IV from its NAME in STACK. */
320 loop_iv_stack_get_iv_from_name (loop_iv_stack stack, const char* name)
323 iv_stack_entry_p entry;
325 for (i = 0; VEC_iterate (iv_stack_entry_p, *stack, i, entry); i++)
327 name_tree iv = entry->data.iv;
328 if (!strcmp (name, iv->name))
335 /* Prints on stderr the contents of STACK. */
338 debug_loop_iv_stack (loop_iv_stack stack)
341 iv_stack_entry_p entry;
344 fprintf (stderr, "(");
346 for (i = 0; VEC_iterate (iv_stack_entry_p, *stack, i, entry); i++)
351 fprintf (stderr, " ");
353 if (iv_stack_entry_is_iv (entry))
355 name_tree iv = entry->data.iv;
356 fprintf (stderr, "%s:", iv->name);
357 print_generic_expr (stderr, iv->t, 0);
361 tree constant = entry->data.constant;
362 print_generic_expr (stderr, constant, 0);
363 fprintf (stderr, ":");
364 print_generic_expr (stderr, constant, 0);
368 fprintf (stderr, ")\n");
374 free_loop_iv_stack (loop_iv_stack stack)
377 iv_stack_entry_p entry;
379 for (i = 0; VEC_iterate (iv_stack_entry_p, *stack, i, entry); i++)
381 free (entry->data.iv);
385 VEC_free (iv_stack_entry_p, heap, *stack);
390 /* Structure containing the mapping between the CLooG's induction
391 variable and the type of the old induction variable. */
392 typedef struct ivtype_map_elt
395 const char *cloog_iv;
398 /* Print to stderr the element ELT. */
401 debug_ivtype_elt (ivtype_map_elt elt)
403 fprintf (stderr, "(%s, ", elt->cloog_iv);
404 print_generic_expr (stderr, elt->type, 0);
405 fprintf (stderr, ")\n");
408 /* Helper function for debug_ivtype_map. */
411 debug_ivtype_map_1 (void **slot, void *s ATTRIBUTE_UNUSED)
413 struct ivtype_map_elt *entry = (struct ivtype_map_elt *) *slot;
414 debug_ivtype_elt (entry);
418 /* Print to stderr all the elements of MAP. */
421 debug_ivtype_map (htab_t map)
423 htab_traverse (map, debug_ivtype_map_1, NULL);
426 /* Constructs a new SCEV_INFO_STR structure for VAR and INSTANTIATED_BELOW. */
428 static inline ivtype_map_elt
429 new_ivtype_map_elt (const char *cloog_iv, tree type)
433 res = XNEW (struct ivtype_map_elt);
434 res->cloog_iv = cloog_iv;
440 /* Computes a hash function for database element ELT. */
443 ivtype_map_elt_info (const void *elt)
445 return htab_hash_pointer (((const struct ivtype_map_elt *) elt)->cloog_iv);
448 /* Compares database elements E1 and E2. */
451 eq_ivtype_map_elts (const void *e1, const void *e2)
453 const struct ivtype_map_elt *elt1 = (const struct ivtype_map_elt *) e1;
454 const struct ivtype_map_elt *elt2 = (const struct ivtype_map_elt *) e2;
456 return (elt1->cloog_iv == elt2->cloog_iv);
461 /* Given a CLOOG_IV, returns the type that it should have in GCC land.
462 If the information is not available, i.e. in the case one of the
463 transforms created the loop, just return integer_type_node. */
466 gcc_type_for_cloog_iv (const char *cloog_iv, graphite_bb_p gbb)
468 struct ivtype_map_elt tmp;
471 tmp.cloog_iv = cloog_iv;
472 slot = htab_find_slot (GBB_CLOOG_IV_TYPES (gbb), &tmp, NO_INSERT);
475 return ((ivtype_map_elt) *slot)->type;
477 return integer_type_node;
480 /* Inserts constants derived from the USER_STMT argument list into the
481 STACK. This is needed to map old ivs to constants when loops have
485 loop_iv_stack_patch_for_consts (loop_iv_stack stack,
486 struct clast_user_stmt *user_stmt)
488 struct clast_stmt *t;
490 CloogStatement *cs = user_stmt->statement;
491 graphite_bb_p gbb = (graphite_bb_p) cloog_statement_usr (cs);
493 for (t = user_stmt->substitutions; t; t = t->next)
495 struct clast_expr *expr = (struct clast_expr *)
496 ((struct clast_assignment *)t)->RHS;
497 struct clast_term *term = (struct clast_term *) expr;
499 /* FIXME: What should be done with expr_bin, expr_red? */
500 if (expr->type == expr_term
503 loop_p loop = gbb_loop_at_index (gbb, index);
504 tree oldiv = oldiv_for_loop (GBB_SCOP (gbb), loop);
505 tree type = oldiv ? TREE_TYPE (oldiv) : integer_type_node;
506 tree value = gmp_cst_to_tree (type, term->val);
507 loop_iv_stack_insert_constant (stack, index, value);
513 /* Removes all constants in the iv STACK. */
516 loop_iv_stack_remove_constants (loop_iv_stack stack)
519 iv_stack_entry *entry;
521 for (i = 0; VEC_iterate (iv_stack_entry_p, *stack, i, entry);)
523 if (iv_stack_entry_is_constant (entry))
525 free (VEC_index (iv_stack_entry_p, *stack, i));
526 VEC_ordered_remove (iv_stack_entry_p, *stack, i);
533 /* Returns a new loop_to_cloog_loop_str structure. */
535 static inline struct loop_to_cloog_loop_str *
536 new_loop_to_cloog_loop_str (int loop_num,
538 CloogLoop *cloog_loop)
540 struct loop_to_cloog_loop_str *result;
542 result = XNEW (struct loop_to_cloog_loop_str);
543 result->loop_num = loop_num;
544 result->cloog_loop = cloog_loop;
545 result->loop_position = loop_position;
550 /* Hash function for SCOP_LOOP2CLOOG_LOOP hash table. */
553 hash_loop_to_cloog_loop (const void *elt)
555 return ((const struct loop_to_cloog_loop_str *) elt)->loop_num;
558 /* Equality function for SCOP_LOOP2CLOOG_LOOP hash table. */
561 eq_loop_to_cloog_loop (const void *el1, const void *el2)
563 const struct loop_to_cloog_loop_str *elt1, *elt2;
565 elt1 = (const struct loop_to_cloog_loop_str *) el1;
566 elt2 = (const struct loop_to_cloog_loop_str *) el2;
567 return elt1->loop_num == elt2->loop_num;
/* Compares two graphite bbs and returns an integer less than, equal to, or
   greater than zero if the first argument is considered to be respectively
   less than, equal to, or greater than the second.
   We compare using the lexicographic order of the static schedules.  */

static int
gbb_compare (const void *p_1, const void *p_2)
{
  const struct graphite_bb *const gbb_1
    = *(const struct graphite_bb *const *) p_1;
  const struct graphite_bb *const gbb_2
    = *(const struct graphite_bb *const *) p_2;

  return lambda_vector_compare (GBB_STATIC_SCHEDULE (gbb_1),
				gbb_nb_loops (gbb_1) + 1,
				GBB_STATIC_SCHEDULE (gbb_2),
				gbb_nb_loops (gbb_2) + 1);
}
589 /* Sort graphite bbs in SCOP. */
592 graphite_sort_gbbs (scop_p scop)
594 VEC (graphite_bb_p, heap) *bbs = SCOP_BBS (scop);
596 qsort (VEC_address (graphite_bb_p, bbs),
597 VEC_length (graphite_bb_p, bbs),
598 sizeof (graphite_bb_p), gbb_compare);
601 /* Dump conditions of a graphite basic block GBB on FILE. */
604 dump_gbb_conditions (FILE *file, graphite_bb_p gbb)
608 VEC (gimple, heap) *conditions = GBB_CONDITIONS (gbb);
610 if (VEC_empty (gimple, conditions))
613 fprintf (file, "\tbb %d\t: cond = {", GBB_BB (gbb)->index);
615 for (i = 0; VEC_iterate (gimple, conditions, i, stmt); i++)
616 print_gimple_stmt (file, stmt, 0, 0);
618 fprintf (file, "}\n");
621 /* Converts the graphite scheduling function into a cloog scattering
622 matrix. This scattering matrix is used to limit the possible cloog
623 output to valid programs in respect to the scheduling function.
625 SCATTERING_DIMENSIONS specifies the dimensionality of the scattering
626 matrix. CLooG 0.14.0 and previous versions require, that all scattering
627 functions of one CloogProgram have the same dimensionality, therefore we
628 allow to specify it. (Should be removed in future versions) */
631 schedule_to_scattering (graphite_bb_p gb, int scattering_dimensions)
634 scop_p scop = GBB_SCOP (gb);
636 int nb_iterators = gbb_nb_loops (gb);
638 /* The cloog scattering matrix consists of these colums:
640 scattering_dimensions cols = Scattering dimensions,
641 nb_iterators cols = bb's iterators,
642 scop_nb_params cols = Parameters,
647 scattering_dimensions = 5
657 s1 s2 s3 s4 s5 i p1 p2 1
658 1 0 0 0 0 0 0 0 -4 = 0
659 0 1 0 0 0 -1 0 0 0 = 0
660 0 0 1 0 0 0 0 0 -5 = 0 */
661 int nb_params = scop_nb_params (scop);
662 int nb_cols = 1 + scattering_dimensions + nb_iterators + nb_params + 1;
663 int col_const = nb_cols - 1;
664 int col_iter_offset = 1 + scattering_dimensions;
666 CloogMatrix *scat = cloog_matrix_alloc (scattering_dimensions, nb_cols);
668 gcc_assert (scattering_dimensions >= nb_iterators * 2 + 1);
670 /* Initialize the identity matrix. */
671 for (i = 0; i < scattering_dimensions; i++)
672 value_set_si (scat->p[i][i + 1], 1);
674 /* Textual order outside the first loop */
675 value_set_si (scat->p[0][col_const], -GBB_STATIC_SCHEDULE (gb)[0]);
677 /* For all surrounding loops. */
678 for (i = 0; i < nb_iterators; i++)
680 int schedule = GBB_STATIC_SCHEDULE (gb)[i + 1];
682 /* Iterations of this loop. */
683 value_set_si (scat->p[2 * i + 1][col_iter_offset + i], -1);
685 /* Textual order inside this loop. */
686 value_set_si (scat->p[2 * i + 2][col_const], -schedule);
692 /* Print the schedules of GB to FILE with INDENT white spaces before.
693 VERBOSITY determines how verbose the code pretty printers are. */
696 print_graphite_bb (FILE *file, graphite_bb_p gb, int indent, int verbosity)
698 CloogMatrix *scattering;
701 fprintf (file, "\nGBB (\n");
703 print_loops_bb (file, GBB_BB (gb), indent+2, verbosity);
707 fprintf (file, " (domain: \n");
708 cloog_matrix_print (file, GBB_DOMAIN (gb));
709 fprintf (file, " )\n");
712 if (GBB_STATIC_SCHEDULE (gb))
714 fprintf (file, " (static schedule: ");
715 print_lambda_vector (file, GBB_STATIC_SCHEDULE (gb),
716 gbb_nb_loops (gb) + 1);
717 fprintf (file, " )\n");
722 fprintf (file, " (contained loops: \n");
723 for (i = 0; VEC_iterate (loop_p, GBB_LOOPS (gb), i, loop); i++)
725 fprintf (file, " iterator %d => NULL \n", i);
727 fprintf (file, " iterator %d => loop %d \n", i,
729 fprintf (file, " )\n");
732 if (GBB_DATA_REFS (gb))
733 dump_data_references (file, GBB_DATA_REFS (gb));
735 if (GBB_CONDITIONS (gb))
737 fprintf (file, " (conditions: \n");
738 dump_gbb_conditions (file, gb);
739 fprintf (file, " )\n");
743 && GBB_STATIC_SCHEDULE (gb))
745 fprintf (file, " (scattering: \n");
746 scattering = schedule_to_scattering (gb, 2 * gbb_nb_loops (gb) + 1);
747 cloog_matrix_print (file, scattering);
748 cloog_matrix_free (scattering);
749 fprintf (file, " )\n");
752 fprintf (file, ")\n");
755 /* Print to STDERR the schedules of GB with VERBOSITY level. */
758 debug_gbb (graphite_bb_p gb, int verbosity)
760 print_graphite_bb (stderr, gb, 0, verbosity);
764 /* Print SCOP to FILE. VERBOSITY determines how verbose the pretty
768 print_scop (FILE *file, scop_p scop, int verbosity)
773 fprintf (file, "\nSCoP_%d_%d (\n",
774 SCOP_ENTRY (scop)->index, SCOP_EXIT (scop)->index);
776 fprintf (file, " (cloog: \n");
777 cloog_program_print (file, SCOP_PROG (scop));
778 fprintf (file, " )\n");
785 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
786 print_graphite_bb (file, gb, 0, verbosity);
789 fprintf (file, ")\n");
792 /* Print all the SCOPs to FILE. VERBOSITY determines how verbose the
793 code pretty printers are. */
796 print_scops (FILE *file, int verbosity)
801 for (i = 0; VEC_iterate (scop_p, current_scops, i, scop); i++)
802 print_scop (file, scop, verbosity);
805 /* Debug SCOP. VERBOSITY determines how verbose the code pretty
809 debug_scop (scop_p scop, int verbosity)
811 print_scop (stderr, scop, verbosity);
/* Debug all SCOPs from CURRENT_SCOPS.  VERBOSITY determines how
   verbose the code pretty printers are.  */

void
debug_scops (int verbosity)
{
  print_scops (stderr, verbosity);
}
823 /* Pretty print to FILE the SCOP in DOT format. */
826 dot_scop_1 (FILE *file, scop_p scop)
831 basic_block entry = SCOP_ENTRY (scop);
832 basic_block exit = SCOP_EXIT (scop);
834 fprintf (file, "digraph SCoP_%d_%d {\n", entry->index,
840 fprintf (file, "%d [shape=triangle];\n", bb->index);
843 fprintf (file, "%d [shape=box];\n", bb->index);
845 if (bb_in_sese_p (bb, SCOP_REGION (scop)))
846 fprintf (file, "%d [color=red];\n", bb->index);
848 FOR_EACH_EDGE (e, ei, bb->succs)
849 fprintf (file, "%d -> %d;\n", bb->index, e->dest->index);
852 fputs ("}\n\n", file);
855 /* Display SCOP using dotty. */
858 dot_scop (scop_p scop)
860 dot_scop_1 (stderr, scop);
863 /* Pretty print all SCoPs in DOT format and mark them with different colors.
864 If there are not enough colors, paint later SCoPs gray.
866 - "*" after the node number: entry of a SCoP,
867 - "#" after the node number: exit of a SCoP,
868 - "()" entry or exit not part of SCoP. */
871 dot_all_scops_1 (FILE *file)
880 /* Disable debugging while printing graph. */
881 int tmp_dump_flags = dump_flags;
884 fprintf (file, "digraph all {\n");
888 int part_of_scop = false;
890 /* Use HTML for every bb label. So we are able to print bbs
891 which are part of two different SCoPs, with two different
892 background colors. */
893 fprintf (file, "%d [label=<\n <TABLE BORDER=\"0\" CELLBORDER=\"1\" ",
895 fprintf (file, "CELLSPACING=\"0\">\n");
897 /* Select color for SCoP. */
898 for (i = 0; VEC_iterate (scop_p, current_scops, i, scop); i++)
899 if (bb_in_sese_p (bb, SCOP_REGION (scop))
900 || (SCOP_EXIT (scop) == bb)
901 || (SCOP_ENTRY (scop) == bb))
960 fprintf (file, " <TR><TD WIDTH=\"50\" BGCOLOR=\"%s\">", color);
962 if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
963 fprintf (file, " (");
965 if (bb == SCOP_ENTRY (scop)
966 && bb == SCOP_EXIT (scop))
967 fprintf (file, " %d*# ", bb->index);
968 else if (bb == SCOP_ENTRY (scop))
969 fprintf (file, " %d* ", bb->index);
970 else if (bb == SCOP_EXIT (scop))
971 fprintf (file, " %d# ", bb->index);
973 fprintf (file, " %d ", bb->index);
975 if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
978 fprintf (file, "</TD></TR>\n");
984 fprintf (file, " <TR><TD WIDTH=\"50\" BGCOLOR=\"#ffffff\">");
985 fprintf (file, " %d </TD></TR>\n", bb->index);
988 fprintf (file, " </TABLE>>, shape=box, style=\"setlinewidth(0)\"]\n");
993 FOR_EACH_EDGE (e, ei, bb->succs)
994 fprintf (file, "%d -> %d;\n", bb->index, e->dest->index);
997 fputs ("}\n\n", file);
999 /* Enable debugging again. */
1000 dump_flags = tmp_dump_flags;
/* Display all SCoPs using dotty.  */

void
dot_all_scops (void)
{
  /* When debugging, enable the following code.  This cannot be used
     in production compilers because it calls "system".  */
#if 0
  FILE *stream = fopen ("/tmp/allscops.dot", "w");
  gcc_assert (stream);

  dot_all_scops_1 (stream);
  fclose (stream);

  system ("dotty /tmp/allscops.dot");
#else
  dot_all_scops_1 (stderr);
#endif
}
1023 /* Returns the outermost loop in SCOP that contains BB. */
1025 static struct loop *
1026 outermost_loop_in_scop (scop_p scop, basic_block bb)
1030 nest = bb->loop_father;
1031 while (loop_outer (nest)
1032 && loop_in_sese_p (loop_outer (nest), SCOP_REGION (scop)))
1033 nest = loop_outer (nest);
1038 /* Returns the block preceding the entry of SCOP. */
1041 block_before_scop (scop_p scop)
1043 return SESE_ENTRY (SCOP_REGION (scop))->src;
1046 /* Return true when EXPR is an affine function in LOOP with parameters
1047 instantiated relative to SCOP_ENTRY. */
1050 loop_affine_expr (basic_block scop_entry, struct loop *loop, tree expr)
1053 tree scev = analyze_scalar_evolution (loop, expr);
1055 scev = instantiate_scev (scop_entry, loop, scev);
1057 return (evolution_function_is_invariant_p (scev, n)
1058 || evolution_function_is_affine_multivariate_p (scev, n));
1061 /* Return false if the tree_code of the operand OP or any of its operands
1062 is component_ref. */
1065 exclude_component_ref (tree op)
1072 if (TREE_CODE (op) == COMPONENT_REF)
1076 len = TREE_OPERAND_LENGTH (op);
1077 for (i = 0; i < len; ++i)
1079 if (!exclude_component_ref (TREE_OPERAND (op, i)))
1088 /* Return true if the operand OP is simple. */
1091 is_simple_operand (loop_p loop, gimple stmt, tree op)
1093 /* It is not a simple operand when it is a declaration, */
1095 /* or a structure, */
1096 || AGGREGATE_TYPE_P (TREE_TYPE (op))
1097 /* or a memory access that cannot be analyzed by the data
1098 reference analysis. */
1099 || ((handled_component_p (op) || INDIRECT_REF_P (op))
1100 && !stmt_simple_memref_p (loop, stmt, op)))
1103 return exclude_component_ref (op);
1106 /* Return true only when STMT is simple enough for being handled by
1107 Graphite. This depends on SCOP_ENTRY, as the parametetrs are
1108 initialized relatively to this basic block. */
1111 stmt_simple_for_scop_p (basic_block scop_entry, gimple stmt)
1113 basic_block bb = gimple_bb (stmt);
1114 struct loop *loop = bb->loop_father;
1116 /* GIMPLE_ASM and GIMPLE_CALL may embed arbitrary side effects.
1117 Calls have side-effects, except those to const or pure
1119 if (gimple_has_volatile_ops (stmt)
1120 || (gimple_code (stmt) == GIMPLE_CALL
1121 && !(gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE)))
1122 || (gimple_code (stmt) == GIMPLE_ASM))
1125 switch (gimple_code (stmt))
1134 ssa_op_iter op_iter;
1135 enum tree_code code = gimple_cond_code (stmt);
1137 /* We can only handle this kind of conditional expressions.
1138 For inequalities like "if (i != 3 * k)" we need unions of
1139 polyhedrons. Expressions like "if (a)" or "if (a == 15)" need
1140 them for the else branch. */
1141 if (!(code == LT_EXPR
1144 || code == GE_EXPR))
1150 FOR_EACH_SSA_TREE_OPERAND (op, stmt, op_iter, SSA_OP_ALL_USES)
1151 if (!loop_affine_expr (scop_entry, loop, op))
1159 enum tree_code code = gimple_assign_rhs_code (stmt);
1161 switch (get_gimple_rhs_class (code))
1163 case GIMPLE_UNARY_RHS:
1164 case GIMPLE_SINGLE_RHS:
1165 return (is_simple_operand (loop, stmt, gimple_assign_lhs (stmt))
1166 && is_simple_operand (loop, stmt, gimple_assign_rhs1 (stmt)));
1168 case GIMPLE_BINARY_RHS:
1169 return (is_simple_operand (loop, stmt, gimple_assign_lhs (stmt))
1170 && is_simple_operand (loop, stmt, gimple_assign_rhs1 (stmt))
1171 && is_simple_operand (loop, stmt, gimple_assign_rhs2 (stmt)));
1173 case GIMPLE_INVALID_RHS:
1182 size_t n = gimple_call_num_args (stmt);
1183 tree lhs = gimple_call_lhs (stmt);
1185 if (lhs && !is_simple_operand (loop, stmt, lhs))
1188 for (i = 0; i < n; i++)
1189 if (!is_simple_operand (loop, stmt, gimple_call_arg (stmt, i)))
1196 /* These nodes cut a new scope. */
1203 /* Returns the statement of BB that contains a harmful operation: that
1204 can be a function call with side effects, the induction variables
1205 are not linear with respect to SCOP_ENTRY, etc. The current open
1206 scop should end before this statement. */
1209 harmful_stmt_in_bb (basic_block scop_entry, basic_block bb)
1211 gimple_stmt_iterator gsi;
1214 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1215 if (!stmt_simple_for_scop_p (scop_entry, gsi_stmt (gsi)))
1216 return gsi_stmt (gsi);
1218 stmt = last_stmt (bb);
1219 if (stmt && gimple_code (stmt) == GIMPLE_COND)
1221 tree lhs = gimple_cond_lhs (stmt);
1222 tree rhs = gimple_cond_rhs (stmt);
1224 if (TREE_CODE (TREE_TYPE (lhs)) == REAL_TYPE
1225 || TREE_CODE (TREE_TYPE (rhs)) == REAL_TYPE)
1232 /* Returns true when BB will be represented in graphite. Return false
1233 for the basic blocks that contain code eliminated in the code
1234 generation pass: i.e. induction variables and exit conditions. */
1237 graphite_stmt_p (scop_p scop, basic_block bb,
1238 VEC (data_reference_p, heap) *drs)
1240 gimple_stmt_iterator gsi;
1241 loop_p loop = bb->loop_father;
1243 if (VEC_length (data_reference_p, drs) > 0)
1246 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1248 gimple stmt = gsi_stmt (gsi);
1250 switch (gimple_code (stmt))
1252 /* Control flow expressions can be ignored, as they are
1253 represented in the iteration domains and will be
1254 regenerated by graphite. */
1262 tree var = gimple_assign_lhs (stmt);
1263 var = analyze_scalar_evolution (loop, var);
1264 var = instantiate_scev (block_before_scop (scop), loop, var);
1266 if (chrec_contains_undetermined (var))
1280 /* Store the GRAPHITE representation of BB. */
1283 new_graphite_bb (scop_p scop, basic_block bb)
1285 struct graphite_bb *gbb;
1286 VEC (data_reference_p, heap) *drs = VEC_alloc (data_reference_p, heap, 5);
1287 struct loop *nest = outermost_loop_in_scop (scop, bb);
1288 gimple_stmt_iterator gsi;
1290 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1291 find_data_references_in_stmt (nest, gsi_stmt (gsi), &drs);
1293 if (!graphite_stmt_p (scop, bb, drs))
1295 free_data_refs (drs);
1299 gbb = XNEW (struct graphite_bb);
1302 GBB_SCOP (gbb) = scop;
1303 GBB_DATA_REFS (gbb) = drs;
1304 GBB_DOMAIN (gbb) = NULL;
1305 GBB_CONDITIONS (gbb) = NULL;
1306 GBB_CONDITION_CASES (gbb) = NULL;
1307 GBB_LOOPS (gbb) = NULL;
1308 GBB_STATIC_SCHEDULE (gbb) = NULL;
1309 GBB_CLOOG_IV_TYPES (gbb) = NULL;
1310 VEC_safe_push (graphite_bb_p, heap, SCOP_BBS (scop), gbb);
1316 free_graphite_bb (struct graphite_bb *gbb)
1318 if (GBB_DOMAIN (gbb))
1319 cloog_matrix_free (GBB_DOMAIN (gbb));
1321 if (GBB_CLOOG_IV_TYPES (gbb))
1322 htab_delete (GBB_CLOOG_IV_TYPES (gbb));
1324 /* FIXME: free_data_refs is disabled for the moment, but should be
1327 free_data_refs (GBB_DATA_REFS (gbb)); */
1329 VEC_free (gimple, heap, GBB_CONDITIONS (gbb));
1330 VEC_free (gimple, heap, GBB_CONDITION_CASES (gbb));
1331 VEC_free (loop_p, heap, GBB_LOOPS (gbb));
1332 GBB_BB (gbb)->aux = 0;
1338 /* Structure containing the mapping between the old names and the new
1339 names used after block copy in the new loop context. */
1340 typedef struct rename_map_elt
1342 tree old_name, new_name;
1346 /* Print to stderr the element ELT. */
1349 debug_rename_elt (rename_map_elt elt)
1351 fprintf (stderr, "(");
1352 print_generic_expr (stderr, elt->old_name, 0);
1353 fprintf (stderr, ", ");
1354 print_generic_expr (stderr, elt->new_name, 0);
1355 fprintf (stderr, ")\n");
1358 /* Helper function for debug_rename_map. */
1361 debug_rename_map_1 (void **slot, void *s ATTRIBUTE_UNUSED)
1363 struct rename_map_elt *entry = (struct rename_map_elt *) *slot;
1364 debug_rename_elt (entry);
1368 /* Print to stderr all the elements of MAP. */
1371 debug_rename_map (htab_t map)
1373 htab_traverse (map, debug_rename_map_1, NULL);
1376 /* Constructs a new SCEV_INFO_STR structure for VAR and INSTANTIATED_BELOW. */
1378 static inline rename_map_elt
1379 new_rename_map_elt (tree old_name, tree new_name)
1383 res = XNEW (struct rename_map_elt);
1384 res->old_name = old_name;
1385 res->new_name = new_name;
1390 /* Computes a hash function for database element ELT. */
1393 rename_map_elt_info (const void *elt)
1395 return htab_hash_pointer (((const struct rename_map_elt *) elt)->old_name);
1398 /* Compares database elements E1 and E2. */
1401 eq_rename_map_elts (const void *e1, const void *e2)
1403 const struct rename_map_elt *elt1 = (const struct rename_map_elt *) e1;
1404 const struct rename_map_elt *elt2 = (const struct rename_map_elt *) e2;
1406 return (elt1->old_name == elt2->old_name);
1409 /* Returns the new name associated to OLD_NAME in MAP. */
1412 get_new_name_from_old_name (htab_t map, tree old_name)
1414 struct rename_map_elt tmp;
1417 tmp.old_name = old_name;
1418 slot = htab_find_slot (map, &tmp, NO_INSERT);
1421 return ((rename_map_elt) *slot)->new_name;
1428 /* Creates a new scop starting with ENTRY. */
1431 new_scop (edge entry, edge exit)
1433 scop_p scop = XNEW (struct scop);
1435 gcc_assert (entry && exit);
1437 SCOP_REGION (scop) = new_sese (entry, exit);
1438 SCOP_BBS (scop) = VEC_alloc (graphite_bb_p, heap, 3);
1439 SCOP_OLDIVS (scop) = VEC_alloc (name_tree, heap, 3);
1440 SCOP_LOOPS (scop) = BITMAP_ALLOC (NULL);
1441 SCOP_LOOP_NEST (scop) = VEC_alloc (loop_p, heap, 3);
1442 SCOP_ADD_PARAMS (scop) = true;
1443 SCOP_PARAMS (scop) = VEC_alloc (name_tree, heap, 3);
1444 SCOP_PROG (scop) = cloog_program_malloc ();
1445 cloog_program_set_names (SCOP_PROG (scop), cloog_names_malloc ());
1446 SCOP_LOOP2CLOOG_LOOP (scop) = htab_create (10, hash_loop_to_cloog_loop,
1447 eq_loop_to_cloog_loop,
1449 SCOP_LIVEOUT_RENAMES (scop) = htab_create (10, rename_map_elt_info,
1450 eq_rename_map_elts, free);
1457 free_scop (scop_p scop)
1461 struct graphite_bb *gb;
1464 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
1465 free_graphite_bb (gb);
1467 VEC_free (graphite_bb_p, heap, SCOP_BBS (scop));
1468 BITMAP_FREE (SCOP_LOOPS (scop));
1469 VEC_free (loop_p, heap, SCOP_LOOP_NEST (scop));
1471 for (i = 0; VEC_iterate (name_tree, SCOP_OLDIVS (scop), i, iv); i++)
1473 VEC_free (name_tree, heap, SCOP_OLDIVS (scop));
1475 for (i = 0; VEC_iterate (name_tree, SCOP_PARAMS (scop), i, p); i++)
1478 VEC_free (name_tree, heap, SCOP_PARAMS (scop));
1479 cloog_program_free (SCOP_PROG (scop));
1480 htab_delete (SCOP_LOOP2CLOOG_LOOP (scop));
1481 htab_delete (SCOP_LIVEOUT_RENAMES (scop));
1482 free_sese (SCOP_REGION (scop));
1486 /* Deletes all scops in SCOPS. */
1489 free_scops (VEC (scop_p, heap) *scops)
1494 for (i = 0; VEC_iterate (scop_p, scops, i, scop); i++)
1497 VEC_free (scop_p, heap, scops);
/* Classification of a basic block while detecting SCoPs.  */
typedef enum gbb_type {
  GBB_UNKNOWN,
  GBB_LOOP_SING_EXIT_HEADER,
  GBB_LOOP_MULT_EXIT_HEADER,
  GBB_LOOP_EXIT,
  GBB_COND_HEADER,
  GBB_SIMPLE,
  GBB_LAST
} gbb_type;
1510 /* Detect the type of BB. Loop headers are only marked, if they are
1511 new. This means their loop_father is different to LAST_LOOP.
1512 Otherwise they are treated like any other bb and their type can be
1516 get_bb_type (basic_block bb, struct loop *last_loop)
1518 VEC (basic_block, heap) *dom;
1520 struct loop *loop = bb->loop_father;
1522 /* Check, if we entry into a new loop. */
1523 if (loop != last_loop)
1525 if (single_exit (loop) != NULL)
1526 return GBB_LOOP_SING_EXIT_HEADER;
1527 else if (loop->num != 0)
1528 return GBB_LOOP_MULT_EXIT_HEADER;
1530 return GBB_COND_HEADER;
1533 dom = get_dominated_by (CDI_DOMINATORS, bb);
1534 nb_dom = VEC_length (basic_block, dom);
1535 VEC_free (basic_block, heap, dom);
1540 nb_suc = VEC_length (edge, bb->succs);
1542 if (nb_dom == 1 && nb_suc == 1)
1545 return GBB_COND_HEADER;
1548 /* A SCoP detection region, defined using bbs as borders.
1549 All control flow touching this region, comes in passing basic_block ENTRY and
1550 leaves passing basic_block EXIT. By using bbs instead of edges for the
1551 borders we are able to represent also regions that do not have a single
1553 But as they have a single entry basic_block and a single exit basic_block, we
1554 are able to generate for every sd_region a single entry and exit edge.
1561 / \ This region contains: {3, 4, 5, 6, 7, 8}
1569 typedef struct sd_region_p
1571 /* The entry bb dominates all bbs in the sd_region. It is part of the
1575 /* The exit bb postdominates all bbs in the sd_region, but is not
1576 part of the region. */
/* Declare VEC (sd_region, heap) so detection can collect regions.  */
1580 DEF_VEC_O(sd_region);
1581 DEF_VEC_ALLOC_O(sd_region, heap);
1584 /* Moves the sd_regions from SOURCE to TARGET and cleans up SOURCE:
   every element is pushed onto *TARGET and *SOURCE is freed.  */
1587 move_sd_regions (VEC (sd_region, heap) **source, VEC (sd_region, heap) **target)
1592 for (i = 0; VEC_iterate (sd_region, *source, i, s); i++)
1593 VEC_safe_push (sd_region, heap, *target, s);
1595 VEC_free (sd_region, heap, *source);
1598 /* Return true when it is not possible to represent the upper bound of
1599 LOOP in the polyhedral representation: the latch-execution count is
   undetermined, or it is not a linear scalar evolution.  */
1602 graphite_cannot_represent_loop_niter (loop_p loop)
1604 tree niter = number_of_latch_executions (loop);
1606 return chrec_contains_undetermined (niter)
1607 || !scev_is_linear_expression (niter);
1609 /* Store information needed by scopdet_* functions. */
1613 /* Where the last open scop would stop if the current BB is harmful. */
1616 /* Where the next scop would start if the current BB is harmful. */
1619 /* The bb or one of its children contains open loop exits. That means
1620 loop exit nodes that are not surrounded by a loop dominated by bb. */
1623 /* The bb or one of its children contains only structures we can handle. */
/* Forward declaration: scopdet_basic_block_info and build_scops_1 are
   mutually recursive over the dominance tree.  */
1628 static struct scopdet_info build_scops_1 (basic_block, VEC (sd_region, heap) **,
1631 /* Calculates BB infos. If bb is difficult we add valid SCoPs dominated by BB
1632 to SCOPS. TYPE is the gbb_type of BB. */
1634 static struct scopdet_info
1635 scopdet_basic_block_info (basic_block bb, VEC (sd_region, heap) **scops,
1638 struct loop *loop = bb->loop_father;
1639 struct scopdet_info result;
1642 /* XXX: ENTRY_BLOCK_PTR could be optimized in later steps. */
1643 stmt = harmful_stmt_in_bb (ENTRY_BLOCK_PTR, bb);
1644 result.difficult = (stmt != NULL);
1651 result.exits = false;
1654 /* Mark bbs terminating a SESE region difficult, if they start
1656 if (VEC_length (edge, bb->succs) > 1)
1657 result.difficult = true;
1662 result.next = single_succ (bb);
1663 result.exits = false;
/* Single-exit loop header: recurse into the loop body, then continue
   the scan after the loop's unique exit.  */
1667 case GBB_LOOP_SING_EXIT_HEADER:
1669 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap,3);
1670 struct scopdet_info sinfo;
1672 sinfo = build_scops_1 (bb, &tmp_scops, loop);
1674 result.last = single_exit (bb->loop_father)->src;
1675 result.next = single_exit (bb->loop_father)->dest;
1677 /* If we do not dominate result.next, remove it. It's either
1678 the EXIT_BLOCK_PTR, or another bb dominates it and will
1679 call the scop detection for this bb. */
1680 if (!dominated_by_p (CDI_DOMINATORS, result.next, bb))
1683 if (result.last->loop_father != loop)
1686 if (graphite_cannot_represent_loop_niter (loop))
1687 result.difficult = true;
1689 if (sinfo.difficult)
1690 move_sd_regions (&tmp_scops, scops)
1692 VEC_free (sd_region, heap, tmp_scops);
1694 result.exits = false;
1695 result.difficult |= sinfo.difficult;
1699 case GBB_LOOP_MULT_EXIT_HEADER:
1701 /* XXX: For now we just do not join loops with multiple exits. If the
1702 exits lead to the same bb it may be possible to join the loop. */
1703 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap, 3);
1704 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1707 build_scops_1 (bb, &tmp_scops, loop);
1709 /* Scan the code dominated by this loop. This means all bbs, that are
1710 are dominated by a bb in this loop, but are not part of this loop.
1713 - The loop exit destination is dominated by the exit sources.
1715 TODO: We miss here the more complex cases:
1716 - The exit destinations are dominated by another bb inside the
1718 - The loop dominates bbs, that are not exit destinations. */
1719 for (i = 0; VEC_iterate (edge, exits, i, e); i++)
1720 if (e->src->loop_father == loop
1721 && dominated_by_p (CDI_DOMINATORS, e->dest, e->src))
1723 /* Pass loop_outer to recognize e->dest as loop header in
1725 if (e->dest->loop_father->header == e->dest)
1726 build_scops_1 (e->dest, &tmp_scops,
1727 loop_outer (e->dest->loop_father));
1729 build_scops_1 (e->dest, &tmp_scops, e->dest->loop_father);
/* Multiple-exit loops are always "difficult": the scops found inside
   are kept, but the loop itself cannot be joined into a scop.  */
1734 result.difficult = true;
1735 result.exits = false;
1736 move_sd_regions (&tmp_scops, scops);
1737 VEC_free (edge, heap, exits);
1740 case GBB_COND_HEADER:
1742 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap, 3);
1743 struct scopdet_info sinfo;
1744 VEC (basic_block, heap) *dominated;
1747 basic_block last_bb = NULL;
1749 result.exits = false;
1751 /* First check the successors of BB, and check if it is possible to join
1752 the different branches. */
1753 for (i = 0; VEC_iterate (edge, bb->succs, i, e); i++)
1755 /* Ignore loop exits. They will be handled after the loop body. */
1756 if (is_loop_exit (loop, e->dest))
1758 result.exits = true;
1762 /* Do not follow edges that lead to the end of the
1763 conditions block. For example, in
1773 the edge from 0 => 6. Only check if all paths lead to
1776 if (!single_pred_p (e->dest))
1778 /* Check, if edge leads directly to the end of this
1785 if (e->dest != last_bb)
1786 result.difficult = true;
1791 if (!dominated_by_p (CDI_DOMINATORS, e->dest, bb))
1793 result.difficult = true;
1797 sinfo = build_scops_1 (e->dest, &tmp_scops, loop);
1799 result.exits |= sinfo.exits;
1800 result.last = sinfo.last;
1801 result.difficult |= sinfo.difficult;
1803 /* Checks, if all branches end at the same point.
1804 If that is true, the condition stays joinable.
1805 Have a look at the example above. */
1806 if (sinfo.last && single_succ_p (sinfo.last))
1808 basic_block next_tmp = single_succ (sinfo.last);
1813 if (next_tmp != last_bb)
1814 result.difficult = true;
1817 result.difficult = true;
1820 /* If the condition is joinable. */
1821 if (!result.exits && !result.difficult)
1823 /* Only return a next pointer if we dominate this pointer.
1824 Otherwise it will be handled by the bb dominating it. */
1825 if (dominated_by_p (CDI_DOMINATORS, last_bb, bb) && last_bb != bb)
1826 result.next = last_bb;
1830 VEC_free (sd_region, heap, tmp_scops);
1834 /* Scan remaining bbs dominated by BB. */
1835 dominated = get_dominated_by (CDI_DOMINATORS, bb);
1837 for (i = 0; VEC_iterate (basic_block, dominated, i, dom_bb); i++)
1839 /* Ignore loop exits: they will be handled after the loop body. */
1840 if (loop_depth (find_common_loop (loop, dom_bb->loop_father))
1841 < loop_depth (loop))
1843 result.exits = true;
1847 /* Ignore the bbs processed above. */
1848 if (single_pred_p (dom_bb) && single_pred (dom_bb) == bb)
1851 if (loop_depth (loop) > loop_depth (dom_bb->loop_father))
1852 sinfo = build_scops_1 (dom_bb, &tmp_scops, loop_outer (loop));
1854 sinfo = build_scops_1 (dom_bb, &tmp_scops, loop);
1857 result.exits |= sinfo.exits;
1858 result.difficult = true;
1862 VEC_free (basic_block, heap, dominated);
1865 move_sd_regions (&tmp_scops, scops);
1877 /* Creates the SCoPs and writes entry and exit points for every SCoP.
   Walks the dominance tree starting at CURRENT, opening a region on
   the first "easy" block and closing it on the first harmful one;
   completed regions are pushed onto *SCOPS.  LOOP is the loop of the
   previously visited block (see get_bb_type).  */
1879 static struct scopdet_info
1880 build_scops_1 (basic_block current, VEC (sd_region, heap) **scops, loop_p loop)
1882 bool in_scop = false;
1883 sd_region open_scop;
1884 struct scopdet_info sinfo;
1886 /* Initialize result. */
1887 struct scopdet_info result;
1888 result.exits = false;
1889 result.difficult = false;
1892 open_scop.entry = NULL;
1893 open_scop.exit = NULL;
1896 /* Loop over the dominance tree. If we meet a difficult bb, close
1897 the current SCoP. Loop and condition header start a new layer,
1898 and can only be added if all bbs in deeper layers are simple. */
1899 while (current != NULL)
1901 sinfo = scopdet_basic_block_info (current, scops, get_bb_type (current,
1904 if (!in_scop && !(sinfo.exits || sinfo.difficult))
1906 open_scop.entry = current;
1907 open_scop.exit = NULL;
1910 else if (in_scop && (sinfo.exits || sinfo.difficult))
1912 open_scop.exit = current;
1913 VEC_safe_push (sd_region, heap, *scops, &open_scop);
1917 result.difficult |= sinfo.difficult;
1918 result.exits |= sinfo.exits;
1920 current = sinfo.next;
1923 /* Try to close open_scop, if we are still in an open SCoP.
   Prefer a successor of the last block that it post-dominates;
   otherwise fall back to the last block itself.  */
1929 for (i = 0; VEC_iterate (edge, sinfo.last->succs, i, e); i++)
1930 if (dominated_by_p (CDI_POST_DOMINATORS, sinfo.last, e->dest))
1931 open_scop.exit = e->dest;
1933 if (!open_scop.exit && open_scop.entry != sinfo.last)
1934 open_scop.exit = sinfo.last;
1937 VEC_safe_push (sd_region, heap, *scops, &open_scop);
1941 result.last = sinfo.last;
1945 /* Checks if a bb is contained in REGION: BB must be dominated by the
   region's entry and must not lie past the region's exit (unless the
   exit loops back and is itself dominated by the entry).  */
1948 bb_in_sd_region (basic_block bb, sd_region *region)
1950 return dominated_by_p (CDI_DOMINATORS, bb, region->entry)
1951 && !(dominated_by_p (CDI_DOMINATORS, bb, region->exit)
1952 && !dominated_by_p (CDI_DOMINATORS, region->entry,
1956 /* Returns the single entry edge of REGION; if it does not exist, NULL.  */
1959 find_single_entry_edge (sd_region *region)
/* An entry edge comes from a block outside the region into the
   region's entry block.  */
1965 FOR_EACH_EDGE (e, ei, region->entry->preds)
1966 if (!bb_in_sd_region (e->src, region))
1981 /* Returns the single exit edge of REGION; if it does not exist, NULL.  */
1984 find_single_exit_edge (sd_region *region)
/* An exit edge comes from a block inside the region into the region's
   exit block (which is itself outside the region).  */
1990 FOR_EACH_EDGE (e, ei, region->exit->preds)
1991 if (bb_in_sd_region (e->src, region))
2006 /* Create a single entry edge for REGION by splitting its entry block
   when multiple edges enter from outside.  No-op if the region already
   has a single entry edge.  */
2009 create_single_entry_edge (sd_region *region)
2011 if (find_single_entry_edge (region))
2014 /* There are multiple predecessors for bb_3
2027 There are two edges (1->3, 2->3), that point from outside into the region,
2028 and another one (5->3), a loop latch, lead to bb_3.
2036 | |\ (3.0 -> 3.1) = single entry edge
2045 If the loop is part of the SCoP, we have to redirect the loop latches.
2051 | | (3.0 -> 3.1) = entry edge
2060 if (region->entry->loop_father->header != region->entry
2061 || dominated_by_p (CDI_DOMINATORS,
2062 loop_latch_edge (region->entry->loop_father)->src,
/* Split after labels so phi nodes and labels stay with the original
   block; the forwarder's destination becomes the new region entry.  */
2065 edge forwarder = split_block_after_labels (region->entry);
2066 region->entry = forwarder->dest;
2069 /* This case is never executed, as the loop headers seem always to have a
2070 single edge pointing from outside into the loop. */
2073 #ifdef ENABLE_CHECKING
2074 gcc_assert (find_single_entry_edge (region));
2078 /* Check if the sd_region, mentioned in EDGE, has no exit bb.  Used as
   the predicate passed to make_forwarder_block; E->aux carries the
   region pointer (or NULL for edges outside any region).  */
2081 sd_region_without_exit (edge e)
2083 sd_region *r = (sd_region *) e->aux;
2086 return r->exit == NULL;
2091 /* Create a single exit edge for REGION.  No-op if one already exists;
   otherwise a forwarder block collects all the region's exit edges.  */
2094 create_single_exit_edge (sd_region *region)
2098 edge forwarder = NULL;
2101 if (find_single_exit_edge (region))
2104 /* We create a forwarder bb (5) for all edges leaving this region
2105 (3->5, 4->5). All other edges leading to the same bb, are moved
2106 to a new bb (6). If these edges were part of another region (2->5)
2107 we update the region->exit pointer, of this region.
2109 To identify which edge belongs to which region we depend on the e->aux
2110 pointer in every edge. It points to the region of the edge or to NULL,
2111 if the edge is not part of any region.
2113 1 2 3 4 1->5 no region, 2->5 region->exit = 5,
2114 \| |/ 3->5 region->exit = NULL, 4->5 region->exit = NULL
2119 1 2 3 4 1->6 no region, 2->6 region->exit = 6,
2120 | | \/ 3->5 no region, 4->5 no region,
2122 \| / 5->6 region->exit = 6
2125 Now there is only a single exit edge (5->6). */
2126 exit = region->exit;
2127 region->exit = NULL;
2128 forwarder = make_forwarder_block (exit, &sd_region_without_exit, NULL);
2130 /* Unmark the edges, that are no longer exit edges. */
2131 FOR_EACH_EDGE (e, ei, forwarder->src->preds)
2135 /* Mark the new exit edge. */
2136 single_succ_edge (forwarder->src)->aux = region;
2138 /* Update the exit bb of all regions, where exit edges lead to
2140 FOR_EACH_EDGE (e, ei, forwarder->dest->preds)
2142 ((sd_region *) e->aux)->exit = forwarder->dest;
2144 #ifdef ENABLE_CHECKING
2145 gcc_assert (find_single_exit_edge (region));
2149 /* Unmark the exit edges of all REGIONS: clears the e->aux markers.
2150 See comment in "create_single_exit_edge". */
2153 unmark_exit_edges (VEC (sd_region, heap) *regions)
2160 for (i = 0; VEC_iterate (sd_region, regions, i, s); i++)
2161 FOR_EACH_EDGE (e, ei, s->exit->preds)
2166 /* Mark the exit edges of all REGIONS: each edge from inside a region
2167 to its exit block gets e->aux pointed at that region.
   See comment in "create_single_exit_edge". */
2170 mark_exit_edges (VEC (sd_region, heap) *regions)
2177 for (i = 0; VEC_iterate (sd_region, regions, i, s); i++)
2178 FOR_EACH_EDGE (e, ei, s->exit->preds)
2179 if (bb_in_sd_region (e->src, s))
2183 /* Free and compute again all the dominators information, both the
   dominator and post-dominator trees.  Irreducible loops are re-marked
   first.  */
2186 recompute_all_dominators (void)
2188 mark_irreducible_loops ();
2189 free_dominance_info (CDI_DOMINATORS);
2190 free_dominance_info (CDI_POST_DOMINATORS);
2191 calculate_dominance_info (CDI_DOMINATORS);
2192 calculate_dominance_info (CDI_POST_DOMINATORS);
2195 /* Verifies properties that GRAPHITE should maintain during translation:
   loop structure, both dominance trees and loop-closed SSA form.
   Compiled to checks only with ENABLE_CHECKING.  */
2198 graphite_verify (void)
2200 #ifdef ENABLE_CHECKING
2201 verify_loop_structure ();
2202 verify_dominators (CDI_DOMINATORS);
2203 verify_dominators (CDI_POST_DOMINATORS);
2205 verify_loop_closed_ssa ();
2209 /* Create for all scop regions a single entry and a single exit edge.
   Exit edges must be marked (via e->aux) before creating the exit
   forwarders, and unmarked afterwards; the CFG surgery requires
   fixing the loop structure at the end.  */
2212 create_sese_edges (VEC (sd_region, heap) *regions)
2217 for (i = 0; VEC_iterate (sd_region, regions, i, s); i++)
2218 create_single_entry_edge (s);
2220 mark_exit_edges (regions);
2222 for (i = 0; VEC_iterate (sd_region, regions, i, s); i++)
2223 create_single_exit_edge (s);
2225 unmark_exit_edges (regions);
2227 fix_loop_structure (NULL);
2229 #ifdef ENABLE_CHECKING
2230 verify_loop_structure ();
2231 verify_dominators (CDI_DOMINATORS);
2236 /* Create graphite SCoPs from an array of scop detection regions and
   push them onto the global current_scops vector.  */
2239 build_graphite_scops (VEC (sd_region, heap) *scop_regions)
2244 for (i = 0; VEC_iterate (sd_region, scop_regions, i, s); i++)
2246 edge entry = find_single_entry_edge (s);
2247 edge exit = find_single_exit_edge (s);
2248 scop_p scop = new_scop (entry, exit);
2249 VEC_safe_push (scop_p, heap, current_scops, scop);
2251 /* Are there overlapping SCoPs?  With checking enabled, assert that no
   region's entry lies inside another region.  */
2252 #ifdef ENABLE_CHECKING
2257 for (j = 0; VEC_iterate (sd_region, scop_regions, j, s2); j++)
2259 gcc_assert (!bb_in_sd_region (s->entry, s2));
2265 /* Find static control parts: run the detection from the function's
   entry successor, give every region single entry/exit edges, and
   turn the regions into graphite SCoPs.  */
2270 struct loop *loop = current_loops->tree_root;
2271 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap, 3);
2273 build_scops_1 (single_succ (ENTRY_BLOCK_PTR), &tmp_scops, loop);
2274 create_sese_edges (tmp_scops);
2275 build_graphite_scops (tmp_scops);
2276 VEC_free (sd_region, heap, tmp_scops);
2279 /* Gather the basic blocks belonging to the SCOP using an explicit
   DFS stack.  The push order below determines the order of
   SCOP_BBS and is relied upon later; do not reorder it.  */
2282 build_scop_bbs (scop_p scop)
2284 basic_block *stack = XNEWVEC (basic_block, n_basic_blocks + 1);
2285 sbitmap visited = sbitmap_alloc (last_basic_block);
2288 sbitmap_zero (visited);
2289 stack[sp++] = SCOP_ENTRY (scop);
2293 basic_block bb = stack[--sp];
2294 int depth = loop_depth (bb->loop_father);
2295 int num = bb->loop_father->num;
2299 /* Scop's exit is not in the scop. Exclude also bbs, which are
2300 dominated by the SCoP exit. These are e.g. loop latches. */
2301 if (TEST_BIT (visited, bb->index)
2302 || dominated_by_p (CDI_DOMINATORS, bb, SCOP_EXIT (scop))
2303 /* Every block in the scop is dominated by scop's entry. */
2304 || !dominated_by_p (CDI_DOMINATORS, bb, SCOP_ENTRY (scop)))
2307 new_graphite_bb (scop, bb);
2308 SET_BIT (visited, bb->index);
2310 /* First push the blocks that have to be processed last. Note
2311 that this means that the order in which the code is organized
2312 below is important: do not reorder the following code. */
/* 1. Successors in shallower loops (processed last).  */
2313 FOR_EACH_EDGE (e, ei, bb->succs)
2314 if (! TEST_BIT (visited, e->dest->index)
2315 && (int) loop_depth (e->dest->loop_father) < depth)
2316 stack[sp++] = e->dest;
/* 2. Successors at the same depth but in a different loop.  */
2318 FOR_EACH_EDGE (e, ei, bb->succs)
2319 if (! TEST_BIT (visited, e->dest->index)
2320 && (int) loop_depth (e->dest->loop_father) == depth
2321 && e->dest->loop_father->num != num)
2322 stack[sp++] = e->dest;
/* 3. Same loop, join points (multiple predecessors).  */
2324 FOR_EACH_EDGE (e, ei, bb->succs)
2325 if (! TEST_BIT (visited, e->dest->index)
2326 && (int) loop_depth (e->dest->loop_father) == depth
2327 && e->dest->loop_father->num == num
2328 && EDGE_COUNT (e->dest->preds) > 1)
2329 stack[sp++] = e->dest;
/* 4. Same loop, single-predecessor blocks.  */
2331 FOR_EACH_EDGE (e, ei, bb->succs)
2332 if (! TEST_BIT (visited, e->dest->index)
2333 && (int) loop_depth (e->dest->loop_father) == depth
2334 && e->dest->loop_father->num == num
2335 && EDGE_COUNT (e->dest->preds) == 1)
2336 stack[sp++] = e->dest;
/* 5. Successors in deeper loops (processed first).  */
2338 FOR_EACH_EDGE (e, ei, bb->succs)
2339 if (! TEST_BIT (visited, e->dest->index)
2340 && (int) loop_depth (e->dest->loop_father) > depth)
2341 stack[sp++] = e->dest;
2345 sbitmap_free (visited);
2348 /* Returns the number of reduction phi nodes in LOOP: scalar phis in
   the loop header whose evolution is not a simple induction
   variable.  */
2351 nb_reductions_in_loop (loop_p loop)
2354 gimple_stmt_iterator gsi;
2356 for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
2358 gimple phi = gsi_stmt (gsi);
/* Skip phis on virtual operands; only scalar registers matter.  */
2362 if (!is_gimple_reg (PHI_RESULT (phi)))
2365 scev = analyze_scalar_evolution (loop, PHI_RESULT (phi));
2366 scev = instantiate_parameters (loop, scev);
2367 if (!simple_iv (loop, loop, PHI_RESULT (phi), &iv, true))
2374 /* A LOOP is in normal form when it contains only one scalar phi node
2375 that defines the main induction variable of the loop, only one
2376 increment of the IV, and only one exit condition.  Returns the
   canonical induction variable created for LOOP.  */
2379 graphite_loop_normal_form (loop_p loop)
2381 struct tree_niter_desc niter;
2384 edge exit = single_dom_exit (loop);
2385 bool known_niter = number_of_iterations_exit (loop, exit, &niter, false);
/* Callers only record loops whose iteration count is representable,
   so the count must be known here.  */
2387 gcc_assert (known_niter);
2389 nit = force_gimple_operand (unshare_expr (niter.niter), &stmts, true,
/* Materialize the iteration-count computation on the preheader edge.  */
2392 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
2394 /* One IV per loop: give up when reduction phis are present.  */
2395 if (nb_reductions_in_loop (loop) > 0)
2398 return canonicalize_loop_ivs (loop, NULL, &nit);
2401 /* Record LOOP as occurring in SCOP. Returns true when the operation
   succeeded.  Normalizes the loop and remembers its old induction
   variable in SCOP_OLDIVS.  Recording the same loop twice is a
   no-op.  */
2405 scop_record_loop (scop_p scop, loop_p loop)
2410 if (bitmap_bit_p (SCOP_LOOPS (scop), loop->num))
2413 bitmap_set_bit (SCOP_LOOPS (scop), loop->num);
2414 VEC_safe_push (loop_p, heap, SCOP_LOOP_NEST (scop), loop);
2416 induction_var = graphite_loop_normal_form (loop);
/* Freed in free_scop together with SCOP_OLDIVS.  */
2420 oldiv = XNEW (struct name_tree);
2421 oldiv->t = induction_var;
2422 oldiv->name = get_name (SSA_NAME_VAR (oldiv->t));
2424 VEC_safe_push (name_tree, heap, SCOP_OLDIVS (scop), oldiv);
2428 /* Build the loop nests contained in SCOP. Returns true when the
2429 operation was successful. */
2432 build_scop_loop_nests (scop_p scop)
2436 struct loop *loop0, *loop1;
2439 if (bb_in_sese_p (bb, SCOP_REGION (scop)))
2441 struct loop *loop = bb->loop_father;
2443 /* Only add loops if they are completely contained in the SCoP:
   the header is this bb and the latch is inside the region.  */
2444 if (loop->header == bb
2445 && bb_in_sese_p (loop->latch, SCOP_REGION (scop)))
2447 if (!scop_record_loop (scop, loop))
2452 /* Make sure that the loops in the SCOP_LOOP_NEST are ordered. It
2453 can be the case that an inner loop is inserted before an outer
2454 loop. To avoid this, semi-sort once: swap adjacent entries that
   are out of order by loop number.  */
2455 for (i = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), i, loop0); i++)
2457 if (VEC_length (loop_p, SCOP_LOOP_NEST (scop)) == i + 1)
2460 loop1 = VEC_index (loop_p, SCOP_LOOP_NEST (scop), i + 1);
2461 if (loop0->num > loop1->num)
2463 VEC_replace (loop_p, SCOP_LOOP_NEST (scop), i, loop1);
2464 VEC_replace (loop_p, SCOP_LOOP_NEST (scop), i + 1, loop0);
2471 /* Calculate the number of loops around L in the SCOP: walks outward
   while the loop is still inside the SCoP region.  */
2474 nb_loops_around_loop_in_scop (struct loop *l, scop_p scop)
2478 for (; loop_in_sese_p (l, SCOP_REGION (scop)); d++, l = loop_outer (l));
2483 /* Calculate the number of loops around GB in the current SCOP. */
2486 nb_loops_around_gb (graphite_bb_p gb)
2488 return nb_loops_around_loop_in_scop (gbb_loop (gb), GBB_SCOP (gb));
2491 /* Returns the dimensionality of an enclosing loop iteration domain
2492 with respect to enclosing SCoP for a given data reference REF. The
2493 returned dimensionality is homogeneous (depth of loop nest + number
2494 of SCoP parameters + const). */
2497 ref_nb_loops (data_reference_p ref)
2499 loop_p loop = loop_containing_stmt (DR_STMT (ref));
2500 scop_p scop = DR_SCOP (ref);
/* "+ 2" accounts for the eq/ineq column and the constant column.  */
2502 return nb_loops_around_loop_in_scop (loop, scop) + scop_nb_params (scop) + 2;
2505 /* Build dynamic schedules for all the BBs of SCOP: for each graphite
   bb inside a loop, an identity matrix of size "number of loops
   around the bb"; NULL otherwise.  */
2508 build_scop_dynamic_schedules (scop_p scop)
2510 int i, dim, loop_num, row, col;
2513 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
2515 loop_num = GBB_BB (gb)->loop_father->num;
2519 dim = nb_loops_around_gb (gb);
2520 GBB_DYNAMIC_SCHEDULE (gb) = cloog_matrix_alloc (dim, dim);
/* Fill in the identity matrix: 1 on the diagonal, 0 elsewhere.  */
2522 for (row = 0; row < GBB_DYNAMIC_SCHEDULE (gb)->NbRows; row++)
2523 for (col = 0; col < GBB_DYNAMIC_SCHEDULE (gb)->NbColumns; col++)
2525 value_set_si (GBB_DYNAMIC_SCHEDULE (gb)->p[row][col], 1);
2527 value_set_si (GBB_DYNAMIC_SCHEDULE (gb)->p[row][col], 0);
2530 GBB_DYNAMIC_SCHEDULE (gb) = NULL;
2534 /* Returns the number of loops that are identical at the beginning of
2535 the vectors A and B: the length of the common prefix.  */
2538 compare_prefix_loops (VEC (loop_p, heap) *a, VEC (loop_p, heap) *b)
2547 lb = VEC_length (loop_p, b);
2549 for (i = 0; VEC_iterate (loop_p, a, i, ea); i++)
2551 || ea != VEC_index (loop_p, b, i))
2557 /* Build for BB the static schedule.
2559 The STATIC_SCHEDULE is defined like this:
2578 Static schedules for A to F:
2591 build_scop_canonical_schedules (scop_p scop)
2595 int nb_loops = scop_nb_loops (scop);
2596 lambda_vector static_schedule = lambda_vector_new (nb_loops + 1);
2597 VEC (loop_p, heap) *loops_previous = NULL;
2599 /* We have to start schedules at 0 on the first component and
2600 because we cannot compare_prefix_loops against a previous loop,
2601 prefix will be equal to zero, and that index will be
2602 incremented before copying. */
2603 static_schedule[0] = -1;
2605 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
2607 int prefix = compare_prefix_loops (loops_previous, GBB_LOOPS (gb));
2608 int nb = gbb_nb_loops (gb);
2610 loops_previous = GBB_LOOPS (gb);
/* Reset the components below the common prefix, then bump the
   component at the prefix level for this bb.  */
2611 memset (&(static_schedule[prefix + 1]), 0, sizeof (int) * (nb_loops - prefix));
2612 ++static_schedule[prefix];
2613 GBB_STATIC_SCHEDULE (gb) = lambda_vector_new (nb + 1);
2614 lambda_vector_copy (static_schedule,
2615 GBB_STATIC_SCHEDULE (gb), nb + 1);
2619 /* Build the LOOPS vector for all bbs in SCOP: for each graphite bb,
   GBB_LOOPS lists the surrounding loops contained in the scop,
   outermost first.  */
2622 build_bb_loops (scop_p scop)
2627 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
2632 depth = nb_loops_around_gb (gb) - 1;
2634 GBB_LOOPS (gb) = VEC_alloc (loop_p, heap, 3);
2635 VEC_safe_grow_cleared (loop_p, heap, GBB_LOOPS (gb), depth + 1);
2637 loop = GBB_BB (gb)->loop_father;
/* Walk outward, filling the vector from the innermost position.  */
2639 while (scop_contains_loop (scop, loop))
2641 VEC_replace (loop_p, GBB_LOOPS (gb), depth, loop);
2642 loop = loop_outer (loop);
2648 /* Get the index for parameter VAR in SCOP.  If VAR is not yet
   recorded, it is appended to SCOP_PARAMS — which is only legal while
   SCOP_ADD_PARAMS is still true (see find_scop_parameters).  */
2651 param_index (tree var, scop_p scop)
2657 gcc_assert (TREE_CODE (var) == SSA_NAME);
2659 for (i = 0; VEC_iterate (name_tree, SCOP_PARAMS (scop), i, p); i++)
2663 gcc_assert (SCOP_ADD_PARAMS (scop));
2665 nvar = XNEW (struct name_tree);
2668 VEC_safe_push (name_tree, heap, SCOP_PARAMS (scop), nvar);
2669 return VEC_length (name_tree, SCOP_PARAMS (scop)) - 1;
2672 /* Scan EXPR and translate it to an inequality vector INEQ that will
2673 be added, or subtracted, in the constraint domain matrix C at row
2674 R. K is the number of columns for loop iterators in C.
   NOTE(review): callers also invoke this with C == NULL purely to
   register parameters via param_index — confirm before relying on C
   being non-null here.  */
2677 scan_tree_for_params (scop_p s, tree e, CloogMatrix *c, int r, Value k,
2680 int cst_col, param_col;
2682 if (e == chrec_dont_know)
2685 switch (TREE_CODE (e))
2687 case POLYNOMIAL_CHREC:
2689 tree left = CHREC_LEFT (e);
2690 tree right = CHREC_RIGHT (e);
2691 int var = CHREC_VARIABLE (e);
2693 if (TREE_CODE (right) != INTEGER_CST)
/* The chrec step goes into the column of its loop's iterator.  */
2698 int loop_col = scop_gimple_loop_depth (s, get_loop (var)) + 1;
2701 value_sub_int (c->p[r][loop_col], c->p[r][loop_col],
2702 int_cst_value (right));
2704 value_add_int (c->p[r][loop_col], c->p[r][loop_col],
2705 int_cst_value (right));
2708 switch (TREE_CODE (left))
2710 case POLYNOMIAL_CHREC:
2711 scan_tree_for_params (s, left, c, r, k, subtract);
2715 /* Constant part goes into the last (constant) column.  */
2718 int v = int_cst_value (left);
2719 cst_col = c->NbColumns - 1;
/* Negative constants flip the subtract flag and are handled by
   their absolute value.  */
2724 subtract = subtract ? false : true;
2728 value_sub_int (c->p[r][cst_col], c->p[r][cst_col], v);
2730 value_add_int (c->p[r][cst_col], c->p[r][cst_col], v);
2735 scan_tree_for_params (s, left, c, r, k, subtract);
/* MULT_EXPR: exactly one operand may be symbolic; the constant
   operand is folded into the coefficient K.  */
2742 if (chrec_contains_symbols (TREE_OPERAND (e, 0)))
2747 gcc_assert (host_integerp (TREE_OPERAND (e, 1), 0));
2749 value_set_si (val, int_cst_value (TREE_OPERAND (e, 1)));
2750 value_multiply (k, k, val);
2753 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
2760 gcc_assert (host_integerp (TREE_OPERAND (e, 0), 0));
2762 value_set_si (val, int_cst_value (TREE_OPERAND (e, 0)));
2763 value_multiply (k, k, val);
2766 scan_tree_for_params (s, TREE_OPERAND (e, 1), c, r, k, subtract);
2771 case POINTER_PLUS_EXPR:
2772 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
2773 scan_tree_for_params (s, TREE_OPERAND (e, 1), c, r, k, subtract);
/* MINUS_EXPR: the second operand is scanned with SUBTRACT flipped.  */
2777 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
2778 scan_tree_for_params (s, TREE_OPERAND (e, 1), c, r, k, !subtract);
2782 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, !subtract);
/* SSA_NAME: register E as a scop parameter and add K to its column.  */
2786 param_col = param_index (e, s);
2790 param_col += c->NbColumns - scop_nb_params (s) - 1;
2793 value_subtract (c->p[r][param_col], c->p[r][param_col], k);
2795 value_addto (c->p[r][param_col], c->p[r][param_col], k);
/* INTEGER_CST: contribute to the constant column.  */
2802 int v = int_cst_value (e);
2803 cst_col = c->NbColumns - 1;
2808 subtract = subtract ? false : true;
2812 value_sub_int (c->p[r][cst_col], c->p[r][cst_col], v);
2814 value_add_int (c->p[r][cst_col], c->p[r][cst_col], v);
/* Conversions are transparent: scan the operand.  */
2819 case NON_LVALUE_EXPR:
2820 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
2829 /* Data structure for idx_record_params: carries the scop and the loop
   of the statement being analyzed.  */
2837 /* For a data reference with an ARRAY_REF as its BASE, record the
2838 parameters occurring in IDX. DTA is passed in as complementary
2839 information, and is used by the automatic walker function. This
2840 function is a callback for for_each_index. */
2843 idx_record_params (tree base, tree *idx, void *dta)
2845 struct irp_data *data = (struct irp_data *) dta;
2847 if (TREE_CODE (base) != ARRAY_REF)
2850 if (TREE_CODE (*idx) == SSA_NAME)
2853 scop_p scop = data->scop;
2854 struct loop *loop = data->loop;
2857 scev = analyze_scalar_evolution (loop, *idx);
2858 scev = instantiate_scev (block_before_scop (scop), loop, scev);
/* Scan with a NULL matrix: only parameter registration happens.  */
2861 value_set_si (one, 1);
2862 scan_tree_for_params (scop, scev, NULL, 0, one, false);
2869 /* Find parameters with respect to SCOP in BB. We are looking in memory
2870 access functions, conditions and loop bounds. */
2873 find_params_in_bb (scop_p scop, graphite_bb_p gb)
2876 data_reference_p dr;
2878 loop_p father = GBB_BB (gb)->loop_father;
/* Parameters in the index expressions of data references.  */
2880 for (i = 0; VEC_iterate (data_reference_p, GBB_DATA_REFS (gb), i, dr); i++)
2882 struct irp_data irp;
2886 for_each_index (&dr->ref, idx_record_params, &irp);
2889 /* Find parameters in conditional statements. */
2890 for (i = 0; VEC_iterate (gimple, GBB_CONDITIONS (gb), i, stmt); i++)
2893 loop_p loop = father;
/* Instantiate the scalar evolutions of both operands of the
   condition before scanning them for parameters.  */
2897 lhs = gimple_cond_lhs (stmt);
2898 lhs = analyze_scalar_evolution (loop, lhs);
2899 lhs = instantiate_scev (block_before_scop (scop), loop, lhs);
2901 rhs = gimple_cond_rhs (stmt);
2902 rhs = analyze_scalar_evolution (loop, rhs);
2903 rhs = instantiate_scev (block_before_scop (scop), loop, rhs);
2906 scan_tree_for_params (scop, lhs, NULL, 0, one, false);
2907 value_set_si (one, 1);
2908 scan_tree_for_params (scop, rhs, NULL, 0, one, false);
2913 /* Saves in NV the name of variable P->T. */
2916 save_var_name (char **nv, int i, name_tree p)
2918 const char *name = get_name (SSA_NAME_VAR (p->t));
2922 int len = strlen (name) + 16;
2923 nv[i] = XNEWVEC (char, len);
2924 snprintf (nv[i], len, "%s_%d", name, SSA_NAME_VERSION (p->t));
2928 nv[i] = XNEWVEC (char, 16);
2929 snprintf (nv[i], 2 + 16, "T_%d", SSA_NAME_VERSION (p->t));
2935 /* Return the maximal loop depth in SCOP: the maximum over all
   graphite bbs of the number of loops around them.  */
2938 scop_max_loop_depth (scop_p scop)
2942 int max_nb_loops = 0;
2944 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gbb); i++)
2946 int nb_loops = gbb_nb_loops (gbb);
2947 if (max_nb_loops < nb_loops)
2948 max_nb_loops = nb_loops;
2951 return max_nb_loops;
2954 /* Initialize Cloog's parameter names from the names used in GIMPLE.
2955 Initialize Cloog's iterator names, using 'graphite_iterator_%d'
2956 from 0 to scop_nb_loops (scop).  Scattering dimensions are named
   "s_%d".  All three name arrays are handed over to CLooG.  */
2959 initialize_cloog_names (scop_p scop)
2961 int i, nb_params = VEC_length (name_tree, SCOP_PARAMS (scop));
2962 char **params = XNEWVEC (char *, nb_params);
2963 int nb_iterators = scop_max_loop_depth (scop);
2964 int nb_scattering= cloog_program_nb_scattdims (SCOP_PROG (scop));
2965 char **iterators = XNEWVEC (char *, nb_iterators * 2);
2966 char **scattering = XNEWVEC (char *, nb_scattering);
/* Parameter names come from the GIMPLE SSA names.  */
2969 for (i = 0; VEC_iterate (name_tree, SCOP_PARAMS (scop), i, p); i++)
2970 save_var_name (params, i, p);
2972 cloog_names_set_nb_parameters (cloog_program_names (SCOP_PROG (scop)),
2974 cloog_names_set_parameters (cloog_program_names (SCOP_PROG (scop)),
2977 for (i = 0; i < nb_iterators; i++)
2980 iterators[i] = XNEWVEC (char, len);
2981 snprintf (iterators[i], len, "graphite_iterator_%d", i);
2984 cloog_names_set_nb_iterators (cloog_program_names (SCOP_PROG (scop)),
2986 cloog_names_set_iterators (cloog_program_names (SCOP_PROG (scop)),
2989 for (i = 0; i < nb_scattering; i++)
2992 scattering[i] = XNEWVEC (char, len);
2993 snprintf (scattering[i], len, "s_%d", i);
2996 cloog_names_set_nb_scattering (cloog_program_names (SCOP_PROG (scop)),
2998 cloog_names_set_scattering (cloog_program_names (SCOP_PROG (scop)),
3002 /* Record the parameters used in the SCOP. A variable is a parameter
3003 in a scop if it does not vary during the execution of that scop. */
3006 find_scop_parameters (scop_p scop)
3014 value_set_si (one, 1);
3016 /* Find the parameters used in the loop bounds. */
3017 for (i = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), i, loop); i++)
3019 tree nb_iters = number_of_latch_executions (loop);
/* A symbol-free iteration count contains no parameters.  */
3021 if (!chrec_contains_symbols (nb_iters))
3024 nb_iters = analyze_scalar_evolution (loop, nb_iters);
3025 nb_iters = instantiate_scev (block_before_scop (scop), loop, nb_iters);
3026 scan_tree_for_params (scop, nb_iters, NULL, 0, one, false);
3031 /* Find the parameters used in data accesses. */
3032 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
3033 find_params_in_bb (scop, gb);
/* From here on the parameter set is frozen: param_index will assert
   if a new parameter shows up later.  */
3035 SCOP_ADD_PARAMS (scop) = false;
3038 /* Build the context constraints for SCOP: constraints and relations
   on the parameters.  Installs a trivial "0 >= 0" context in the
   CLooG program.  */
3042 build_scop_context (scop_p scop)
3044 int nb_params = scop_nb_params (scop);
3045 CloogMatrix *matrix = cloog_matrix_alloc (1, nb_params + 2);
3047 /* Insert '0 >= 0' in the context matrix, as it is not allowed to be
3050 value_set_si (matrix->p[0][0], 1);
3052 value_set_si (matrix->p[0][nb_params + 1], 0);
3054 cloog_program_set_context (SCOP_PROG (scop),
3055 cloog_domain_matrix2domain (matrix));
3056 cloog_matrix_free (matrix);
3059 /* Returns the graphite_bb stored in BB->aux (set by new_graphite_bb).  */
3061 static inline graphite_bb_p
3062 gbb_from_bb (basic_block bb)
3064 return (graphite_bb_p) bb->aux;
3067 /* Builds the constraint matrix for LOOP in SCOP. NB_OUTER_LOOPS is the
3068 number of loops surrounding LOOP in SCOP. OUTER_CSTR gives the
3069 constraints matrix for the surrounding loops.
     Recurses into inner and sibling loops and attaches a copy of the
     resulting matrix as the domain of every gbb belonging to LOOP.  */
3072 build_loop_iteration_domains (scop_p scop, struct loop *loop,
3073 CloogMatrix *outer_cstr, int nb_outer_loops)
     /* One extra row for the new loop's lower bound, one extra column
        for the new loop's iterator.  */
3079 int nb_rows = outer_cstr->NbRows + 1;
3080 int nb_cols = outer_cstr->NbColumns + 1;
3082 /* Last column of CSTR is the column of constants. */
3083 int cst_col = nb_cols - 1;
3085 /* The column for the current loop is just after the columns of
3086 other outer loops. */
3087 int loop_col = nb_outer_loops + 1;
3089 tree nb_iters = number_of_latch_executions (loop);
3091 /* When the number of iterations is a constant or a parameter, we
3092 add a constraint for the upper bound of the loop. So add a row
3093 to the constraint matrix before allocating it. */
3094 if (TREE_CODE (nb_iters) == INTEGER_CST
3095 || !chrec_contains_undetermined (nb_iters))
3098 cstr = cloog_matrix_alloc (nb_rows, nb_cols)
3100 /* Copy the outer constraints. */
3101 for (i = 0; i < outer_cstr->NbRows; i++)
3103 /* Copy the eq/ineq and loops columns. */
3104 for (j = 0; j < loop_col; j++)
3105 value_assign (cstr->p[i][j], outer_cstr->p[i][j]);
3107 /* Leave an empty column in CSTR for the current loop, and then
3108 copy the parameter columns. */
3109 for (j = loop_col; j < outer_cstr->NbColumns; j++)
3110 value_assign (cstr->p[i][j + 1], outer_cstr->p[i][j]);
     /* Lower bound row: 1 * loop_i >= 0.  An elided line presumably
        advances ROW before the upper-bound constraint is written —
        TODO confirm against the full source.  */
3114 row = outer_cstr->NbRows;
3115 value_set_si (cstr->p[row][0], 1);
3116 value_set_si (cstr->p[row][loop_col], 1);
3118 /* loop_i <= nb_iters */
3119 if (TREE_CODE (nb_iters) == INTEGER_CST)
     /* Constant bound: -loop_i + nb_iters >= 0.  */
3122 value_set_si (cstr->p[row][0], 1);
3123 value_set_si (cstr->p[row][loop_col], -1);
3125 value_set_si (cstr->p[row][cst_col],
3126 int_cst_value (nb_iters));
3128 else if (!chrec_contains_undetermined (nb_iters))
3130 /* Otherwise nb_iters contains parameters: scan the nb_iters
3131 expression and build its matrix representation. */
3135 value_set_si (cstr->p[row][0], 1);
3136 value_set_si (cstr->p[row][loop_col], -1);
3138 nb_iters = analyze_scalar_evolution (loop, nb_iters);
3139 nb_iters = instantiate_scev (block_before_scop (scop), loop, nb_iters);
3142 value_set_si (one, 1);
3143 scan_tree_for_params (scop, nb_iters, cstr, row, one, false);
3149 if (loop->inner && loop_in_sese_p (loop->inner, SCOP_REGION (scop)))
3150 build_loop_iteration_domains (scop, loop->inner, cstr, nb_outer_loops + 1);
3152 /* Only go to the next loops if we are not at the outermost layer.
3153 The outermost loops have to be handled separately, as we can be
3154 sure that the chain at this layer will be connected. */
3155 if (nb_outer_loops != 0 && loop->next && loop_in_sese_p (loop->next,
3156 SCOP_REGION (scop)))
3157 build_loop_iteration_domains (scop, loop->next, outer_cstr, nb_outer_loops);
     /* Every gbb of this loop gets its own copy of the domain matrix.  */
3159 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
3160 if (gbb_loop (gb) == loop)
3161 GBB_DOMAIN (gb) = cloog_matrix_copy (cstr);
3163 cloog_matrix_free (cstr);
3166 /* Add conditions to the domain of GB.  First pass counts how many
     rows the recorded conditions need, the domain matrix is enlarged,
     then a second pass fills in one constraint row per condition.  */
3169 add_conditions_to_domain (graphite_bb_p gb)
3173 VEC (gimple, heap) *conditions = GBB_CONDITIONS (gb);
3174 CloogMatrix *domain = GBB_DOMAIN (gb);
3175 scop_p scop = GBB_SCOP (gb);
3179 unsigned nb_new_rows = 0;
3182 if (VEC_empty (gimple, conditions))
3187 nb_rows = domain->NbRows;
3188 nb_cols = domain->NbColumns;
     /* When there is no domain matrix yet, compute the column count
        from scratch: loops + parameters + eq/ineq marker + constant.  */
3193 nb_cols = nb_loops_around_gb (gb) + scop_nb_params (scop) + 2;
3196 /* Count number of necessary new rows to add the conditions to the
     domain.  */
3198 for (i = 0; VEC_iterate (gimple, conditions, i, stmt); i++)
3200 switch (gimple_code (stmt))
3204 enum tree_code code = gimple_cond_code (stmt);
3210 /* NE and EQ statements are not supported right now. */
3226 /* Switch statements are not supported right now. */
3237 /* Enlarge the matrix. */
3239 CloogMatrix *new_domain;
3240 new_domain = cloog_matrix_alloc (nb_rows + nb_new_rows, nb_cols);
     /* Copy the existing rows into the enlarged matrix.  */
3244 for (i = 0; i < nb_rows; i++)
3245 for (j = 0; j < nb_cols; j++)
3246 value_assign (new_domain->p[i][j], domain->p[i][j]);
3248 cloog_matrix_free (domain);
3251 domain = new_domain;
3252 GBB_DOMAIN (gb) = new_domain;
3255 /* Add the conditions to the new enlarged domain matrix. */
3257 for (i = 0; VEC_iterate (gimple, conditions, i, stmt); i++)
3259 switch (gimple_code (stmt))
3264 enum tree_code code;
3267 loop_p loop = GBB_BB (gb)->loop_father;
3269 left = gimple_cond_lhs (stmt);
3270 right = gimple_cond_rhs (stmt);
     /* Rewrite both operands as scalar evolutions instantiated
        relative to the block before the scop.  */
3272 left = analyze_scalar_evolution (loop, left);
3273 right = analyze_scalar_evolution (loop, right);
3275 left = instantiate_scev (block_before_scop (scop), loop, left);
3276 right = instantiate_scev (block_before_scop (scop), loop, right);
3278 code = gimple_cond_code (stmt);
3280 /* The conditions for ELSE-branches are inverted: a NULL entry in
3281 condition_cases marks the else branch.  */
3281 if (VEC_index (gimple, gb->condition_cases, i) == NULL)
3282 code = invert_tree_comparison (code, false);
3287 /* NE statements are not supported right now. */
     /* Each comparison becomes one inequality row: the operand scanned
        with subtract=true enters negated, the other positively.  */
3291 value_set_si (domain->p[row][0], 1);
3293 value_set_si (one, 1);
3294 scan_tree_for_params (scop, left, domain, row, one, true);
3295 value_set_si (one, 1);
3296 scan_tree_for_params (scop, right, domain, row, one, false);
3298 value_set_si (domain->p[row][0], 1);
3299 value_set_si (one, 1);
3300 scan_tree_for_params (scop, left, domain, row, one, false);
3301 value_set_si (one, 1);
3302 scan_tree_for_params (scop, right, domain, row, one, true);
     /* Strict comparisons subtract 1 from the constant column to turn
        '<' / '>' into the non-strict form polyhedra require.  */
3307 value_set_si (domain->p[row][0], 1);
3309 value_set_si (one, 1);
3310 scan_tree_for_params (scop, left, domain, row, one, true);
3311 value_set_si (one, 1);
3312 scan_tree_for_params (scop, right, domain, row, one, false);
3313 value_sub_int (domain->p[row][nb_cols - 1],
3314 domain->p[row][nb_cols - 1], 1);
3319 value_set_si (domain->p[row][0], 1);
3321 value_set_si (one, 1);
3322 scan_tree_for_params (scop, left, domain, row, one, false);
3323 value_set_si (one, 1);
3324 scan_tree_for_params (scop, right, domain, row, one, true);
3325 value_sub_int (domain->p[row][nb_cols - 1],
3326 domain->p[row][nb_cols - 1], 1);
3331 value_set_si (domain->p[row][0], 1);
3333 value_set_si (one, 1);
3334 scan_tree_for_params (scop, left, domain, row, one, true);
3335 value_set_si (one, 1);
3336 scan_tree_for_params (scop, right, domain, row, one, false);
3341 value_set_si (domain->p[row][0], 1);
3343 value_set_si (one, 1);
3344 scan_tree_for_params (scop, left, domain, row, one, false);
3345 value_set_si (one, 1);
3346 scan_tree_for_params (scop, right, domain, row, one, true);
3357 /* Switch statements are not supported right now. */
3368 /* Returns true when PHI defines an induction variable in the loop
3369 containing the PHI node: its scalar evolution in that loop contains
     at least one chrec.  */
3372 phi_node_is_iv (gimple phi)
3374 loop_p loop = gimple_bb (phi)->loop_father;
3375 tree scev = analyze_scalar_evolution (loop, gimple_phi_result (phi));
3377 return tree_contains_chrecs (scev, NULL);
3380 /* Returns true when BB contains scalar phi nodes that are not an
3381 induction variable of a loop.  */
3384 bb_contains_non_iv_scalar_phi_nodes (basic_block bb)
3387 gimple_stmt_iterator si;
3389 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
3390 if (is_gimple_reg (gimple_phi_result (gsi_stmt (si))))
3392 /* Store the unique scalar PHI node: at this point, loops
3393 should be in canonical form, so we expect to see at most
3394 one scalar phi node in the loop header.  A second scalar phi,
     or a scalar phi outside the loop header, disqualifies BB.  */
3396 || bb != bb->loop_father->header)
3399 phi = gsi_stmt (si);
     /* The recorded phi is acceptable only when it is an IV.  */
3403 || phi_node_is_iv (phi))
3409 /* Helper recursive function. Record in CONDITIONS and CASES all
3410 conditions from 'if's and 'switch'es occurring in BB from SCOP.
3412 Returns false when the conditions contain scalar computations that
3413 depend on the condition, i.e. when there are scalar phi nodes on
3414 the junction after the condition. Only the computations occurring
3415 on memory can be handled in the polyhedral model: operations that
3416 define scalar evolutions in conditions, that can potentially be
3417 used to index memory, can't be handled by the polyhedral model. */
3420 build_scop_conditions_1 (VEC (gimple, heap) **conditions,
3421 VEC (gimple, heap) **cases, basic_block bb,
3427 basic_block bb_child, bb_iter;
3428 VEC (basic_block, heap) *dom;
3431 /* Make sure we are in the SCoP. */
3432 if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
     /* Non-IV scalar phis make the region untranslatable (see above).  */
3435 if (bb_contains_non_iv_scalar_phi_nodes (bb))
     /* Snapshot the condition stack into the gbb for this block.  */
3438 gbb = gbb_from_bb (bb);
3441 GBB_CONDITIONS (gbb) = VEC_copy (gimple, heap, *conditions);
3442 GBB_CONDITION_CASES (gbb) = VEC_copy (gimple, heap, *cases);
3445 dom = get_dominated_by (CDI_DOMINATORS, bb);
3447 stmt = last_stmt (bb);
3450 VEC (edge, gc) *edges;
3453 switch (gimple_code (stmt))
     /* GIMPLE_COND: visit each dominated, single-predecessor successor,
        pushing the condition (stmt for the then-branch, NULL marker in
        CASES for the else-branch) around the recursive scan.  */
3457 for (i = 0; VEC_iterate (edge, edges, i, e); i++)
3458 if ((dominated_by_p (CDI_DOMINATORS, e->dest, bb))
3459 && VEC_length (edge, e->dest->preds) == 1)
3461 /* Remove the scanned block from the dominator successors. */
3462 for (j = 0; VEC_iterate (basic_block, dom, j, bb_iter); j++)
3463 if (bb_iter == e->dest)
3465 VEC_unordered_remove (basic_block, dom, j);
3469 /* Recursively scan the then or else part. */
3470 if (e->flags & EDGE_TRUE_VALUE)
3471 VEC_safe_push (gimple, heap, *cases, stmt);
3474 gcc_assert (e->flags & EDGE_FALSE_VALUE);
3475 VEC_safe_push (gimple, heap, *cases, NULL);
3478 VEC_safe_push (gimple, heap, *conditions, stmt);
3479 if (!build_scop_conditions_1 (conditions, cases, e->dest, scop))
     /* Pop the pushed condition/case before moving to the next edge.  */
3484 VEC_pop (gimple, *conditions);
3485 VEC_pop (gimple, *cases);
     /* GIMPLE_SWITCH: scan each case block, rejecting switches whose
        case blocks are shared or have multiple predecessors.  */
3492 gimple_stmt_iterator gsi_search_gimple_label;
3494 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
3496 basic_block bb_iter;
3498 size_t n_cases = VEC_length (gimple, *conditions);
3499 unsigned n = gimple_switch_num_labels (stmt);
3501 bb_child = label_to_block
3502 (CASE_LABEL (gimple_switch_label (stmt, i)));
3504 for (k = 0; k < n; k++)
3507 (CASE_LABEL (gimple_switch_label (stmt, k))) == bb_child)
3510 /* Switches with multiple case values for the same
3511 block are not handled. */
3513 /* Switch cases with more than one predecessor are
     not handled.  */
3515 || VEC_length (edge, bb_child->preds) != 1)
3521 /* Recursively scan the corresponding 'case' block. */
3522 for (gsi_search_gimple_label = gsi_start_bb (bb_child);
3523 !gsi_end_p (gsi_search_gimple_label);
3524 gsi_next (&gsi_search_gimple_label))
3526 gimple label = gsi_stmt (gsi_search_gimple_label);
3528 if (gimple_code (label) == GIMPLE_LABEL)
3530 tree t = gimple_label_label (label);
3532 gcc_assert (t == gimple_switch_label (stmt, i));
3533 VEC_replace (gimple, *cases, n_cases, label);
3538 if (!build_scop_conditions_1 (conditions, cases, bb_child, scop))
3544 /* Remove the scanned block from the dominator successors. */
3545 for (j = 0; VEC_iterate (basic_block, dom, j, bb_iter); j++)
3546 if (bb_iter == bb_child)
3548 VEC_unordered_remove (basic_block, dom, j);
3553 VEC_pop (gimple, *conditions);
3554 VEC_pop (gimple, *cases);
3563 /* Scan all immediate dominated successors. */
3564 for (i = 0; VEC_iterate (basic_block, dom, i, bb_child); i++)
3565 if (!build_scop_conditions_1 (conditions, cases, bb_child, scop))
3572 VEC_free (basic_block, heap, dom);
3576 /* Record all conditions from SCOP, starting the recursive walk at
     the scop entry block.
3578 Returns false when the conditions contain scalar computations that
3579 depend on the condition, i.e. when there are scalar phi nodes on
3580 the junction after the condition. Only the computations occurring
3581 on memory can be handled in the polyhedral model: operations that
3582 define scalar evolutions in conditions, that can potentially be
3583 used to index memory, can't be handled by the polyhedral model. */
3586 build_scop_conditions (scop_p scop)
3589 VEC (gimple, heap) *conditions = NULL;
3590 VEC (gimple, heap) *cases = NULL;
3592 res = build_scop_conditions_1 (&conditions, &cases, SCOP_ENTRY (scop), scop);
3594 VEC_free (gimple, heap, conditions);
3595 VEC_free (gimple, heap, cases);
3599 /* Traverses all the GBBs of the SCOP and adds their recorded
3600 conditions to the iteration domains.  */
3603 add_conditions_to_constraints (scop_p scop)
3608 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gbb); i++)
3609 add_conditions_to_domain (gbb);
3612 /* Build the current domain matrix: the loops belonging to the current
3613 SCOP, and that vary for the execution of the current basic block.
3614 Returns false if there is no loop in SCOP. */
3617 build_scop_iteration_domain (scop_p scop)
3620 CloogMatrix *outer_cstr;
3623 /* Build a cloog loop for all loops that are in the uppermost loop
     layer of this SCoP, i.e. whose outer loop is not in the region.  */
3625 for (i = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), i, loop); i++)
3626 if (!loop_in_sese_p (loop_outer (loop), SCOP_REGION (scop)))
3628 /* The outermost constraints is a matrix that has:
3629 -first column: eq/ineq boolean
3630 -last column: a constant
3631 -scop_nb_params columns for the parameters used in the scop.
     It starts with zero rows; rows are added per loop level.  */
3632 outer_cstr = cloog_matrix_alloc (0, scop_nb_params (scop) + 2);
3633 build_loop_iteration_domains (scop, loop, outer_cstr, 0);
3634 cloog_matrix_free (outer_cstr);
3640 /* Initializes an equation CY of the access matrix using the
3641 information for a subscript from AF, relatively to the loop
3642 indexes from LOOP_NEST and parameter indexes from PARAMS. NDIM is
3643 the dimension of the array access, i.e. the number of
3644 subscripts. Returns true when the operation succeeds. */
3647 build_access_matrix_with_af (tree af, lambda_vector cy,
3648 scop_p scop, int ndim)
3652 switch (TREE_CODE (af))
3654 case POLYNOMIAL_CHREC:
3656 struct loop *outer_loop;
3657 tree left = CHREC_LEFT (af);
3658 tree right = CHREC_RIGHT (af);
     /* Only constant strides are representable in the access matrix.  */
3661 if (TREE_CODE (right) != INTEGER_CST)
     /* The stride goes into the column of the chrec's loop.  */
3664 outer_loop = get_loop (CHREC_VARIABLE (af));
3665 var = nb_loops_around_loop_in_scop (outer_loop, scop);
3666 cy[var] = int_cst_value (right);
3668 switch (TREE_CODE (left))
3670 case POLYNOMIAL_CHREC:
3671 return build_access_matrix_with_af (left, cy, scop, ndim);
     /* A constant initial value lands in the constant column.  */
3674 cy[ndim - 1] = int_cst_value (left);
3678 return build_access_matrix_with_af (left, cy, scop, ndim);
     /* PLUS/MINUS (case labels elided): recurse on both operands.
        NOTE(review): the MINUS operand sign handling is not visible in
        this excerpt — confirm against the full source.  */
3683 build_access_matrix_with_af (TREE_OPERAND (af, 0), cy, scop, ndim);
3684 build_access_matrix_with_af (TREE_OPERAND (af, 1), cy, scop, ndim);
3688 build_access_matrix_with_af (TREE_OPERAND (af, 0), cy, scop, ndim);
3689 build_access_matrix_with_af (TREE_OPERAND (af, 1), cy, scop, ndim);
3693 cy[ndim - 1] = int_cst_value (af);
     /* SSA_NAME parameter: set a unit coefficient in its column.  */
3697 param_col = param_index (af, scop);
3698 cy [ndim - scop_nb_params (scop) + param_col - 1] = 1;
3702 /* FIXME: access_fn can have parameters. */
3707 /* Initialize the access matrix in the data reference REF with respect
3708 to the loop nesting of GB.  Returns true when the operation
     succeeds, false when a subscript is not affine-representable.  */
3712 build_access_matrix (data_reference_p ref, graphite_bb_p gb)
3714 int i, ndim = DR_NUM_DIMENSIONS (ref);
3715 struct access_matrix *am = GGC_NEW (struct access_matrix);
3717 AM_MATRIX (am) = VEC_alloc (lambda_vector, gc, ndim);
3718 DR_SCOP (ref) = GBB_SCOP (gb);
     /* One lambda_vector row per array subscript.  */
3720 for (i = 0; i < ndim; i++)
3722 lambda_vector v = lambda_vector_new (ref_nb_loops (ref));
3723 scop_p scop = GBB_SCOP (gb);
3724 tree af = DR_ACCESS_FN (ref, i);
3726 if (!build_access_matrix_with_af (af, v, scop, ref_nb_loops (ref)))
3729 VEC_quick_push (lambda_vector, AM_MATRIX (am), v);
3732 DR_ACCESS_MATRIX (ref) = am;
3736 /* Build the access matrices for the data references in the SCOP. */
3739 build_scop_data_accesses (scop_p scop)
3744 /* FIXME: Construction of access matrix is disabled until some
3745 pass, like the data dependence analysis, is using it.
     An elided line presumably returns early here — TODO confirm.  */
3748 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
3751 data_reference_p dr;
3753 /* Construct the access matrix for each data ref, with respect to
3754 the loop nest of the current BB in the considered SCOP. */
3756 VEC_iterate (data_reference_p, GBB_DATA_REFS (gb), j, dr);
3759 bool res = build_access_matrix (dr, gb);
3761 /* FIXME: At this point the DRs should always have an affine
3762 form. For the moment this fails as build_access_matrix
3763 does not build matrices with parameters. */
3769 /* Returns the tree variable from the name NAME that was given in
3770 Cloog representation. All the parameters are stored in PARAMS, and
3771 all the loop induction variables are stored in IVSTACK.
3773 FIXME: This is a hack, and Cloog should be fixed to not work with
3774 variable names represented as "char *string", but with void
3775 pointers that could be casted back to a tree. The only problem in
3776 doing that is that Cloog's pretty printer still assumes that
3777 variable names are char *strings. The solution would be to have a
3778 function pointer for pretty-printing that can be redirected to be
3779 print_generic_stmt in our case, or fprintf by default.
3780 ??? Too ugly to live. */
3783 clast_name_to_gcc (const char *name, VEC (name_tree, heap) *params,
3784 loop_iv_stack ivstack)
     /* Parameters are searched first by string name; on a miss, the
        induction-variable stack is consulted.  */
3791 for (i = 0; VEC_iterate (name_tree, params, i, t); i++)
3792 if (!strcmp (name, t->name))
3795 iv = loop_iv_stack_get_iv_from_name (ivstack, name);
3802 /* Returns the maximal precision type for expressions E1 and E2,
     comparing TYPE_PRECISION of their types.  */
3805 max_precision_type (tree e1, tree e2)
3807 tree type1 = TREE_TYPE (e1);
3808 tree type2 = TREE_TYPE (e2);
3809 return TYPE_PRECISION (type1) > TYPE_PRECISION (type2) ? type1 : type2;
3813 clast_to_gcc_expression (tree, struct clast_expr *, VEC (name_tree, heap) *,
3816 /* Converts a Cloog reduction expression R with reduction operation OP
3817 to a GCC expression tree of type TYPE. PARAMS is a vector of
3818 parameters of the scop, and IVSTACK contains the stack of induction
     variables.  Left-folds OP over the reduction's elements.  */
3822 clast_to_gcc_expression_red (tree type, enum tree_code op,
3823 struct clast_reduction *r,
3824 VEC (name_tree, heap) *params,
3825 loop_iv_stack ivstack)
3828 tree res = clast_to_gcc_expression (type, r->elts[0], params, ivstack);
3830 for (i = 1; i < r->n; i++)
3832 tree t = clast_to_gcc_expression (type, r->elts[i], params, ivstack);
3833 res = fold_build2 (op, type, res, t);
3838 /* Converts a Cloog AST expression E back to a GCC expression tree of
3839 type TYPE. PARAMS is a vector of parameters of the scop, and
3840 IVSTACK contains the stack of induction variables. */
3843 clast_to_gcc_expression (tree type, struct clast_expr *e,
3844 VEC (name_tree, heap) *params,
3845 loop_iv_stack ivstack)
     /* Term: VAL * VAR.  Coefficients 1 and -1 are special-cased to
        avoid building a multiplication.  */
3851 struct clast_term *t = (struct clast_term *) e;
3855 if (value_one_p (t->val))
3857 tree name = clast_name_to_gcc (t->var, params, ivstack);
3858 return fold_convert (type, name);
3861 else if (value_mone_p (t->val))
3863 tree name = clast_name_to_gcc (t->var, params, ivstack);
3864 name = fold_convert (type, name);
3865 return fold_build1 (NEGATE_EXPR, type, name);
3869 tree name = clast_name_to_gcc (t->var, params, ivstack);
3870 tree cst = gmp_cst_to_tree (type, t->val);
3871 name = fold_convert (type, name);
3872 return fold_build2 (MULT_EXPR, type, cst, name);
     /* Term without a variable: plain constant.  */
3876 return gmp_cst_to_tree (type, t->val);
     /* Reduction: sum / min / max over the elements.  */
3881 struct clast_reduction *r = (struct clast_reduction *) e;
3886 return clast_to_gcc_expression_red (type, PLUS_EXPR, r, params, ivstack);
3889 return clast_to_gcc_expression_red (type, MIN_EXPR, r, params, ivstack);
3892 return clast_to_gcc_expression_red (type, MAX_EXPR, r, params, ivstack);
     /* Binary: LHS op constant RHS, mapped to the matching division
        or modulo tree code.  */
3902 struct clast_binary *b = (struct clast_binary *) e;
3903 struct clast_expr *lhs = (struct clast_expr *) b->LHS;
3904 tree tl = clast_to_gcc_expression (type, lhs, params, ivstack);
3905 tree tr = gmp_cst_to_tree (type, b->RHS);
3909 case clast_bin_fdiv:
3910 return fold_build2 (FLOOR_DIV_EXPR, type, tl, tr);
3912 case clast_bin_cdiv:
3913 return fold_build2 (CEIL_DIV_EXPR, type, tl, tr);
3916 return fold_build2 (EXACT_DIV_EXPR, type, tl, tr);
3919 return fold_build2 (TRUNC_MOD_EXPR, type, tl, tr);
3933 /* Returns the type for the expression E.  For a term this is the type
     of its variable; for reductions and binaries, the type of the first
     operand (the loop over r->n presumably picks a widest type — the
     selection logic is elided from this excerpt).  */
3936 gcc_type_for_clast_expr (struct clast_expr *e,
3937 VEC (name_tree, heap) *params,
3938 loop_iv_stack ivstack)
3944 struct clast_term *t = (struct clast_term *) e;
3947 return TREE_TYPE (clast_name_to_gcc (t->var, params, ivstack));
3954 struct clast_reduction *r = (struct clast_reduction *) e;
3957 return gcc_type_for_clast_expr (r->elts[0], params, ivstack);
3961 for (i = 0; i < r->n; i++)
3963 tree type = gcc_type_for_clast_expr (r->elts[i], params, ivstack);
3973 struct clast_binary *b = (struct clast_binary *) e;
3974 struct clast_expr *lhs = (struct clast_expr *) b->LHS;
3975 return gcc_type_for_clast_expr (lhs, params, ivstack);
3985 /* Returns the type for the equation CLEQ: the type of its LHS when
     available, otherwise the type of its RHS.  */
3988 gcc_type_for_clast_eq (struct clast_equation *cleq,
3989 VEC (name_tree, heap) *params,
3990 loop_iv_stack ivstack)
3992 tree type = gcc_type_for_clast_expr (cleq->LHS, params, ivstack);
3996 return gcc_type_for_clast_expr (cleq->RHS, params, ivstack);
3999 /* Translates a clast equation CLEQ to a tree comparison: sign 0 means
     equality, positive sign >=, otherwise <= (the comparison codes for
     the non-zero cases are on elided lines).  */
4002 graphite_translate_clast_equation (scop_p scop,
4003 struct clast_equation *cleq,
4004 loop_iv_stack ivstack)
4006 enum tree_code comp;
4007 tree type = gcc_type_for_clast_eq (cleq, SCOP_PARAMS (scop), ivstack);
4008 tree lhs = clast_to_gcc_expression (type, cleq->LHS, SCOP_PARAMS (scop), ivstack);
4009 tree rhs = clast_to_gcc_expression (type, cleq->RHS, SCOP_PARAMS (scop), ivstack);
4011 if (cleq->sign == 0)
4014 else if (cleq->sign > 0)
4020 return fold_build2 (comp, type, lhs, rhs);
4023 /* Creates the test for the condition in STMT: the conjunction
     (TRUTH_AND_EXPR) of all the guard's translated equations.  */
4026 graphite_create_guard_cond_expr (scop_p scop, struct clast_guard *stmt,
4027 loop_iv_stack ivstack)
4032 for (i = 0; i < stmt->n; i++)
4034 tree eq = graphite_translate_clast_equation (scop, &stmt->eq[i], ivstack);
4037 cond = fold_build2 (TRUTH_AND_EXPR, TREE_TYPE (eq), cond, eq);
4045 /* Creates a new if region corresponding to Cloog's guard, attached
     on ENTRY_EDGE; returns the exit edge of the new region.  */
4048 graphite_create_new_guard (scop_p scop, edge entry_edge,
4049 struct clast_guard *stmt,
4050 loop_iv_stack ivstack)
4052 tree cond_expr = graphite_create_guard_cond_expr (scop, stmt, ivstack);
4053 edge exit_edge = create_empty_if_region_on_edge (entry_edge, cond_expr);
4057 /* Walks a CLAST and returns the first user statement in the body of a
     loop, descending through for/guard/block wrappers.  */
4060 static struct clast_user_stmt *
4061 clast_get_body_of_loop (struct clast_stmt *stmt)
4064 || CLAST_STMT_IS_A (stmt, stmt_user))
4065 return (struct clast_user_stmt *) stmt;
4067 if (CLAST_STMT_IS_A (stmt, stmt_for))
4068 return clast_get_body_of_loop (((struct clast_for *) stmt)->body);
4070 if (CLAST_STMT_IS_A (stmt, stmt_guard))
4071 return clast_get_body_of_loop (((struct clast_guard *) stmt)->then);
4073 if (CLAST_STMT_IS_A (stmt, stmt_block))
4074 return clast_get_body_of_loop (((struct clast_block *) stmt)->body);
4079 /* Returns the type of the induction variable for the loop that gets
     translated from STMT_FOR, looked up through the gbb stored in the
     first user statement of the loop body.  */
4083 gcc_type_for_iv_of_clast_loop (struct clast_for *stmt_for)
4085 struct clast_user_stmt *stmt = clast_get_body_of_loop ((struct clast_stmt *) stmt_for);
4086 const char *cloog_iv = stmt_for->iterator;
4087 CloogStatement *cs = stmt->statement;
4088 graphite_bb_p gbb = (graphite_bb_p) cloog_statement_usr (cs);
4090 return gcc_type_for_cloog_iv (cloog_iv, gbb);
4093 /* Creates a new LOOP corresponding to Cloog's STMT. Inserts an induction
4094 variable for the new LOOP. New LOOP is attached to CFG starting at
4095 ENTRY_EDGE. LOOP is inserted into the loop tree and becomes the child
4096 loop of the OUTER_LOOP. */
4098 static struct loop *
4099 graphite_create_new_loop (scop_p scop, edge entry_edge,
4100 struct clast_for *stmt, loop_iv_stack ivstack,
4103 tree type = gcc_type_for_iv_of_clast_loop (stmt);
4104 VEC (name_tree, heap) *params = SCOP_PARAMS (scop);
4105 tree lb = clast_to_gcc_expression (type, stmt->LB, params, ivstack);
4106 tree ub = clast_to_gcc_expression (type, stmt->UB, params, ivstack);
4107 tree stride = gmp_cst_to_tree (type, stmt->stride);
4108 tree ivvar = create_tmp_var (type, "graphiteIV");
     /* iv_before (declared on an elided line) receives the IV phi result
        valid before the loop body.  */
4110 loop_p loop = create_empty_loop_on_edge
4111 (entry_edge, lb, stride, ub, ivvar, &iv_before,
4112 outer ? outer : entry_edge->src->loop_father);
     /* Record the new IV under the CLooG iterator name so later
        references to STMT->iterator resolve to it.  */
4114 add_referenced_var (ivvar);
4115 loop_iv_stack_push_iv (ivstack, iv_before, stmt->iterator);
4119 /* Rename the SSA_NAMEs used in STMT that appear in MAP, replacing
     each use operand by its mapped new name.  */
4122 rename_variables_in_stmt (gimple stmt, htab_t map)
4125 use_operand_p use_p;
4127 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
4129 tree use = USE_FROM_PTR (use_p);
4130 tree new_name = get_new_name_from_old_name (map, use);
4132 replace_exp (use_p, new_name);
4138 /* Returns true if SSA_NAME is recorded as a parameter of SCOP
     (linear search over SCOP_PARAMS).  */
4141 is_parameter (scop_p scop, tree ssa_name)
4144 VEC (name_tree, heap) *params = SCOP_PARAMS (scop);
4147 for (i = 0; VEC_iterate (name_tree, params, i, param); i++)
4148 if (param->t == ssa_name)
4154 /* Returns true if NAME is an induction variable, i.e. its defining
     statement is a PHI node.  (The function signature is on an elided
     line of this excerpt.)  */
4159 return gimple_code (SSA_NAME_DEF_STMT (name)) == GIMPLE_PHI;
4162 static void expand_scalar_variables_stmt (gimple, basic_block, scop_p,
4165 expand_scalar_variables_expr (tree, tree, enum tree_code, tree, basic_block,
4166 scop_p, htab_t, gimple_stmt_iterator *);
4168 /* Copies at GSI all the scalar computations on which the ssa_name OP0
4169 depends on in the SCOP: these are all the scalar variables used in
4170 the definition of OP0, that are defined outside BB and still in the
4171 SCOP, i.e. not a parameter of the SCOP. The expression that is
4172 returned contains only induction variables from the generated code:
4173 MAP contains the induction variables renaming mapping, and is used
4174 to translate the names of induction variables. */
4177 expand_scalar_variables_ssa_name (tree op0, basic_block bb,
4178 scop_p scop, htab_t map,
4179 gimple_stmt_iterator *gsi)
4181 tree var0, var1, type;
4183 enum tree_code subcode;
     /* Parameters (and, presumably per the elided condition, IVs) need
        no expansion: just rename.  */
4185 if (is_parameter (scop, op0)
4187 return get_new_name_from_old_name (map, op0);
4189 def_stmt = SSA_NAME_DEF_STMT (op0);
4191 if (gimple_bb (def_stmt) == bb)
4193 /* If the defining statement is in the basic block already
4194 we do not need to create a new expression for it, we
4195 only need to ensure its operands are expanded. */
4196 expand_scalar_variables_stmt (def_stmt, bb, scop, map);
4197 return get_new_name_from_old_name (map, op0);
     /* Definitions outside the scop, or non-assignments, are also only
        renamed, never duplicated.  */
4201 if (gimple_code (def_stmt) != GIMPLE_ASSIGN
4202 || !bb_in_sese_p (gimple_bb (def_stmt), SCOP_REGION (scop)))
4203 return get_new_name_from_old_name (map, op0);
     /* Otherwise rebuild the defining expression recursively.  */
4205 var0 = gimple_assign_rhs1 (def_stmt);
4206 subcode = gimple_assign_rhs_code (def_stmt);
4207 var1 = gimple_assign_rhs2 (def_stmt);
4208 type = gimple_expr_type (def_stmt);
4210 return expand_scalar_variables_expr (type, var0, subcode, var1, bb, scop,
4215 /* Copies at GSI all the scalar computations on which the expression
4216 OP0 CODE OP1 depends on in the SCOP: these are all the scalar
4217 variables used in OP0 and OP1, defined outside BB and still defined
4218 in the SCOP, i.e. not a parameter of the SCOP. The expression that
4219 is returned contains only induction variables from the generated
4220 code: MAP contains the induction variables renaming mapping, and is
4221 used to translate the names of induction variables. */
4224 expand_scalar_variables_expr (tree type, tree op0, enum tree_code code,
4225 tree op1, basic_block bb, scop_p scop,
4226 htab_t map, gimple_stmt_iterator *gsi)
4228 if (TREE_CODE_CLASS (code) == tcc_constant
4229 || TREE_CODE_CLASS (code) == tcc_declaration)
4232 /* For data references we have to duplicate also its memory
     accesses (e.g. an INDIRECT_REF through an expanded base name).  */
4234 if (TREE_CODE_CLASS (code) == tcc_reference)
4240 tree old_name = TREE_OPERAND (op0, 0);
4241 tree expr = expand_scalar_variables_ssa_name
4242 (old_name, bb, scop, map, gsi);
4243 tree new_name = force_gimple_operand_gsi (gsi, expr, true, NULL,
4244 true, GSI_SAME_STMT);
     /* Preserve the memory tag so aliasing info carries over to the
        freshly forced name.  */
4246 set_symbol_mem_tag (SSA_NAME_VAR (new_name),
4247 symbol_mem_tag (SSA_NAME_VAR (old_name)));
4248 return fold_build1 (code, type, new_name);
     /* ARRAY_REF: expand base and subscript, keep the bounds/stride
        operands (op02/op03) untouched.  */
4253 tree op00 = TREE_OPERAND (op0, 0);
4254 tree op01 = TREE_OPERAND (op0, 1);
4255 tree op02 = TREE_OPERAND (op0, 2);
4256 tree op03 = TREE_OPERAND (op0, 3);
4257 tree base = expand_scalar_variables_expr
4258 (TREE_TYPE (op00), op00, TREE_CODE (op00), NULL, bb, scop,
4260 tree subscript = expand_scalar_variables_expr
4261 (TREE_TYPE (op01), op01, TREE_CODE (op01), NULL, bb, scop,
4264 return build4 (ARRAY_REF, type, base, subscript, op02, op03);
4268 /* The above cases should catch everything. */
4273 if (TREE_CODE_CLASS (code) == tcc_unary)
4275 tree op0_type = TREE_TYPE (op0);
4276 enum tree_code op0_code = TREE_CODE (op0);
4277 tree op0_expr = expand_scalar_variables_expr (op0_type, op0, op0_code,
4278 NULL, bb, scop, map, gsi);
4280 return fold_build1 (code, type, op0_expr);
4283 if (TREE_CODE_CLASS (code) == tcc_binary)
4285 tree op0_type = TREE_TYPE (op0);
4286 enum tree_code op0_code = TREE_CODE (op0);
4287 tree op0_expr = expand_scalar_variables_expr (op0_type, op0, op0_code,
4288 NULL, bb, scop, map, gsi);
4289 tree op1_type = TREE_TYPE (op1);
4290 enum tree_code op1_code = TREE_CODE (op1);
4291 tree op1_expr = expand_scalar_variables_expr (op1_type, op1, op1_code,
4292 NULL, bb, scop, map, gsi);
4294 return fold_build2 (code, type, op0_expr, op1_expr);
4297 if (code == SSA_NAME)
4298 return expand_scalar_variables_ssa_name (op0, bb, scop, map, gsi);
4304 /* Copies at the beginning of BB all the scalar computations on which
4305 STMT depends on in the SCOP: these are all the scalar variables used
4306 in STMT, defined outside BB and still defined in the SCOP, i.e. not a
4307 parameter of the SCOP. The expression that is returned contains
4308 only induction variables from the generated code: MAP contains the
4309 induction variables renaming mapping, and is used to translate the
4310 names of induction variables. */
4313 expand_scalar_variables_stmt (gimple stmt, basic_block bb, scop_p scop,
4317 use_operand_p use_p;
4318 gimple_stmt_iterator gsi = gsi_after_labels (bb);
4320 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
4322 tree use = USE_FROM_PTR (use_p);
4323 tree type = TREE_TYPE (use);
4324 enum tree_code code = TREE_CODE (use);
4325 tree use_expr = expand_scalar_variables_expr (type, use, code, NULL, bb,
     /* Only rewrite the operand if expansion produced something new;
        the expanded expression is gimplified in place before BB's
        first non-label statement.  */
4327 if (use_expr != use)
4330 force_gimple_operand_gsi (&gsi, use_expr, true, NULL,
4331 true, GSI_NEW_STMT);
4332 replace_exp (use_p, new_use);
4339 /* Copies at the beginning of BB all the scalar computations on which
4340 BB depends on in the SCOP: these are all the scalar variables used
4341 in BB, defined outside BB and still defined in the SCOP, i.e. not a
4342 parameter of the SCOP. The expression that is returned contains
4343 only induction variables from the generated code: MAP contains the
4344 induction variables renaming mapping, and is used to translate the
4345 names of induction variables. */
4348 expand_scalar_variables (basic_block bb, scop_p scop, htab_t map)
4350 gimple_stmt_iterator gsi;
     /* The iterator advance is on an elided line; presumably gsi_next
        after each statement is processed.  */
4352 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
4354 gimple stmt = gsi_stmt (gsi);
4355 expand_scalar_variables_stmt (stmt, bb, scop, map);
4360 /* Rename all the SSA_NAMEs from block BB according to the MAP,
     statement by statement (labels excluded).  */
4363 rename_variables (basic_block bb, htab_t map)
4365 gimple_stmt_iterator gsi;
4367 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4368 rename_variables_in_stmt (gsi_stmt (gsi), map);
4371 /* Remove the condition from BB: if BB's last statement is a
     GIMPLE_COND, delete it.  */
4374 remove_condition (basic_block bb)
4376 gimple last = last_stmt (bb);
4378 if (last && gimple_code (last) == GIMPLE_COND)
4380 gimple_stmt_iterator gsi = gsi_last_bb (bb);
4381 gsi_remove (&gsi, true);
4385 /* Returns the first successor edge of BB with EDGE_TRUE_VALUE flag set. */
4388 get_true_edge_from_guard_bb (basic_block bb)
4393 FOR_EACH_EDGE (e, ei, bb->succs)
4394 if (e->flags & EDGE_TRUE_VALUE)
4401 /* Returns the first successor edge of BB with EDGE_TRUE_VALUE flag
     cleared, i.e. the false/fall-through edge of the guard.  */
4404 get_false_edge_from_guard_bb (basic_block bb)
4409 FOR_EACH_EDGE (e, ei, bb->succs)
4410 if (!(e->flags & EDGE_TRUE_VALUE))
4417 /* Inserts in MAP a tuple (OLD_NAME, NEW_NAME) for the induction
4418 variables of the loops around GBB in SCOP, i.e. GBB_LOOPS.
4419 NEW_NAME is obtained from IVSTACK. IVSTACK has the same stack
4420 ordering as GBB_LOOPS. */
4423 build_iv_mapping (loop_iv_stack ivstack, htab_t map, gbb_p gbb, scop_p scop)
4429 for (i = 0; VEC_iterate (name_tree, SCOP_OLDIVS (scop), i, iv); i++)
4431 struct rename_map_elt tmp;
     /* Only IVs of loops enclosing GBB are mapped.  */
4433 if (!flow_bb_inside_loop_p (iv->loop, GBB_BB (gbb)))
4436 tmp.old_name = iv->t;
4437 slot = htab_find_slot (map, &tmp, INSERT);
     /* Only fill an empty slot; an existing mapping is kept — the guard
        on the slot is on an elided line, TODO confirm.  */
4441 tree new_name = loop_iv_stack_get_iv (ivstack,
4442 gbb_loop_index (gbb, iv->loop));
4443 *slot = new_rename_map_elt (iv->t, new_name);
4448 /* Register in MAP the tuple (old_name, new_name). */
4451 register_old_and_new_names (htab_t map, tree old_name, tree new_name)
4453 struct rename_map_elt tmp;
4456 tmp.old_name = old_name;
/* INSERT allocates the slot on first sight of OLD_NAME; the hash
   table owns the element (created via new_rename_map_elt).  */
4457 slot = htab_find_slot (map, &tmp, INSERT);
4460 *slot = new_rename_map_elt (old_name, new_name);
4463 /* Create a duplicate of the basic block BB. NOTE: This does not
4464 preserve SSA form. */
4467 graphite_copy_stmts_from_block (basic_block bb, basic_block new_bb, htab_t map)
4469 gimple_stmt_iterator gsi, gsi_tgt;
4471 gsi_tgt = gsi_start_bb (new_bb);
4472 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4474 def_operand_p def_p;
4475 ssa_op_iter op_iter;
4477 gimple stmt = gsi_stmt (gsi);
/* Labels are not copied into the duplicate block (the skip is on an
   elided line).  */
4480 if (gimple_code (stmt) == GIMPLE_LABEL)
4483 /* Create a new copy of STMT and duplicate STMT's virtual
4485 copy = gimple_copy (stmt);
4486 gsi_insert_after (&gsi_tgt, copy, GSI_NEW_STMT);
4487 mark_symbols_for_renaming (copy);
/* Preserve EH region and value-profile histograms on the copy.
   NOTE(review): the "region >= 0" style guard before
   add_stmt_to_eh_region is elided from this view -- confirm.  */
4489 region = lookup_stmt_eh_region (stmt);
4491 add_stmt_to_eh_region (copy, region);
4492 gimple_duplicate_stmt_histograms (cfun, copy, cfun, stmt);
4494 /* Create new names for all the definitions created by COPY and
4495 add replacement mappings for each new name. */
4496 FOR_EACH_SSA_DEF_OPERAND (def_p, copy, op_iter, SSA_OP_DEF)
4498 tree old_name = DEF_FROM_PTR (def_p);
4499 tree new_name = create_new_def_for (old_name, copy, def_p);
4500 register_old_and_new_names (map, old_name, new_name);
4505 /* Records in SCOP_LIVEOUT_RENAMES the names that are live out of
4506 the SCOP and that appear in the RENAME_MAP. */
4509 register_scop_liveout_renames (scop_p scop, htab_t rename_map)
4512 sese region = SCOP_REGION (scop);
/* Iterate over every SSA version number; only liveout gimple
   registers (not virtual operands) get a liveout-rename entry.  */
4514 for (i = 0; i < SESE_NUM_VER (region); i++)
4515 if (bitmap_bit_p (SESE_LIVEOUT (region), i)
4516 && is_gimple_reg (ssa_name (i)))
4518 tree old_name = ssa_name (i);
4519 tree new_name = get_new_name_from_old_name (rename_map, old_name);
4521 register_old_and_new_names (SCOP_LIVEOUT_RENAMES (scop),
4522 old_name, new_name);
4526 /* Copies BB and includes in the copied BB all the statements that can
4527 be reached following the use-def chains from the memory accesses,
4528 and returns the next edge following this new block. */
4531 copy_bb_and_scalar_dependences (basic_block bb, scop_p scop,
4532 edge next_e, htab_t map)
/* Split NEXT_E to create a fresh empty block to copy into; the edge
   out of that block is what gets returned to the caller.  */
4534 basic_block new_bb = split_edge (next_e);
4536 next_e = single_succ_edge (new_bb);
/* Order matters: copy statements, strip the old control flow
   (condition and phis), rename to the generated IVs, then pull in the
   scalar computations the block depends on.  */
4537 graphite_copy_stmts_from_block (bb, new_bb, map);
4538 remove_condition (new_bb);
4539 rename_variables (new_bb, map);
4540 remove_phi_nodes (new_bb);
4541 expand_scalar_variables (new_bb, scop, map);
4542 register_scop_liveout_renames (scop, map);
4547 /* Helper function for htab_traverse in insert_loop_close_phis. */
4550 add_loop_exit_phis (void **slot, void *s)
4552 struct rename_map_elt *entry = (struct rename_map_elt *) *slot;
4553 tree new_name = entry->new_name;
4554 basic_block bb = (basic_block) s;
/* Build a single-argument loop-closed phi "res = phi (new_name)" in
   BB and make the liveout-rename entry point at the phi result.  */
4555 gimple phi = create_phi_node (new_name, bb);
4556 tree res = create_new_def_for (gimple_phi_result (phi), phi,
4557 gimple_phi_result_ptr (phi));
4559 add_phi_arg (phi, new_name, single_pred_edge (bb));
4561 entry->new_name = res;
4566 /* Iterate over the SCOP_LIVEOUT_RENAMES (SCOP) and get tuples of the
4567 form (OLD_NAME, NEW_NAME). Insert in BB "RES = phi (NEW_NAME)",
4568 and finally register in SCOP_LIVEOUT_RENAMES (scop) the tuple
4572 insert_loop_close_phis (scop_p scop, basic_block bb)
/* SSA is updated both before (so the traversal sees clean SSA) and
   after (to account for the phis just inserted).  */
4574 update_ssa (TODO_update_ssa);
4575 htab_traverse (SCOP_LIVEOUT_RENAMES (scop), add_loop_exit_phis, bb);
4576 update_ssa (TODO_update_ssa);
4579 /* Helper structure for htab_traverse in insert_guard_phis. */
/* NOTE(review): the struct header and a basic_block field appear to
   be on lines elided from this view -- confirm the full layout.  */
4583 edge true_edge, false_edge;
4584 htab_t liveout_before_guard;
4587 /* Return the default name that is before the guard. */
4590 default_liveout_before_guard (htab_t liveout_before_guard, tree old_name)
4592 tree res = get_new_name_from_old_name (liveout_before_guard, old_name);
/* RES == OLD_NAME means no rename was recorded before the guard: fall
   back to a zero constant for gimple registers, otherwise to the
   default definition of the symbol.  */
4594 if (res == old_name)
4596 if (is_gimple_reg (res))
4597 return fold_convert (TREE_TYPE (res), integer_zero_node);
4598 return gimple_default_def (cfun, res);
4604 /* Helper function for htab_traverse in insert_guard_phis. */
4607 add_guard_exit_phis (void **slot, void *s)
4609 struct rename_map_elt *entry = (struct rename_map_elt *) *slot;
4610 struct igp *i = (struct igp *) s;
4611 basic_block bb = i->bb;
4612 edge true_edge = i->true_edge;
4613 edge false_edge = i->false_edge;
/* NAME1 flows in from the generated (true) side of the guard; NAME2
   is whatever the name was before the guard (false side).  */
4614 tree name1 = entry->new_name;
4615 tree name2 = default_liveout_before_guard (i->liveout_before_guard,
4617 gimple phi = create_phi_node (name1, bb);
4618 tree res = create_new_def_for (gimple_phi_result (phi), phi,
4619 gimple_phi_result_ptr (phi));
4621 add_phi_arg (phi, name1, true_edge);
4622 add_phi_arg (phi, name2, false_edge);
/* The liveout rename now refers to the merged phi result.  */
4624 entry->new_name = res;
4629 /* Iterate over the SCOP_LIVEOUT_RENAMES (SCOP) and get tuples of the
4630 form (OLD_NAME, NAME1). If there is a correspondent tuple of
4631 OLD_NAME in LIVEOUT_BEFORE_GUARD, i.e. (OLD_NAME, NAME2) then
4634 | RES = phi (NAME1 (on TRUE_EDGE), NAME2 (on FALSE_EDGE))"
4636 if there is no tuple for OLD_NAME in LIVEOUT_BEFORE_GUARD, insert
4638 | RES = phi (NAME1 (on TRUE_EDGE),
4639 | DEFAULT_DEFINITION of NAME1 (on FALSE_EDGE))".
4641 Finally register in SCOP_LIVEOUT_RENAMES (scop) the tuple
4645 insert_guard_phis (scop_p scop, basic_block bb, edge true_edge,
4646 edge false_edge, htab_t liveout_before_guard)
/* Pack the traversal context into a struct igp (the bb assignment is
   on an elided line) and walk every liveout rename.  */
4650 i.true_edge = true_edge;
4651 i.false_edge = false_edge;
4652 i.liveout_before_guard = liveout_before_guard;
4654 update_ssa (TODO_update_ssa);
4655 htab_traverse (SCOP_LIVEOUT_RENAMES (scop), add_guard_exit_phis, &i);
4656 update_ssa (TODO_update_ssa);
4659 /* Helper function for htab_traverse. */
4662 copy_renames (void **slot, void *s)
4664 struct rename_map_elt *entry = (struct rename_map_elt *) *slot;
4665 htab_t res = (htab_t) s;
4666 tree old_name = entry->old_name;
4667 tree new_name = entry->new_name;
4668 struct rename_map_elt tmp;
/* Deep-copy the (old_name, new_name) tuple into RES: a fresh element
   is allocated so the destination table owns its own storage.  */
4671 tmp.old_name = old_name;
4672 x = htab_find_slot (res, &tmp, INSERT);
4675 *x = new_rename_map_elt (old_name, new_name);
4680 /* Translates a CLAST statement STMT to GCC representation in the
4683 - NEXT_E is the edge where new generated code should be attached.
4684 - CONTEXT_LOOP is the loop in which the generated code will be placed
4686 - IVSTACK contains the surrounding loops around the statement to be
4691 translate_clast (scop_p scop, struct loop *context_loop,
4692 struct clast_stmt *stmt, edge next_e, loop_iv_stack ivstack)
/* Dispatch on the CLAST node kind; each arm recurses on STMT->next so
   the whole statement list is translated tail-to-head.  (The NULL
   check on STMT is on an elided line.)  */
4697 if (CLAST_STMT_IS_A (stmt, stmt_root))
4698 return translate_clast (scop, context_loop, stmt->next, next_e, ivstack);
/* stmt_user: a leaf statement mapping back to a graphite_bb; copy the
   original BB (with its scalar dependences) onto NEXT_E.  */
4700 if (CLAST_STMT_IS_A (stmt, stmt_user))
4703 CloogStatement *cs = ((struct clast_user_stmt *) stmt)->statement;
4704 graphite_bb_p gbb = (graphite_bb_p) cloog_statement_usr (cs);
4706 if (GBB_BB (gbb) == ENTRY_BLOCK_PTR)
4709 map = htab_create (10, rename_map_elt_info, eq_rename_map_elts, free);
4710 loop_iv_stack_patch_for_consts (ivstack, (struct clast_user_stmt *) stmt);
4711 build_iv_mapping (ivstack, map, gbb, scop);
4712 next_e = copy_bb_and_scalar_dependences (GBB_BB (gbb), scop,
4715 loop_iv_stack_remove_constants (ivstack);
4716 update_ssa (TODO_update_ssa);
4717 recompute_all_dominators ();
4719 return translate_clast (scop, context_loop, stmt->next, next_e, ivstack);
/* stmt_for: build a new GIMPLE loop, translate the body into it, then
   close the loop with loop-closed-SSA phis at its exit.  */
4722 if (CLAST_STMT_IS_A (stmt, stmt_for))
4725 = graphite_create_new_loop (scop, next_e, (struct clast_for *) stmt,
4726 ivstack, context_loop ? context_loop
4728 edge last_e = single_exit (loop);
4730 next_e = translate_clast (scop, loop, ((struct clast_for *) stmt)->body,
4731 single_pred_edge (loop->latch), ivstack);
4732 redirect_edge_succ_nodup (next_e, loop->latch);
4734 set_immediate_dominator (CDI_DOMINATORS, next_e->dest, next_e->src);
4735 loop_iv_stack_pop (ivstack);
4736 last_e = single_succ_edge (split_edge (last_e));
4737 insert_loop_close_phis (scop, last_e->src);
4739 recompute_all_dominators ();
4741 return translate_clast (scop, context_loop, stmt->next, last_e, ivstack);
/* stmt_guard: generate an if-region; liveout renames are snapshot
   before translating the then-branch so insert_guard_phis can merge
   the before/after values at the join point.  */
4744 if (CLAST_STMT_IS_A (stmt, stmt_guard))
4746 htab_t liveout_before_guard = htab_create (10, rename_map_elt_info,
4747 eq_rename_map_elts, free);
4748 edge last_e = graphite_create_new_guard (scop, next_e,
4749 ((struct clast_guard *) stmt),
4751 edge true_e = get_true_edge_from_guard_bb (next_e->dest);
4752 edge false_e = get_false_edge_from_guard_bb (next_e->dest);
4753 edge exit_true_e = single_succ_edge (true_e->dest);
4754 edge exit_false_e = single_succ_edge (false_e->dest);
4756 htab_traverse (SCOP_LIVEOUT_RENAMES (scop), copy_renames,
4757 liveout_before_guard);
4759 next_e = translate_clast (scop, context_loop,
4760 ((struct clast_guard *) stmt)->then,
4762 insert_guard_phis (scop, last_e->src, exit_true_e, exit_false_e,
4763 liveout_before_guard);
4764 htab_delete (liveout_before_guard);
4765 recompute_all_dominators ();
4768 return translate_clast (scop, context_loop, stmt->next, last_e, ivstack);
/* stmt_block: plain sequencing, translate the body then continue.  */
4771 if (CLAST_STMT_IS_A (stmt, stmt_block))
4773 next_e = translate_clast (scop, context_loop,
4774 ((struct clast_block *) stmt)->body,
4776 recompute_all_dominators ();
4778 return translate_clast (scop, context_loop, stmt->next, next_e, ivstack);
4784 /* Free the SCATTERING domain list. */
4787 free_scattering (CloogDomainList *scattering)
/* Walk the list freeing each domain; the list-node free and the loop
   header are on lines elided from this view.  */
4791 CloogDomain *dom = cloog_domain (scattering);
4792 CloogDomainList *next = cloog_next_domain (scattering);
4794 cloog_domain_free (dom);
4800 /* Build cloog program for SCoP. */
4803 build_cloog_prog (scop_p scop)
4806 int max_nb_loops = scop_max_loop_depth (scop);
4808 CloogLoop *loop_list = NULL;
4809 CloogBlockList *block_list = NULL;
4810 CloogDomainList *scattering = NULL;
4811 CloogProgram *prog = SCOP_PROG (scop);
/* 2*d+1 scattering dimensions: interleave static schedule positions
   with the d loop iterators.  */
4812 int nbs = 2 * max_nb_loops + 1;
4813 int *scaldims = (int *) xmalloc (nbs * (sizeof (int)));
4815 cloog_program_set_nb_scattdims (prog, nbs);
4816 initialize_cloog_names (scop);
4818 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gbb); i++)
4820 /* Build new block. */
4821 CloogMatrix *domain = GBB_DOMAIN (gbb);
4822 CloogStatement *stmt = cloog_statement_alloc (GBB_BB (gbb)->index);
4823 CloogBlock *block = cloog_block_alloc (stmt, 0, NULL,
4824 nb_loops_around_gb (gbb));
/* Link the CLooG statement back to the graphite_bb so translate_clast
   can recover it via cloog_statement_usr.  */
4825 cloog_statement_set_usr (stmt, gbb);
4827 /* Add empty domain to all bbs, which do not yet have a domain, as they
4828 are not part of any loop. */
4831 domain = cloog_matrix_alloc (0, scop_nb_params (scop) + 2);
4832 GBB_DOMAIN (gbb) = domain;
4835 /* Build loop list. */
4837 CloogLoop *new_loop_list = cloog_loop_malloc ();
4838 cloog_loop_set_next (new_loop_list, loop_list);
4839 cloog_loop_set_domain (new_loop_list,
4840 cloog_domain_matrix2domain (domain));
4841 cloog_loop_set_block (new_loop_list, block);
4842 loop_list = new_loop_list;
4845 /* Build block list. */
4847 CloogBlockList *new_block_list = cloog_block_list_malloc ();
4849 cloog_block_list_set_next (new_block_list, block_list);
4850 cloog_block_list_set_block (new_block_list, block);
4851 block_list = new_block_list;
4854 /* Build scattering list. */
4856 /* XXX: Replace with cloog_domain_list_alloc(), when available. */
4857 CloogDomainList *new_scattering
4858 = (CloogDomainList *) xmalloc (sizeof (CloogDomainList));
4859 CloogMatrix *scat_mat = schedule_to_scattering (gbb, nbs);
4861 cloog_set_next_domain (new_scattering, scattering);
4862 cloog_set_domain (new_scattering,
4863 cloog_domain_matrix2domain (scat_mat));
4864 scattering = new_scattering;
4865 cloog_matrix_free (scat_mat);
4869 cloog_program_set_loop (prog, loop_list);
4870 cloog_program_set_blocklist (prog, block_list);
/* The scaldims initialization inside this loop is on an elided line;
   presumably all dimensions start non-scalar.  */
4872 for (i = 0; i < nbs; i++)
4875 cloog_program_set_scaldims (prog, scaldims);
4877 /* Extract scalar dimensions to simplify the code generation problem. */
4878 cloog_program_extract_scalars (prog, scattering);
4880 /* Apply scattering. */
4881 cloog_program_scatter (prog, scattering);
4882 free_scattering (scattering);
4884 /* Iterators corresponding to scalar dimensions have to be extracted. */
4885 cloog_names_scalarize (cloog_program_names (prog), nbs,
4886 cloog_program_scaldims (prog));
4888 /* Free blocklist. */
4890 CloogBlockList *next = cloog_program_blocklist (prog);
/* Unlink each node before freeing so cloog_block_list_free does not
   recurse into the rest of the list or free the shared block.  */
4894 CloogBlockList *toDelete = next;
4895 next = cloog_block_list_next (next);
4896 cloog_block_list_set_next (toDelete, NULL);
4897 cloog_block_list_set_block (toDelete, NULL);
4898 cloog_block_list_free (toDelete);
4900 cloog_program_set_blocklist (prog, NULL);
4904 /* Return the options that will be used in GLOOG. */
4906 static CloogOptions *
4907 set_cloog_options (void)
4909 CloogOptions *options = cloog_options_malloc ();
4911 /* Change cloog output language to C. If we do use FORTRAN instead, cloog
4912 will stop e.g. with "ERROR: unbounded loops not allowed in FORTRAN.", if
4913 we pass an incomplete program to cloog. */
4914 options->language = LANGUAGE_C;
4916 /* Enable complex equality spreading: removes dummy statements
4917 (assignments) in the generated code which repeats the
4918 substitution equations for statements. This is useless for
4922 /* Enable C pretty-printing mode: normalizes the substitution
4923 equations for statements. */
4926 /* Allow cloog to build strides with a stride width different to one.
4927 This example has stride = 4:
4929 for (i = 0; i < 20; i += 4)
4931 options->strides = 1;
4933 /* Disable optimizations and make cloog generate source code closer to the
4934 input. This is useful for debugging, but later we want the optimized
4937 XXX: We can not disable optimizations, as loop blocking is not working
/* l = INT_MAX: let CLooG control code generation at every loop level
   (see the CLooG manual on the -l option).  */
4942 options->l = INT_MAX;
4948 /* Prints STMT to STDERR. */
4951 debug_clast_stmt (struct clast_stmt *stmt)
/* Debug helper: dump the CLAST subtree with CLooG's pretty printer.
   NOTE(review): OPTIONS is not freed on a line visible here.  */
4953 CloogOptions *options = set_cloog_options ();
4955 pprint (stderr, stmt, 0, options);
4958 /* Find the right transform for the SCOP, and return a Cloog AST
4959 representing the new form of the program. */
4961 static struct clast_stmt *
4962 find_transform (scop_p scop)
4964 struct clast_stmt *stmt;
4965 CloogOptions *options = set_cloog_options ();
4967 /* Connect new cloog prog generation to graphite. */
4968 build_cloog_prog (scop);
4970 if (dump_file && (dump_flags & TDF_DETAILS))
4972 fprintf (dump_file, "Cloog Input [\n");
4973 cloog_program_print (dump_file, SCOP_PROG(scop));
4974 fprintf (dump_file, "]\n");
/* cloog_program_generate applies the scattering; the resulting
   program is turned into a CLAST for GIMPLE code generation.  */
4977 SCOP_PROG (scop) = cloog_program_generate (SCOP_PROG (scop), options);
4978 stmt = cloog_clast_create (SCOP_PROG (scop), options);
4980 if (dump_file && (dump_flags & TDF_DETAILS))
4982 fprintf (dump_file, "Cloog Output[\n");
4983 pprint (dump_file, stmt, 0, options);
4984 cloog_program_dump_cloog (dump_file, SCOP_PROG (scop));
4985 fprintf (dump_file, "]\n");
4988 cloog_options_free (options);
4992 /* Remove from the CFG the REGION. */
4995 remove_sese_region (sese region)
4997 VEC (basic_block, heap) *bbs = NULL;
4998 basic_block entry_bb = SESE_ENTRY (region)->dest;
4999 basic_block exit_bb = SESE_EXIT (region)->dest;
/* Collect every block strictly inside the region (entry included,
   exit excluded) before deleting, since deletion mutates the CFG.  */
5003 VEC_safe_push (basic_block, heap, bbs, entry_bb);
5004 gather_blocks_in_sese_region (entry_bb, exit_bb, &bbs);
5006 for (i = 0; VEC_iterate (basic_block, bbs, i, bb); i++)
5007 delete_basic_block (bb);
5009 VEC_free (basic_block, heap, bbs);
/* An if-region: a condition block whose true/false arms are each a
   single-entry single-exit region.  (Field declarations are on lines
   elided from this view.)  */
5012 typedef struct ifsese {
/* Accessors for the enclosing region's entry/exit edges and the
   condition basic block.  */
5019 if_region_entry (ifsese if_region)
5021 return SESE_ENTRY (if_region->region);
5025 if_region_exit (ifsese if_region)
5027 return SESE_EXIT (if_region->region);
5030 static inline basic_block
5031 if_region_get_condition_block (ifsese if_region)
5033 return if_region_entry (if_region)->dest;
/* Moves REGION into the false branch of IF_REGION, replacing the
   dummy block currently sitting there, and rewires the loop-exit hash
   table entry accordingly.  */
5037 if_region_set_false_region (ifsese if_region, sese region)
5039 basic_block condition = if_region_get_condition_block (if_region);
5040 edge false_edge = get_false_edge_from_guard_bb (condition);
5041 basic_block dummy = false_edge->dest;
5042 edge entry_region = SESE_ENTRY (region);
5043 edge exit_region = SESE_EXIT (region);
5044 basic_block before_region = entry_region->src;
5045 basic_block last_in_region = exit_region->src;
/* Look up the recorded loop exit for the region's old exit edge so it
   can be transferred to the new exit (FALSE_EDGE) below.  */
5046 void **slot = htab_find_slot_with_hash (current_loops->exits, exit_region,
5047 htab_hash_pointer (exit_region),
/* Swap edge flags and redirect so that the region hangs off the
   condition's false side and FALSE_EDGE becomes the region's exit.  */
5050 entry_region->flags = false_edge->flags;
5051 false_edge->flags = exit_region->flags;
5053 redirect_edge_pred (entry_region, condition);
5054 redirect_edge_pred (exit_region, before_region);
5055 redirect_edge_pred (false_edge, last_in_region);
5056 redirect_edge_succ (false_edge, single_succ (dummy));
5057 delete_basic_block (dummy);
5059 exit_region->flags = EDGE_FALLTHRU;
5060 recompute_all_dominators ();
5062 SESE_EXIT (region) = false_edge;
5063 if_region->false_region = region;
/* Re-register the loop exit under the new edge: copy the old record,
   clear the stale slot, and insert it keyed on FALSE_EDGE.  */
5067 struct loop_exit *loop_exit = GGC_CNEW (struct loop_exit);
5069 memcpy (loop_exit, *((struct loop_exit **) slot), sizeof (struct loop_exit));
5070 htab_clear_slot (current_loops->exits, slot);
5072 slot = htab_find_slot_with_hash (current_loops->exits, false_edge,
5073 htab_hash_pointer (false_edge),
5075 loop_exit->e = false_edge;
5077 false_edge->src->loop_father->exits->next = loop_exit;
/* Creates an empty if-region guarded by CONDITION on edge ENTRY and
   returns an ifsese describing it, with true/false sub-regions filled
   in from the freshly created CFG.  */
5082 create_if_region_on_edge (edge entry, tree condition)
/* GC-allocated because these descriptors can outlive the current
   pass's heap lifetime.  */
5086 sese sese_region = GGC_NEW (struct sese);
5087 sese true_region = GGC_NEW (struct sese);
5088 sese false_region = GGC_NEW (struct sese);
5089 ifsese if_region = GGC_NEW (struct ifsese);
5090 edge exit = create_empty_if_region_on_edge (entry, condition);
5092 if_region->region = sese_region;
5093 if_region->region->entry = entry;
5094 if_region->region->exit = exit;
/* Each successor of the condition block is the entry of one arm; its
   single successor edge is that arm's exit.  */
5096 FOR_EACH_EDGE (e, ei, entry->dest->succs)
5098 if (e->flags & EDGE_TRUE_VALUE)
5100 true_region->entry = e;
5101 true_region->exit = single_succ_edge (e->dest);
5102 if_region->true_region = true_region;
5104 else if (e->flags & EDGE_FALSE_VALUE)
5106 false_region->entry = e;
5107 false_region->exit = single_succ_edge (e->dest);
5108 if_region->false_region = false_region;
5115 /* Moves REGION in a condition expression:
5123 move_sese_in_condition (sese region)
/* Split the entry edge to get a clean attachment point, build an
   always-true "if (1)" there, then move REGION under its false arm.
   The generated code will later replace the true arm.  */
5125 basic_block pred_block = split_edge (SESE_ENTRY (region));
5126 ifsese if_region = NULL;
5128 SESE_ENTRY (region) = single_succ_edge (pred_block);
5129 if_region = create_if_region_on_edge (single_pred_edge (pred_block), integer_one_node);
5130 if_region_set_false_region (if_region, region);
5135 /* Add exit phis for USE on EXIT. */
5138 scop_add_exit_phis_edge (basic_block exit, tree use, edge false_e, edge true_e)
5140 gimple phi = create_phi_node (use, exit);
/* Both incoming arguments start as USE; the true-edge argument is
   later rewritten by scop_adjust_phis_for_liveouts.  */
5142 create_new_def_for (gimple_phi_result (phi), phi,
5143 gimple_phi_result_ptr (phi));
5144 add_phi_arg (phi, use, false_e);
5145 add_phi_arg (phi, use, true_e);
5148 /* Add phi nodes for VAR that is used in LIVEIN. Phi nodes are
5149 inserted in block BB. */
5152 scop_add_exit_phis_var (basic_block bb, tree var, bitmap livein,
5153 edge false_e, edge true_e)
5156 basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
/* For gimple registers the defining block is not livein; for virtual
   operands it is.  Then propagate liveness backwards from the uses.  */
5158 if (is_gimple_reg (var))
5159 bitmap_clear_bit (livein, def_bb->index);
5161 bitmap_set_bit (livein, def_bb->index);
5163 def = BITMAP_ALLOC (NULL);
5164 bitmap_set_bit (def, def_bb->index);
5165 compute_global_livein (livein, def);
/* NOTE(review): the BITMAP_FREE of DEF appears to be on an elided
   line -- confirm.  */
5168 scop_add_exit_phis_edge (bb, var, false_e, true_e);
5171 /* Insert in the block BB phi nodes for variables defined in REGION
5172 and used outside the REGION. The code generation moves REGION in
5173 the else clause of an "if (1)" and generates code in the then
5174 clause that is at this point empty:
5183 scop_insert_phis_for_liveouts (sese region, basic_block bb,
5184 edge false_e, edge true_e)
5189 update_ssa (TODO_update_ssa);
/* One exit phi per liveout SSA version; livein info drives the
   per-variable liveness recomputation.  */
5191 EXECUTE_IF_SET_IN_BITMAP (SESE_LIVEOUT (region), 0, i, bi)
5192 scop_add_exit_phis_var (bb, ssa_name (i), SESE_LIVEIN_VER (region, i),
5195 update_ssa (TODO_update_ssa);
5198 /* Get the definition of NAME before the SCOP. Keep track of the
5199 basic blocks that have been VISITED in a bitmap. */
5202 get_vdef_before_scop (scop_p scop, tree name, sbitmap visited)
5205 gimple def_stmt = SSA_NAME_DEF_STMT (name);
5206 basic_block def_bb = gimple_bb (def_stmt);
/* A definition outside the SCOP (or with no block) is the answer; the
   return on that path is on an elided line.  */
5209 || !bb_in_sese_p (def_bb, SCOP_REGION (scop)))
/* VISITED guards against cycles in the phi web.  */
5212 if (TEST_BIT (visited, def_bb->index))
5215 SET_BIT (visited, def_bb->index);
5217 switch (gimple_code (def_stmt))
/* For a phi inside the SCOP, recurse through its arguments until a
   definition dominating the SCOP is found.  */
5220 for (i = 0; i < gimple_phi_num_args (def_stmt); i++)
5222 tree arg = gimple_phi_arg_def (def_stmt, i);
5223 tree res = get_vdef_before_scop (scop, arg, visited);
5234 /* Adjust a virtual phi node PHI that is placed at the end of the
5235 generated code for SCOP:
5238 | generated code from REGION;
5242 The FALSE_E edge comes from the original code, TRUE_E edge comes
5243 from the code generated for the SCOP. */
5246 scop_adjust_vphi (scop_p scop, gimple phi, edge true_e)
5250 gcc_assert (gimple_phi_num_args (phi) == 2);
5252 for (i = 0; i < gimple_phi_num_args (phi); i++)
5253 if (gimple_phi_arg_edge (phi, i) == true_e)
5255 tree true_arg, false_arg, before_scop_arg;
/* Nothing to do when the true-side argument is already a default
   definition, or when the false side is (the early exits sit on
   elided lines).  */
5258 true_arg = gimple_phi_arg_def (phi, i);
5259 if (!SSA_NAME_IS_DEFAULT_DEF (true_arg))
5262 false_arg = gimple_phi_arg_def (phi, i == 0 ? 1 : 0);
5263 if (SSA_NAME_IS_DEFAULT_DEF (false_arg))
/* Replace the true-side argument with the virtual definition that
   reached the SCOP entry, found by walking back through phis.  */
5266 visited = sbitmap_alloc (last_basic_block);
5267 sbitmap_zero (visited);
5268 before_scop_arg = get_vdef_before_scop (scop, false_arg, visited);
5269 gcc_assert (before_scop_arg != NULL_TREE);
5270 SET_PHI_ARG_DEF (phi, i, before_scop_arg);
5271 sbitmap_free (visited);
5275 /* Adjusts the phi nodes in the block BB for variables defined in
5276 SCOP_REGION and used outside the SCOP_REGION. The code generation
5277 moves SCOP_REGION in the else clause of an "if (1)" and generates
5278 code in the then clause:
5281 | generated code from REGION;
5285 To adjust the phi nodes after the condition, SCOP_LIVEOUT_RENAMES
5286 hash table is used: this stores for a name that is part of the
5287 LIVEOUT of SCOP_REGION its new name in the generated code. */
5290 scop_adjust_phis_for_liveouts (scop_p scop, basic_block bb, edge false_e,
5293 gimple_stmt_iterator si;
5295 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5298 unsigned false_i = 0;
5299 gimple phi = gsi_stmt (si);
/* Virtual phis take a different repair path.  */
5301 if (!is_gimple_reg (PHI_RESULT (phi)))
5303 scop_adjust_vphi (scop, phi, true_e);
/* First find which argument index comes from the original (false)
   side; that argument's name keys the liveout-rename lookup.  */
5307 for (i = 0; i < gimple_phi_num_args (phi); i++)
5308 if (gimple_phi_arg_edge (phi, i) == false_e)
/* Then rewrite the true-side argument with the generated name.  */
5314 for (i = 0; i < gimple_phi_num_args (phi); i++)
5315 if (gimple_phi_arg_edge (phi, i) == true_e)
5317 tree old_name = gimple_phi_arg_def (phi, false_i);
5318 tree new_name = get_new_name_from_old_name
5319 (SCOP_LIVEOUT_RENAMES (scop), old_name);
5321 gcc_assert (old_name != new_name);
5322 SET_PHI_ARG_DEF (phi, i, new_name);
5327 /* Returns the first cloog name used in EXPR. */
5330 find_cloog_iv_in_expr (struct clast_expr *expr)
5332 struct clast_term *term = (struct clast_term *) expr;
/* A term with a variable yields its name directly (the return is on
   an elided line); a bare constant term yields nothing.  */
5334 if (expr->type == expr_term
5338 if (expr->type == expr_term)
/* Reductions: recurse into each element until a name is found.  */
5341 if (expr->type == expr_red)
5344 struct clast_reduction *red = (struct clast_reduction *) expr;
5346 for (i = 0; i < red->n; i++)
5348 const char *res = find_cloog_iv_in_expr ((red)->elts[i]);
5358 /* Build for a clast_user_stmt USER_STMT a map between the CLAST
5359 induction variables and the corresponding GCC old induction
5360 variables. This information is stored on each GRAPHITE_BB. */
5363 compute_cloog_iv_types_1 (graphite_bb_p gbb,
5364 struct clast_user_stmt *user_stmt)
5366 struct clast_stmt *t;
/* INDEX tracks the loop depth matching each substitution.  */
5369 for (t = user_stmt->substitutions; t; t = t->next, index++)
5372 struct ivtype_map_elt tmp;
5373 struct clast_expr *expr = (struct clast_expr *)
5374 ((struct clast_assignment *)t)->RHS;
5376 /* Create an entry (clast_var, type). */
5377 tmp.cloog_iv = find_cloog_iv_in_expr (expr);
/* NOTE(review): the guard for a NULL cloog_iv appears to be on an
   elided line -- confirm.  */
5381 slot = htab_find_slot (GBB_CLOOG_IV_TYPES (gbb), &tmp, INSERT);
/* Use the old IV's type if one exists for this loop; otherwise fall
   back to integer_type_node.  */
5385 loop_p loop = gbb_loop_at_index (gbb, index);
5386 tree oldiv = oldiv_for_loop (GBB_SCOP (gbb), loop);
5387 tree type = oldiv ? TREE_TYPE (oldiv) : integer_type_node;
5388 *slot = new_ivtype_map_elt (tmp.cloog_iv, type);
5393 /* Walk the CLAST tree starting from STMT and build for each
5394 clast_user_stmt a map between the CLAST induction variables and the
5395 corresponding GCC old induction variables. This information is
5396 stored on each GRAPHITE_BB. */
5399 compute_cloog_iv_types (struct clast_stmt *stmt)
5404 if (CLAST_STMT_IS_A (stmt, stmt_root))
/* For a user statement, allocate the per-BB IV-type table and fill
   it; the table is keyed on the CLooG variable names.  */
5407 if (CLAST_STMT_IS_A (stmt, stmt_user))
5409 CloogStatement *cs = ((struct clast_user_stmt *) stmt)->statement;
5410 graphite_bb_p gbb = (graphite_bb_p) cloog_statement_usr (cs);
5411 GBB_CLOOG_IV_TYPES (gbb) = htab_create (10, ivtype_map_elt_info,
5412 eq_ivtype_map_elts, free);
5413 compute_cloog_iv_types_1 (gbb, (struct clast_user_stmt *) stmt);
/* For structured statements, recurse into the nested body.  */
5417 if (CLAST_STMT_IS_A (stmt, stmt_for))
5419 struct clast_stmt *s = ((struct clast_for *) stmt)->body;
5420 compute_cloog_iv_types (s);
5424 if (CLAST_STMT_IS_A (stmt, stmt_guard))
5426 struct clast_stmt *s = ((struct clast_guard *) stmt)->then;
5427 compute_cloog_iv_types (s);
5431 if (CLAST_STMT_IS_A (stmt, stmt_block))
5433 struct clast_stmt *s = ((struct clast_block *) stmt)->body;
5434 compute_cloog_iv_types (s);
/* Finally continue with the statement's siblings.  */
5441 compute_cloog_iv_types (stmt->next);
5444 /* GIMPLE Loop Generator: generates loops from STMT in GIMPLE form for
5445 the given SCOP. Return true if code generation succeeded. */
5448 gloog (scop_p scop, struct clast_stmt *stmt)
5450 edge new_scop_exit_edge = NULL;
5451 VEC (iv_stack_entry_p, heap) *ivstack = VEC_alloc (iv_stack_entry_p, heap,
5453 loop_p context_loop;
5454 ifsese if_region = NULL;
5456 recompute_all_dominators ();
/* Wrap the SCOP in "if (1) { } else { original }" so the generated
   code can be attached to the empty then-branch, then add exit phis
   for everything live out of the region.  */
5458 if_region = move_sese_in_condition (SCOP_REGION (scop));
5459 sese_build_livein_liveouts (SCOP_REGION (scop));
5460 scop_insert_phis_for_liveouts (SCOP_REGION (scop),
5461 if_region->region->exit->src,
5462 if_region->false_region->exit,
5463 if_region->true_region->exit);
5464 recompute_all_dominators ();
5466 context_loop = SESE_ENTRY (SCOP_REGION (scop))->src->loop_father;
5467 compute_cloog_iv_types (stmt);
/* Translate the CLAST onto the true branch, then fix up the exit
   phis so their true-side arguments use the generated names.  */
5469 new_scop_exit_edge = translate_clast (scop, context_loop, stmt,
5470 if_region->true_region->entry,
5472 free_loop_iv_stack (&ivstack);
5473 cloog_clast_free (stmt);
5476 scop_adjust_phis_for_liveouts (scop,
5477 if_region->region->exit->src,
5478 if_region->false_region->exit,
5479 if_region->true_region->exit);
5481 recompute_all_dominators ();
5486 /* Returns the number of data references in SCOP. */
5489 nb_data_refs_in_scop (scop_p scop)
/* Sum of the data-reference vector lengths over all graphite BBs.  */
5495 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gbb); i++)
5496 res += VEC_length (data_reference_p, GBB_DATA_REFS (gbb));
5501 /* Move the loop at index LOOP and insert it before index NEW_LOOP_POS.
5502 This transformation is only valid if the loop nest between i and k is
5503 perfectly nested. Therefore we do not need to change the static schedule.
5507 for (i = 0; i < 50; i++)
5509 for (k = 5; k < 100; k++)
5512 To move k before i use:
5514 graphite_trans_bb_move_loop (A, 2, 0)
5516 for (k = 5; k < 100; k++)
5517 for (i = 0; i < 50; i++)
5523 graphite_trans_bb_move_loop (A, 0, 2)
5525 This function does not check the validity of interchanging loops.
5526 This should be checked before calling this function. */
5529 graphite_trans_bb_move_loop (graphite_bb_p gb, int loop,
5532 CloogMatrix *domain = GBB_DOMAIN (gb);
5536 gcc_assert (loop < gbb_nb_loops (gb)
5537 && new_loop_pos < gbb_nb_loops (gb));
5539 /* Update LOOPS vector. */
5540 tmp_loop_p = VEC_index (loop_p, GBB_LOOPS (gb), loop);
5541 VEC_ordered_remove (loop_p, GBB_LOOPS (gb), loop);
5542 VEC_safe_insert (loop_p, heap, GBB_LOOPS (gb), new_loop_pos, tmp_loop_p);
5544 /* Move the domain columns. */
/* Domain column 0 holds the eq/ineq marker, so loop L lives in
   column L+1; moving a loop rotates the columns between the old and
   new positions, row by row.  */
5545 if (loop < new_loop_pos)
5546 for (row = 0; row < domain->NbRows; row++)
/* Shift the intervening columns left, then drop the saved column
   into the target slot.  */
5550 value_assign (tmp, domain->p[row][loop + 1]);
5552 for (j = loop ; j < new_loop_pos - 1; j++)
5553 value_assign (domain->p[row][j + 1], domain->p[row][j + 2]);
5555 value_assign (domain->p[row][new_loop_pos], tmp);
/* Moving towards the front: shift the intervening columns right.  */
5559 for (row = 0; row < domain->NbRows; row++)
5563 value_assign (tmp, domain->p[row][loop + 1]);
5565 for (j = loop ; j > new_loop_pos; j--)
5566 value_assign (domain->p[row][j + 1], domain->p[row][j]);
5568 value_assign (domain->p[row][new_loop_pos + 1], tmp);
5573 /* Get the index of the column representing constants in the DOMAIN
/* The constant term is always the last column of a CLooG matrix.  */
5577 const_column_index (CloogMatrix *domain)
5579 return domain->NbColumns - 1;
5583 /* Get the first index that is positive or negative, determined
5584 following the value of POSITIVE, in matrix DOMAIN in COLUMN. */
5587 get_first_matching_sign_row_index (CloogMatrix *domain, int column,
5592 for (row = 0; row < domain->NbRows; row++)
5594 int val = value_get_si (domain->p[row][column]);
/* A positive coefficient on the iterator marks a lower bound, a
   negative one an upper bound (the returns are on elided lines).  */
5596 if (val > 0 && positive)
5599 else if (val < 0 && !positive)
5606 /* Get the lower bound of COLUMN in matrix DOMAIN. */
5609 get_lower_bound_row (CloogMatrix *domain, int column)
5611 return get_first_matching_sign_row_index (domain, column, true);
5614 /* Get the upper bound of COLUMN in matrix DOMAIN. */
5617 get_upper_bound_row (CloogMatrix *domain, int column)
5619 return get_first_matching_sign_row_index (domain, column, false);
5622 /* Copies the OLD_ROW constraint from OLD_DOMAIN to the NEW_DOMAIN at
5626 copy_constraint (CloogMatrix *old_domain, CloogMatrix *new_domain,
5627 int old_row, int new_row)
/* The matrices must agree on the column count; rows must be in
   bounds.  Values are copied cell by cell with value_assign.  */
5631 gcc_assert (old_domain->NbColumns == new_domain->NbColumns
5632 && old_row < old_domain->NbRows
5633 && new_row < new_domain->NbRows);
5635 for (i = 0; i < old_domain->NbColumns; i++)
5636 value_assign (new_domain->p[new_row][i], old_domain->p[old_row][i]);
5639 /* Swap coefficients of variables X and Y on row R. */
5642 swap_constraint_variables (CloogMatrix *domain,
5643 int r, int x, int y)
5645 value_swap (domain->p[r][x], domain->p[r][y]);
5648 /* Scale by X the coefficient C of constraint at row R in DOMAIN. */
5651 scale_constraint_variable (CloogMatrix *domain,
5652 int r, int c, int x)
/* GMP values cannot be multiplied by a plain int directly: lift X
   into a temporary Value first, then release it.  */
5654 Value strip_size_value;
5655 value_init (strip_size_value);
5656 value_set_si (strip_size_value, x);
5657 value_multiply (domain->p[r][c], domain->p[r][c], strip_size_value);
5658 value_clear (strip_size_value);
5661 /* Strip mines the loop of BB at the position LOOP_DEPTH with STRIDE.
5662 Always valid, but not always a performance improvement. */
5665 graphite_trans_bb_strip_mine (graphite_bb_p gb, int loop_depth, int stride)
/* The new domain gets one extra column (the new strip IV) and three
   extra rows (outer lower bound, outer upper bound, tile upper
   bound).  */
5669 CloogMatrix *domain = GBB_DOMAIN (gb);
5670 CloogMatrix *new_domain = cloog_matrix_alloc (domain->NbRows + 3,
5671 domain->NbColumns + 1);
5673 int col_loop_old = loop_depth + 2;
5674 int col_loop_strip = col_loop_old - 1;
5676 gcc_assert (loop_depth <= gbb_nb_loops (gb) - 1);
/* A NULL placeholder in GBB_LOOPS marks the strip loop, which has no
   corresponding GIMPLE loop structure.  */
5678 VEC_safe_insert (loop_p, heap, GBB_LOOPS (gb), loop_depth, NULL);
5680 GBB_DOMAIN (gb) = new_domain;
/* Copy the old domain, leaving the strip IV column as a gap after
   LOOP_DEPTH.  */
5682 for (row = 0; row < domain->NbRows; row++)
5683 for (col = 0; col < domain->NbColumns; col++)
5684 if (col <= loop_depth)
5685 value_assign (new_domain->p[row][col], domain->p[row][col]);
5687 value_assign (new_domain->p[row][col + 1], domain->p[row][col]);
5689 row = domain->NbRows;
5691 /* Lower bound of the outer stripped loop. */
5692 copy_constraint (new_domain, new_domain,
5693 get_lower_bound_row (new_domain, col_loop_old), row);
5694 swap_constraint_variables (new_domain, row, col_loop_old, col_loop_strip);
5697 /* Upper bound of the outer stripped loop. */
5698 copy_constraint (new_domain, new_domain,
5699 get_upper_bound_row (new_domain, col_loop_old), row);
5700 swap_constraint_variables (new_domain, row, col_loop_old, col_loop_strip);
/* Scaling by STRIDE turns "iv <= N" into "stride * strip_iv <= N".  */
5701 scale_constraint_variable (new_domain, row, col_loop_strip, stride);
5704 /* Lower bound of a tile starts at "stride * outer_iv". */
5705 row = get_lower_bound_row (new_domain, col_loop_old);
5706 value_set_si (new_domain->p[row][0], 1);
5707 value_set_si (new_domain->p[row][const_column_index (new_domain)], 0);
5708 value_set_si (new_domain->p[row][col_loop_old], 1);
5709 value_set_si (new_domain->p[row][col_loop_strip], -1 * stride);
5711 /* Upper bound of a tile stops at "stride * outer_iv + stride - 1",
5712 or at the old upper bound that is not modified. */
5713 row = new_domain->NbRows - 1;
5714 value_set_si (new_domain->p[row][0], 1);
5715 value_set_si (new_domain->p[row][col_loop_old], -1);
5716 value_set_si (new_domain->p[row][col_loop_strip], stride);
5717 value_set_si (new_domain->p[row][const_column_index (new_domain)],
5720 cloog_matrix_free (domain);
5722 /* Update static schedule. */
/* Insert two schedule positions for the new loop level; the filling
   of the inserted positions is on lines elided from this view.  */
5725 int nb_loops = gbb_nb_loops (gb);
5726 lambda_vector new_schedule = lambda_vector_new (nb_loops + 1);
5728 for (i = 0; i <= loop_depth; i++)
5729 new_schedule[i] = GBB_STATIC_SCHEDULE (gb)[i];
5731 for (i = loop_depth + 1; i <= nb_loops - 2; i++)
5732 new_schedule[i + 2] = GBB_STATIC_SCHEDULE (gb)[i];
5734 GBB_STATIC_SCHEDULE (gb) = new_schedule;
5738 /* Returns true when the strip mining of LOOP_INDEX by STRIDE is
5739 profitable or undecidable. GB is the statement around which the
5740 loops will be strip mined. */
5743 strip_mine_profitable_p (graphite_bb_p gb, int stride,
5752 loop = VEC_index (loop_p, GBB_LOOPS (gb), loop_index);
5753 exit = single_exit (loop);
5755 niter = find_loop_niter (loop, &exit);
5756 if (niter == chrec_dont_know
5757 || TREE_CODE (niter) != INTEGER_CST)
5760 niter_val = int_cst_value (niter);
5762 if (niter_val < stride)
5765 if (dump_file && (dump_flags & TDF_DETAILS))
5767 fprintf (dump_file, "\nStrip Mining is not profitable for loop %d:",
5769 fprintf (dump_file, "number of iterations is too low.\n");
5776 /* Determines when the interchange of LOOP_A and LOOP_B belonging to
5777 SCOP is legal. DEPTH is the number of loops around. */
5780 is_interchange_valid (scop_p scop, int loop_a, int loop_b, int depth)
5783 VEC (ddr_p, heap) *dependence_relations;
5784 VEC (data_reference_p, heap) *datarefs;
5786 struct loop *nest = VEC_index (loop_p, SCOP_LOOP_NEST (scop), loop_a);
5787 lambda_trans_matrix trans;
5789 gcc_assert (loop_a < loop_b);
5791 dependence_relations = VEC_alloc (ddr_p, heap, 10 * 10);
5792 datarefs = VEC_alloc (data_reference_p, heap, 10);
5794 if (!compute_data_dependences_for_loop (nest, true, &datarefs,
5795 &dependence_relations))
5798 if (dump_file && (dump_flags & TDF_DETAILS))
5799 dump_ddrs (dump_file, dependence_relations);
5801 trans = lambda_trans_matrix_new (depth, depth);
5802 lambda_matrix_id (LTM_MATRIX (trans), depth);
5804 lambda_matrix_row_exchange (LTM_MATRIX (trans), 0, loop_b - loop_a);
5806 if (!lambda_transform_legal_p (trans, depth, dependence_relations))
5808 lambda_matrix_row_exchange (LTM_MATRIX (trans), 0, loop_b - loop_a);
5814 free_dependence_relations (dependence_relations);
5815 free_data_refs (datarefs);
5819 /* Loop block the LOOPS innermost loops of GB with stride size STRIDE.
5823 for (i = 0; i <= 50; i++=4)
5824 for (k = 0; k <= 100; k++=4)
5825 for (l = 0; l <= 200; l++=4)
5828 To strip mine the two inner most loops with stride = 4 call:
5830 graphite_trans_bb_block (A, 4, 2)
5832 for (i = 0; i <= 50; i++)
5833 for (kk = 0; kk <= 100; kk+=4)
5834 for (ll = 0; ll <= 200; ll+=4)
5835 for (k = kk; k <= min (100, kk + 3); k++)
5836 for (l = ll; l <= min (200, ll + 3); l++)
5841 graphite_trans_bb_block (graphite_bb_p gb, int stride, int loops)
5844 int nb_loops = gbb_nb_loops (gb);
5845 int start = nb_loops - loops;
5846 scop_p scop = GBB_SCOP (gb);
5848 gcc_assert (scop_contains_loop (scop, gbb_loop (gb)));
5850 for (i = start ; i < nb_loops; i++)
5851 for (j = i + 1; j < nb_loops; j++)
5852 if (!is_interchange_valid (scop, i, j, nb_loops))
5854 if (dump_file && (dump_flags & TDF_DETAILS))
5856 "\nInterchange not valid for loops %d and %d:\n", i, j);
5859 else if (dump_file && (dump_flags & TDF_DETAILS))
5861 "\nInterchange valid for loops %d and %d:\n", i, j);
5863 /* Check if strip mining is profitable for every loop. */
5864 for (i = 0; i < nb_loops - start; i++)
5865 if (!strip_mine_profitable_p (gb, stride, start + i))
5868 /* Strip mine loops. */
5869 for (i = 0; i < nb_loops - start; i++)
5870 graphite_trans_bb_strip_mine (gb, start + 2 * i, stride);
5872 /* Interchange loops. */
5873 for (i = 1; i < nb_loops - start; i++)
5874 graphite_trans_bb_move_loop (gb, start + 2 * i, start + i);
5876 if (dump_file && (dump_flags & TDF_DETAILS))
5877 fprintf (dump_file, "\nLoops containing BB %d will be loop blocked.\n",
5878 GBB_BB (gb)->index);
5883 /* Loop block LOOPS innermost loops of a loop nest. BBS represent the
5884 basic blocks that belong to the loop nest to be blocked. */
5887 graphite_trans_loop_block (VEC (graphite_bb_p, heap) *bbs, int loops)
5891 bool transform_done = false;
5893 /* TODO: - Calculate the stride size automatically. */
5894 int stride_size = 51;
5896 for (i = 0; VEC_iterate (graphite_bb_p, bbs, i, gb); i++)
5897 transform_done |= graphite_trans_bb_block (gb, stride_size, loops);
5899 return transform_done;
5902 /* Loop block all basic blocks of SCOP. Return false when the
5903 transform is not performed. */
/* NOTE(review): several physical lines of this function are missing from
   this excerpt (declarations of gb, i, j, nb_loops, last_nb_loops, braces,
   early return/continue statements, and loop bodies setting "perfect");
   reconstruct them from the original source before compiling.  */
5906 graphite_trans_scop_block (scop_p scop)
5912 bool perfect = true;
5913 bool transform_done = false;
/* Accumulates the bbs that belong to the current candidate loop nest.  */
5915 VEC (graphite_bb_p, heap) *bbs = VEC_alloc (graphite_bb_p, heap, 3);
5916 int max_schedule = scop_max_loop_depth (scop) + 1;
5917 lambda_vector last_schedule = lambda_vector_new (max_schedule);
/* Nothing to do for an empty SCoP.  */
5919 if (VEC_length (graphite_bb_p, SCOP_BBS (scop)) == 0)
5922 /* Get the data of the first bb. */
5923 gb = VEC_index (graphite_bb_p, SCOP_BBS (scop), 0);
5924 last_nb_loops = gbb_nb_loops (gb);
5925 lambda_vector_copy (GBB_STATIC_SCHEDULE (gb), last_schedule,
5927 VEC_safe_push (graphite_bb_p, heap, bbs, gb);
/* Walk the bbs of the SCoP in schedule order, grouping those that share
   a loop nest and blocking each perfect nest that is found.  */
5929 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
5931 /* We did the first bb before. */
5935 nb_loops = gbb_nb_loops (gb);
5937 /* If the number of loops is unchanged and only the last element of the
5938 schedule changes, we stay in the loop nest. */
5939 if (nb_loops == last_nb_loops
5940 && (last_schedule [nb_loops + 1]
5941 != GBB_STATIC_SCHEDULE (gb)[nb_loops + 1]))
5943 VEC_safe_push (graphite_bb_p, heap, bbs, gb);
5947 /* Otherwise, we left the innermost loop. So check, if the last bb was in
5948 a perfect loop nest and how many loops are contained in this perfect
5951 Count the number of zeros from the end of the schedule. They are the
5952 number of surrounding loops.
5955 last_bb 2 3 2 0 0 0 0 3
5959 last_bb 2 3 2 0 0 0 0 3
5963 If there is no zero, there were other bbs in outer loops and the loop
5964 nest is not perfect. */
5965 for (j = last_nb_loops - 1; j >= 0; j--)
5967 if (last_schedule [j] != 0
5968 || (j <= nb_loops && GBB_STATIC_SCHEDULE (gb)[j] == 1))
5977 /* Found perfect loop nest. */
/* Blocking only pays off with at least two surrounding loops.  */
5978 if (perfect && last_nb_loops - j >= 2)
5979 transform_done |= graphite_trans_loop_block (bbs, last_nb_loops - j);
5981 /* Check if we start with a new loop.
5985 last_bb 2 3 2 0 0 0 0 3
5988 Here we start with the loop "2 3 2 0 0 1"
5990 last_bb 2 3 2 0 0 0 0 3
5993 But here not, so the loop nest can never be perfect. */
5995 perfect = (GBB_STATIC_SCHEDULE (gb)[nb_loops] == 0);
5997 /* Update the last_bb infos. We do not do that for the bbs in the same
5998 loop, as the data we use is not changed. */
5999 last_nb_loops = nb_loops;
6000 lambda_vector_copy (GBB_STATIC_SCHEDULE (gb), last_schedule,
/* Start collecting the bbs of the next loop nest.  */
6002 VEC_truncate (graphite_bb_p, bbs, 0);
6003 VEC_safe_push (graphite_bb_p, heap, bbs, gb);
6006 /* Check if the last loop nest was perfect. It is the same check as above,
6007 but the comparison with the next bb is missing. */
6008 for (j = last_nb_loops - 1; j >= 0; j--)
6009 if (last_schedule [j] != 0)
6017 /* Found perfect loop nest. */
6018 if (last_nb_loops - j >= 2)
6019 transform_done |= graphite_trans_loop_block (bbs, last_nb_loops - j);
6020 VEC_free (graphite_bb_p, heap, bbs);
6022 return transform_done;
6025 /* Apply graphite transformations to all the basic blocks of SCOP. */
6028 graphite_apply_transformations (scop_p scop)
6030 bool transform_done = false;
6032 /* Sort the list of bbs. Keep them always sorted. */
6033 graphite_sort_gbbs (scop);
6035 if (flag_loop_block)
6036 transform_done = graphite_trans_scop_block (scop);
6038 /* Generate code even if we did not apply any real transformation.
6039 This also allows to check the performance for the identity
6040 transformation: GIMPLE -> GRAPHITE -> GIMPLE
6041 Keep in mind that CLooG optimizes in control, so the loop structure
6042 may change, even if we only use -fgraphite-identity. */
6043 if (flag_graphite_identity)
6044 transform_done = true;
6046 return transform_done;
6049 /* We limit all SCoPs to SCoPs, that are completely surrounded by a loop.
6059 * SCoP frontier, as this line is not surrounded by any loop. *
6063 This is necessary as scalar evolution and parameter detection need a
6064 outermost loop to initialize parameters correctly.
6066 TODO: FIX scalar evolution and parameter detection to allow more flexible
/* NOTE(review): the function header (presumably "static void
   limit_scops (void)") and several structural lines are missing from
   this excerpt -- confirm against the original source.  */
6072 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap, 3);
/* For each detected SCoP, turn every outermost loop fully contained in
   the region into a new single-entry single-exit region candidate.  */
6077 for (i = 0; VEC_iterate (scop_p, current_scops, i, scop); i++)
6081 build_scop_bbs (scop);
6083 if (!build_scop_loop_nests (scop))
6086 for (j = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), j, loop); j++)
/* A loop whose outer loop lies outside the region is an outermost loop
   of this SCoP.  */
6087 if (!loop_in_sese_p (loop_outer (loop), SCOP_REGION (scop)))
6089 sd_region open_scop;
6090 open_scop.entry = loop->header;
/* assumes the loop has a single exit -- single_exit may return NULL
   otherwise; TODO confirm this is guaranteed at this point.  */
6091 open_scop.exit = single_exit (loop)->dest;
6092 VEC_safe_push (sd_region, heap, tmp_scops, &open_scop);
/* Discard the old SCoPs and rebuild them from the collected regions.  */
6096 free_scops (current_scops);
6097 current_scops = VEC_alloc (scop_p, heap, 3);
6099 create_sese_edges (tmp_scops);
6100 build_graphite_scops (tmp_scops);
6101 VEC_free (sd_region, heap, tmp_scops);
6104 /* Perform a set of linear transforms on the loops of the current
/* Entry point of the Graphite pass (HAVE_cloog build): detect SCoPs,
   build their polyhedral representation, apply transformations, and
   regenerate GIMPLE through CLooG.
   NOTE(review): several physical lines are missing from this excerpt
   (braces, early returns/continues, the SCoP detection calls, and the
   "else" of the ENABLE_CHECKING branch) -- reconstruct from the
   original source before compiling.  */
6108 graphite_transform_loops (void)
6112 bool transform_done = false;
/* Nothing to do for a function without loops.  */
6114 if (number_of_loops () <= 1)
6117 current_scops = VEC_alloc (scop_p, heap, 3);
6118 recompute_all_dominators ();
6120 if (dump_file && (dump_flags & TDF_DETAILS))
6121 fprintf (dump_file, "Graphite loop transformations \n");
6123 initialize_original_copy_tables ();
6124 cloog_initialize ();
/* NOTE(review): the SCoP detection calls (presumably build_scops and
   limit_scops) are among the lines missing here -- confirm.  */
6128 if (dump_file && (dump_flags & TDF_DETAILS))
6129 fprintf (dump_file, "\nnumber of SCoPs: %d\n",
6130 VEC_length (scop_p, current_scops));
/* Build the polyhedral representation of each SCoP and transform it.  */
6132 for (i = 0; VEC_iterate (scop_p, current_scops, i, scop); i++)
6134 build_scop_bbs (scop);
6135 if (!build_scop_loop_nests (scop))
6138 build_bb_loops (scop);
6140 if (!build_scop_conditions (scop))
6143 find_scop_parameters (scop);
6144 build_scop_context (scop);
6146 if (dump_file && (dump_flags & TDF_DETAILS))
6148 fprintf (dump_file, "\n(In SCoP %d:\n", i);
6149 fprintf (dump_file, "\nnumber of bbs: %d\n",
6150 VEC_length (graphite_bb_p, SCOP_BBS (scop)));
6151 fprintf (dump_file, "\nnumber of loops: %d)\n",
6152 VEC_length (loop_p, SCOP_LOOP_NEST (scop)));
6155 if (!build_scop_iteration_domain (scop))
6158 add_conditions_to_constraints (scop);
6159 build_scop_canonical_schedules (scop);
6161 build_scop_data_accesses (scop);
6162 build_scop_dynamic_schedules (scop);
6164 if (dump_file && (dump_flags & TDF_DETAILS))
6166 int nbrefs = nb_data_refs_in_scop (scop);
6167 fprintf (dump_file, "\nnumber of data refs: %d\n", nbrefs);
/* Regenerate GIMPLE via CLooG when a transform (or the identity
   transform) was requested.  */
6170 if (graphite_apply_transformations (scop))
6171 transform_done = gloog (scop, find_transform (scop));
6172 #ifdef ENABLE_CHECKING
/* presumably this branch is the "else" of the test above, exercising
   find_transform even when nothing is generated -- confirm.  */
6175 struct clast_stmt *stmt = find_transform (scop);
6176 cloog_clast_free (stmt);
/* presumably guarded by transform_done; the guard line is missing
   from this excerpt -- confirm.  */
6183 cleanup_tree_cfg ();
6185 free_scops (current_scops);
6187 free_original_copy_tables ();
6190 #else /* If Cloog is not available: #ifndef HAVE_cloog. */
6193 graphite_transform_loops (void)
6195 sorry ("Graphite loop optimizations cannot be used");