1 /* Gimple Represented as Polyhedra.
2 Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
3 Contributed by Sebastian Pop <sebastian.pop@inria.fr>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* This pass converts GIMPLE to GRAPHITE, performs some loop
22 transformations and then converts the resulting representation back
25 An early description of this pass can be found in the GCC Summit'06
26 paper "GRAPHITE: Polyhedral Analyses and Optimizations for GCC".
27 The wiki page http://gcc.gnu.org/wiki/Graphite contains pointers to
30 One important document to read is CLooG's internal manual:
31 http://repo.or.cz/w/cloog-ppl.git?a=blob_plain;f=doc/cloog.texi;hb=HEAD
32 that describes the data structure of loops used in this file, and
33 the functions that are used for transforming the code. */
37 #include "coretypes.h"
42 #include "basic-block.h"
43 #include "diagnostic.h"
44 #include "tree-flow.h"
46 #include "tree-dump.h"
49 #include "tree-chrec.h"
50 #include "tree-data-ref.h"
51 #include "tree-scalar-evolution.h"
52 #include "tree-pass.h"
53 #include "value-prof.h"
54 #include "pointer-set.h"
59 /* The CLooG header file is not -Wc++-compat ready as of 2009-05-11.
60 This #pragma should be removed when it is ready. */
61 #if GCC_VERSION >= 4003
62 #pragma GCC diagnostic warning "-Wc++-compat"
65 #include "cloog/cloog.h"
/* All the SCoPs (Static Control Parts) detected in the current
   function; iterated by print_scops and debug_scops below.  */
68 static VEC (scop_p, heap) *current_scops;
70 /* Converts a GMP constant V to a tree and returns it. */
/* NOTE(review): value_get_si truncates values that do not fit in a
   signed long -- presumably only small constants reach this point;
   confirm against callers.  */
73 gmp_cst_to_tree (tree type, Value v)
75 return build_int_cst (type, value_get_si (v));
78 /* Returns true when BB is in REGION. */
81 bb_in_sese_p (basic_block bb, sese region)
83 return pointer_set_contains (SESE_REGION_BBS (region), bb);
86 /* Returns true when LOOP is in the SESE region R. */
/* A loop is considered inside the region only when both its header
   and its latch blocks are registered in the region.  */
89 loop_in_sese_p (struct loop *loop, sese r)
91 return (bb_in_sese_p (loop->header, r)
92 && bb_in_sese_p (loop->latch, r));
95 /* For a USE in BB, if BB is outside REGION, mark the USE in the
96 SESE_LIVEIN and SESE_LIVEOUT sets. */
99 sese_build_livein_liveouts_use (sese region, basic_block bb, tree use)
104 if (TREE_CODE (use) != SSA_NAME)
/* VER is the SSA version number of USE; it indexes the per-version
   SESE_LIVEIN bitmaps and the bits of SESE_LIVEOUT.  */
107 ver = SSA_NAME_VERSION (use);
108 def_bb = gimple_bb (SSA_NAME_DEF_STMT (use));
/* Nothing to record when the definition is outside the region or the
   use is inside it.  */
110 || !bb_in_sese_p (def_bb, region)
111 || bb_in_sese_p (bb, region))
/* Lazily allocate the livein bitmap for this version.  */
114 if (!SESE_LIVEIN_VER (region, ver))
115 SESE_LIVEIN_VER (region, ver) = BITMAP_ALLOC (NULL);
117 bitmap_set_bit (SESE_LIVEIN_VER (region, ver), bb->index);
118 bitmap_set_bit (SESE_LIVEOUT (region), ver);
121 /* Marks for rewrite all the SSA_NAMES defined in REGION and that are
122 used in BB that is outside of the REGION. */
125 sese_build_livein_liveouts_bb (sese region, basic_block bb)
127 gimple_stmt_iterator bsi;
/* First scan the PHI arguments flowing out of BB along each
   successor edge...  */
133 FOR_EACH_EDGE (e, ei, bb->succs)
134 for (bsi = gsi_start_phis (e->dest); !gsi_end_p (bsi); gsi_next (&bsi))
135 sese_build_livein_liveouts_use (region, bb,
136 PHI_ARG_DEF_FROM_EDGE (gsi_stmt (bsi), e));
/* ... then every SSA use operand of the statements in BB itself.  */
138 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
139 FOR_EACH_SSA_TREE_OPERAND (var, gsi_stmt (bsi), iter, SSA_OP_ALL_USES)
140 sese_build_livein_liveouts_use (region, bb, var);
143 /* Build the SESE_LIVEIN and SESE_LIVEOUT for REGION. */
146 sese_build_livein_liveouts (sese region)
150 SESE_LIVEOUT (region) = BITMAP_ALLOC (NULL);
/* One livein bitmap slot per SSA version currently in the function.  */
151 SESE_NUM_VER (region) = num_ssa_names;
152 SESE_LIVEIN (region) = XCNEWVEC (bitmap, SESE_NUM_VER (region));
155 sese_build_livein_liveouts_bb (region, bb);
158 /* Register basic blocks belonging to a region in a pointer set. */
/* NOTE(review): this walks the CFG recursively from ENTRY_BB, stopping
   at EXIT_BB; on very deep CFGs the recursion depth equals the region
   size -- worth confirming this cannot overflow the stack.  */
161 register_bb_in_sese (basic_block entry_bb, basic_block exit_bb, sese region)
165 basic_block bb = entry_bb;
167 FOR_EACH_EDGE (e, ei, bb->succs)
/* Style note: GNU convention places the "&&" at the start of the
   continuation line rather than trailing here.  */
169 if (!pointer_set_contains (SESE_REGION_BBS (region), e->dest) &&
170 e->dest->index != exit_bb->index)
172 pointer_set_insert (SESE_REGION_BBS (region), e->dest);
173 register_bb_in_sese (e->dest, exit_bb, region);
178 /* Builds a new SESE region from edges ENTRY and EXIT. */
181 new_sese (edge entry, edge exit)
183 sese res = XNEW (struct sese_d);
185 SESE_ENTRY (res) = entry;
186 SESE_EXIT (res) = exit;
187 SESE_REGION_BBS (res) = pointer_set_create ();
188 register_bb_in_sese (entry->dest, exit->dest, res);
/* Liveness data is computed later by sese_build_livein_liveouts.  */
190 SESE_LIVEOUT (res) = NULL;
191 SESE_NUM_VER (res) = 0;
192 SESE_LIVEIN (res) = NULL;
197 /* Deletes REGION. */
200 free_sese (sese region)
204 for (i = 0; i < SESE_NUM_VER (region); i++)
/* BITMAP_FREE handles NULL slots that were never allocated.  */
205 BITMAP_FREE (SESE_LIVEIN_VER (region, i));
/* NOTE(review): the NULL guard is redundant -- free (NULL) is a no-op.  */
207 if (SESE_LIVEIN (region))
208 free (SESE_LIVEIN (region));
210 if (SESE_LIVEOUT (region))
211 BITMAP_FREE (SESE_LIVEOUT (region));
213 pointer_set_destroy (SESE_REGION_BBS (region));
219 /* Debug the list of old induction variables for this SCOP. */
/* Output goes to stderr so it is usable from the debugger.  */
222 debug_oldivs (scop_p scop)
227 fprintf (stderr, "Old IVs:");
229 for (i = 0; VEC_iterate (name_tree, SCOP_OLDIVS (scop), i, oldiv); i++)
231 fprintf (stderr, "(");
232 print_generic_expr (stderr, oldiv->t, 0);
233 fprintf (stderr, ", %s, %d)\n", oldiv->name, oldiv->loop->num);
235 fprintf (stderr, "\n");
238 /* Debug the loops around basic block GB. */
241 debug_loop_vec (graphite_bb_p gb)
246 fprintf (stderr, "Loop Vec:");
/* Loop number -1 denotes an empty slot in the loop vector.  */
248 for (i = 0; VEC_iterate (loop_p, GBB_LOOPS (gb), i, loop); i++)
249 fprintf (stderr, "%d: %d, ", i, loop ? loop->num : -1);
251 fprintf (stderr, "\n");
254 /* Returns true if stack ENTRY is a constant. */
257 iv_stack_entry_is_constant (iv_stack_entry *entry)
259 return entry->kind == iv_stack_entry_const;
262 /* Returns true if stack ENTRY is an induction variable. */
265 iv_stack_entry_is_iv (iv_stack_entry *entry)
267 return entry->kind == iv_stack_entry_iv;
270 /* Push (IV, NAME) on STACK. */
273 loop_iv_stack_push_iv (loop_iv_stack stack, tree iv, const char *name)
275 iv_stack_entry *entry = XNEW (iv_stack_entry);
276 name_tree named_iv = XNEW (struct name_tree_d);
279 named_iv->name = name;
281 entry->kind = iv_stack_entry_iv;
282 entry->data.iv = named_iv;
284 VEC_safe_push (iv_stack_entry_p, heap, *stack, entry);
287 /* Inserts a CONSTANT in STACK at INDEX. */
290 loop_iv_stack_insert_constant (loop_iv_stack stack, int index,
293 iv_stack_entry *entry = XNEW (iv_stack_entry);
295 entry->kind = iv_stack_entry_const;
296 entry->data.constant = constant;
298 VEC_safe_insert (iv_stack_entry_p, heap, *stack, index, entry);
301 /* Pops and frees an element out of STACK. */
304 loop_iv_stack_pop (loop_iv_stack stack)
306 iv_stack_entry_p entry = VEC_pop (iv_stack_entry_p, *stack);
/* NOTE(review): frees the iv member of the data union -- this assumes
   the popped entry is an IV, not a constant (constants are removed via
   loop_iv_stack_remove_constants instead); confirm at call sites.  */
308 free (entry->data.iv);
312 /* Get the IV at INDEX in STACK. */
315 loop_iv_stack_get_iv (loop_iv_stack stack, int index)
317 iv_stack_entry_p entry = VEC_index (iv_stack_entry_p, *stack, index);
318 iv_stack_entry_data data = entry->data;
/* Returns the IV tree for an IV entry, or the constant tree otherwise.  */
320 return iv_stack_entry_is_iv (entry) ? data.iv->t : data.constant;
323 /* Get the IV from its NAME in STACK. */
326 loop_iv_stack_get_iv_from_name (loop_iv_stack stack, const char* name)
329 iv_stack_entry_p entry;
/* NOTE(review): reads data.iv on every entry without checking the
   entry kind -- this assumes no constant entries are on the stack when
   this lookup is made; verify against callers.  */
331 for (i = 0; VEC_iterate (iv_stack_entry_p, *stack, i, entry); i++)
333 name_tree iv = entry->data.iv;
334 if (!strcmp (name, iv->name))
341 /* Prints on stderr the contents of STACK. */
344 debug_loop_iv_stack (loop_iv_stack stack)
347 iv_stack_entry_p entry;
350 fprintf (stderr, "(");
352 for (i = 0; VEC_iterate (iv_stack_entry_p, *stack, i, entry); i++)
357 fprintf (stderr, " ");
359 if (iv_stack_entry_is_iv (entry))
361 name_tree iv = entry->data.iv;
/* IV entries print as "name:tree".  */
362 fprintf (stderr, "%s:", iv->name);
363 print_generic_expr (stderr, iv->t, 0);
/* Constant entries mirror the "name:value" format by printing the
   constant on both sides of the colon.  */
367 tree constant = entry->data.constant;
368 print_generic_expr (stderr, constant, 0);
369 fprintf (stderr, ":");
370 print_generic_expr (stderr, constant, 0);
374 fprintf (stderr, ")\n");
/* Frees all the entries of STACK, then the stack vector itself.  */
380 free_loop_iv_stack (loop_iv_stack stack)
383 iv_stack_entry_p entry;
/* NOTE(review): frees data.iv unconditionally -- assumes constants
   were already removed (see loop_iv_stack_remove_constants).  */
385 for (i = 0; VEC_iterate (iv_stack_entry_p, *stack, i, entry); i++)
387 free (entry->data.iv);
391 VEC_free (iv_stack_entry_p, heap, *stack);
396 /* Structure containing the mapping between the CLooG's induction
397 variable and the type of the old induction variable. */
398 typedef struct ivtype_map_elt_d
401 const char *cloog_iv;
404 /* Print to stderr the element ELT. */
407 debug_ivtype_elt (ivtype_map_elt elt)
409 fprintf (stderr, "(%s, ", elt->cloog_iv);
410 print_generic_expr (stderr, elt->type, 0);
411 fprintf (stderr, ")\n");
414 /* Helper function for debug_ivtype_map. */
417 debug_ivtype_map_1 (void **slot, void *s ATTRIBUTE_UNUSED)
419 struct ivtype_map_elt_d *entry = (struct ivtype_map_elt_d *) *slot;
420 debug_ivtype_elt (entry);
424 /* Print to stderr all the elements of MAP. */
427 debug_ivtype_map (htab_t map)
429 htab_traverse (map, debug_ivtype_map_1, NULL);
432 /* Constructs a new ivtype_map_elt_d mapping CLOOG_IV to TYPE. */
434 static inline ivtype_map_elt
435 new_ivtype_map_elt (const char *cloog_iv, tree type)
439 res = XNEW (struct ivtype_map_elt_d);
440 res->cloog_iv = cloog_iv;
446 /* Computes a hash function for database element ELT. */
/* NOTE(review): hashes and compares the CLooG iv name by POINTER
   identity, not by string contents -- this only works if CLooG hands
   back the same string pointer for the same iv; confirm.  */
449 ivtype_map_elt_info (const void *elt)
451 return htab_hash_pointer (((const struct ivtype_map_elt_d *) elt)->cloog_iv);
454 /* Compares database elements E1 and E2. */
457 eq_ivtype_map_elts (const void *e1, const void *e2)
459 const struct ivtype_map_elt_d *elt1 = (const struct ivtype_map_elt_d *) e1;
460 const struct ivtype_map_elt_d *elt2 = (const struct ivtype_map_elt_d *) e2;
462 return (elt1->cloog_iv == elt2->cloog_iv);
467 /* Given a CLOOG_IV, returns the type that it should have in GCC land.
468 If the information is not available, i.e. in the case one of the
469 transforms created the loop, just return integer_type_node. */
472 gcc_type_for_cloog_iv (const char *cloog_iv, graphite_bb_p gbb)
474 struct ivtype_map_elt_d tmp;
477 tmp.cloog_iv = cloog_iv;
/* NO_INSERT: a miss leaves the table untouched and falls through to
   the integer_type_node default below.  */
478 slot = htab_find_slot (GBB_CLOOG_IV_TYPES (gbb), &tmp, NO_INSERT);
481 return ((ivtype_map_elt) *slot)->type;
483 return integer_type_node;
486 /* Inserts constants derived from the USER_STMT argument list into the
487 STACK. This is needed to map old ivs to constants when loops have
491 loop_iv_stack_patch_for_consts (loop_iv_stack stack,
492 struct clast_user_stmt *user_stmt)
494 struct clast_stmt *t;
496 CloogStatement *cs = user_stmt->statement;
/* The graphite_bb was stashed in the CLooG statement's usr pointer
   when the statement was built.  */
497 graphite_bb_p gbb = (graphite_bb_p) cloog_statement_usr (cs);
/* Walk the substitution list of the CLAST user statement; each
   substitution assigns an expression to one scattering dimension.  */
499 for (t = user_stmt->substitutions; t; t = t->next)
501 struct clast_expr *expr = (struct clast_expr *)
502 ((struct clast_assignment *)t)->RHS;
503 struct clast_term *term = (struct clast_term *) expr;
505 /* FIXME: What should be done with expr_bin, expr_red? */
506 if (expr->type == expr_term
/* Type the constant like the old induction variable of this loop
   when it is known; fall back to integer_type_node.  */
509 loop_p loop = gbb_loop_at_index (gbb, index);
510 tree oldiv = oldiv_for_loop (GBB_SCOP (gbb), loop);
511 tree type = oldiv ? TREE_TYPE (oldiv) : integer_type_node;
512 tree value = gmp_cst_to_tree (type, term->val);
513 loop_iv_stack_insert_constant (stack, index, value);
519 /* Removes all constants in the iv STACK. */
522 loop_iv_stack_remove_constants (loop_iv_stack stack)
525 iv_stack_entry *entry;
/* Note: I is deliberately not advanced in the for-header -- it is
   only incremented (in the else branch, omitted from this listing)
   when the current entry is kept, since removal shifts the vector.  */
527 for (i = 0; VEC_iterate (iv_stack_entry_p, *stack, i, entry);)
529 if (iv_stack_entry_is_constant (entry))
531 free (VEC_index (iv_stack_entry_p, *stack, i));
532 VEC_ordered_remove (iv_stack_entry_p, *stack, i);
539 /* Returns a new loop_to_cloog_loop_str structure. */
541 static inline struct loop_to_cloog_loop_str *
542 new_loop_to_cloog_loop_str (int loop_num,
544 CloogLoop *cloog_loop)
546 struct loop_to_cloog_loop_str *result;
548 result = XNEW (struct loop_to_cloog_loop_str);
549 result->loop_num = loop_num;
550 result->cloog_loop = cloog_loop;
551 result->loop_position = loop_position;
556 /* Hash function for SCOP_LOOP2CLOOG_LOOP hash table. */
/* The GCC loop number is already a small unique integer, so it is
   used directly as the hash value.  */
559 hash_loop_to_cloog_loop (const void *elt)
561 return ((const struct loop_to_cloog_loop_str *) elt)->loop_num;
564 /* Equality function for SCOP_LOOP2CLOOG_LOOP hash table. */
567 eq_loop_to_cloog_loop (const void *el1, const void *el2)
569 const struct loop_to_cloog_loop_str *elt1, *elt2;
571 elt1 = (const struct loop_to_cloog_loop_str *) el1;
572 elt2 = (const struct loop_to_cloog_loop_str *) el2;
573 return elt1->loop_num == elt2->loop_num;
576 /* Compares two graphite bbs and returns an integer less than, equal to, or
577 greater than zero if the first argument is considered to be respectively
578 less than, equal to, or greater than the second.
579 We compare using the lexicographic order of the static schedules. */
582 gbb_compare (const void *p_1, const void *p_2)
584 const struct graphite_bb *const gbb_1
585 = *(const struct graphite_bb *const*) p_1;
586 const struct graphite_bb *const gbb_2
587 = *(const struct graphite_bb *const*) p_2;
/* A static schedule has one entry per surrounding loop plus one for
   the textual position, hence nb_loops + 1 components.  */
589 return lambda_vector_compare (GBB_STATIC_SCHEDULE (gbb_1),
590 gbb_nb_loops (gbb_1) + 1,
591 GBB_STATIC_SCHEDULE (gbb_2),
592 gbb_nb_loops (gbb_2) + 1);
595 /* Sort graphite bbs in SCOP. */
/* NOTE(review): qsort is not stable; presumably static schedules are
   unique per bb so ties cannot occur -- confirm.  */
598 graphite_sort_gbbs (scop_p scop)
600 VEC (graphite_bb_p, heap) *bbs = SCOP_BBS (scop);
602 qsort (VEC_address (graphite_bb_p, bbs),
603 VEC_length (graphite_bb_p, bbs),
604 sizeof (graphite_bb_p), gbb_compare);
607 /* Dump conditions of a graphite basic block GBB on FILE. */
610 dump_gbb_conditions (FILE *file, graphite_bb_p gbb)
614 VEC (gimple, heap) *conditions = GBB_CONDITIONS (gbb);
/* Nothing to print for an unconditional bb.  */
616 if (VEC_empty (gimple, conditions))
619 fprintf (file, "\tbb %d\t: cond = {", GBB_BB (gbb)->index);
621 for (i = 0; VEC_iterate (gimple, conditions, i, stmt); i++)
622 print_gimple_stmt (file, stmt, 0, 0);
624 fprintf (file, "}\n");
627 /* Converts the graphite scheduling function into a cloog scattering
628 matrix. This scattering matrix is used to limit the possible cloog
629 output to valid programs in respect to the scheduling function.
631 SCATTERING_DIMENSIONS specifies the dimensionality of the scattering
632 matrix. CLooG 0.14.0 and previous versions require that all scattering
633 functions of one CloogProgram have the same dimensionality, therefore we
634 allow to specify it. (Should be removed in future versions) */
637 schedule_to_scattering (graphite_bb_p gb, int scattering_dimensions)
640 scop_p scop = GBB_SCOP (gb);
642 int nb_iterators = gbb_nb_loops (gb);
644 /* The cloog scattering matrix consists of these columns:
646 scattering_dimensions cols = Scattering dimensions,
647 nb_iterators cols = bb's iterators,
648 scop_nb_params cols = Parameters,
653 scattering_dimensions = 5
663 s1 s2 s3 s4 s5 i p1 p2 1
664 1 0 0 0 0 0 0 0 -4 = 0
665 0 1 0 0 0 -1 0 0 0 = 0
666 0 0 1 0 0 0 0 0 -5 = 0 */
667 int nb_params = scop_nb_params (scop);
/* Columns: eq/ineq flag + scattering dims + iterators + params + constant.  */
668 int nb_cols = 1 + scattering_dimensions + nb_iterators + nb_params + 1;
669 int col_const = nb_cols - 1;
670 int col_iter_offset = 1 + scattering_dimensions;
672 CloogMatrix *scat = cloog_matrix_alloc (scattering_dimensions, nb_cols);
/* A 2d+1 schedule needs one textual dimension per loop level plus one
   iterator dimension per loop.  */
674 gcc_assert (scattering_dimensions >= nb_iterators * 2 + 1);
676 /* Initialize the identity matrix. */
677 for (i = 0; i < scattering_dimensions; i++)
678 value_set_si (scat->p[i][i + 1], 1);
680 /* Textual order outside the first loop. */
681 value_set_si (scat->p[0][col_const], -GBB_STATIC_SCHEDULE (gb)[0]);
683 /* For all surrounding loops. */
684 for (i = 0; i < nb_iterators; i++)
686 int schedule = GBB_STATIC_SCHEDULE (gb)[i + 1];
688 /* Iterations of this loop. */
689 value_set_si (scat->p[2 * i + 1][col_iter_offset + i], -1);
691 /* Textual order inside this loop. */
692 value_set_si (scat->p[2 * i + 2][col_const], -schedule);
698 /* Print the schedules of GB to FILE with INDENT white spaces before.
699 VERBOSITY determines how verbose the code pretty printers are. */
702 print_graphite_bb (FILE *file, graphite_bb_p gb, int indent, int verbosity)
704 CloogMatrix *scattering;
707 fprintf (file, "\nGBB (\n");
709 print_loops_bb (file, GBB_BB (gb), indent+2, verbosity);
/* Iteration domain, as a CLooG constraint matrix.  */
713 fprintf (file, " (domain: \n");
714 cloog_matrix_print (file, GBB_DOMAIN (gb));
715 fprintf (file, " )\n");
718 if (GBB_STATIC_SCHEDULE (gb))
720 fprintf (file, " (static schedule: ");
721 print_lambda_vector (file, GBB_STATIC_SCHEDULE (gb),
722 gbb_nb_loops (gb) + 1);
723 fprintf (file, " )\n");
728 fprintf (file, " (contained loops: \n");
729 for (i = 0; VEC_iterate (loop_p, GBB_LOOPS (gb), i, loop); i++)
731 fprintf (file, " iterator %d => NULL \n", i);
733 fprintf (file, " iterator %d => loop %d \n", i,
735 fprintf (file, " )\n");
738 if (GBB_DATA_REFS (gb))
739 dump_data_references (file, GBB_DATA_REFS (gb));
741 if (GBB_CONDITIONS (gb))
743 fprintf (file, " (conditions: \n");
744 dump_gbb_conditions (file, gb);
745 fprintf (file, " )\n");
749 && GBB_STATIC_SCHEDULE (gb))
/* The scattering is recomputed here just for display; the matrix is
   freed again immediately after printing.  */
751 fprintf (file, " (scattering: \n");
752 scattering = schedule_to_scattering (gb, 2 * gbb_nb_loops (gb) + 1);
753 cloog_matrix_print (file, scattering);
754 cloog_matrix_free (scattering);
755 fprintf (file, " )\n");
758 fprintf (file, ")\n");
761 /* Print to STDERR the schedules of GB with VERBOSITY level. */
764 debug_gbb (graphite_bb_p gb, int verbosity)
766 print_graphite_bb (stderr, gb, 0, verbosity);
770 /* Print SCOP to FILE. VERBOSITY determines how verbose the pretty
774 print_scop (FILE *file, scop_p scop, int verbosity)
/* SCoPs are named after their entry and exit basic block indices.  */
779 fprintf (file, "\nSCoP_%d_%d (\n",
780 SCOP_ENTRY (scop)->index, SCOP_EXIT (scop)->index);
782 fprintf (file, " (cloog: \n");
783 cloog_program_print (file, SCOP_PROG (scop));
784 fprintf (file, " )\n");
791 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
792 print_graphite_bb (file, gb, 0, verbosity);
795 fprintf (file, ")\n");
798 /* Print all the SCOPs to FILE. VERBOSITY determines how verbose the
799 code pretty printers are. */
802 print_scops (FILE *file, int verbosity)
807 for (i = 0; VEC_iterate (scop_p, current_scops, i, scop); i++)
808 print_scop (file, scop, verbosity);
811 /* Debug SCOP. VERBOSITY determines how verbose the code pretty
815 debug_scop (scop_p scop, int verbosity)
817 print_scop (stderr, scop, verbosity);
820 /* Debug all SCOPs from CURRENT_SCOPS. VERBOSITY determines how
821 verbose the code pretty printers are. */
824 debug_scops (int verbosity)
826 print_scops (stderr, verbosity);
829 /* Pretty print to FILE the SCOP in DOT format. */
832 dot_scop_1 (FILE *file, scop_p scop)
837 basic_block entry = SCOP_ENTRY (scop);
838 basic_block exit = SCOP_EXIT (scop);
840 fprintf (file, "digraph SCoP_%d_%d {\n", entry->index,
/* Entry is drawn as a triangle, exit as a box, region members in red.  */
846 fprintf (file, "%d [shape=triangle];\n", bb->index);
849 fprintf (file, "%d [shape=box];\n", bb->index);
851 if (bb_in_sese_p (bb, SCOP_REGION (scop)))
852 fprintf (file, "%d [color=red];\n", bb->index);
854 FOR_EACH_EDGE (e, ei, bb->succs)
855 fprintf (file, "%d -> %d;\n", bb->index, e->dest->index);
858 fputs ("}\n\n", file);
861 /* Display SCOP using dotty. */
864 dot_scop (scop_p scop)
866 dot_scop_1 (stderr, scop);
869 /* Pretty print all SCoPs in DOT format and mark them with different colors.
870 If there are not enough colors, paint later SCoPs gray.
872 - "*" after the node number: entry of a SCoP,
873 - "#" after the node number: exit of a SCoP,
874 - "()" entry or exit not part of SCoP. */
877 dot_all_scops_1 (FILE *file)
886 /* Disable debugging while printing graph. */
887 int tmp_dump_flags = dump_flags;
890 fprintf (file, "digraph all {\n");
894 int part_of_scop = false;
896 /* Use HTML for every bb label. So we are able to print bbs
897 which are part of two different SCoPs, with two different
898 background colors. */
899 fprintf (file, "%d [label=<\n <TABLE BORDER=\"0\" CELLBORDER=\"1\" ",
901 fprintf (file, "CELLSPACING=\"0\">\n");
903 /* Select color for SCoP. */
904 for (i = 0; VEC_iterate (scop_p, current_scops, i, scop); i++)
905 if (bb_in_sese_p (bb, SCOP_REGION (scop))
906 || (SCOP_EXIT (scop) == bb)
907 || (SCOP_ENTRY (scop) == bb))
/* One table row per SCoP this bb belongs to, tinted with the
   SCoP's color.  */
966 fprintf (file, " <TR><TD WIDTH=\"50\" BGCOLOR=\"%s\">", color);
968 if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
969 fprintf (file, " (");
971 if (bb == SCOP_ENTRY (scop)
972 && bb == SCOP_EXIT (scop))
973 fprintf (file, " %d*# ", bb->index);
974 else if (bb == SCOP_ENTRY (scop))
975 fprintf (file, " %d* ", bb->index);
976 else if (bb == SCOP_EXIT (scop))
977 fprintf (file, " %d# ", bb->index);
979 fprintf (file, " %d ", bb->index);
981 if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
984 fprintf (file, "</TD></TR>\n");
/* Blocks that belong to no SCoP get a plain white row.  */
990 fprintf (file, " <TR><TD WIDTH=\"50\" BGCOLOR=\"#ffffff\">");
991 fprintf (file, " %d </TD></TR>\n", bb->index);
994 fprintf (file, " </TABLE>>, shape=box, style=\"setlinewidth(0)\"]\n");
999 FOR_EACH_EDGE (e, ei, bb->succs)
1000 fprintf (file, "%d -> %d;\n", bb->index, e->dest->index);
1003 fputs ("}\n\n", file);
1005 /* Enable debugging again. */
1006 dump_flags = tmp_dump_flags;
1009 /* Display all SCoPs using dotty. */
1012 dot_all_scops (void)
1014 /* When debugging, enable the following code. This cannot be used
1015 in production compilers because it calls "system". */
/* NOTE(review): fopen result is only checked by the assert below; a
   hardened build would handle failure gracefully.  */
1017 FILE *stream = fopen ("/tmp/allscops.dot", "w");
1018 gcc_assert (stream);
1020 dot_all_scops_1 (stream);
1023 system ("dotty /tmp/allscops.dot");
1025 dot_all_scops_1 (stderr);
1029 /* Returns the outermost loop in SCOP that contains BB. */
1031 static struct loop *
1032 outermost_loop_in_scop (scop_p scop, basic_block bb)
/* Walk outwards from BB's own loop while the parent loop is still
   fully contained in the region.  */
1036 nest = bb->loop_father;
1037 while (loop_outer (nest)
1038 && loop_in_sese_p (loop_outer (nest), SCOP_REGION (scop)))
1039 nest = loop_outer (nest);
1044 /* Returns the block preceding the entry of SCOP. */
1047 block_before_scop (scop_p scop)
1049 return SESE_ENTRY (SCOP_REGION (scop))->src;
1052 /* Return true when EXPR is an affine function in LOOP with parameters
1053 instantiated relative to SCOP_ENTRY. */
1056 loop_affine_expr (basic_block scop_entry, struct loop *loop, tree expr)
1059 tree scev = analyze_scalar_evolution (loop, expr);
1061 scev = instantiate_scev (scop_entry, loop, scev);
/* Accept loop-invariant evolutions as well as affine multivariate ones.  */
1063 return (evolution_function_is_invariant_p (scev, n)
1064 || evolution_function_is_affine_multivariate_p (scev, n));
1067 /* Return true if REF or any of its subtrees contains a
1071 contains_component_ref_p (tree ref)
/* Strip handled components from the outside in, looking for a
   COMPONENT_REF anywhere along the access chain.  */
1076 while (handled_component_p (ref))
1078 if (TREE_CODE (ref) == COMPONENT_REF)
1081 ref = TREE_OPERAND (ref, 0);
1087 /* Return true if the operand OP is simple. */
1090 is_simple_operand (loop_p loop, gimple stmt, tree op)
1092 /* It is not a simple operand when it is a declaration, */
1094 /* or a structure, */
1095 || AGGREGATE_TYPE_P (TREE_TYPE (op))
1096 /* or a COMPONENT_REF, */
1097 || contains_component_ref_p (op)
1098 /* or a memory access that cannot be analyzed by the data
1099 reference analysis. */
1100 || ((handled_component_p (op) || INDIRECT_REF_P (op))
1101 && !stmt_simple_memref_p (loop, stmt, op)))
1107 /* Return true only when STMT is simple enough for being handled by
1108 Graphite. This depends on SCOP_ENTRY, as the parameters are
1109 initialized relatively to this basic block. */
1112 stmt_simple_for_scop_p (basic_block scop_entry, gimple stmt)
1114 basic_block bb = gimple_bb (stmt);
1115 struct loop *loop = bb->loop_father;
1117 /* GIMPLE_ASM and GIMPLE_CALL may embed arbitrary side effects.
1118 Calls have side-effects, except those to const or pure
1120 if (gimple_has_volatile_ops (stmt)
1121 || (gimple_code (stmt) == GIMPLE_CALL
1122 && !(gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE)))
1123 || (gimple_code (stmt) == GIMPLE_ASM))
1126 switch (gimple_code (stmt))
1135 ssa_op_iter op_iter;
1136 enum tree_code code = gimple_cond_code (stmt);
1138 /* We can only handle this kind of conditional expressions.
1139 For inequalities like "if (i != 3 * k)" we need unions of
1140 polyhedrons. Expressions like "if (a)" or "if (a == 15)" need
1141 them for the else branch. */
1142 if (!(code == LT_EXPR
1145 || code == GE_EXPR))
/* Every SSA operand of the condition must be affine in the
   surrounding loop.  */
1151 FOR_EACH_SSA_TREE_OPERAND (op, stmt, op_iter, SSA_OP_ALL_USES)
1152 if (!loop_affine_expr (scop_entry, loop, op))
1160 enum tree_code code = gimple_assign_rhs_code (stmt);
1162 switch (get_gimple_rhs_class (code))
1164 case GIMPLE_UNARY_RHS:
1165 case GIMPLE_SINGLE_RHS:
1166 return (is_simple_operand (loop, stmt, gimple_assign_lhs (stmt))
1167 && is_simple_operand (loop, stmt, gimple_assign_rhs1 (stmt)));
1169 case GIMPLE_BINARY_RHS:
1170 return (is_simple_operand (loop, stmt, gimple_assign_lhs (stmt))
1171 && is_simple_operand (loop, stmt, gimple_assign_rhs1 (stmt))
1172 && is_simple_operand (loop, stmt, gimple_assign_rhs2 (stmt)));
1174 case GIMPLE_INVALID_RHS:
/* Const/pure calls: the lhs and every argument must be simple.  */
1183 size_t n = gimple_call_num_args (stmt);
1184 tree lhs = gimple_call_lhs (stmt);
1186 if (lhs && !is_simple_operand (loop, stmt, lhs))
1189 for (i = 0; i < n; i++)
1190 if (!is_simple_operand (loop, stmt, gimple_call_arg (stmt, i)))
1197 /* These nodes cut a new scope. */
1204 /* Returns the statement of BB that contains a harmful operation: that
1205 can be a function call with side effects, the induction variables
1206 are not linear with respect to SCOP_ENTRY, etc. The current open
1207 scop should end before this statement. */
1210 harmful_stmt_in_bb (basic_block scop_entry, basic_block bb)
1212 gimple_stmt_iterator gsi;
1215 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1216 if (!stmt_simple_for_scop_p (scop_entry, gsi_stmt (gsi)))
1217 return gsi_stmt (gsi);
/* Conditions on floating-point values cannot be represented in the
   (integer) polyhedral model, so they are harmful too.  */
1219 stmt = last_stmt (bb);
1220 if (stmt && gimple_code (stmt) == GIMPLE_COND)
1222 tree lhs = gimple_cond_lhs (stmt);
1223 tree rhs = gimple_cond_rhs (stmt);
1225 if (TREE_CODE (TREE_TYPE (lhs)) == REAL_TYPE
1226 || TREE_CODE (TREE_TYPE (rhs)) == REAL_TYPE)
1233 /* Returns true when BB will be represented in graphite. Return false
1234 for the basic blocks that contain code eliminated in the code
1235 generation pass: i.e. induction variables and exit conditions. */
1238 graphite_stmt_p (scop_p scop, basic_block bb,
1239 VEC (data_reference_p, heap) *drs)
1241 gimple_stmt_iterator gsi;
1242 loop_p loop = bb->loop_father;
/* Any basic block with data references must be kept.  */
1244 if (VEC_length (data_reference_p, drs) > 0)
1247 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1249 gimple stmt = gsi_stmt (gsi);
1251 switch (gimple_code (stmt))
1253 /* Control flow expressions can be ignored, as they are
1254 represented in the iteration domains and will be
1255 regenerated by graphite. */
/* Scalar assignments whose evolution is fully analyzable are
   regenerated by the code generator and need not be kept.  */
1263 tree var = gimple_assign_lhs (stmt);
1264 var = analyze_scalar_evolution (loop, var);
1265 var = instantiate_scev (block_before_scop (scop), loop, var);
1267 if (chrec_contains_undetermined (var))
1281 /* Store the GRAPHITE representation of BB. */
1284 new_graphite_bb (scop_p scop, basic_block bb)
1286 struct graphite_bb *gbb;
1287 VEC (data_reference_p, heap) *drs = VEC_alloc (data_reference_p, heap, 5);
1288 struct loop *nest = outermost_loop_in_scop (scop, bb);
1289 gimple_stmt_iterator gsi;
/* Collect the data references of every statement in BB.  */
1291 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1292 find_data_references_in_stmt (nest, gsi_stmt (gsi), &drs);
/* Blocks that will not be represented are dropped here; the collected
   data references are released.  */
1294 if (!graphite_stmt_p (scop, bb, drs))
1296 free_data_refs (drs);
1300 gbb = XNEW (struct graphite_bb);
1303 GBB_SCOP (gbb) = scop;
1304 GBB_DATA_REFS (gbb) = drs;
/* The remaining fields are filled in by later analysis phases.  */
1305 GBB_DOMAIN (gbb) = NULL;
1306 GBB_CONDITIONS (gbb) = NULL;
1307 GBB_CONDITION_CASES (gbb) = NULL;
1308 GBB_LOOPS (gbb) = NULL;
1309 GBB_STATIC_SCHEDULE (gbb) = NULL;
1310 GBB_CLOOG_IV_TYPES (gbb) = NULL;
1311 VEC_safe_push (graphite_bb_p, heap, SCOP_BBS (scop), gbb);
/* Frees the graphite_bb GBB and detaches it from its basic block.  */
1317 free_graphite_bb (struct graphite_bb *gbb)
1319 if (GBB_DOMAIN (gbb))
1320 cloog_matrix_free (GBB_DOMAIN (gbb))
1322 if (GBB_CLOOG_IV_TYPES (gbb))
1323 htab_delete (GBB_CLOOG_IV_TYPES (gbb));
1325 /* FIXME: free_data_refs is disabled for the moment, but should be
1328 free_data_refs (GBB_DATA_REFS (gbb)); */
1330 VEC_free (gimple, heap, GBB_CONDITIONS (gbb));
1331 VEC_free (gimple, heap, GBB_CONDITION_CASES (gbb));
1332 VEC_free (loop_p, heap, GBB_LOOPS (gbb));
/* Clear the back-pointer stored in the basic block's aux field.  */
1333 GBB_BB (gbb)->aux = 0;
1339 /* Structure containing the mapping between the old names and the new
1340 names used after block copy in the new loop context. */
1341 typedef struct rename_map_elt_d
1343 tree old_name, new_name;
1347 /* Print to stderr the element ELT. */
1350 debug_rename_elt (rename_map_elt elt)
1352 fprintf (stderr, "(");
1353 print_generic_expr (stderr, elt->old_name, 0);
1354 fprintf (stderr, ", ");
1355 print_generic_expr (stderr, elt->new_name, 0);
1356 fprintf (stderr, ")\n");
1359 /* Helper function for debug_rename_map. */
1362 debug_rename_map_1 (void **slot, void *s ATTRIBUTE_UNUSED)
1364 struct rename_map_elt_d *entry = (struct rename_map_elt_d *) *slot;
1365 debug_rename_elt (entry);
1369 /* Print to stderr all the elements of MAP. */
1372 debug_rename_map (htab_t map)
1374 htab_traverse (map, debug_rename_map_1, NULL);
1377 /* Constructs a new rename_map_elt_d mapping OLD_NAME to NEW_NAME. */
1379 static inline rename_map_elt
1380 new_rename_map_elt (tree old_name, tree new_name)
1384 res = XNEW (struct rename_map_elt_d);
1385 res->old_name = old_name;
1386 res->new_name = new_name;
1391 /* Computes a hash function for database element ELT. */
/* Hashing by pointer is safe here: SSA_NAME trees are unique objects.  */
1394 rename_map_elt_info (const void *elt)
1396 return htab_hash_pointer (((const struct rename_map_elt_d *) elt)->old_name);
1399 /* Compares database elements E1 and E2. */
1402 eq_rename_map_elts (const void *e1, const void *e2)
1404 const struct rename_map_elt_d *elt1 = (const struct rename_map_elt_d *) e1;
1405 const struct rename_map_elt_d *elt2 = (const struct rename_map_elt_d *) e2;
1407 return (elt1->old_name == elt2->old_name);
1410 /* Returns the new name associated to OLD_NAME in MAP. */
1413 get_new_name_from_old_name (htab_t map, tree old_name)
1415 struct rename_map_elt_d tmp;
1418 tmp.old_name = old_name;
/* NO_INSERT: a failed lookup leaves the table unchanged.  */
1419 slot = htab_find_slot (map, &tmp, NO_INSERT);
1422 return ((rename_map_elt) *slot)->new_name;
1429 /* Creates a new scop delimited by the edges ENTRY and EXIT. */
1432 new_scop (edge entry, edge exit)
1434 scop_p scop = XNEW (struct scop);
1436 gcc_assert (entry && exit);
1438 SCOP_REGION (scop) = new_sese (entry, exit);
1439 SCOP_BBS (scop) = VEC_alloc (graphite_bb_p, heap, 3);
1440 SCOP_OLDIVS (scop) = VEC_alloc (name_tree, heap, 3);
1441 SCOP_LOOPS (scop) = BITMAP_ALLOC (NULL);
1442 SCOP_LOOP_NEST (scop) = VEC_alloc (loop_p, heap, 3);
1443 SCOP_ADD_PARAMS (scop) = true;
1444 SCOP_PARAMS (scop) = VEC_alloc (name_tree, heap, 3);
/* The CLooG program object owns the scattering/domain data handed to
   the code generator later.  */
1445 SCOP_PROG (scop) = cloog_program_malloc ();
1446 cloog_program_set_names (SCOP_PROG (scop), cloog_names_malloc ());
1447 SCOP_LOOP2CLOOG_LOOP (scop) = htab_create (10, hash_loop_to_cloog_loop,
1448 eq_loop_to_cloog_loop,
1450 SCOP_LIVEOUT_RENAMES (scop) = htab_create (10, rename_map_elt_info,
1451 eq_rename_map_elts, free);
/* Deletes SCOP and all the data structures it owns.  */
1458 free_scop (scop_p scop)
1462 struct graphite_bb *gb;
1465 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
1466 free_graphite_bb (gb);
1468 VEC_free (graphite_bb_p, heap, SCOP_BBS (scop));
1469 BITMAP_FREE (SCOP_LOOPS (scop));
1470 VEC_free (loop_p, heap, SCOP_LOOP_NEST (scop));
/* Free the individual elements before freeing the vectors that hold
   them.  */
1472 for (i = 0; VEC_iterate (name_tree, SCOP_OLDIVS (scop), i, iv); i++)
1474 VEC_free (name_tree, heap, SCOP_OLDIVS (scop));
1476 for (i = 0; VEC_iterate (name_tree, SCOP_PARAMS (scop), i, p); i++)
1479 VEC_free (name_tree, heap, SCOP_PARAMS (scop));
1480 cloog_program_free (SCOP_PROG (scop));
1481 htab_delete (SCOP_LOOP2CLOOG_LOOP (scop));
1482 htab_delete (SCOP_LIVEOUT_RENAMES (scop));
1483 free_sese (SCOP_REGION (scop));
1487 /* Deletes all scops in SCOPS. */
1490 free_scops (VEC (scop_p, heap) *scops)
1495 for (i = 0; VEC_iterate (scop_p, scops, i, scop); i++)
1498 VEC_free (scop_p, heap, scops);
/* Classification of basic blocks used by the SCoP detection.  */
1501 typedef enum gbb_type {
1503 GBB_LOOP_SING_EXIT_HEADER,
1504 GBB_LOOP_MULT_EXIT_HEADER,
1511 /* Detect the type of BB. Loop headers are only marked, if they are
1512 new. This means their loop_father is different to LAST_LOOP.
1513 Otherwise they are treated like any other bb and their type can be
1517 get_bb_type (basic_block bb, struct loop *last_loop)
1519 VEC (basic_block, heap) *dom;
1521 struct loop *loop = bb->loop_father;
1523 /* Check, if we entry into a new loop. */
1524 if (loop != last_loop)
1526 if (single_exit (loop) != NULL)
1527 return GBB_LOOP_SING_EXIT_HEADER;
/* loop->num == 0 is the dummy tree_root loop; it is not a real loop.  */
1528 else if (loop->num != 0)
1529 return GBB_LOOP_MULT_EXIT_HEADER;
1531 return GBB_COND_HEADER;
1534 dom = get_dominated_by (CDI_DOMINATORS, bb);
1535 nb_dom = VEC_length (basic_block, dom);
1536 VEC_free (basic_block, heap, dom);
1541 nb_suc = VEC_length (edge, bb->succs);
/* A bb with one dominated block and one successor is a plain
   fall-through block; everything else is treated as a condition
   header.  NOTE(review): listing truncated around here.  */
1543 if (nb_dom == 1 && nb_suc == 1)
1546 return GBB_COND_HEADER;
1549 /* A SCoP detection region, defined using bbs as borders.
1550 All control flow touching this region, comes in passing basic_block ENTRY and
1551 leaves passing basic_block EXIT. By using bbs instead of edges for the
1552 borders we are able to represent also regions that do not have a single
1554 But as they have a single entry basic_block and a single exit basic_block, we
1555 are able to generate for every sd_region a single entry and exit edge.
1562 / \ This region contains: {3, 4, 5, 6, 7, 8}
1570 typedef struct sd_region_p
1572 /* The entry bb dominates all bbs in the sd_region. It is part of the
1576 /* The exit bb postdominates all bbs in the sd_region, but is not
1577 part of the region. */
/* Vector support for sd_region objects (stored by value).  */
1581 DEF_VEC_O(sd_region);
1582 DEF_VEC_ALLOC_O(sd_region, heap);
1585 /* Moves the scops from SOURCE to TARGET and clean up SOURCE. */
1588 move_sd_regions (VEC (sd_region, heap) **source, VEC (sd_region, heap) **target)
1593 for (i = 0; VEC_iterate (sd_region, *source, i, s); i++)
1594 VEC_safe_push (sd_region, heap, *target, s);
1596 VEC_free (sd_region, heap, *source);
1599 /* Return true when it is not possible to represent the upper bound of
1600 LOOP in the polyhedral representation. */
1603 graphite_cannot_represent_loop_niter (loop_p loop)
1605 tree niter = number_of_latch_executions (loop);
/* Reject unknown trip counts and non-affine (non-linear) expressions:
   neither can be encoded as polyhedral constraints.  */
1607 return chrec_contains_undetermined (niter)
1608 || !scev_is_linear_expression (niter);
1610 /* Store information needed by scopdet_* functions. */
1614 /* Where the last open scop would stop if the current BB is harmful. */
1617 /* Where the next scop would start if the current BB is harmful. */
1620 /* The bb or one of its children contains open loop exits. That means
1621 loop exit nodes that are not surrounded by a loop dominated by bb. */
1624 /* The bb or one of its children contains only structures we can handle. */
/* Forward declaration: scopdet_basic_block_info and build_scops_1 are
   mutually recursive over the dominator tree.  */
1629 static struct scopdet_info build_scops_1 (basic_block, VEC (sd_region, heap) **,
1632 /* Calculates BB infos. If bb is difficult we add valid SCoPs dominated by BB
1633 to SCOPS. TYPE is the gbb_type of BB. */
/* Dispatches on the gbb_type of BB and fills a scopdet_info result:
   .difficult (bb cannot be part of a SCoP), .exits (open loop exits
   below bb), .last/.next (SCoP boundary candidates).  Recurses via
   build_scops_1 for loop bodies and condition branches.
   NOTE(review): the listing is truncated; the switch cases below are
   missing lines, so statement order must not be changed.  */
1635 static struct scopdet_info
1636 scopdet_basic_block_info (basic_block bb, VEC (sd_region, heap) **scops,
1639 struct loop *loop = bb->loop_father;
1640 struct scopdet_info result;
1643 /* XXX: ENTRY_BLOCK_PTR could be optimized in later steps. */
1644 stmt = harmful_stmt_in_bb (ENTRY_BLOCK_PTR, bb);
1645 result.difficult = (stmt != NULL);
1652 result.exits = false;
1655 /* Mark bbs terminating a SESE region difficult, if they start
1657 if (VEC_length (edge, bb->succs) > 1)
1658 result.difficult = true;
1663 result.next = single_succ (bb);
1664 result.exits = false;
1668 case GBB_LOOP_SING_EXIT_HEADER:
1670 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap,3);
1671 struct scopdet_info sinfo;
/* Scan the whole loop body first; the loop's single exit gives us
   the .last/.next boundary blocks.  */
1673 sinfo = build_scops_1 (bb, &tmp_scops, loop);
1675 result.last = single_exit (bb->loop_father)->src;
1676 result.next = single_exit (bb->loop_father)->dest;
1678 /* If we do not dominate result.next, remove it. It's either
1679 the EXIT_BLOCK_PTR, or another bb dominates it and will
1680 call the scop detection for this bb. */
1681 if (!dominated_by_p (CDI_DOMINATORS, result.next, bb))
1684 if (result.last->loop_father != loop)
1687 if (graphite_cannot_represent_loop_niter (loop))
1688 result.difficult = true;
/* If the body contained something harmful, keep the inner SCoPs
   found so far; otherwise drop the temporary vector.  */
1690 if (sinfo.difficult)
1691 move_sd_regions (&tmp_scops, scops);
1693 VEC_free (sd_region, heap, tmp_scops);
1695 result.exits = false;
1696 result.difficult |= sinfo.difficult;
1700 case GBB_LOOP_MULT_EXIT_HEADER:
1702 /* XXX: For now we just do not join loops with multiple exits. If the
1703 exits lead to the same bb it may be possible to join the loop. */
1704 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap, 3);
1705 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1708 build_scops_1 (bb, &tmp_scops, loop);
1710 /* Scan the code dominated by this loop. This means all bbs, that are
1711 are dominated by a bb in this loop, but are not part of this loop.
1714 - The loop exit destination is dominated by the exit sources.
1716 TODO: We miss here the more complex cases:
1717 - The exit destinations are dominated by another bb inside the
1719 - The loop dominates bbs, that are not exit destinations. */
1720 for (i = 0; VEC_iterate (edge, exits, i, e); i++)
1721 if (e->src->loop_father == loop
1722 && dominated_by_p (CDI_DOMINATORS, e->dest, e->src))
1724 /* Pass loop_outer to recognize e->dest as loop header in
1726 if (e->dest->loop_father->header == e->dest)
1727 build_scops_1 (e->dest, &tmp_scops,
1728 loop_outer (e->dest->loop_father));
1730 build_scops_1 (e->dest, &tmp_scops, e->dest->loop_father);
/* Multiple-exit loops are never joined into an enclosing SCoP.  */
1735 result.difficult = true;
1736 result.exits = false;
1737 move_sd_regions (&tmp_scops, scops);
1738 VEC_free (edge, heap, exits);
1741 case GBB_COND_HEADER:
1743 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap, 3);
1744 struct scopdet_info sinfo;
1745 VEC (basic_block, heap) *dominated;
1748 basic_block last_bb = NULL;
1750 result.exits = false;
1752 /* First check the successors of BB, and check if it is possible to join
1753 the different branches. */
1754 for (i = 0; VEC_iterate (edge, bb->succs, i, e); i++)
1756 /* Ignore loop exits. They will be handled after the loop body. */
1757 if (is_loop_exit (loop, e->dest))
1759 result.exits = true;
1763 /* Do not follow edges that lead to the end of the
1764 conditions block. For example, in
1774 the edge from 0 => 6. Only check if all paths lead to
1777 if (!single_pred_p (e->dest))
1779 /* Check, if edge leads directly to the end of this
1786 if (e->dest != last_bb)
1787 result.difficult = true;
1792 if (!dominated_by_p (CDI_DOMINATORS, e->dest, bb))
1794 result.difficult = true;
1798 sinfo = build_scops_1 (e->dest, &tmp_scops, loop);
1800 result.exits |= sinfo.exits;
1801 result.last = sinfo.last;
1802 result.difficult |= sinfo.difficult;
1804 /* Checks, if all branches end at the same point.
1805 If that is true, the condition stays joinable.
1806 Have a look at the example above. */
1807 if (sinfo.last && single_succ_p (sinfo.last))
1809 basic_block next_tmp = single_succ (sinfo.last);
1814 if (next_tmp != last_bb)
1815 result.difficult = true;
1818 result.difficult = true;
1821 /* If the condition is joinable. */
1822 if (!result.exits && !result.difficult)
1824 /* Only return a next pointer if we dominate this pointer.
1825 Otherwise it will be handled by the bb dominating it. */
1826 if (dominated_by_p (CDI_DOMINATORS, last_bb, bb) && last_bb != bb)
1827 result.next = last_bb;
1831 VEC_free (sd_region, heap, tmp_scops);
1835 /* Scan remaining bbs dominated by BB. */
1836 dominated = get_dominated_by (CDI_DOMINATORS, bb);
1838 for (i = 0; VEC_iterate (basic_block, dominated, i, dom_bb); i++)
1840 /* Ignore loop exits: they will be handled after the loop body. */
1841 if (loop_depth (find_common_loop (loop, dom_bb->loop_father))
1842 < loop_depth (loop))
1844 result.exits = true;
1848 /* Ignore the bbs processed above. */
1849 if (single_pred_p (dom_bb) && single_pred (dom_bb) == bb)
1852 if (loop_depth (loop) > loop_depth (dom_bb->loop_father))
1853 sinfo = build_scops_1 (dom_bb, &tmp_scops, loop_outer (loop));
1855 sinfo = build_scops_1 (dom_bb, &tmp_scops, loop);
1858 result.exits |= sinfo.exits;
/* A non-joinable condition header is always difficult.  */
1859 result.difficult = true;
1863 VEC_free (basic_block, heap, dominated);
1866 move_sd_regions (&tmp_scops, scops);
1878 /* Creates the SCoPs and writes entry and exit points for every SCoP. */
/* Walks the dominator tree starting at CURRENT, opening a SCoP when
   a representable bb is met and closing it on the first difficult bb
   or open loop exit.  Closed regions are pushed onto *SCOPS.  LOOP is
   the loop containing CURRENT.
   NOTE(review): listing truncated; some lines are missing.  */
1880 static struct scopdet_info
1881 build_scops_1 (basic_block current, VEC (sd_region, heap) **scops, loop_p loop)
1883 bool in_scop = false;
1884 sd_region open_scop;
1885 struct scopdet_info sinfo;
1887 /* Initialize result. */
1888 struct scopdet_info result;
1889 result.exits = false;
1890 result.difficult = false;
1893 open_scop.entry = NULL;
1894 open_scop.exit = NULL;
1897 /* Loop over the dominance tree. If we meet a difficult bb, close
1898 the current SCoP. Loop and condition header start a new layer,
1899 and can only be added if all bbs in deeper layers are simple. */
1900 while (current != NULL)
1902 sinfo = scopdet_basic_block_info (current, scops, get_bb_type (current,
/* Transition: not in a SCoP and bb is fine -> open a new region.  */
1905 if (!in_scop && !(sinfo.exits || sinfo.difficult))
1907 open_scop.entry = current;
1908 open_scop.exit = NULL;
/* Transition: in a SCoP and bb is harmful -> close the region here.  */
1911 else if (in_scop && (sinfo.exits || sinfo.difficult))
1913 open_scop.exit = current;
1914 VEC_safe_push (sd_region, heap, *scops, &open_scop);
1918 result.difficult |= sinfo.difficult;
1919 result.exits |= sinfo.exits;
1921 current = sinfo.next;
1924 /* Try to close open_scop, if we are still in an open SCoP. */
/* Prefer a successor of the last bb that post-dominates it as the
   exit; otherwise fall back to the last bb itself.  */
1930 for (i = 0; VEC_iterate (edge, sinfo.last->succs, i, e); i++)
1931 if (dominated_by_p (CDI_POST_DOMINATORS, sinfo.last, e->dest))
1932 open_scop.exit = e->dest;
1934 if (!open_scop.exit && open_scop.entry != sinfo.last)
1935 open_scop.exit = sinfo.last;
1938 VEC_safe_push (sd_region, heap, *scops, &open_scop);
1942 result.last = sinfo.last;
1946 /* Checks if a bb is contained in REGION. */
/* BB is in REGION iff it is dominated by the entry and not in the
   part dominated by the exit (unless the exit dominates the entry,
   i.e. the region is inside a loop back edge).  */
1949 bb_in_sd_region (basic_block bb, sd_region *region)
1951 return dominated_by_p (CDI_DOMINATORS, bb, region->entry)
1952 && !(dominated_by_p (CDI_DOMINATORS, bb, region->exit)
1953 && !dominated_by_p (CDI_DOMINATORS, region->entry,
1957 /* Returns the single entry edge of REGION, if it does not exits NULL. */
1960 find_single_entry_edge (sd_region *region)
/* Look for exactly one predecessor edge of the entry bb coming from
   outside the region.  NOTE(review): listing truncated here.  */
1966 FOR_EACH_EDGE (e, ei, region->entry->preds)
1967 if (!bb_in_sd_region (e->src, region))
1982 /* Returns the single exit edge of REGION, if it does not exits NULL. */
1985 find_single_exit_edge (sd_region *region)
/* Symmetric to the above: exactly one predecessor edge of the exit bb
   coming from inside the region.  */
1991 FOR_EACH_EDGE (e, ei, region->exit->preds)
1992 if (bb_in_sd_region (e->src, region))
2007 /* Create a single entry edge for REGION. */
2010 create_single_entry_edge (sd_region *region)
/* Nothing to do when a unique entry edge already exists.  */
2012 if (find_single_entry_edge (region))
2015 /* There are multiple predecessors for bb_3
2028 There are two edges (1->3, 2->3), that point from outside into the region,
2029 and another one (5->3), a loop latch, lead to bb_3.
2037 | |\ (3.0 -> 3.1) = single entry edge
2046 If the loop is part of the SCoP, we have to redirect the loop latches.
2052 | | (3.0 -> 3.1) = entry edge
/* Split after the labels of the entry bb, so the forwarder block
   collects all incoming edges and its out-edge becomes the unique
   entry edge of the region.  */
2061 if (region->entry->loop_father->header != region->entry
2062 || dominated_by_p (CDI_DOMINATORS,
2063 loop_latch_edge (region->entry->loop_father)->src,
2066 edge forwarder = split_block_after_labels (region->entry);
2067 region->entry = forwarder->dest;
2070 /* This case is never executed, as the loop headers seem always to have a
2071 single edge pointing from outside into the loop. */
2074 #ifdef ENABLE_CHECKING
2075 gcc_assert (find_single_entry_edge (region));
2079 /* Check if the sd_region, mentioned in EDGE, has no exit bb. */
2082 sd_region_without_exit (edge e)
/* e->aux carries the sd_region the edge belongs to, or NULL (see
   mark_exit_edges).  */
2084 sd_region *r = (sd_region *) e->aux;
2087 return r->exit == NULL;
2092 /* Create a single exit edge for REGION. */
2095 create_single_exit_edge (sd_region *region)
2099 edge forwarder = NULL;
/* Nothing to do when a unique exit edge already exists.  */
2102 if (find_single_exit_edge (region))
2105 /* We create a forwarder bb (5) for all edges leaving this region
2106 (3->5, 4->5). All other edges leading to the same bb, are moved
2107 to a new bb (6). If these edges where part of another region (2->5)
2108 we update the region->exit pointer, of this region.
2110 To identify which edge belongs to which region we depend on the e->aux
2111 pointer in every edge. It points to the region of the edge or to NULL,
2112 if the edge is not part of any region.
2114 1 2 3 4 1->5 no region, 2->5 region->exit = 5,
2115 \| |/ 3->5 region->exit = NULL, 4->5 region->exit = NULL
2120 1 2 3 4 1->6 no region, 2->6 region->exit = 6,
2121 | | \/ 3->5 no region, 4->5 no region,
2123 \| / 5->6 region->exit = 6
2126 Now there is only a single exit edge (5->6). */
/* Clear region->exit first so this region's own edges classify as
   "without exit" in the forwarder predicate below.  */
2127 exit = region->exit;
2128 region->exit = NULL;
2129 forwarder = make_forwarder_block (exit, &sd_region_without_exit, NULL);
2131 /* Unmark the edges, that are no longer exit edges. */
2132 FOR_EACH_EDGE (e, ei, forwarder->src->preds)
2136 /* Mark the new exit edge. */
2137 single_succ_edge (forwarder->src)->aux = region;
2139 /* Update the exit bb of all regions, where exit edges lead to
2141 FOR_EACH_EDGE (e, ei, forwarder->dest->preds)
2143 ((sd_region *) e->aux)->exit = forwarder->dest;
2145 #ifdef ENABLE_CHECKING
2146 gcc_assert (find_single_exit_edge (region));
2150 /* Unmark the exit edges of all REGIONS.
2151 See comment in "create_single_exit_edge". */
/* Clears the e->aux region tags set by mark_exit_edges.  */
2154 unmark_exit_edges (VEC (sd_region, heap) *regions)
2161 for (i = 0; VEC_iterate (sd_region, regions, i, s); i++)
2162 FOR_EACH_EDGE (e, ei, s->exit->preds)
2167 /* Mark the exit edges of all REGIONS.
2168 See comment in "create_single_exit_edge". */
/* Tags every edge leaving a region (src inside, dest = exit bb) with
   its region via e->aux.  */
2171 mark_exit_edges (VEC (sd_region, heap) *regions)
2178 for (i = 0; VEC_iterate (sd_region, regions, i, s); i++)
2179 FOR_EACH_EDGE (e, ei, s->exit->preds)
2180 if (bb_in_sd_region (e->src, s))
2184 /* Free and compute again all the dominators information. */
/* Drops and rebuilds both dominator directions; the CFG was changed
   by the edge-splitting done during SESE construction.  */
2187 recompute_all_dominators (void)
2189 mark_irreducible_loops ();
2190 free_dominance_info (CDI_DOMINATORS);
2191 free_dominance_info (CDI_POST_DOMINATORS);
2192 calculate_dominance_info (CDI_DOMINATORS);
2193 calculate_dominance_info (CDI_POST_DOMINATORS);
2196 /* Verifies properties that GRAPHITE should maintain during translation. */
/* Checking-only consistency verification: loops, dominators, and
   loop-closed SSA form.  Compiled out without ENABLE_CHECKING.  */
2199 graphite_verify (void)
2201 #ifdef ENABLE_CHECKING
2202 verify_loop_structure ();
2203 verify_dominators (CDI_DOMINATORS);
2204 verify_dominators (CDI_POST_DOMINATORS);
2206 verify_loop_closed_ssa ();
2210 /* Create for all scop regions a single entry and a single exit edge. */
2213 create_sese_edges (VEC (sd_region, heap) *regions)
2218 for (i = 0; VEC_iterate (sd_region, regions, i, s); i++)
2219 create_single_entry_edge (s);
/* Exit edges need the e->aux region tags; mark, split, then unmark.  */
2221 mark_exit_edges (regions);
2223 for (i = 0; VEC_iterate (sd_region, regions, i, s); i++)
2224 create_single_exit_edge (s);
2226 unmark_exit_edges (regions);
/* Splitting blocks invalidated the loop structure; repair it.  */
2228 fix_loop_structure (NULL);
2230 #ifdef ENABLE_CHECKING
2231 verify_loop_structure ();
2232 verify_dominators (CDI_DOMINATORS);
2237 /* Create graphite SCoPs from an array of scop detection regions. */
2240 build_graphite_scops (VEC (sd_region, heap) *scop_regions)
2245 for (i = 0; VEC_iterate (sd_region, scop_regions, i, s); i++)
2247 edge entry = find_single_entry_edge (s);
2248 edge exit = find_single_exit_edge (s);
2249 scop_p scop = new_scop (entry, exit);
2250 VEC_safe_push (scop_p, heap, current_scops, scop);
2252 /* Are there overlapping SCoPs? */
2253 #ifdef ENABLE_CHECKING
/* Checking only: no region's entry may lie inside another region.  */
2258 for (j = 0; VEC_iterate (sd_region, scop_regions, j, s2); j++)
2260 gcc_assert (!bb_in_sd_region (s->entry, s2));
2266 /* Find static control parts. */
/* Top-level driver of SCoP detection: scan from the function entry,
   create SESE edges for each detected region, then build the scops.  */
2271 struct loop *loop = current_loops->tree_root;
2272 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap, 3);
2274 build_scops_1 (single_succ (ENTRY_BLOCK_PTR), &tmp_scops, loop);
2275 create_sese_edges (tmp_scops);
2276 build_graphite_scops (tmp_scops);
2277 VEC_free (sd_region, heap, tmp_scops);
2280 /* Gather the basic blocks belonging to the SCOP. */
/* Iterative DFS over the CFG from the SCoP entry, creating a
   graphite_bb for each reachable block inside the region.  The push
   order below determines the order of SCOP_BBS and thereby the
   static schedule — do not reorder the edge loops.  */
2283 build_scop_bbs (scop_p scop)
2285 basic_block *stack = XNEWVEC (basic_block, n_basic_blocks + 1);
2286 sbitmap visited = sbitmap_alloc (last_basic_block);
2289 sbitmap_zero (visited);
2290 stack[sp++] = SCOP_ENTRY (scop);
2294 basic_block bb = stack[--sp];
2295 int depth = loop_depth (bb->loop_father);
2296 int num = bb->loop_father->num;
2300 /* Scop's exit is not in the scop. Exclude also bbs, which are
2301 dominated by the SCoP exit. These are e.g. loop latches. */
2302 if (TEST_BIT (visited, bb->index)
2303 || dominated_by_p (CDI_DOMINATORS, bb, SCOP_EXIT (scop))
2304 /* Every block in the scop is dominated by scop's entry. */
2305 || !dominated_by_p (CDI_DOMINATORS, bb, SCOP_ENTRY (scop)))
2308 new_graphite_bb (scop, bb);
2309 SET_BIT (visited, bb->index);
2311 /* First push the blocks that have to be processed last. Note
2312 that this means that the order in which the code is organized
2313 below is important: do not reorder the following code. */
2314 FOR_EACH_EDGE (e, ei, bb->succs)
2315 if (! TEST_BIT (visited, e->dest->index)
2316 && (int) loop_depth (e->dest->loop_father) < depth)
2317 stack[sp++] = e->dest;
2319 FOR_EACH_EDGE (e, ei, bb->succs)
2320 if (! TEST_BIT (visited, e->dest->index)
2321 && (int) loop_depth (e->dest->loop_father) == depth
2322 && e->dest->loop_father->num != num)
2323 stack[sp++] = e->dest;
2325 FOR_EACH_EDGE (e, ei, bb->succs)
2326 if (! TEST_BIT (visited, e->dest->index)
2327 && (int) loop_depth (e->dest->loop_father) == depth
2328 && e->dest->loop_father->num == num
2329 && EDGE_COUNT (e->dest->preds) > 1)
2330 stack[sp++] = e->dest;
2332 FOR_EACH_EDGE (e, ei, bb->succs)
2333 if (! TEST_BIT (visited, e->dest->index)
2334 && (int) loop_depth (e->dest->loop_father) == depth
2335 && e->dest->loop_father->num == num
2336 && EDGE_COUNT (e->dest->preds) == 1)
2337 stack[sp++] = e->dest;
/* Deeper-nested successors are pushed last, hence visited first.  */
2339 FOR_EACH_EDGE (e, ei, bb->succs)
2340 if (! TEST_BIT (visited, e->dest->index)
2341 && (int) loop_depth (e->dest->loop_father) > depth)
2342 stack[sp++] = e->dest;
2346 sbitmap_free (visited);
2349 /* Returns the number of reduction phi nodes in LOOP. */
/* Walks the loop header phis; a scalar phi whose evolution is not a
   simple affine IV is counted as a reduction.
   NOTE(review): listing truncated in the body.  */
2352 nb_reductions_in_loop (loop_p loop)
2355 gimple_stmt_iterator gsi;
2357 for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
2359 gimple phi = gsi_stmt (gsi);
/* Skip virtual (memory) phis.  */
2363 if (!is_gimple_reg (PHI_RESULT (phi)))
2366 scev = analyze_scalar_evolution (loop, PHI_RESULT (phi));
2367 scev = instantiate_parameters (loop, scev);
2368 if (!simple_iv (loop, loop, PHI_RESULT (phi), &iv, true))
2375 /* A LOOP is in normal form when it contains only one scalar phi node
2376 that defines the main induction variable of the loop, only one
2377 increment of the IV, and only one exit condition. */
2380 graphite_loop_normal_form (loop_p loop)
2382 struct tree_niter_desc niter;
2385 edge exit = single_dom_exit (loop);
2386 bool known_niter = number_of_iterations_exit (loop, exit, &niter, false);
2388 gcc_assert (known_niter);
/* Materialize the iteration count on the preheader edge so the
   canonical IV can test against it.  */
2390 nit = force_gimple_operand (unshare_expr (niter.niter), &stmts, true,
2393 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
2395 /* One IV per loop. */
2396 if (nb_reductions_in_loop (loop) > 0)
2399 return canonicalize_loop_ivs (loop, NULL, &nit);
2402 /* Record LOOP as occuring in SCOP. Returns true when the operation
/* was successful; registers the loop in the scop's bitmap and loop
   nest, normalizes it, and remembers the old induction variable.  */
2406 scop_record_loop (scop_p scop, loop_p loop)
2411 if (bitmap_bit_p (SCOP_LOOPS (scop), loop->num))
2414 bitmap_set_bit (SCOP_LOOPS (scop), loop->num);
2415 VEC_safe_push (loop_p, heap, SCOP_LOOP_NEST (scop), loop);
2417 induction_var = graphite_loop_normal_form (loop);
2421 oldiv = XNEW (struct name_tree_d);
2422 oldiv->t = induction_var;
2423 oldiv->name = get_name (SSA_NAME_VAR (oldiv->t));
2425 VEC_safe_push (name_tree, heap, SCOP_OLDIVS (scop), oldiv);
2429 /* Build the loop nests contained in SCOP. Returns true when the
2430 operation was successful. */
2433 build_scop_loop_nests (scop_p scop)
2437 struct loop *loop0, *loop1;
2440 if (bb_in_sese_p (bb, SCOP_REGION (scop)))
2442 struct loop *loop = bb->loop_father;
2444 /* Only add loops if they are completely contained in the SCoP. */
2445 if (loop->header == bb
2446 && bb_in_sese_p (loop->latch, SCOP_REGION (scop)))
2448 if (!scop_record_loop (scop, loop))
2453 /* Make sure that the loops in the SCOP_LOOP_NEST are ordered. It
2454 can be the case that an inner loop is inserted before an outer
2455 loop. To avoid this, semi-sort once. */
/* A single bubble pass: swaps adjacent entries whose loop numbers are
   out of order.  */
2456 for (i = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), i, loop0); i++)
2458 if (VEC_length (loop_p, SCOP_LOOP_NEST (scop)) == i + 1)
2461 loop1 = VEC_index (loop_p, SCOP_LOOP_NEST (scop), i + 1);
2462 if (loop0->num > loop1->num)
2464 VEC_replace (loop_p, SCOP_LOOP_NEST (scop), i, loop1);
2465 VEC_replace (loop_p, SCOP_LOOP_NEST (scop), i + 1, loop0);
2472 /* Calculate the number of loops around LOOP in the SCOP. */
/* Counts how many enclosing loops, L itself included, still belong to
   the SCoP's SESE region.  */
2475 nb_loops_around_loop_in_scop (struct loop *l, scop_p scop)
2479 for (; loop_in_sese_p (l, SCOP_REGION (scop)); d++, l = loop_outer (l));
2484 /* Calculate the number of loops around GB in the current SCOP. */
2487 nb_loops_around_gb (graphite_bb_p gb)
2489 return nb_loops_around_loop_in_scop (gbb_loop (gb), GBB_SCOP (gb));
2492 /* Returns the dimensionality of an enclosing loop iteration domain
2493 with respect to enclosing SCoP for a given data reference REF. The
2494 returned dimensionality is homogeneous (depth of loop nest + number
2495 of SCoP parameters + const). */
2498 ref_nb_loops (data_reference_p ref)
2500 loop_p loop = loop_containing_stmt (DR_STMT (ref));
2501 scop_p scop = DR_SCOP (ref);
/* + 2: one column for the eq/ineq marker, one for the constant.  */
2503 return nb_loops_around_loop_in_scop (loop, scop) + scop_nb_params (scop) + 2;
2506 /* Build dynamic schedules for all the BBs. */
/* For each bb inside a loop, allocates an identity matrix of size
   (number of surrounding loops); bbs outside any scop loop get a NULL
   dynamic schedule.  NOTE(review): listing truncated.  */
2509 build_scop_dynamic_schedules (scop_p scop)
2511 int i, dim, loop_num, row, col;
2514 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
2516 loop_num = GBB_BB (gb)->loop_father->num;
2520 dim = nb_loops_around_gb (gb);
2521 GBB_DYNAMIC_SCHEDULE (gb) = cloog_matrix_alloc (dim, dim);
/* Identity matrix: 1 on the diagonal, 0 elsewhere.  */
2523 for (row = 0; row < GBB_DYNAMIC_SCHEDULE (gb)->NbRows; row++)
2524 for (col = 0; col < GBB_DYNAMIC_SCHEDULE (gb)->NbColumns; col++)
2526 value_set_si (GBB_DYNAMIC_SCHEDULE (gb)->p[row][col], 1);
2528 value_set_si (GBB_DYNAMIC_SCHEDULE (gb)->p[row][col], 0);
2531 GBB_DYNAMIC_SCHEDULE (gb) = NULL;
2535 /* Returns the number of loops that are identical at the beginning of
2536 the vectors A and B. */
2539 compare_prefix_loops (VEC (loop_p, heap) *a, VEC (loop_p, heap) *b)
2548 lb = VEC_length (loop_p, b);
/* Stop at the first index where the vectors differ (or B ends).  */
2550 for (i = 0; VEC_iterate (loop_p, a, i, ea); i++)
2552 || ea != VEC_index (loop_p, b, i))
2558 /* Build for BB the static schedule.
2560 The STATIC_SCHEDULE is defined like this:
2579 Static schedules for A to F:
/* Assigns each graphite_bb a lexicographic static schedule vector by
   comparing its loop stack with the previous bb's and incrementing
   the first differing position.  Relies on SCOP_BBS being in the
   DFS order produced by build_scop_bbs.  */
2592 build_scop_canonical_schedules (scop_p scop)
2596 int nb_loops = scop_nb_loops (scop);
2597 lambda_vector static_schedule = lambda_vector_new (nb_loops + 1);
2598 VEC (loop_p, heap) *loops_previous = NULL;
2600 /* We have to start schedules at 0 on the first component and
2601 because we cannot compare_prefix_loops against a previous loop,
2602 prefix will be equal to zero, and that index will be
2603 incremented before copying. */
2604 static_schedule[0] = -1;
2606 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
2608 int prefix = compare_prefix_loops (loops_previous, GBB_LOOPS (gb));
2609 int nb = gbb_nb_loops (gb);
2611 loops_previous = GBB_LOOPS (gb);
/* Reset the suffix of the schedule past the common prefix, then bump
   the component at the prefix boundary.  */
2612 memset (&(static_schedule[prefix + 1]), 0, sizeof (int) * (nb_loops - prefix));
2613 ++static_schedule[prefix];
2614 GBB_STATIC_SCHEDULE (gb) = lambda_vector_new (nb + 1);
2615 lambda_vector_copy (static_schedule,
2616 GBB_STATIC_SCHEDULE (gb), nb + 1);
2620 /* Build the LOOPS vector for all bbs in SCOP. */
/* For each bb, records the stack of surrounding scop loops,
   outermost first (filled from the innermost position backwards).  */
2623 build_bb_loops (scop_p scop)
2628 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
2633 depth = nb_loops_around_gb (gb) - 1;
2635 GBB_LOOPS (gb) = VEC_alloc (loop_p, heap, 3);
2636 VEC_safe_grow_cleared (loop_p, heap, GBB_LOOPS (gb), depth + 1);
2638 loop = GBB_BB (gb)->loop_father;
/* Walk outward while the loop is still part of the SCoP.  */
2640 while (scop_contains_loop (scop, loop))
2642 VEC_replace (loop_p, GBB_LOOPS (gb), depth, loop);
2643 loop = loop_outer (loop);
2649 /* Get the index for parameter VAR in SCOP. */
/* Returns the position of VAR in SCOP_PARAMS, registering it as a new
   parameter if not yet present (only legal while SCOP_ADD_PARAMS).  */
2652 param_index (tree var, scop_p scop)
2658 gcc_assert (TREE_CODE (var) == SSA_NAME);
2660 for (i = 0; VEC_iterate (name_tree, SCOP_PARAMS (scop), i, p); i++)
2664 gcc_assert (SCOP_ADD_PARAMS (scop));
2666 nvar = XNEW (struct name_tree_d);
2669 VEC_safe_push (name_tree, heap, SCOP_PARAMS (scop), nvar);
2670 return VEC_length (name_tree, SCOP_PARAMS (scop)) - 1;
2673 /* Scan EXPR and translate it to an inequality vector INEQ that will
2674 be added, or subtracted, in the constraint domain matrix C at row
2675 R. K is the number of columns for loop iterators in C. */
/* Recursive walk over a scalar-evolution tree E, accumulating its
   coefficients into row R of constraint matrix C, scaled by K and
   negated when SUBTRACT.  With C == NULL this only has the side
   effect of registering parameters via param_index (see
   find_scop_parameters).  NOTE(review): listing heavily truncated;
   several cases are missing lines.  */
2678 scan_tree_for_params (scop_p s, tree e, CloogMatrix *c, int r, Value k,
2681 int cst_col, param_col;
2683 if (e == chrec_dont_know)
2686 switch (TREE_CODE (e))
2688 case POLYNOMIAL_CHREC:
2690 tree left = CHREC_LEFT (e);
2691 tree right = CHREC_RIGHT (e);
2692 int var = CHREC_VARIABLE (e);
2694 if (TREE_CODE (right) != INTEGER_CST)
/* +1 skips the eq/ineq column; the step goes into the column of the
   loop the chrec varies in.  */
2699 int loop_col = scop_gimple_loop_depth (s, get_loop (var)) + 1;
2702 value_sub_int (c->p[r][loop_col], c->p[r][loop_col],
2703 int_cst_value (right));
2705 value_add_int (c->p[r][loop_col], c->p[r][loop_col],
2706 int_cst_value (right));
2709 switch (TREE_CODE (left))
2711 case POLYNOMIAL_CHREC:
2712 scan_tree_for_params (s, left, c, r, k, subtract);
2716 /* Constant part. */
2719 int v = int_cst_value (left);
2720 cst_col = c->NbColumns - 1;
/* Negative constants flip the subtract flag instead of negating V.  */
2725 subtract = subtract ? false : true;
2729 value_sub_int (c->p[r][cst_col], c->p[r][cst_col], v);
2731 value_add_int (c->p[r][cst_col], c->p[r][cst_col], v);
2736 scan_tree_for_params (s, left, c, r, k, subtract);
2743 if (chrec_contains_symbols (TREE_OPERAND (e, 0)))
2748 gcc_assert (host_integerp (TREE_OPERAND (e, 1), 0));
/* Multiplication by a constant scales the accumulated factor K.  */
2750 value_set_si (val, int_cst_value (TREE_OPERAND (e, 1)));
2751 value_multiply (k, k, val);
2754 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
2761 gcc_assert (host_integerp (TREE_OPERAND (e, 0), 0));
2763 value_set_si (val, int_cst_value (TREE_OPERAND (e, 0)));
2764 value_multiply (k, k, val);
2767 scan_tree_for_params (s, TREE_OPERAND (e, 1), c, r, k, subtract);
2772 case POINTER_PLUS_EXPR:
2773 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
2774 scan_tree_for_params (s, TREE_OPERAND (e, 1), c, r, k, subtract);
/* MINUS: the second operand is scanned with the flag inverted.  */
2778 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
2779 scan_tree_for_params (s, TREE_OPERAND (e, 1), c, r, k, !subtract);
2783 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, !subtract);
2787 param_col = param_index (e, s);
/* Parameter columns sit just before the constant column.  */
2791 param_col += c->NbColumns - scop_nb_params (s) - 1;
2794 value_subtract (c->p[r][param_col], c->p[r][param_col], k);
2796 value_addto (c->p[r][param_col], c->p[r][param_col], k);
2803 int v = int_cst_value (e);
2804 cst_col = c->NbColumns - 1;
2809 subtract = subtract ? false : true;
2813 value_sub_int (c->p[r][cst_col], c->p[r][cst_col], v);
2815 value_add_int (c->p[r][cst_col], c->p[r][cst_col], v);
2820 case NON_LVALUE_EXPR:
2821 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
2830 /* Data structure for idx_record_params. */
2838 /* For a data reference with an ARRAY_REF as its BASE, record the
2839 parameters occurring in IDX. DTA is passed in as complementary
2840 information, and is used by the automatic walker function. This
2841 function is a callback for for_each_index. */
2844 idx_record_params (tree base, tree *idx, void *dta)
2846 struct irp_data *data = (struct irp_data *) dta;
2848 if (TREE_CODE (base) != ARRAY_REF)
2851 if (TREE_CODE (*idx) == SSA_NAME)
2854 scop_p scop = data->scop;
2855 struct loop *loop = data->loop;
2858 scev = analyze_scalar_evolution (loop, *idx);
2859 scev = instantiate_scev (block_before_scop (scop), loop, scev);
/* C == NULL: scan only to register parameters, no matrix update.  */
2862 value_set_si (one, 1);
2863 scan_tree_for_params (scop, scev, NULL, 0, one, false);
2870 /* Find parameters with respect to SCOP in BB. We are looking in memory
2871 access functions, conditions and loop bounds. */
2874 find_params_in_bb (scop_p scop, graphite_bb_p gb)
2877 data_reference_p dr;
2879 loop_p father = GBB_BB (gb)->loop_father;
/* Parameters in array index functions.  */
2881 for (i = 0; VEC_iterate (data_reference_p, GBB_DATA_REFS (gb), i, dr); i++)
2883 struct irp_data irp;
2887 for_each_index (&dr->ref, idx_record_params, &irp);
2890 /* Find parameters in conditional statements. */
2891 for (i = 0; VEC_iterate (gimple, GBB_CONDITIONS (gb), i, stmt); i++)
2894 loop_p loop = father;
2898 lhs = gimple_cond_lhs (stmt);
2899 lhs = analyze_scalar_evolution (loop, lhs);
2900 lhs = instantiate_scev (block_before_scop (scop), loop, lhs);
2902 rhs = gimple_cond_rhs (stmt);
2903 rhs = analyze_scalar_evolution (loop, rhs);
2904 rhs = instantiate_scev (block_before_scop (scop), loop, rhs);
/* As in idx_record_params, C == NULL only registers parameters.  */
2907 scan_tree_for_params (scop, lhs, NULL, 0, one, false);
2908 value_set_si (one, 1);
2909 scan_tree_for_params (scop, rhs, NULL, 0, one, false);
2914 /* Saves in NV the name of variable P->T. */
/* Stores into NV[I] a freshly allocated, unique printable name for
   the SSA name P->T: "<name>_<version>" when the underlying decl has
   a name, "T_<version>" otherwise.  The caller owns the memory.  */
2917 save_var_name (char **nv, int i, name_tree p)
2919 const char *name = get_name (SSA_NAME_VAR (p->t));
2923 int len = strlen (name) + 16;
2924 nv[i] = XNEWVEC (char, len);
2925 snprintf (nv[i], len, "%s_%d", name, SSA_NAME_VERSION (p->t));
2929 nv[i] = XNEWVEC (char, 16);
/* Bug fix: the buffer above is 16 bytes, but the snprintf bound was
   "2 + 16" (18), allowing a write past the end of the allocation for
   large SSA version numbers.  Pass the true buffer size instead.  */
2930 snprintf (nv[i], 16, "T_%d", SSA_NAME_VERSION (p->t));
2936 /* Return the maximal loop depth in SCOP. */
/* Maximum over all graphite_bbs of the number of scop loops around
   each bb; used to size CLooG's iterator name array.  */
2939 scop_max_loop_depth (scop_p scop)
2943 int max_nb_loops = 0;
2945 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gbb); i++)
2947 int nb_loops = gbb_nb_loops (gbb);
2948 if (max_nb_loops < nb_loops)
2949 max_nb_loops = nb_loops;
2952 return max_nb_loops;
2955 /* Initialize Cloog's parameter names from the names used in GIMPLE.
2956 Initialize Cloog's iterator names, using 'graphite_iterator_%d'
2957 from 0 to scop_nb_loops (scop). */
2960 initialize_cloog_names (scop_p scop)
2962 int i, nb_params = VEC_length (name_tree, SCOP_PARAMS (scop));
2963 char **params = XNEWVEC (char *, nb_params);
2964 int nb_iterators = scop_max_loop_depth (scop);
2965 int nb_scattering= cloog_program_nb_scattdims (SCOP_PROG (scop));
/* NOTE(review): iterators is sized nb_iterators * 2 while the loop
   below fills only nb_iterators entries — presumably slack for CLooG;
   confirm against the CLooG API before changing.  */
2966 char **iterators = XNEWVEC (char *, nb_iterators * 2);
2967 char **scattering = XNEWVEC (char *, nb_scattering);
2970 for (i = 0; VEC_iterate (name_tree, SCOP_PARAMS (scop), i, p); i++)
2971 save_var_name (params, i, p);
2973 cloog_names_set_nb_parameters (cloog_program_names (SCOP_PROG (scop)),
2975 cloog_names_set_parameters (cloog_program_names (SCOP_PROG (scop)),
2978 for (i = 0; i < nb_iterators; i++)
2981 iterators[i] = XNEWVEC (char, len);
2982 snprintf (iterators[i], len, "graphite_iterator_%d", i);
2985 cloog_names_set_nb_iterators (cloog_program_names (SCOP_PROG (scop)),
2987 cloog_names_set_iterators (cloog_program_names (SCOP_PROG (scop)),
2990 for (i = 0; i < nb_scattering; i++)
2993 scattering[i] = XNEWVEC (char, len);
2994 snprintf (scattering[i], len, "s_%d", i);
2997 cloog_names_set_nb_scattering (cloog_program_names (SCOP_PROG (scop)),
2999 cloog_names_set_scattering (cloog_program_names (SCOP_PROG (scop)),
3003 /* Record the parameters used in the SCOP. A variable is a parameter
3004 in a scop if it does not vary during the execution of that scop. */
3007 find_scop_parameters (scop_p scop)
3015 value_set_si (one, 1);
3017 /* Find the parameters used in the loop bounds. */
3018 for (i = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), i, loop); i++)
3020 tree nb_iters = number_of_latch_executions (loop);
/* Purely constant bounds contribute no parameters.  */
3022 if (!chrec_contains_symbols (nb_iters))
3025 nb_iters = analyze_scalar_evolution (loop, nb_iters);
3026 nb_iters = instantiate_scev (block_before_scop (scop), loop, nb_iters);
3027 scan_tree_for_params (scop, nb_iters, NULL, 0, one, false);
3032 /* Find the parameters used in data accesses. */
3033 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
3034 find_params_in_bb (scop, gb);
/* Freeze the parameter set: param_index may no longer add entries.  */
3036 SCOP_ADD_PARAMS (scop) = false;
3039 /* Build the context constraints for SCOP: constraints and relations
3043 build_scop_context (scop_p scop)
3045 int nb_params = scop_nb_params (scop);
/* Columns: eq/ineq marker + nb_params parameters + constant.  */
3046 CloogMatrix *matrix = cloog_matrix_alloc (1, nb_params + 2);
3048 /* Insert '0 >= 0' in the context matrix, as it is not allowed to be
3051 value_set_si (matrix->p[0][0], 1);
3053 value_set_si (matrix->p[0][nb_params + 1], 0);
3055 cloog_program_set_context (SCOP_PROG (scop),
3056 cloog_domain_matrix2domain (matrix));
3057 cloog_matrix_free (matrix);
3060 /* Returns a graphite_bb from BB. */
/* The graphite_bb is stashed in bb->aux by new_graphite_bb.  */
3062 static inline graphite_bb_p
3063 gbb_from_bb (basic_block bb)
3065 return (graphite_bb_p) bb->aux;
3068 /* Builds the constraint matrix for LOOP in SCOP. NB_OUTER_LOOPS is the
3069 number of loops surrounding LOOP in SCOP. OUTER_CSTR gives the
3070 constraints matrix for the surrounding loops. */
3073 build_loop_iteration_domains (scop_p scop, struct loop *loop,
3074 CloogMatrix *outer_cstr, int nb_outer_loops)
3080 int nb_rows = outer_cstr->NbRows + 1;
3081 int nb_cols = outer_cstr->NbColumns + 1;
3083 /* Last column of CSTR is the column of constants. */
3084 int cst_col = nb_cols - 1;
3086 /* The column for the current loop is just after the columns of
3087 other outer loops. */
3088 int loop_col = nb_outer_loops + 1;
3090 tree nb_iters = number_of_latch_executions (loop);
3092 /* When the number of iterations is a constant or a parameter, we
3093 add a constraint for the upper bound of the loop. So add a row
3094 to the constraint matrix before allocating it. */
3095 if (TREE_CODE (nb_iters) == INTEGER_CST
3096 || !chrec_contains_undetermined (nb_iters))
3099 cstr = cloog_matrix_alloc (nb_rows, nb_cols);
3101 /* Copy the outer constraints. */
3102 for (i = 0; i < outer_cstr->NbRows; i++)
3104 /* Copy the eq/ineq and loops columns. */
3105 for (j = 0; j < loop_col; j++)
3106 value_assign (cstr->p[i][j], outer_cstr->p[i][j]);
3108 /* Leave an empty column in CSTR for the current loop, and then
3109 copy the parameter columns. */
3110 for (j = loop_col; j < outer_cstr->NbColumns; j++)
3111 value_assign (cstr->p[i][j + 1], outer_cstr->p[i][j]);
3115 row = outer_cstr->NbRows;
3116 value_set_si (cstr->p[row][0], 1);
3117 value_set_si (cstr->p[row][loop_col], 1);
3119 /* loop_i <= nb_iters */
3120 if (TREE_CODE (nb_iters) == INTEGER_CST)
3123 value_set_si (cstr->p[row][0], 1);
3124 value_set_si (cstr->p[row][loop_col], -1);
3126 value_set_si (cstr->p[row][cst_col],
3127 int_cst_value (nb_iters));
3129 else if (!chrec_contains_undetermined (nb_iters))
3131 /* Otherwise nb_iters contains parameters: scan the nb_iters
3132 expression and build its matrix representation. */
3136 value_set_si (cstr->p[row][0], 1);
3137 value_set_si (cstr->p[row][loop_col], -1);
3139 nb_iters = analyze_scalar_evolution (loop, nb_iters);
3140 nb_iters = instantiate_scev (block_before_scop (scop), loop, nb_iters);
3143 value_set_si (one, 1);
3144 scan_tree_for_params (scop, nb_iters, cstr, row, one, false);
3150 if (loop->inner && loop_in_sese_p (loop->inner, SCOP_REGION (scop)))
3151 build_loop_iteration_domains (scop, loop->inner, cstr, nb_outer_loops + 1);
3153 /* Only go to the next loops, if we are not at the outermost layer. These
3154 have to be handled separately, as we can be sure that the chain at this
3155 layer will be connected. */
3156 if (nb_outer_loops != 0 && loop->next && loop_in_sese_p (loop->next,
3157 SCOP_REGION (scop)))
3158 build_loop_iteration_domains (scop, loop->next, outer_cstr, nb_outer_loops);
3160 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
3161 if (gbb_loop (gb) == loop)
3162 GBB_DOMAIN (gb) = cloog_matrix_copy (cstr);
3164 cloog_matrix_free (cstr);
3167 /* Add conditions to the domain of GB. */
3170 add_conditions_to_domain (graphite_bb_p gb)
3174 VEC (gimple, heap) *conditions = GBB_CONDITIONS (gb);
3175 CloogMatrix *domain = GBB_DOMAIN (gb);
3176 scop_p scop = GBB_SCOP (gb);
3180 unsigned nb_new_rows = 0;
3183 if (VEC_empty (gimple, conditions))
3188 nb_rows = domain->NbRows;
3189 nb_cols = domain->NbColumns;
3194 nb_cols = nb_loops_around_gb (gb) + scop_nb_params (scop) + 2;
3197 /* Count number of necessary new rows to add the conditions to the
3199 for (i = 0; VEC_iterate (gimple, conditions, i, stmt); i++)
3201 switch (gimple_code (stmt))
3205 enum tree_code code = gimple_cond_code (stmt);
3211 /* NE and EQ statements are not supported right now. */
3227 /* Switch statements are not supported right now. */
3238 /* Enlarge the matrix. */
3240 CloogMatrix *new_domain;
3241 new_domain = cloog_matrix_alloc (nb_rows + nb_new_rows, nb_cols);
3245 for (i = 0; i < nb_rows; i++)
3246 for (j = 0; j < nb_cols; j++)
3247 value_assign (new_domain->p[i][j], domain->p[i][j]);
3249 cloog_matrix_free (domain);
3252 domain = new_domain;
3253 GBB_DOMAIN (gb) = new_domain;
3256 /* Add the conditions to the new enlarged domain matrix. */
3258 for (i = 0; VEC_iterate (gimple, conditions, i, stmt); i++)
3260 switch (gimple_code (stmt))
3265 enum tree_code code;
3268 loop_p loop = GBB_BB (gb)->loop_father;
3270 left = gimple_cond_lhs (stmt);
3271 right = gimple_cond_rhs (stmt);
3273 left = analyze_scalar_evolution (loop, left);
3274 right = analyze_scalar_evolution (loop, right);
3276 left = instantiate_scev (block_before_scop (scop), loop, left);
3277 right = instantiate_scev (block_before_scop (scop), loop, right);
3279 code = gimple_cond_code (stmt);
3281 /* The conditions for ELSE-branches are inverted. */
3282 if (VEC_index (gimple, gb->condition_cases, i) == NULL)
3283 code = invert_tree_comparison (code, false);
3288 /* NE statements are not supported right now. */
3292 value_set_si (domain->p[row][0], 1);
3294 value_set_si (one, 1);
3295 scan_tree_for_params (scop, left, domain, row, one, true);
3296 value_set_si (one, 1);
3297 scan_tree_for_params (scop, right, domain, row, one, false);
3299 value_set_si (domain->p[row][0], 1);
3300 value_set_si (one, 1);
3301 scan_tree_for_params (scop, left, domain, row, one, false);
3302 value_set_si (one, 1);
3303 scan_tree_for_params (scop, right, domain, row, one, true);
3308 value_set_si (domain->p[row][0], 1);
3310 value_set_si (one, 1);
3311 scan_tree_for_params (scop, left, domain, row, one, true);
3312 value_set_si (one, 1);
3313 scan_tree_for_params (scop, right, domain, row, one, false);
3314 value_sub_int (domain->p[row][nb_cols - 1],
3315 domain->p[row][nb_cols - 1], 1);
3320 value_set_si (domain->p[row][0], 1);
3322 value_set_si (one, 1);
3323 scan_tree_for_params (scop, left, domain, row, one, false);
3324 value_set_si (one, 1);
3325 scan_tree_for_params (scop, right, domain, row, one, true);
3326 value_sub_int (domain->p[row][nb_cols - 1],
3327 domain->p[row][nb_cols - 1], 1);
3332 value_set_si (domain->p[row][0], 1);
3334 value_set_si (one, 1);
3335 scan_tree_for_params (scop, left, domain, row, one, true);
3336 value_set_si (one, 1);
3337 scan_tree_for_params (scop, right, domain, row, one, false);
3342 value_set_si (domain->p[row][0], 1);
3344 value_set_si (one, 1);
3345 scan_tree_for_params (scop, left, domain, row, one, false);
3346 value_set_si (one, 1);
3347 scan_tree_for_params (scop, right, domain, row, one, true);
3358 /* Switch statements are not supported right now. */
3369 /* Returns true when PHI defines an induction variable in the loop
3370 containing the PHI node. */
3373 phi_node_is_iv (gimple phi)
3375 loop_p loop = gimple_bb (phi)->loop_father;
3376 tree scev = analyze_scalar_evolution (loop, gimple_phi_result (phi));
3378 return tree_contains_chrecs (scev, NULL);
3381 /* Returns true when BB contains scalar phi nodes that are not an
3382 induction variable of a loop. */
3385 bb_contains_non_iv_scalar_phi_nodes (basic_block bb)
3388 gimple_stmt_iterator si;
3390 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
3391 if (is_gimple_reg (gimple_phi_result (gsi_stmt (si))))
3393 /* Store the unique scalar PHI node: at this point, loops
3394 should be in canonical form, so we expect to see at most
3395 one scalar phi node in the loop header. */
3397 || bb != bb->loop_father->header)
3400 phi = gsi_stmt (si);
3404 || phi_node_is_iv (phi))
3410 /* Helper recursive function. Record in CONDITIONS and CASES all
3411 conditions from 'if's and 'switch'es occurring in BB from SCOP.
3413 Returns false when the conditions contain scalar computations that
3414 depend on the condition, i.e. when there are scalar phi nodes on
3415 the junction after the condition. Only the computations occurring
3416 on memory can be handled in the polyhedral model: operations that
3417 define scalar evolutions in conditions, that can potentially be
3418 used to index memory, can't be handled by the polyhedral model. */
3421 build_scop_conditions_1 (VEC (gimple, heap) **conditions,
3422 VEC (gimple, heap) **cases, basic_block bb,
3428 basic_block bb_child, bb_iter;
3429 VEC (basic_block, heap) *dom;
3432 /* Make sure we are in the SCoP. */
3433 if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
3436 if (bb_contains_non_iv_scalar_phi_nodes (bb))
3439 gbb = gbb_from_bb (bb);
3442 GBB_CONDITIONS (gbb) = VEC_copy (gimple, heap, *conditions);
3443 GBB_CONDITION_CASES (gbb) = VEC_copy (gimple, heap, *cases);
3446 dom = get_dominated_by (CDI_DOMINATORS, bb);
3448 stmt = last_stmt (bb);
3451 VEC (edge, gc) *edges;
3454 switch (gimple_code (stmt))
3458 for (i = 0; VEC_iterate (edge, edges, i, e); i++)
3459 if ((dominated_by_p (CDI_DOMINATORS, e->dest, bb))
3460 && VEC_length (edge, e->dest->preds) == 1)
3462 /* Remove the scanned block from the dominator successors. */
3463 for (j = 0; VEC_iterate (basic_block, dom, j, bb_iter); j++)
3464 if (bb_iter == e->dest)
3466 VEC_unordered_remove (basic_block, dom, j);
3470 /* Recursively scan the then or else part. */
3471 if (e->flags & EDGE_TRUE_VALUE)
3472 VEC_safe_push (gimple, heap, *cases, stmt);
3475 gcc_assert (e->flags & EDGE_FALSE_VALUE);
3476 VEC_safe_push (gimple, heap, *cases, NULL);
3479 VEC_safe_push (gimple, heap, *conditions, stmt);
3480 if (!build_scop_conditions_1 (conditions, cases, e->dest, scop))
3485 VEC_pop (gimple, *conditions);
3486 VEC_pop (gimple, *cases);
3493 gimple_stmt_iterator gsi_search_gimple_label;
3495 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
3497 basic_block bb_iter;
3499 size_t n_cases = VEC_length (gimple, *conditions);
3500 unsigned n = gimple_switch_num_labels (stmt);
3502 bb_child = label_to_block
3503 (CASE_LABEL (gimple_switch_label (stmt, i)));
3505 for (k = 0; k < n; k++)
3508 (CASE_LABEL (gimple_switch_label (stmt, k))) == bb_child)
3511 /* Switches with multiple case values for the same
3512 block are not handled. */
3514 /* Switch cases with more than one predecessor are
3516 || VEC_length (edge, bb_child->preds) != 1)
3522 /* Recursively scan the corresponding 'case' block. */
3523 for (gsi_search_gimple_label = gsi_start_bb (bb_child);
3524 !gsi_end_p (gsi_search_gimple_label);
3525 gsi_next (&gsi_search_gimple_label))
3527 gimple label = gsi_stmt (gsi_search_gimple_label);
3529 if (gimple_code (label) == GIMPLE_LABEL)
3531 tree t = gimple_label_label (label);
3533 gcc_assert (t == gimple_switch_label (stmt, i));
3534 VEC_replace (gimple, *cases, n_cases, label);
3539 if (!build_scop_conditions_1 (conditions, cases, bb_child, scop))
3545 /* Remove the scanned block from the dominator successors. */
3546 for (j = 0; VEC_iterate (basic_block, dom, j, bb_iter); j++)
3547 if (bb_iter == bb_child)
3549 VEC_unordered_remove (basic_block, dom, j);
3554 VEC_pop (gimple, *conditions);
3555 VEC_pop (gimple, *cases);
3564 /* Scan all immediate dominated successors. */
3565 for (i = 0; VEC_iterate (basic_block, dom, i, bb_child); i++)
3566 if (!build_scop_conditions_1 (conditions, cases, bb_child, scop))
3573 VEC_free (basic_block, heap, dom);
3577 /* Record all conditions from SCOP.
3579 Returns false when the conditions contain scalar computations that
3580 depend on the condition, i.e. when there are scalar phi nodes on
3581 the junction after the condition. Only the computations occurring
3582 on memory can be handled in the polyhedral model: operations that
3583 define scalar evolutions in conditions, that can potentially be
3584 used to index memory, can't be handled by the polyhedral model. */
3587 build_scop_conditions (scop_p scop)
3590 VEC (gimple, heap) *conditions = NULL;
3591 VEC (gimple, heap) *cases = NULL;
3593 res = build_scop_conditions_1 (&conditions, &cases, SCOP_ENTRY (scop), scop);
3595 VEC_free (gimple, heap, conditions);
3596 VEC_free (gimple, heap, cases);
3600 /* Traverses all the GBBs of the SCOP and add their constraints to the
3601 iteration domains. */
3604 add_conditions_to_constraints (scop_p scop)
3609 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gbb); i++)
3610 add_conditions_to_domain (gbb);
3613 /* Build the current domain matrix: the loops belonging to the current
3614 SCOP, and that vary for the execution of the current basic block.
3615 Returns false if there is no loop in SCOP. */
3618 build_scop_iteration_domain (scop_p scop)
3621 CloogMatrix *outer_cstr;
3624 /* Build cloog loop for all loops, that are in the uppermost loop layer of
3626 for (i = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), i, loop); i++)
3627 if (!loop_in_sese_p (loop_outer (loop), SCOP_REGION (scop)))
3629 /* The outermost constraints is a matrix that has:
3630 -first column: eq/ineq boolean
3631 -last column: a constant
3632 -scop_nb_params columns for the parameters used in the scop. */
3633 outer_cstr = cloog_matrix_alloc (0, scop_nb_params (scop) + 2);
3634 build_loop_iteration_domains (scop, loop, outer_cstr, 0);
3635 cloog_matrix_free (outer_cstr);
3641 /* Initializes an equation CY of the access matrix using the
3642 information for a subscript from AF, relatively to the loop
3643 indexes from LOOP_NEST and parameter indexes from PARAMS. NDIM is
3644 the dimension of the array access, i.e. the number of
3645 subscripts. Returns true when the operation succeeds. */
3648 build_access_matrix_with_af (tree af, lambda_vector cy,
3649 scop_p scop, int ndim)
3653 switch (TREE_CODE (af))
3655 case POLYNOMIAL_CHREC:
3657 struct loop *outer_loop;
3658 tree left = CHREC_LEFT (af);
3659 tree right = CHREC_RIGHT (af);
3662 if (TREE_CODE (right) != INTEGER_CST)
3665 outer_loop = get_loop (CHREC_VARIABLE (af));
3666 var = nb_loops_around_loop_in_scop (outer_loop, scop);
3667 cy[var] = int_cst_value (right);
3669 switch (TREE_CODE (left))
3671 case POLYNOMIAL_CHREC:
3672 return build_access_matrix_with_af (left, cy, scop, ndim);
3675 cy[ndim - 1] = int_cst_value (left);
3679 return build_access_matrix_with_af (left, cy, scop, ndim);
3684 build_access_matrix_with_af (TREE_OPERAND (af, 0), cy, scop, ndim);
3685 build_access_matrix_with_af (TREE_OPERAND (af, 1), cy, scop, ndim);
3689 build_access_matrix_with_af (TREE_OPERAND (af, 0), cy, scop, ndim);
3690 build_access_matrix_with_af (TREE_OPERAND (af, 1), cy, scop, ndim);
3694 cy[ndim - 1] = int_cst_value (af);
3698 param_col = param_index (af, scop);
3699 cy [ndim - scop_nb_params (scop) + param_col - 1] = 1;
3703 /* FIXME: access_fn can have parameters. */
3708 /* Initialize the access matrix in the data reference REF with respect
3709 to the loop nesting LOOP_NEST. Return true when the operation
3713 build_access_matrix (data_reference_p ref, graphite_bb_p gb)
3715 int i, ndim = DR_NUM_DIMENSIONS (ref);
3716 struct access_matrix *am = GGC_NEW (struct access_matrix);
3718 AM_MATRIX (am) = VEC_alloc (lambda_vector, gc, ndim);
3719 DR_SCOP (ref) = GBB_SCOP (gb);
3721 for (i = 0; i < ndim; i++)
3723 lambda_vector v = lambda_vector_new (ref_nb_loops (ref));
3724 scop_p scop = GBB_SCOP (gb);
3725 tree af = DR_ACCESS_FN (ref, i);
3727 if (!build_access_matrix_with_af (af, v, scop, ref_nb_loops (ref)))
3730 VEC_quick_push (lambda_vector, AM_MATRIX (am), v);
3733 DR_ACCESS_MATRIX (ref) = am;
3737 /* Build the access matrices for the data references in the SCOP. */
3740 build_scop_data_accesses (scop_p scop)
3745 /* FIXME: Construction of access matrix is disabled until some
3746 pass, like the data dependence analysis, is using it. */
3749 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
3752 data_reference_p dr;
3754 /* Construct the access matrix for each data ref, with respect to
3755 the loop nest of the current BB in the considered SCOP. */
3757 VEC_iterate (data_reference_p, GBB_DATA_REFS (gb), j, dr);
3760 bool res = build_access_matrix (dr, gb);
3762 /* FIXME: At this point the DRs should always have an affine
3763 form. For the moment this fails as build_access_matrix
3764 does not build matrices with parameters. */
3770 /* Returns the tree variable from the name NAME that was given in
3771 Cloog representation. All the parameters are stored in PARAMS, and
3772 all the loop induction variables are stored in IVSTACK.
3774 FIXME: This is a hack, and Cloog should be fixed to not work with
3775 variable names represented as "char *string", but with void
3776 pointers that could be casted back to a tree. The only problem in
3777 doing that is that Cloog's pretty printer still assumes that
3778 variable names are char *strings. The solution would be to have a
3779 function pointer for pretty-printing that can be redirected to be
3780 print_generic_stmt in our case, or fprintf by default.
3781 ??? Too ugly to live. */
3784 clast_name_to_gcc (const char *name, VEC (name_tree, heap) *params,
3785 loop_iv_stack ivstack)
3792 for (i = 0; VEC_iterate (name_tree, params, i, t); i++)
3793 if (!strcmp (name, t->name))
3796 iv = loop_iv_stack_get_iv_from_name (ivstack, name);
3803 /* Returns the maximal precision type for expressions E1 and E2. */
3806 max_precision_type (tree e1, tree e2)
3808 tree type1 = TREE_TYPE (e1);
3809 tree type2 = TREE_TYPE (e2);
3810 return TYPE_PRECISION (type1) > TYPE_PRECISION (type2) ? type1 : type2;
3814 clast_to_gcc_expression (tree, struct clast_expr *, VEC (name_tree, heap) *,
3817 /* Converts a Cloog reduction expression R with reduction operation OP
3818 to a GCC expression tree of type TYPE. PARAMS is a vector of
3819 parameters of the scop, and IVSTACK contains the stack of induction
3823 clast_to_gcc_expression_red (tree type, enum tree_code op,
3824 struct clast_reduction *r,
3825 VEC (name_tree, heap) *params,
3826 loop_iv_stack ivstack)
3829 tree res = clast_to_gcc_expression (type, r->elts[0], params, ivstack);
3831 for (i = 1; i < r->n; i++)
3833 tree t = clast_to_gcc_expression (type, r->elts[i], params, ivstack);
3834 res = fold_build2 (op, type, res, t);
3839 /* Converts a Cloog AST expression E back to a GCC expression tree of
3840 type TYPE. PARAMS is a vector of parameters of the scop, and
3841 IVSTACK contains the stack of induction variables. */
3844 clast_to_gcc_expression (tree type, struct clast_expr *e,
3845 VEC (name_tree, heap) *params,
3846 loop_iv_stack ivstack)
3852 struct clast_term *t = (struct clast_term *) e;
3856 if (value_one_p (t->val))
3858 tree name = clast_name_to_gcc (t->var, params, ivstack);
3859 return fold_convert (type, name);
3862 else if (value_mone_p (t->val))
3864 tree name = clast_name_to_gcc (t->var, params, ivstack);
3865 name = fold_convert (type, name);
3866 return fold_build1 (NEGATE_EXPR, type, name);
3870 tree name = clast_name_to_gcc (t->var, params, ivstack);
3871 tree cst = gmp_cst_to_tree (type, t->val);
3872 name = fold_convert (type, name);
3873 return fold_build2 (MULT_EXPR, type, cst, name);
3877 return gmp_cst_to_tree (type, t->val);
3882 struct clast_reduction *r = (struct clast_reduction *) e;
3887 return clast_to_gcc_expression_red (type, PLUS_EXPR, r, params, ivstack);
3890 return clast_to_gcc_expression_red (type, MIN_EXPR, r, params, ivstack);
3893 return clast_to_gcc_expression_red (type, MAX_EXPR, r, params, ivstack);
3903 struct clast_binary *b = (struct clast_binary *) e;
3904 struct clast_expr *lhs = (struct clast_expr *) b->LHS;
3905 tree tl = clast_to_gcc_expression (type, lhs, params, ivstack);
3906 tree tr = gmp_cst_to_tree (type, b->RHS);
3910 case clast_bin_fdiv:
3911 return fold_build2 (FLOOR_DIV_EXPR, type, tl, tr);
3913 case clast_bin_cdiv:
3914 return fold_build2 (CEIL_DIV_EXPR, type, tl, tr);
3917 return fold_build2 (EXACT_DIV_EXPR, type, tl, tr);
3920 return fold_build2 (TRUNC_MOD_EXPR, type, tl, tr);
3934 /* Returns the type for the expression E. */
3937 gcc_type_for_clast_expr (struct clast_expr *e,
3938 VEC (name_tree, heap) *params,
3939 loop_iv_stack ivstack)
3945 struct clast_term *t = (struct clast_term *) e;
3948 return TREE_TYPE (clast_name_to_gcc (t->var, params, ivstack));
3955 struct clast_reduction *r = (struct clast_reduction *) e;
3958 return gcc_type_for_clast_expr (r->elts[0], params, ivstack);
3962 for (i = 0; i < r->n; i++)
3964 tree type = gcc_type_for_clast_expr (r->elts[i], params, ivstack);
3974 struct clast_binary *b = (struct clast_binary *) e;
3975 struct clast_expr *lhs = (struct clast_expr *) b->LHS;
3976 return gcc_type_for_clast_expr (lhs, params, ivstack);
3986 /* Returns the type for the equation CLEQ. */
3989 gcc_type_for_clast_eq (struct clast_equation *cleq,
3990 VEC (name_tree, heap) *params,
3991 loop_iv_stack ivstack)
3993 tree type = gcc_type_for_clast_expr (cleq->LHS, params, ivstack);
3997 return gcc_type_for_clast_expr (cleq->RHS, params, ivstack);
4000 /* Translates a clast equation CLEQ to a tree. */
4003 graphite_translate_clast_equation (scop_p scop,
4004 struct clast_equation *cleq,
4005 loop_iv_stack ivstack)
4007 enum tree_code comp;
4008 tree type = gcc_type_for_clast_eq (cleq, SCOP_PARAMS (scop), ivstack);
4009 tree lhs = clast_to_gcc_expression (type, cleq->LHS, SCOP_PARAMS (scop), ivstack);
4010 tree rhs = clast_to_gcc_expression (type, cleq->RHS, SCOP_PARAMS (scop), ivstack);
4012 if (cleq->sign == 0)
4015 else if (cleq->sign > 0)
4021 return fold_build2 (comp, type, lhs, rhs);
4024 /* Creates the test for the condition in STMT. */
4027 graphite_create_guard_cond_expr (scop_p scop, struct clast_guard *stmt,
4028 loop_iv_stack ivstack)
4033 for (i = 0; i < stmt->n; i++)
4035 tree eq = graphite_translate_clast_equation (scop, &stmt->eq[i], ivstack);
4038 cond = fold_build2 (TRUTH_AND_EXPR, TREE_TYPE (eq), cond, eq);
4046 /* Creates a new if region corresponding to Cloog's guard. */
4049 graphite_create_new_guard (scop_p scop, edge entry_edge,
4050 struct clast_guard *stmt,
4051 loop_iv_stack ivstack)
4053 tree cond_expr = graphite_create_guard_cond_expr (scop, stmt, ivstack);
4054 edge exit_edge = create_empty_if_region_on_edge (entry_edge, cond_expr);
4058 /* Walks a CLAST and returns the first statement in the body of a
4061 static struct clast_user_stmt *
4062 clast_get_body_of_loop (struct clast_stmt *stmt)
4065 || CLAST_STMT_IS_A (stmt, stmt_user))
4066 return (struct clast_user_stmt *) stmt;
4068 if (CLAST_STMT_IS_A (stmt, stmt_for))
4069 return clast_get_body_of_loop (((struct clast_for *) stmt)->body);
4071 if (CLAST_STMT_IS_A (stmt, stmt_guard))
4072 return clast_get_body_of_loop (((struct clast_guard *) stmt)->then);
4074 if (CLAST_STMT_IS_A (stmt, stmt_block))
4075 return clast_get_body_of_loop (((struct clast_block *) stmt)->body);
4080 /* Returns the induction variable for the loop that gets translated to
4084 gcc_type_for_iv_of_clast_loop (struct clast_for *stmt_for)
4086 struct clast_user_stmt *stmt = clast_get_body_of_loop ((struct clast_stmt *) stmt_for);
4087 const char *cloog_iv = stmt_for->iterator;
4088 CloogStatement *cs = stmt->statement;
4089 graphite_bb_p gbb = (graphite_bb_p) cloog_statement_usr (cs);
4091 return gcc_type_for_cloog_iv (cloog_iv, gbb);
4094 /* Creates a new LOOP corresponding to Cloog's STMT. Inserts an induction
4095 variable for the new LOOP. New LOOP is attached to CFG starting at
4096 ENTRY_EDGE. LOOP is inserted into the loop tree and becomes the child
4097 loop of the OUTER_LOOP. */
4099 static struct loop *
4100 graphite_create_new_loop (scop_p scop, edge entry_edge,
4101 struct clast_for *stmt, loop_iv_stack ivstack,
4104 tree type = gcc_type_for_iv_of_clast_loop (stmt);
4105 VEC (name_tree, heap) *params = SCOP_PARAMS (scop);
4106 tree lb = clast_to_gcc_expression (type, stmt->LB, params, ivstack);
4107 tree ub = clast_to_gcc_expression (type, stmt->UB, params, ivstack);
4108 tree stride = gmp_cst_to_tree (type, stmt->stride);
4109 tree ivvar = create_tmp_var (type, "graphiteIV");
4111 loop_p loop = create_empty_loop_on_edge
4112 (entry_edge, lb, stride, ub, ivvar, &iv_before,
4113 outer ? outer : entry_edge->src->loop_father);
4115 add_referenced_var (ivvar);
4116 loop_iv_stack_push_iv (ivstack, iv_before, stmt->iterator);
4120 /* Rename the SSA_NAMEs used in STMT and that appear in IVSTACK. */
4123 rename_variables_in_stmt (gimple stmt, htab_t map)
4126 use_operand_p use_p;
4128 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
4130 tree use = USE_FROM_PTR (use_p);
4131 tree new_name = get_new_name_from_old_name (map, use);
4133 replace_exp (use_p, new_name);
4139 /* Returns true if SSA_NAME is a parameter of SCOP. */
4142 is_parameter (scop_p scop, tree ssa_name)
4145 VEC (name_tree, heap) *params = SCOP_PARAMS (scop);
4148 for (i = 0; VEC_iterate (name_tree, params, i, param); i++)
4149 if (param->t == ssa_name)
4155 /* Returns true if NAME is an induction variable. */
4160 return gimple_code (SSA_NAME_DEF_STMT (name)) == GIMPLE_PHI;
4163 static void expand_scalar_variables_stmt (gimple, basic_block, scop_p,
4166 expand_scalar_variables_expr (tree, tree, enum tree_code, tree, basic_block,
4167 scop_p, htab_t, gimple_stmt_iterator *);
4169 /* Copies at GSI all the scalar computations on which the ssa_name OP0
4170 depends on in the SCOP: these are all the scalar variables used in
4171 the definition of OP0, that are defined outside BB and still in the
4172 SCOP, i.e. not a parameter of the SCOP. The expression that is
4173 returned contains only induction variables from the generated code:
4174 MAP contains the induction variables renaming mapping, and is used
4175 to translate the names of induction variables. */
4178 expand_scalar_variables_ssa_name (tree op0, basic_block bb,
4179 scop_p scop, htab_t map,
4180 gimple_stmt_iterator *gsi)
4182 tree var0, var1, type;
4184 enum tree_code subcode;
4186 if (is_parameter (scop, op0)
4188 return get_new_name_from_old_name (map, op0);
4190 def_stmt = SSA_NAME_DEF_STMT (op0);
4192 if (gimple_bb (def_stmt) == bb)
4194 /* If the defining statement is in the basic block already
4195 we do not need to create a new expression for it, we
4196 only need to ensure its operands are expanded. */
4197 expand_scalar_variables_stmt (def_stmt, bb, scop, map);
4198 return get_new_name_from_old_name (map, op0);
4202 if (gimple_code (def_stmt) != GIMPLE_ASSIGN
4203 || !bb_in_sese_p (gimple_bb (def_stmt), SCOP_REGION (scop)))
4204 return get_new_name_from_old_name (map, op0);
4206 var0 = gimple_assign_rhs1 (def_stmt);
4207 subcode = gimple_assign_rhs_code (def_stmt);
4208 var1 = gimple_assign_rhs2 (def_stmt);
4209 type = gimple_expr_type (def_stmt);
4211 return expand_scalar_variables_expr (type, var0, subcode, var1, bb, scop,
4216 /* Copies at GSI all the scalar computations on which the expression
4217 OP0 CODE OP1 depends on in the SCOP: these are all the scalar
4218 variables used in OP0 and OP1, defined outside BB and still defined
4219 in the SCOP, i.e. not a parameter of the SCOP. The expression that
4220 is returned contains only induction variables from the generated
4221 code: MAP contains the induction variables renaming mapping, and is
4222 used to translate the names of induction variables. */
4225 expand_scalar_variables_expr (tree type, tree op0, enum tree_code code,
4226 tree op1, basic_block bb, scop_p scop,
4227 htab_t map, gimple_stmt_iterator *gsi)
4229 if (TREE_CODE_CLASS (code) == tcc_constant
4230 || TREE_CODE_CLASS (code) == tcc_declaration)
4233 /* For data references we have to duplicate also its memory
4235 if (TREE_CODE_CLASS (code) == tcc_reference)
4241 tree old_name = TREE_OPERAND (op0, 0);
4242 tree expr = expand_scalar_variables_ssa_name
4243 (old_name, bb, scop, map, gsi);
4244 tree new_name = force_gimple_operand_gsi (gsi, expr, true, NULL,
4245 true, GSI_SAME_STMT);
4247 return fold_build1 (code, type, new_name);
4252 tree op00 = TREE_OPERAND (op0, 0);
4253 tree op01 = TREE_OPERAND (op0, 1);
4254 tree op02 = TREE_OPERAND (op0, 2);
4255 tree op03 = TREE_OPERAND (op0, 3);
4256 tree base = expand_scalar_variables_expr
4257 (TREE_TYPE (op00), op00, TREE_CODE (op00), NULL, bb, scop,
4259 tree subscript = expand_scalar_variables_expr
4260 (TREE_TYPE (op01), op01, TREE_CODE (op01), NULL, bb, scop,
4263 return build4 (ARRAY_REF, type, base, subscript, op02, op03);
4267 /* The above cases should catch everything. */
4272 if (TREE_CODE_CLASS (code) == tcc_unary)
4274 tree op0_type = TREE_TYPE (op0);
4275 enum tree_code op0_code = TREE_CODE (op0);
4276 tree op0_expr = expand_scalar_variables_expr (op0_type, op0, op0_code,
4277 NULL, bb, scop, map, gsi);
4279 return fold_build1 (code, type, op0_expr);
4282 if (TREE_CODE_CLASS (code) == tcc_binary)
4284 tree op0_type = TREE_TYPE (op0);
4285 enum tree_code op0_code = TREE_CODE (op0);
4286 tree op0_expr = expand_scalar_variables_expr (op0_type, op0, op0_code,
4287 NULL, bb, scop, map, gsi);
4288 tree op1_type = TREE_TYPE (op1);
4289 enum tree_code op1_code = TREE_CODE (op1);
4290 tree op1_expr = expand_scalar_variables_expr (op1_type, op1, op1_code,
4291 NULL, bb, scop, map, gsi);
4293 return fold_build2 (code, type, op0_expr, op1_expr);
4296 if (code == SSA_NAME)
4297 return expand_scalar_variables_ssa_name (op0, bb, scop, map, gsi);
4303 /* Copies at the beginning of BB all the scalar computations on which
4304 STMT depends on in the SCOP: these are all the scalar variables used
4305 in STMT, defined outside BB and still defined in the SCOP, i.e. not a
4306 parameter of the SCOP. The expression that is returned contains
4307 only induction variables from the generated code: MAP contains the
4308 induction variables renaming mapping, and is used to translate the
4309 names of induction variables. */
4312 expand_scalar_variables_stmt (gimple stmt, basic_block bb, scop_p scop,
4316 use_operand_p use_p;
4317 gimple_stmt_iterator gsi = gsi_after_labels (bb);
4319 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
4321 tree use = USE_FROM_PTR (use_p);
4322 tree type = TREE_TYPE (use);
4323 enum tree_code code = TREE_CODE (use);
4324 tree use_expr = expand_scalar_variables_expr (type, use, code, NULL, bb,
4326 if (use_expr != use)
4329 force_gimple_operand_gsi (&gsi, use_expr, true, NULL,
4330 true, GSI_NEW_STMT);
4331 replace_exp (use_p, new_use);
4338 /* Copies at the beginning of BB all the scalar computations on which
4339 BB depends on in the SCOP: these are all the scalar variables used
4340 in BB, defined outside BB and still defined in the SCOP, i.e. not a
4341 parameter of the SCOP. The expression that is returned contains
4342 only induction variables from the generated code: MAP contains the
4343 induction variables renaming mapping, and is used to translate the
4344 names of induction variables. */
4347 expand_scalar_variables (basic_block bb, scop_p scop, htab_t map)
4349 gimple_stmt_iterator gsi;
4351 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
4353 gimple stmt = gsi_stmt (gsi);
4354 expand_scalar_variables_stmt (stmt, bb, scop, map);
4359 /* Rename all the SSA_NAMEs from block BB according to the MAP. */
4362 rename_variables (basic_block bb, htab_t map)
4364 gimple_stmt_iterator gsi;
4366 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4367 rename_variables_in_stmt (gsi_stmt (gsi), map);
4370 /* Remove condition from BB. */
4373 remove_condition (basic_block bb)
4375 gimple last = last_stmt (bb);
4377 if (last && gimple_code (last) == GIMPLE_COND)
4379 gimple_stmt_iterator gsi = gsi_last_bb (bb);
4380 gsi_remove (&gsi, true);
4384 /* Returns the first successor edge of BB with EDGE_TRUE_VALUE flag set. */
4387 get_true_edge_from_guard_bb (basic_block bb)
4392 FOR_EACH_EDGE (e, ei, bb->succs)
4393 if (e->flags & EDGE_TRUE_VALUE)
4400 /* Returns the first successor edge of BB with EDGE_TRUE_VALUE flag cleared. */
4403 get_false_edge_from_guard_bb (basic_block bb)
4408 FOR_EACH_EDGE (e, ei, bb->succs)
4409 if (!(e->flags & EDGE_TRUE_VALUE))
4416 /* Inserts in MAP a tuple (OLD_NAME, NEW_NAME) for the induction
4417 variables of the loops around GBB in SCOP, i.e. GBB_LOOPS.
4418 NEW_NAME is obtained from IVSTACK. IVSTACK has the same stack
4419 ordering as GBB_LOOPS. */
4422 build_iv_mapping (loop_iv_stack ivstack, htab_t map, gbb_p gbb, scop_p scop)
4428 for (i = 0; VEC_iterate (name_tree, SCOP_OLDIVS (scop), i, iv); i++)
4430 struct rename_map_elt_d tmp;
/* Skip induction variables of loops that do not contain GBB.  */
4432 if (!flow_bb_inside_loop_p (iv->loop, GBB_BB (gbb)))
4435 tmp.old_name = iv->t;
4436 slot = htab_find_slot (map, &tmp, INSERT);
/* The new IV is fetched from IVSTACK at the depth of IV->loop around
   GBB, matching the stack ordering documented above.  */
4440 tree new_name = loop_iv_stack_get_iv (ivstack,
4441 gbb_loop_index (gbb, iv->loop));
4442 *slot = new_rename_map_elt (iv->t, new_name);
4447 /* Register in MAP the tuple (old_name, new_name). */
4450 register_old_and_new_names (htab_t map, tree old_name, tree new_name)
4452 struct rename_map_elt_d tmp;
4455 tmp.old_name = old_name;
4456 slot = htab_find_slot (map, &tmp, INSERT);
/* NOTE(review): the slot is filled with a fresh element; whether an
   existing entry is guarded against in elided lines -- confirm.  */
4459 *slot = new_rename_map_elt (old_name, new_name);
4462 /* Create a duplicate of the basic block BB. NOTE: This does not
4463 preserve SSA form. */
4466 graphite_copy_stmts_from_block (basic_block bb, basic_block new_bb, htab_t map)
4468 gimple_stmt_iterator gsi, gsi_tgt;
4470 gsi_tgt = gsi_start_bb (new_bb);
4471 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4473 def_operand_p def_p;
4474 ssa_op_iter op_iter;
4476 gimple stmt = gsi_stmt (gsi);
/* Labels are not duplicated into NEW_BB.  */
4479 if (gimple_code (stmt) == GIMPLE_LABEL)
4482 /* Create a new copy of STMT and duplicate STMT's virtual
4484 copy = gimple_copy (stmt);
4485 gsi_insert_after (&gsi_tgt, copy, GSI_NEW_STMT);
4486 mark_sym_for_renaming (gimple_vop (cfun));
/* Keep exception-handling region info and value-profile histograms
   of the original statement on the copy.  */
4488 region = lookup_stmt_eh_region (stmt);
4490 add_stmt_to_eh_region (copy, region);
4491 gimple_duplicate_stmt_histograms (cfun, copy, cfun, stmt);
4493 /* Create new names for all the definitions created by COPY and
4494 add replacement mappings for each new name. */
4495 FOR_EACH_SSA_DEF_OPERAND (def_p, copy, op_iter, SSA_OP_ALL_DEFS)
4497 tree old_name = DEF_FROM_PTR (def_p);
4498 tree new_name = create_new_def_for (old_name, copy, def_p);
4499 register_old_and_new_names (map, old_name, new_name);
4504 /* Records in SCOP_LIVEOUT_RENAMES the names that are live out of
4505 the SCOP and that appear in the RENAME_MAP. */
4508 register_scop_liveout_renames (scop_p scop, htab_t rename_map)
4511 sese region = SCOP_REGION (scop);
/* Walk every SSA version number of the region; only GIMPLE registers
   that are live out are recorded.  */
4513 for (i = 0; i < SESE_NUM_VER (region); i++)
4514 if (bitmap_bit_p (SESE_LIVEOUT (region), i)
4515 && is_gimple_reg (ssa_name (i)))
4517 tree old_name = ssa_name (i);
4518 tree new_name = get_new_name_from_old_name (rename_map, old_name);
4520 register_old_and_new_names (SCOP_LIVEOUT_RENAMES (scop),
4521 old_name, new_name);
4525 /* Copies BB and includes in the copied BB all the statements that can
4526 be reached following the use-def chains from the memory accesses,
4527 and returns the next edge following this new block. */
4530 copy_bb_and_scalar_dependences (basic_block bb, scop_p scop,
4531 edge next_e, htab_t map)
/* Split NEXT_E to make room for the copy; the edge following the new
   block is what gets returned to the caller.  */
4533 basic_block new_bb = split_edge (next_e);
4535 next_e = single_succ_edge (new_bb);
4536 graphite_copy_stmts_from_block (bb, new_bb, map);
4537 remove_condition (new_bb);
4538 rename_variables (new_bb, map);
4539 remove_phi_nodes (new_bb);
4540 expand_scalar_variables (new_bb, scop, map);
4541 register_scop_liveout_renames (scop, map);
4546 /* Helper function for htab_traverse in insert_loop_close_phis. */
4549 add_loop_exit_phis (void **slot, void *s)
4551 struct rename_map_elt_d *entry = (struct rename_map_elt_d *) *slot;
4552 tree new_name = entry->new_name;
4553 basic_block bb = (basic_block) s;
/* Insert "RES = phi (NEW_NAME)" into BB, the loop-close block.  */
4554 gimple phi = create_phi_node (new_name, bb);
4555 tree res = create_new_def_for (gimple_phi_result (phi), phi,
4556 gimple_phi_result_ptr (phi));
4558 add_phi_arg (phi, new_name, single_pred_edge (bb), UNKNOWN_LOCATION);
/* From here on, the liveout rename for this entry is the phi result.  */
4560 entry->new_name = res;
4565 /* Iterate over the SCOP_LIVEOUT_RENAMES (SCOP) and get tuples of the
4566 form (OLD_NAME, NEW_NAME). Insert in BB "RES = phi (NEW_NAME)",
4567 and finally register in SCOP_LIVEOUT_RENAMES (scop) the tuple
4571 insert_loop_close_phis (scop_p scop, basic_block bb)
/* SSA form is refreshed both before and after inserting the
   loop-closing phi nodes.  */
4573 update_ssa (TODO_update_ssa);
4574 htab_traverse (SCOP_LIVEOUT_RENAMES (scop), add_loop_exit_phis, bb);
4575 update_ssa (TODO_update_ssa);
4578 /* Helper structure for htab_traverse in insert_guard_phis. */
/* Carries the guard's two outgoing edges and the renames that were
   live out before the guard (a BB field is also used by the
   traversal callback; it is in elided lines of this struct).  */
4582 edge true_edge, false_edge;
4583 htab_t liveout_before_guard;
4586 /* Return the default name that is before the guard. */
4589 default_liveout_before_guard (htab_t liveout_before_guard, tree old_name)
4591 tree res = get_new_name_from_old_name (liveout_before_guard, old_name);
/* Getting OLD_NAME back unchanged means no rename was recorded before
   the guard: fall back to zero for registers, or to the default
   definition otherwise.  */
4593 if (res == old_name)
4595 if (is_gimple_reg (res))
4596 return fold_convert (TREE_TYPE (res), integer_zero_node);
4597 return gimple_default_def (cfun, res);
4603 /* Helper function for htab_traverse in insert_guard_phis. */
4606 add_guard_exit_phis (void **slot, void *s)
4608 struct rename_map_elt_d *entry = (struct rename_map_elt_d *) *slot;
4609 struct igp *i = (struct igp *) s;
4610 basic_block bb = i->bb;
4611 edge true_edge = i->true_edge;
4612 edge false_edge = i->false_edge;
/* NAME1 flows from the generated code (true edge); NAME2 is the value
   that was live before the guard (false edge).  */
4613 tree name1 = entry->new_name;
4614 tree name2 = default_liveout_before_guard (i->liveout_before_guard,
4616 gimple phi = create_phi_node (name1, bb);
4617 tree res = create_new_def_for (gimple_phi_result (phi), phi,
4618 gimple_phi_result_ptr (phi));
4620 add_phi_arg (phi, name1, true_edge, UNKNOWN_LOCATION);
4621 add_phi_arg (phi, name2, false_edge, UNKNOWN_LOCATION);
/* The merged phi result becomes the new liveout rename.  */
4623 entry->new_name = res;
4628 /* Iterate over the SCOP_LIVEOUT_RENAMES (SCOP) and get tuples of the
4629 form (OLD_NAME, NAME1). If there is a correspondent tuple of
4630 OLD_NAME in LIVEOUT_BEFORE_GUARD, i.e. (OLD_NAME, NAME2) then
4633 | RES = phi (NAME1 (on TRUE_EDGE), NAME2 (on FALSE_EDGE))"
4635 if there is no tuple for OLD_NAME in LIVEOUT_BEFORE_GUARD, insert
4637 | RES = phi (NAME1 (on TRUE_EDGE),
4638 | DEFAULT_DEFINITION of NAME1 (on FALSE_EDGE))".
4640 Finally register in SCOP_LIVEOUT_RENAMES (scop) the tuple
4644 insert_guard_phis (scop_p scop, basic_block bb, edge true_edge,
4645 edge false_edge, htab_t liveout_before_guard)
/* Bundle the traversal arguments into the igp helper structure.  */
4649 i.true_edge = true_edge;
4650 i.false_edge = false_edge;
4651 i.liveout_before_guard = liveout_before_guard;
4653 update_ssa (TODO_update_ssa);
4654 htab_traverse (SCOP_LIVEOUT_RENAMES (scop), add_guard_exit_phis, &i);
4655 update_ssa (TODO_update_ssa);
4658 /* Helper function for htab_traverse. */
/* Copies one (old_name, new_name) entry from the traversed table into
   the result table S.  */
4661 copy_renames (void **slot, void *s)
4663 struct rename_map_elt_d *entry = (struct rename_map_elt_d *) *slot;
4664 htab_t res = (htab_t) s;
4665 tree old_name = entry->old_name;
4666 tree new_name = entry->new_name;
4667 struct rename_map_elt_d tmp;
4670 tmp.old_name = old_name;
4671 x = htab_find_slot (res, &tmp, INSERT);
4674 *x = new_rename_map_elt (old_name, new_name);
4679 /* Translates a CLAST statement STMT to GCC representation in the
4682 - NEXT_E is the edge where new generated code should be attached.
4683 - CONTEXT_LOOP is the loop in which the generated code will be placed
4685 - IVSTACK contains the surrounding loops around the statement to be
4690 translate_clast (scop_p scop, struct loop *context_loop,
4691 struct clast_stmt *stmt, edge next_e, loop_iv_stack ivstack)
/* Dispatch on the CLAST node kind; each case recurses on STMT->next
   so the whole statement list is translated.  */
4696 if (CLAST_STMT_IS_A (stmt, stmt_root))
4697 return translate_clast (scop, context_loop, stmt->next, next_e, ivstack);
4699 if (CLAST_STMT_IS_A (stmt, stmt_user))
/* A user statement maps back to an original graphite_bb: copy its
   block at NEXT_E and rename the IVs via a fresh MAP.  */
4702 CloogStatement *cs = ((struct clast_user_stmt *) stmt)->statement;
4703 graphite_bb_p gbb = (graphite_bb_p) cloog_statement_usr (cs);
4705 if (GBB_BB (gbb) == ENTRY_BLOCK_PTR)
4708 map = htab_create (10, rename_map_elt_info, eq_rename_map_elts, free);
4709 loop_iv_stack_patch_for_consts (ivstack, (struct clast_user_stmt *) stmt);
4710 build_iv_mapping (ivstack, map, gbb, scop);
4711 next_e = copy_bb_and_scalar_dependences (GBB_BB (gbb), scop,
4714 loop_iv_stack_remove_constants (ivstack);
4715 recompute_all_dominators ();
4716 update_ssa (TODO_update_ssa);
4718 return translate_clast (scop, context_loop, stmt->next, next_e, ivstack);
4721 if (CLAST_STMT_IS_A (stmt, stmt_for))
/* A for statement becomes a new loop; its body is translated on the
   edge before the latch, then redirected into the latch.  */
4724 = graphite_create_new_loop (scop, next_e, (struct clast_for *) stmt,
4725 ivstack, context_loop ? context_loop
4727 edge last_e = single_exit (loop);
4729 next_e = translate_clast (scop, loop, ((struct clast_for *) stmt)->body,
4730 single_pred_edge (loop->latch), ivstack);
4731 redirect_edge_succ_nodup (next_e, loop->latch);
4733 set_immediate_dominator (CDI_DOMINATORS, next_e->dest, next_e->src);
4734 loop_iv_stack_pop (ivstack);
/* Split the exit edge so loop-close phis get a block of their own.  */
4735 last_e = single_succ_edge (split_edge (last_e));
4736 insert_loop_close_phis (scop, last_e->src);
4738 recompute_all_dominators ();
4740 return translate_clast (scop, context_loop, stmt->next, last_e, ivstack);
4743 if (CLAST_STMT_IS_A (stmt, stmt_guard))
/* A guard becomes a condition; liveout renames are snapshotted before
   translating the then-branch so guard phis can merge both values.  */
4745 htab_t liveout_before_guard = htab_create (10, rename_map_elt_info,
4746 eq_rename_map_elts, free);
4747 edge last_e = graphite_create_new_guard (scop, next_e,
4748 ((struct clast_guard *) stmt),
4750 edge true_e = get_true_edge_from_guard_bb (next_e->dest);
4751 edge false_e = get_false_edge_from_guard_bb (next_e->dest);
4752 edge exit_true_e = single_succ_edge (true_e->dest);
4753 edge exit_false_e = single_succ_edge (false_e->dest);
4755 htab_traverse (SCOP_LIVEOUT_RENAMES (scop), copy_renames,
4756 liveout_before_guard);
4758 next_e = translate_clast (scop, context_loop,
4759 ((struct clast_guard *) stmt)->then,
4761 insert_guard_phis (scop, last_e->src, exit_true_e, exit_false_e,
4762 liveout_before_guard);
4763 htab_delete (liveout_before_guard);
4764 recompute_all_dominators ();
4767 return translate_clast (scop, context_loop, stmt->next, last_e, ivstack);
4770 if (CLAST_STMT_IS_A (stmt, stmt_block))
/* A block just translates its body, then continues with the tail.  */
4772 next_e = translate_clast (scop, context_loop,
4773 ((struct clast_block *) stmt)->body,
4775 recompute_all_dominators ();
4777 return translate_clast (scop, context_loop, stmt->next, next_e, ivstack);
4783 /* Free the SCATTERING domain list. */
/* Frees each CloogDomain in the list; the list nodes themselves were
   xmalloc'ed in build_cloog_prog.  */
4786 free_scattering (CloogDomainList *scattering)
4790 CloogDomain *dom = cloog_domain (scattering);
4791 CloogDomainList *next = cloog_next_domain (scattering);
4793 cloog_domain_free (dom);
4799 /* Build cloog program for SCoP. */
4802 build_cloog_prog (scop_p scop)
4805 int max_nb_loops = scop_max_loop_depth (scop);
4807 CloogLoop *loop_list = NULL;
4808 CloogBlockList *block_list = NULL;
4809 CloogDomainList *scattering = NULL;
4810 CloogProgram *prog = SCOP_PROG (scop);
/* 2 * depth + 1 scattering dimensions: interleaved static (textual
   order) and dynamic (loop iterator) dimensions.  */
4811 int nbs = 2 * max_nb_loops + 1;
4812 int *scaldims = (int *) xmalloc (nbs * (sizeof (int)));
4814 cloog_program_set_nb_scattdims (prog, nbs);
4815 initialize_cloog_names (scop);
4817 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gbb); i++)
4819 /* Build new block. */
4820 CloogMatrix *domain = GBB_DOMAIN (gbb);
4821 CloogStatement *stmt = cloog_statement_alloc (GBB_BB (gbb)->index);
4822 CloogBlock *block = cloog_block_alloc (stmt, 0, NULL,
4823 nb_loops_around_gb (gbb));
/* Remember the graphite_bb on the statement so translate_clast can
   map the CLAST back to GIMPLE.  */
4824 cloog_statement_set_usr (stmt, gbb);
4826 /* Add empty domain to all bbs, which do not yet have a domain, as they
4827 are not part of any loop. */
4830 domain = cloog_matrix_alloc (0, scop_nb_params (scop) + 2);
4831 GBB_DOMAIN (gbb) = domain;
4834 /* Build loop list. */
4836 CloogLoop *new_loop_list = cloog_loop_malloc ();
4837 cloog_loop_set_next (new_loop_list, loop_list);
4838 cloog_loop_set_domain (new_loop_list,
4839 cloog_domain_matrix2domain (domain));
4840 cloog_loop_set_block (new_loop_list, block);
4841 loop_list = new_loop_list;
4844 /* Build block list. */
4846 CloogBlockList *new_block_list = cloog_block_list_malloc ();
4848 cloog_block_list_set_next (new_block_list, block_list);
4849 cloog_block_list_set_block (new_block_list, block);
4850 block_list = new_block_list;
4853 /* Build scattering list. */
4855 /* XXX: Replace with cloog_domain_list_alloc(), when available. */
4856 CloogDomainList *new_scattering
4857 = (CloogDomainList *) xmalloc (sizeof (CloogDomainList));
4858 CloogMatrix *scat_mat = schedule_to_scattering (gbb, nbs);
4860 cloog_set_next_domain (new_scattering, scattering);
4861 cloog_set_domain (new_scattering,
4862 cloog_domain_matrix2domain (scat_mat));
4863 scattering = new_scattering;
4864 cloog_matrix_free (scat_mat);
4868 cloog_program_set_loop (prog, loop_list);
4869 cloog_program_set_blocklist (prog, block_list);
4871 for (i = 0; i < nbs; i++)
4874 cloog_program_set_scaldims (prog, scaldims);
4876 /* Extract scalar dimensions to simplify the code generation problem. */
4877 cloog_program_extract_scalars (prog, scattering);
4879 /* Apply scattering. */
4880 cloog_program_scatter (prog, scattering);
4881 free_scattering (scattering);
4883 /* Iterators corresponding to scalar dimensions have to be extracted. */
4884 cloog_names_scalarize (cloog_program_names (prog), nbs,
4885 cloog_program_scaldims (prog));
4887 /* Free blocklist. */
4889 CloogBlockList *next = cloog_program_blocklist (prog);
4893 CloogBlockList *toDelete = next;
4894 next = cloog_block_list_next (next);
/* Detach next/block pointers before freeing so the shared CloogBlocks
   referenced from the loop list are not freed here.  */
4895 cloog_block_list_set_next (toDelete, NULL);
4896 cloog_block_list_set_block (toDelete, NULL);
4897 cloog_block_list_free (toDelete);
4899 cloog_program_set_blocklist (prog, NULL);
4903 /* Return the options that will be used in GLOOG. */
/* Caller owns the returned CloogOptions and must release them with
   cloog_options_free.  */
4905 static CloogOptions *
4906 set_cloog_options (void)
4908 CloogOptions *options = cloog_options_malloc ();
4910 /* Change cloog output language to C. If we do use FORTRAN instead, cloog
4911 will stop e.g. with "ERROR: unbounded loops not allowed in FORTRAN.", if
4912 we pass an incomplete program to cloog. */
4913 options->language = LANGUAGE_C;
4915 /* Enable complex equality spreading: removes dummy statements
4916 (assignments) in the generated code which repeats the
4917 substitution equations for statements. This is useless for
4921 /* Enable C pretty-printing mode: normalizes the substitution
4922 equations for statements. */
4925 /* Allow cloog to build strides with a stride width different to one.
4926 This example has stride = 4:
4928 for (i = 0; i < 20; i += 4)
4930 options->strides = 1;
4932 /* Disable optimizations and make cloog generate source code closer to the
4933 input. This is useful for debugging, but later we want the optimized
4936 XXX: We can not disable optimizations, as loop blocking is not working
4941 options->l = INT_MAX;
4947 /* Prints STMT to STDERR. */
/* Debugging entry point, meant to be called from a debugger.
   NOTE(review): OPTIONS is not freed in the visible lines -- confirm
   the elided tail calls cloog_options_free.  */
4950 debug_clast_stmt (struct clast_stmt *stmt)
4952 CloogOptions *options = set_cloog_options ();
4954 pprint (stderr, stmt, 0, options);
4957 /* Find the right transform for the SCOP, and return a Cloog AST
4958 representing the new form of the program. */
4960 static struct clast_stmt *
4961 find_transform (scop_p scop)
4963 struct clast_stmt *stmt;
4964 CloogOptions *options = set_cloog_options ();
4966 /* Connect new cloog prog generation to graphite. */
4967 build_cloog_prog (scop)
4969 if (dump_file && (dump_flags & TDF_DETAILS))
4971 fprintf (dump_file, "Cloog Input [\n");
4972 cloog_program_print (dump_file, SCOP_PROG(scop));
4973 fprintf (dump_file, "]\n");
/* Run CLooG's code generation and extract the resulting AST.  */
4976 SCOP_PROG (scop) = cloog_program_generate (SCOP_PROG (scop), options);
4977 stmt = cloog_clast_create (SCOP_PROG (scop), options);
4979 if (dump_file && (dump_flags & TDF_DETAILS))
4981 fprintf (dump_file, "Cloog Output[\n");
4982 pprint (dump_file, stmt, 0, options);
4983 cloog_program_dump_cloog (dump_file, SCOP_PROG (scop));
4984 fprintf (dump_file, "]\n");
4987 cloog_options_free (options);
4991 /* Remove from the CFG the REGION. */
4994 remove_sese_region (sese region)
4996 VEC (basic_block, heap) *bbs = NULL;
4997 basic_block entry_bb = SESE_ENTRY (region)->dest;
4998 basic_block exit_bb = SESE_EXIT (region)->dest;
/* Collect every block of the region (including the entry block,
   excluding EXIT_BB) and delete them all.  */
5002 VEC_safe_push (basic_block, heap, bbs, entry_bb);
5003 gather_blocks_in_sese_region (entry_bb, exit_bb, &bbs);
5005 for (i = 0; VEC_iterate (basic_block, bbs, i, bb); i++)
5006 delete_basic_block (bb);
5008 VEC_free (basic_block, heap, bbs);
/* An if-region: a condition block guarding a true region and a false
   region (fields are in elided lines of this struct).  */
5011 typedef struct ifsese_d
/* Accessors for the edges of the IF_REGION and its condition block.  */
5019 if_region_entry (ifsese if_region)
5021 return SESE_ENTRY (if_region->region);
5025 if_region_exit (ifsese if_region)
5027 return SESE_EXIT (if_region->region);
5030 static inline basic_block
5031 if_region_get_condition_block (ifsese if_region)
5033 return if_region_entry (if_region)->dest;
/* Moves REGION into the false branch of IF_REGION, replacing the
   dummy block that was there, and fixes loop-exit records.  */
5037 if_region_set_false_region (ifsese if_region, sese region)
5039 basic_block condition = if_region_get_condition_block (if_region);
5040 edge false_edge = get_false_edge_from_guard_bb (condition);
5041 basic_block dummy = false_edge->dest;
5042 edge entry_region = SESE_ENTRY (region);
5043 edge exit_region = SESE_EXIT (region);
5044 basic_block before_region = entry_region->src;
5045 basic_block last_in_region = exit_region->src;
/* Look up the loop-exit record of the old exit edge; it is migrated
   to FALSE_EDGE below.  */
5046 void **slot = htab_find_slot_with_hash (current_loops->exits, exit_region,
5047 htab_hash_pointer (exit_region),
/* Swap edge flags so the region's entry edge becomes the false edge
   of the condition.  */
5050 entry_region->flags = false_edge->flags;
5051 false_edge->flags = exit_region->flags;
5053 redirect_edge_pred (entry_region, condition);
5054 redirect_edge_pred (exit_region, before_region);
5055 redirect_edge_pred (false_edge, last_in_region);
5056 redirect_edge_succ (false_edge, single_succ (dummy));
5057 delete_basic_block (dummy);
5059 exit_region->flags = EDGE_FALLTHRU;
5060 recompute_all_dominators ();
5062 SESE_EXIT (region) = false_edge;
5063 if_region->false_region = region;
/* Re-register the loop exit under the new edge FALSE_EDGE.  */
5067 struct loop_exit *loop_exit = GGC_CNEW (struct loop_exit);
5069 memcpy (loop_exit, *((struct loop_exit **) slot), sizeof (struct loop_exit));
5070 htab_clear_slot (current_loops->exits, slot);
5072 slot = htab_find_slot_with_hash (current_loops->exits, false_edge,
5073 htab_hash_pointer (false_edge),
5075 loop_exit->e = false_edge;
5077 false_edge->src->loop_father->exits->next = loop_exit;
/* Creates on edge ENTRY an empty "if (CONDITION) then ... else ..."
   region and returns the ifsese describing it.  */
5082 create_if_region_on_edge (edge entry, tree condition)
5086 sese sese_region = GGC_NEW (struct sese_d);
5087 sese true_region = GGC_NEW (struct sese_d);
5088 sese false_region = GGC_NEW (struct sese_d);
5089 ifsese if_region = GGC_NEW (struct ifsese_d);
5090 edge exit = create_empty_if_region_on_edge (entry, condition);
5092 if_region->region = sese_region;
5093 if_region->region->entry = entry;
5094 if_region->region->exit = exit;
/* Identify the two branches by their edge flags and record each as a
   single-block SESE region.  */
5096 FOR_EACH_EDGE (e, ei, entry->dest->succs)
5098 if (e->flags & EDGE_TRUE_VALUE)
5100 true_region->entry = e;
5101 true_region->exit = single_succ_edge (e->dest);
5102 if_region->true_region = true_region;
5104 else if (e->flags & EDGE_FALSE_VALUE)
5106 false_region->entry = e;
5107 false_region->exit = single_succ_edge (e->dest);
5108 if_region->false_region = false_region;
5115 /* Moves REGION in a condition expression:
5123 move_sese_in_condition (sese region)
5125 basic_block pred_block = split_edge (SESE_ENTRY (region));
5126 ifsese if_region = NULL;
5128 SESE_ENTRY (region) = single_succ_edge (pred_block);
/* The guard condition is the constant 1: the original code becomes
   the false branch and is initially never executed.  */
5129 if_region = create_if_region_on_edge (single_pred_edge (pred_block), integer_one_node);
5130 if_region_set_false_region (if_region, region);
5135 /* Add exit phis for USE on EXIT. */
/* Inserts "res = phi (USE on FALSE_E, USE on TRUE_E)" in block EXIT;
   both arguments start out as USE and are renamed later.  */
5138 scop_add_exit_phis_edge (basic_block exit, tree use, edge false_e, edge true_e)
5140 gimple phi = create_phi_node (use, exit);
5142 create_new_def_for (gimple_phi_result (phi), phi,
5143 gimple_phi_result_ptr (phi));
5144 add_phi_arg (phi, use, false_e, UNKNOWN_LOCATION);
5145 add_phi_arg (phi, use, true_e, UNKNOWN_LOCATION);
5148 /* Add phi nodes for VAR that is used in LIVEIN. Phi nodes are
5149 inserted in block BB. */
5152 scop_add_exit_phis_var (basic_block bb, tree var, bitmap livein,
5153 edge false_e, edge true_e)
5156 basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
/* For registers the definition block is not livein for VAR; virtual
   operands keep it set.  */
5158 if (is_gimple_reg (var))
5159 bitmap_clear_bit (livein, def_bb->index);
5161 bitmap_set_bit (livein, def_bb->index);
/* Propagate liveness from the definition over the CFG.  */
5163 def = BITMAP_ALLOC (NULL);
5164 bitmap_set_bit (def, def_bb->index);
5165 compute_global_livein (livein, def);
5168 scop_add_exit_phis_edge (bb, var, false_e, true_e);
5171 /* Insert in the block BB phi nodes for variables defined in REGION
5172 and used outside the REGION. The code generation moves REGION in
5173 the else clause of an "if (1)" and generates code in the then
5174 clause that is at this point empty:
5183 scop_insert_phis_for_liveouts (sese region, basic_block bb,
5184 edge false_e, edge true_e)
5189 update_ssa (TODO_update_ssa);
/* One phi per liveout SSA version, merging the value from the original
   code (FALSE_E) and the generated code (TRUE_E).  */
5191 EXECUTE_IF_SET_IN_BITMAP (SESE_LIVEOUT (region), 0, i, bi)
5192 scop_add_exit_phis_var (bb, ssa_name (i), SESE_LIVEIN_VER (region, i),
5195 update_ssa (TODO_update_ssa);
5198 /* Get the definition of NAME before the SCOP. Keep track of the
5199 basic blocks that have been VISITED in a bitmap. */
5202 get_vdef_before_scop (scop_p scop, tree name, sbitmap visited)
5205 gimple def_stmt = SSA_NAME_DEF_STMT (name);
5206 basic_block def_bb = gimple_bb (def_stmt);
/* A definition outside the SCOP region is already "before" it.  */
5209 || !bb_in_sese_p (def_bb, SCOP_REGION (scop)))
/* The VISITED bitmap breaks cycles through phi nodes.  */
5212 if (TEST_BIT (visited, def_bb->index))
5215 SET_BIT (visited, def_bb->index);
5217 switch (gimple_code (def_stmt))
/* For a phi, recurse into each argument until a definition before
   the SCOP is found.  */
5220 for (i = 0; i < gimple_phi_num_args (def_stmt); i++)
5222 tree arg = gimple_phi_arg_def (def_stmt, i);
5223 tree res = get_vdef_before_scop (scop, arg, visited);
5234 /* Adjust a virtual phi node PHI that is placed at the end of the
5235 generated code for SCOP:
5238 | generated code from REGION;
5242 The FALSE_E edge comes from the original code, TRUE_E edge comes
5243 from the code generated for the SCOP. */
5246 scop_adjust_vphi (scop_p scop, gimple phi, edge true_e)
/* Exit phis built by scop_insert_phis_for_liveouts have exactly two
   arguments: original code and generated code.  */
5250 gcc_assert (gimple_phi_num_args (phi) == 2);
5252 for (i = 0; i < gimple_phi_num_args (phi); i++)
5253 if (gimple_phi_arg_edge (phi, i) == true_e)
5255 tree true_arg, false_arg, before_scop_arg;
5258 true_arg = gimple_phi_arg_def (phi, i);
5259 if (!SSA_NAME_IS_DEFAULT_DEF (true_arg))
5262 false_arg = gimple_phi_arg_def (phi, i == 0 ? 1 : 0);
5263 if (SSA_NAME_IS_DEFAULT_DEF (false_arg))
/* Replace the argument on the generated-code edge with the virtual
   definition that was current just before the SCOP.  */
5266 visited = sbitmap_alloc (last_basic_block);
5267 sbitmap_zero (visited);
5268 before_scop_arg = get_vdef_before_scop (scop, false_arg, visited);
5269 gcc_assert (before_scop_arg != NULL_TREE);
5270 SET_PHI_ARG_DEF (phi, i, before_scop_arg);
5271 sbitmap_free (visited);
5275 /* Adjusts the phi nodes in the block BB for variables defined in
5276 SCOP_REGION and used outside the SCOP_REGION. The code generation
5277 moves SCOP_REGION in the else clause of an "if (1)" and generates
5278 code in the then clause:
5281 | generated code from REGION;
5285 To adjust the phi nodes after the condition, SCOP_LIVEOUT_RENAMES
5286 hash table is used: this stores for a name that is part of the
5287 LIVEOUT of SCOP_REGION its new name in the generated code. */
5290 scop_adjust_phis_for_liveouts (scop_p scop, basic_block bb, edge false_e,
5293 gimple_stmt_iterator si;
5295 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5298 unsigned false_i = 0;
5299 gimple phi = gsi_stmt (si);
/* Virtual phis are handled separately by scop_adjust_vphi.  */
5301 if (!is_gimple_reg (PHI_RESULT (phi)))
5303 scop_adjust_vphi (scop, phi, true_e);
/* Remember which argument comes from the original code (FALSE_E)...  */
5307 for (i = 0; i < gimple_phi_num_args (phi); i++)
5308 if (gimple_phi_arg_edge (phi, i) == false_e)
/* ...and rewrite the argument on the generated-code edge (TRUE_E)
   with the liveout rename of the original value.  */
5314 for (i = 0; i < gimple_phi_num_args (phi); i++)
5315 if (gimple_phi_arg_edge (phi, i) == true_e)
5317 tree old_name = gimple_phi_arg_def (phi, false_i);
5318 tree new_name = get_new_name_from_old_name
5319 (SCOP_LIVEOUT_RENAMES (scop), old_name);
5321 gcc_assert (old_name != new_name);
5322 SET_PHI_ARG_DEF (phi, i, new_name);
5327 /* Returns the first cloog name used in EXPR. */
5330 find_cloog_iv_in_expr (struct clast_expr *expr)
5332 struct clast_term *term = (struct clast_term *) expr;
5334 if (expr->type == expr_term
5338 if (expr->type == expr_term)
/* For a reduction, search each element and return the first hit.  */
5341 if (expr->type == expr_red)
5344 struct clast_reduction *red = (struct clast_reduction *) expr;
5346 for (i = 0; i < red->n; i++)
5348 const char *res = find_cloog_iv_in_expr ((red)->elts[i]);
5358 /* Build for a clast_user_stmt USER_STMT a map between the CLAST
5359 induction variables and the corresponding GCC old induction
5360 variables. This information is stored on each GRAPHITE_BB. */
5363 compute_cloog_iv_types_1 (graphite_bb_p gbb,
5364 struct clast_user_stmt *user_stmt)
5366 struct clast_stmt *t;
/* One substitution per loop level; INDEX tracks the level.  */
5369 for (t = user_stmt->substitutions; t; t = t->next, index++)
5372 struct ivtype_map_elt_d tmp;
5373 struct clast_expr *expr = (struct clast_expr *)
5374 ((struct clast_assignment *)t)->RHS;
5376 /* Create an entry (clast_var, type). */
5377 tmp.cloog_iv = find_cloog_iv_in_expr (expr);
5381 slot = htab_find_slot (GBB_CLOOG_IV_TYPES (gbb), &tmp, INSERT);
/* The type of the CLAST IV is taken from the old GCC IV of the loop
   at this depth; default to integer_type_node when there is none.  */
5385 loop_p loop = gbb_loop_at_index (gbb, index);
5386 tree oldiv = oldiv_for_loop (GBB_SCOP (gbb), loop);
5387 tree type = oldiv ? TREE_TYPE (oldiv) : integer_type_node;
5388 *slot = new_ivtype_map_elt (tmp.cloog_iv, type);
5393 /* Walk the CLAST tree starting from STMT and build for each
5394 clast_user_stmt a map between the CLAST induction variables and the
5395 corresponding GCC old induction variables. This information is
5396 stored on each GRAPHITE_BB. */
5399 compute_cloog_iv_types (struct clast_stmt *stmt)
5404 if (CLAST_STMT_IS_A (stmt, stmt_root))
5407 if (CLAST_STMT_IS_A (stmt, stmt_user))
/* Allocate the per-bb IV-type table lazily, then fill it.  */
5409 CloogStatement *cs = ((struct clast_user_stmt *) stmt)->statement;
5410 graphite_bb_p gbb = (graphite_bb_p) cloog_statement_usr (cs);
5411 GBB_CLOOG_IV_TYPES (gbb) = htab_create (10, ivtype_map_elt_info,
5412 eq_ivtype_map_elts, free);
5413 compute_cloog_iv_types_1 (gbb, (struct clast_user_stmt *) stmt);
/* Recurse into the body of compound statements...  */
5417 if (CLAST_STMT_IS_A (stmt, stmt_for))
5419 struct clast_stmt *s = ((struct clast_for *) stmt)->body;
5420 compute_cloog_iv_types (s);
5424 if (CLAST_STMT_IS_A (stmt, stmt_guard))
5426 struct clast_stmt *s = ((struct clast_guard *) stmt)->then;
5427 compute_cloog_iv_types (s);
5431 if (CLAST_STMT_IS_A (stmt, stmt_block))
5433 struct clast_stmt *s = ((struct clast_block *) stmt)->body;
5434 compute_cloog_iv_types (s);
/* ...and always continue with the next statement in the list.  */
5441 compute_cloog_iv_types (stmt->next);
5444 /* GIMPLE Loop Generator: generates loops from STMT in GIMPLE form for
5445 the given SCOP. Return true if code generation succeeded. */
5448 gloog (scop_p scop, struct clast_stmt *stmt)
5450 edge new_scop_exit_edge = NULL;
5451 VEC (iv_stack_entry_p, heap) *ivstack = VEC_alloc (iv_stack_entry_p, heap,
5453 loop_p context_loop;
5454 ifsese if_region = NULL;
5456 recompute_all_dominators ();
/* Wrap the region in "if (1) then <empty> else <region>", and add
   exit phis merging original and to-be-generated values.  */
5458 if_region = move_sese_in_condition (SCOP_REGION (scop));
5459 sese_build_livein_liveouts (SCOP_REGION (scop));
5460 scop_insert_phis_for_liveouts (SCOP_REGION (scop),
5461 if_region->region->exit->src,
5462 if_region->false_region->exit,
5463 if_region->true_region->exit);
5464 recompute_all_dominators ();
5466 context_loop = SESE_ENTRY (SCOP_REGION (scop))->src->loop_father;
5467 compute_cloog_iv_types (stmt);
/* Generate the new code in the (empty) true branch.  */
5469 new_scop_exit_edge = translate_clast (scop, context_loop, stmt,
5470 if_region->true_region->entry,
5472 free_loop_iv_stack (&ivstack);
5473 cloog_clast_free (stmt);
/* Fix the exit phis so they pick up the generated definitions.  */
5476 scop_adjust_phis_for_liveouts (scop,
5477 if_region->region->exit->src,
5478 if_region->false_region->exit,
5479 if_region->true_region->exit);
5481 recompute_all_dominators ();
5486 /* Returns the number of data references in SCOP. */
5489 nb_data_refs_in_scop (scop_p scop)
/* Sum the data references of every graphite_bb of the SCOP.  */
5495 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gbb); i++)
5496 res += VEC_length (data_reference_p, GBB_DATA_REFS (gbb));
5501 /* Move the loop at index LOOP and insert it before index NEW_LOOP_POS.
5502 This transformartion is only valid, if the loop nest between i and k is
5503 perfectly nested. Therefore we do not need to change the static schedule.
5507 for (i = 0; i < 50; i++)
5509 for (k = 5; k < 100; k++)
5512 To move k before i use:
5514 graphite_trans_bb_move_loop (A, 2, 0)
5516 for (k = 5; k < 100; k++)
5517 for (i = 0; i < 50; i++)
5523 graphite_trans_bb_move_loop (A, 0, 2)
5525 This function does not check the validity of interchanging loops.
5526 This should be checked before calling this function. */
5529 graphite_trans_bb_move_loop (graphite_bb_p gb, int loop,
5532 CloogMatrix *domain = GBB_DOMAIN (gb);
5536 gcc_assert (loop < gbb_nb_loops (gb)
5537 && new_loop_pos < gbb_nb_loops (gb));
5539 /* Update LOOPS vector. */
5540 tmp_loop_p = VEC_index (loop_p, GBB_LOOPS (gb), loop);
5541 VEC_ordered_remove (loop_p, GBB_LOOPS (gb), loop);
5542 VEC_safe_insert (loop_p, heap, GBB_LOOPS (gb), new_loop_pos, tmp_loop_p);
5544 /* Move the domain columns. */
/* Column LOOP + 1 holds the moved loop's coefficients (column 0 is
   the eq/ineq marker).  Shift the intervening columns and drop the
   saved column in at its new position.  */
5545 if (loop < new_loop_pos)
5546 for (row = 0; row < domain->NbRows; row++)
5550 value_assign (tmp, domain->p[row][loop + 1]);
5552 for (j = loop ; j < new_loop_pos - 1; j++)
5553 value_assign (domain->p[row][j + 1], domain->p[row][j + 2]);
5555 value_assign (domain->p[row][new_loop_pos], tmp);
5559 for (row = 0; row < domain->NbRows; row++)
5563 value_assign (tmp, domain->p[row][loop + 1]);
5565 for (j = loop ; j > new_loop_pos; j--)
5566 value_assign (domain->p[row][j + 1], domain->p[row][j]);
5568 value_assign (domain->p[row][new_loop_pos + 1], tmp);
5573 /* Get the index of the column representing constants in the DOMAIN
/* The constant term is always the last column of a CLooG matrix.  */
5577 const_column_index (CloogMatrix *domain)
5579 return domain->NbColumns - 1;
5583 /* Get the first index that is positive or negative, determined
5584 following the value of POSITIVE, in matrix DOMAIN in COLUMN. */
5587 get_first_matching_sign_row_index (CloogMatrix *domain, int column,
5592 for (row = 0; row < domain->NbRows; row++)
5594 int val = value_get_si (domain->p[row][column]);
/* Rows with a zero coefficient in COLUMN never match.  */
5596 if (val > 0 && positive)
5599 else if (val < 0 && !positive)
5606 /* Get the lower bound of COLUMN in matrix DOMAIN. */
/* A positive coefficient on the IV column means a lower bound.  */
5609 get_lower_bound_row (CloogMatrix *domain, int column)
5611 return get_first_matching_sign_row_index (domain, column, true);
5614 /* Get the upper bound of COLUMN in matrix DOMAIN. */
/* A negative coefficient on the IV column means an upper bound.  */
5617 get_upper_bound_row (CloogMatrix *domain, int column)
5619 return get_first_matching_sign_row_index (domain, column, false);
5622 /* Copies the OLD_ROW constraint from OLD_DOMAIN to the NEW_DOMAIN at
5626 copy_constraint (CloogMatrix *old_domain, CloogMatrix *new_domain,
5627 int old_row, int new_row)
/* Both matrices must share the column layout, and the rows must be in
   range.  */
5631 gcc_assert (old_domain->NbColumns == new_domain->NbColumns
5632 && old_row < old_domain->NbRows
5633 && new_row < new_domain->NbRows);
5635 for (i = 0; i < old_domain->NbColumns; i++)
5636 value_assign (new_domain->p[new_row][i], old_domain->p[old_row][i]);
5639 /* Swap coefficients of variables X and Y on row R. */
5642 swap_constraint_variables (CloogMatrix *domain,
5643 int r, int x, int y)
5645 value_swap (domain->p[r][x], domain->p[r][y]);
5648 /* Scale by X the coefficient C of constraint at row R in DOMAIN. */
5651 scale_constraint_variable (CloogMatrix *domain,
5652 int r, int c, int x)
/* Go through a temporary GMP value since value_multiply takes Value
   operands, not plain ints.  */
5654 Value strip_size_value;
5655 value_init (strip_size_value);
5656 value_set_si (strip_size_value, x);
5657 value_multiply (domain->p[r][c], domain->p[r][c], strip_size_value);
5658 value_clear (strip_size_value);
5661 /* Strip mines the loop of BB at the position LOOP_DEPTH with STRIDE.
5662 Always valid, but not always a performance improvement. */
5665 graphite_trans_bb_strip_mine (graphite_bb_p gb, int loop_depth, int stride)
5669 CloogMatrix *domain = GBB_DOMAIN (gb);
/* The new domain gets one extra column (the new strip loop's IV) and
   three extra rows (its bounds and the tile upper bound).  */
5670 CloogMatrix *new_domain = cloog_matrix_alloc (domain->NbRows + 3,
5671 domain->NbColumns + 1);
5673 int col_loop_old = loop_depth + 2;
5674 int col_loop_strip = col_loop_old - 1;
5676 gcc_assert (loop_depth <= gbb_nb_loops (gb) - 1);
5678 VEC_safe_insert (loop_p, heap, GBB_LOOPS (gb), loop_depth, NULL);
5680 GBB_DOMAIN (gb) = new_domain;
/* Copy the old domain, shifting columns after LOOP_DEPTH right by one
   to make room for the strip loop's column.  */
5682 for (row = 0; row < domain->NbRows; row++)
5683 for (col = 0; col < domain->NbColumns; col++)
5684 if (col <= loop_depth)
5685 value_assign (new_domain->p[row][col], domain->p[row][col]);
5687 value_assign (new_domain->p[row][col + 1], domain->p[row][col]);
5689 row = domain->NbRows;
5691 /* Lower bound of the outer stripped loop. */
5692 copy_constraint (new_domain, new_domain,
5693 get_lower_bound_row (new_domain, col_loop_old), row);
5694 swap_constraint_variables (new_domain, row, col_loop_old, col_loop_strip);
5697 /* Upper bound of the outer stripped loop. */
5698 copy_constraint (new_domain, new_domain,
5699 get_upper_bound_row (new_domain, col_loop_old), row);
5700 swap_constraint_variables (new_domain, row, col_loop_old, col_loop_strip);
5701 scale_constraint_variable (new_domain, row, col_loop_strip, stride);
5704 /* Lower bound of a tile starts at "stride * outer_iv". */
5705 row = get_lower_bound_row (new_domain, col_loop_old);
5706 value_set_si (new_domain->p[row][0], 1);
5707 value_set_si (new_domain->p[row][const_column_index (new_domain)], 0);
5708 value_set_si (new_domain->p[row][col_loop_old], 1);
5709 value_set_si (new_domain->p[row][col_loop_strip], -1 * stride);
5711 /* Upper bound of a tile stops at "stride * outer_iv + stride - 1",
5712 or at the old upper bound that is not modified. */
5713 row = new_domain->NbRows - 1;
5714 value_set_si (new_domain->p[row][0], 1);
5715 value_set_si (new_domain->p[row][col_loop_old], -1);
5716 value_set_si (new_domain->p[row][col_loop_strip], stride);
5717 value_set_si (new_domain->p[row][const_column_index (new_domain)],
5720 cloog_matrix_free (domain);
5722 /* Update static schedule. */
/* The strip loop introduces an extra level; rebuild the static
   schedule vector around LOOP_DEPTH.  */
5725 int nb_loops = gbb_nb_loops (gb);
5726 lambda_vector new_schedule = lambda_vector_new (nb_loops + 1);
5728 for (i = 0; i <= loop_depth; i++)
5729 new_schedule[i] = GBB_STATIC_SCHEDULE (gb)[i];
5731 for (i = loop_depth + 1; i <= nb_loops - 2; i++)
5732 new_schedule[i + 2] = GBB_STATIC_SCHEDULE (gb)[i];
5734 GBB_STATIC_SCHEDULE (gb) = new_schedule;
5738 /* Returns true when the strip mining of LOOP_INDEX by STRIDE is
5739 profitable or undecidable. GB is the statement around which the
5740 loops will be strip mined. */
/* NOTE(review): the return-type line, the LOOP_INDEX parameter line,
   local declarations, and the actual "return true/false" statements are
   elided in this extract.  Visible logic: query the loop's iteration
   count via scalar evolution; when the count is unknown or not a
   compile-time integer constant, the question is undecidable (and per
   the comment above, treated as profitable).  When the count is known
   and smaller than STRIDE, strip mining cannot help, so the function
   reports non-profitability (after the optional dump below).  */
5743 strip_mine_profitable_p (graphite_bb_p gb, int stride,
5752 loop = VEC_index (loop_p, GBB_LOOPS (gb), loop_index);
5753 exit = single_exit (loop);
5755 niter = find_loop_niter (loop, &exit);
/* Unknown or symbolic trip count: undecidable.  */
5756 if (niter == chrec_dont_know
5757 || TREE_CODE (niter) != INTEGER_CST)
5760 niter_val = int_cst_value (niter);
/* Fewer iterations than the stride: a tile would never fill up.  */
5762 if (niter_val < stride)
5765 if (dump_file && (dump_flags & TDF_DETAILS))
5767 fprintf (dump_file, "\nStrip Mining is not profitable for loop %d:",
5769 fprintf (dump_file, "number of iterations is too low.\n");
5776 /* Determines when the interchange of LOOP_A and LOOP_B belonging to
5777 SCOP is legal. DEPTH is the number of loops around. */
/* NOTE(review): the return type, braces, "res" local and the return
   statements are elided in this extract.  The visible strategy: build
   the dependence relations for the loop nest rooted at LOOP_A, model
   the interchange as a row exchange on an identity transformation
   matrix, and ask the lambda framework whether that transform
   preserves all dependences.  The matrix is swapped back afterwards so
   the failure path leaves it as the identity.  */
5780 is_interchange_valid (scop_p scop, int loop_a, int loop_b, int depth)
5783 VEC (ddr_p, heap) *dependence_relations;
5784 VEC (data_reference_p, heap) *datarefs;
5786 struct loop *nest = VEC_index (loop_p, SCOP_LOOP_NEST (scop), loop_a);
5787 lambda_trans_matrix trans;
5789 gcc_assert (loop_a < loop_b);
/* The initial capacities (10 / 10*10) are just allocation hints.  */
5791 dependence_relations = VEC_alloc (ddr_p, heap, 10 * 10);
5792 datarefs = VEC_alloc (data_reference_p, heap, 10);
/* When dependence analysis itself fails, the interchange cannot be
   proven legal (the early-exit path is elided here).  */
5794 if (!compute_data_dependences_for_loop (nest, true, &datarefs,
5795 &dependence_relations))
5798 if (dump_file && (dump_flags & TDF_DETAILS))
5799 dump_ddrs (dump_file, dependence_relations);
/* Identity transform, then exchange the rows for LOOP_A and LOOP_B
   (LOOP_A is row 0 of the nest rooted at it).  */
5801 trans = lambda_trans_matrix_new (depth, depth);
5802 lambda_matrix_id (LTM_MATRIX (trans), depth);
5804 lambda_matrix_row_exchange (LTM_MATRIX (trans), 0, loop_b - loop_a);
5806 if (!lambda_transform_legal_p (trans, depth, dependence_relations))
/* Undo the exchange on the illegal path (result handling elided).  */
5808 lambda_matrix_row_exchange (LTM_MATRIX (trans), 0, loop_b - loop_a);
5814 free_dependence_relations (dependence_relations);
5815 free_data_refs (datarefs);
5819 /* Loop block the LOOPS innermost loops of GB with stride size STRIDE.
/* NOTE(review): the example below is garbled by the extraction -- the
   original comment shows the nest BEFORE blocking (plain i++/k++/l++
   loops) and AFTER blocking separately; "i++=4" is not valid C and is
   an artifact of two examples being merged.  Consult the full file.  */
5823 for (i = 0; i <= 50; i++=4)
5824 for (k = 0; k <= 100; k++=4)
5825 for (l = 0; l <= 200; l++=4)
5828 To strip mine the two inner most loops with stride = 4 call:
5830 graphite_trans_bb_block (A, 4, 2)
5832 for (i = 0; i <= 50; i++)
5833 for (kk = 0; kk <= 100; kk+=4)
5834 for (ll = 0; ll <= 200; ll+=4)
5835 for (k = kk; k <= min (100, kk + 3); k++)
5836 for (l = ll; l <= min (200, ll + 3); l++)
/* NOTE(review): the return type, braces, locals (i, j) and the
   early-"return false" statements after the validity/profitability
   checks are elided in this extract.  Pipeline: (1) verify every pair
   of the LOOPS innermost loops may legally be interchanged, (2) verify
   strip mining pays off for each of them, (3) strip mine each loop,
   (4) interchange the strip loops outward to form the tile nest.  */
5841 graphite_trans_bb_block (graphite_bb_p gb, int stride, int loops)
5844 int nb_loops = gbb_nb_loops (gb);
/* Index of the outermost loop that participates in the blocking.  */
5845 int start = nb_loops - loops;
5846 scop_p scop = GBB_SCOP (gb);
5848 gcc_assert (scop_contains_loop (scop, gbb_loop (gb)));
/* Blocking requires full permutability of the chosen band: every pair
   interchange must be legal.  */
5850 for (i = start ; i < nb_loops; i++)
5851 for (j = i + 1; j < nb_loops; j++)
5852 if (!is_interchange_valid (scop, i, j, nb_loops))
5854 if (dump_file && (dump_flags & TDF_DETAILS))
5856 "\nInterchange not valid for loops %d and %d:\n", i, j);
5859 else if (dump_file && (dump_flags & TDF_DETAILS))
5861 "\nInterchange valid for loops %d and %d:\n", i, j);
5863 /* Check if strip mining is profitable for every loop. */
5864 for (i = 0; i < nb_loops - start; i++)
5865 if (!strip_mine_profitable_p (gb, stride, start + i))
5868 /* Strip mine loops. */
/* Each strip mine inserts one loop level, hence the 2*i offset.  */
5869 for (i = 0; i < nb_loops - start; i++)
5870 graphite_trans_bb_strip_mine (gb, start + 2 * i, stride);
5872 /* Interchange loops. */
5873 for (i = 1; i < nb_loops - start; i++)
5874 graphite_trans_bb_move_loop (gb, start + 2 * i, start + i);
5876 if (dump_file && (dump_flags & TDF_DETAILS))
5877 fprintf (dump_file, "\nLoops containing BB %d will be loop blocked.\n",
5878 GBB_BB (gb)->index);
5883 /* Loop block LOOPS innermost loops of a loop nest. BBS represent the
5884 basic blocks that belong to the loop nest to be blocked. */
/* NOTE(review): the "static bool" line, braces, and the declarations of
   "i" and "gb" are elided in this extract.  Applies blocking to each
   graphite bb of the nest in turn; returns whether ANY bb was actually
   transformed (logical OR accumulation).  The stride is a fixed
   placeholder pending automatic selection (see TODO).  */
5887 graphite_trans_loop_block (VEC (graphite_bb_p, heap) *bbs, int loops)
5891 bool transform_done = false;
5893 /* TODO: - Calculate the stride size automatically. */
5894 int stride_size = 51;
5896 for (i = 0; VEC_iterate (graphite_bb_p, bbs, i, gb); i++)
5897 transform_done |= graphite_trans_bb_block (gb, stride_size, loops);
5899 return transform_done;
5902 /* Loop block all basic blocks of SCOP. Return false when the
5903 transform is not performed. */
/* NOTE(review): the return-type line, braces, several local
   declarations (i, j, gb, nb_loops, last_nb_loops), a "continue" for
   the first bb, and some loop bodies are elided in this extract.
   Strategy: walk the bbs of the SCoP in (sorted) schedule order,
   grouping consecutive bbs that share the same perfect loop nest; each
   time a nest is left, block the accumulated group if the nest was
   perfect and at least 2 loops deep.  */
5906 graphite_trans_scop_block (scop_p scop)
5912 bool perfect = true;
5913 bool transform_done = false;
5915 VEC (graphite_bb_p, heap) *bbs = VEC_alloc (graphite_bb_p, heap, 3);
5916 int max_schedule = scop_max_loop_depth (scop) + 1;
5917 lambda_vector last_schedule = lambda_vector_new (max_schedule);
/* Nothing to do for an empty SCoP (the early return is elided).  */
5919 if (VEC_length (graphite_bb_p, SCOP_BBS (scop)) == 0)
5922 /* Get the data of the first bb. */
5923 gb = VEC_index (graphite_bb_p, SCOP_BBS (scop), 0);
5924 last_nb_loops = gbb_nb_loops (gb);
5925 lambda_vector_copy (GBB_STATIC_SCHEDULE (gb), last_schedule,
5927 VEC_safe_push (graphite_bb_p, heap, bbs, gb);
5929 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
5931 /* We did the first bb before. */
5935 nb_loops = gbb_nb_loops (gb);
5937 /* If the number of loops is unchanged and only the last element of the
5938 schedule changes, we stay in the loop nest. */
5939 if (nb_loops == last_nb_loops
5940 && (last_schedule [nb_loops + 1]
5941 != GBB_STATIC_SCHEDULE (gb)[nb_loops + 1]))
/* Same nest: accumulate this bb and move on (the "continue" that
   follows is elided in this extract).  */
5943 VEC_safe_push (graphite_bb_p, heap, bbs, gb);
5947 /* Otherwise, we left the innermost loop. So check, if the last bb was in
5948 a perfect loop nest and how many loops are contained in this perfect
5951 Count the number of zeros from the end of the schedule. They are the
5952 number of surrounding loops.
5955 last_bb 2 3 2 0 0 0 0 3
5959 last_bb 2 3 2 0 0 0 0 3
5963 If there is no zero, there were other bbs in outer loops and the loop
5964 nest is not perfect. */
/* j ends up as the index of the last nonzero schedule entry;
   last_nb_loops - j is then the depth of the perfect sub-nest.  */
5965 for (j = last_nb_loops - 1; j >= 0; j--)
5967 if (last_schedule [j] != 0
5968 || (j <= nb_loops && GBB_STATIC_SCHEDULE (gb)[j] == 1))
5977 /* Found perfect loop nest. */
5978 if (perfect && last_nb_loops - j >= 2)
5979 transform_done |= graphite_trans_loop_block (bbs, last_nb_loops - j);
5981 /* Check if we start with a new loop.
5985 last_bb 2 3 2 0 0 0 0 3
5988 Here we start with the loop "2 3 2 0 0 1"
5990 last_bb 2 3 2 0 0 0 0 3
5993 But here not, so the loop nest can never be perfect. */
5995 perfect = (GBB_STATIC_SCHEDULE (gb)[nb_loops] == 0);
5997 /* Update the last_bb infos. We do not do that for the bbs in the same
5998 loop, as the data we use is not changed. */
5999 last_nb_loops = nb_loops;
6000 lambda_vector_copy (GBB_STATIC_SCHEDULE (gb), last_schedule,
/* Restart the group with the current bb.  */
6002 VEC_truncate (graphite_bb_p, bbs, 0);
6003 VEC_safe_push (graphite_bb_p, heap, bbs, gb);
6006 /* Check if the last loop nest was perfect. It is the same check as above,
6007 but the comparison with the next bb is missing. */
6008 for (j = last_nb_loops - 1; j >= 0; j--)
6009 if (last_schedule [j] != 0)
6017 /* Found perfect loop nest. */
6018 if (last_nb_loops - j >= 2)
6019 transform_done |= graphite_trans_loop_block (bbs, last_nb_loops - j);
6020 VEC_free (graphite_bb_p, heap, bbs);
6022 return transform_done;
6025 /* Apply graphite transformations to all the basic blocks of SCOP. */
/* NOTE(review): the "static bool" line and braces are elided in this
   extract.  Returns true when code generation should run: either a
   real transform changed the SCoP, or -fgraphite-identity forces the
   GIMPLE -> GRAPHITE -> GIMPLE round trip for testing.  */
6028 graphite_apply_transformations (scop_p scop)
6030 bool transform_done = false;
6032 /* Sort the list of bbs. Keep them always sorted. */
6033 graphite_sort_gbbs (scop);
6035 if (flag_loop_block)
6036 transform_done = graphite_trans_scop_block (scop);
6038 /* Generate code even if we did not apply any real transformation.
6039 This also allows to check the performance for the identity
6040 transformation: GIMPLE -> GRAPHITE -> GIMPLE
6041 Keep in mind that CLooG optimizes in control, so the loop structure
6042 may change, even if we only use -fgraphite-identity. */
6043 if (flag_graphite_identity)
6044 transform_done = true;
6046 return transform_done;
6049 /* We limit all SCoPs to SCoPs, that are completely surrounded by a loop.
6059 * SCoP frontier, as this line is not surrounded by any loop. *
6063 This is necessary as scalar evolution and parameter detection need a
6064 outermost loop to initialize parameters correctly.
6066 TODO: FIX scalar evolution and parameter detection to allow more flexible
/* NOTE(review): the function signature is elided in this extract --
   presumably this is the static function limit_scops; confirm against
   the full file.  For each detected SCoP, find its outermost loops
   that lie fully inside the region and re-register each such loop
   (header .. single exit) as a new candidate region, then rebuild the
   SCoP list from those tighter regions.  */
6072 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap, 3);
6077 for (i = 0; VEC_iterate (scop_p, current_scops, i, scop); i++)
6081 build_scop_bbs (scop);
/* SCoPs whose loop nests cannot be built are dropped (the "continue"
   is elided here).  */
6083 if (!build_scop_loop_nests (scop))
/* A loop whose parent is outside the region is outermost in it.  */
6086 for (j = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), j, loop); j++)
6087 if (!loop_in_sese_p (loop_outer (loop), SCOP_REGION (scop)))
6089 sd_region open_scop;
6090 open_scop.entry = loop->header;
/* Relies on the loop having a single exit edge -- the detection
   presumably guarantees this; verify in the full file.  */
6091 open_scop.exit = single_exit (loop)->dest;
6092 VEC_safe_push (sd_region, heap, tmp_scops, &open_scop);
/* Replace the old SCoP set with one built from the loop regions.  */
6096 free_scops (current_scops);
6097 current_scops = VEC_alloc (scop_p, heap, 3);
6099 create_sese_edges (tmp_scops);
6100 build_graphite_scops (tmp_scops);
6101 VEC_free (sd_region, heap, tmp_scops);
6104 /* Perform a set of linear transforms on the loops of the current
/* NOTE(review): braces, several "continue" statements after the failed
   build_* calls, the SCoP-detection call (presumably build_scops /
   limit_scops) around original line 6126, and the cloog_finalize /
   scev cleanup in the epilogue are elided in this extract.  This is
   the pass entry point under HAVE_cloog: detect SCoPs, build their
   polyhedral representation, apply transforms, and regenerate GIMPLE
   through CLooG when anything changed.  */
6108 graphite_transform_loops (void)
6112 bool transform_done = false;
/* Nothing to do without at least one real loop.  */
6114 if (number_of_loops () <= 1)
6117 current_scops = VEC_alloc (scop_p, heap, 3);
6118 recompute_all_dominators ();
6120 if (dump_file && (dump_flags & TDF_DETAILS))
6121 fprintf (dump_file, "Graphite loop transformations \n");
6123 initialize_original_copy_tables ();
6124 cloog_initialize ();
6128 if (dump_file && (dump_flags & TDF_DETAILS))
6129 fprintf (dump_file, "\nnumber of SCoPs: %d\n",
6130 VEC_length (scop_p, current_scops));
6132 for (i = 0; VEC_iterate (scop_p, current_scops, i, scop); i++)
/* Each build_* step can fail; failure skips the SCoP (the "continue"
   statements are elided in this extract).  */
6134 build_scop_bbs (scop);
6135 if (!build_scop_loop_nests (scop))
6138 build_bb_loops (scop);
6140 if (!build_scop_conditions (scop))
6143 find_scop_parameters (scop);
6144 build_scop_context (scop);
6146 if (dump_file && (dump_flags & TDF_DETAILS))
6148 fprintf (dump_file, "\n(In SCoP %d:\n", i);
6149 fprintf (dump_file, "\nnumber of bbs: %d\n",
6150 VEC_length (graphite_bb_p, SCOP_BBS (scop)));
6151 fprintf (dump_file, "\nnumber of loops: %d)\n",
6152 VEC_length (loop_p, SCOP_LOOP_NEST (scop)));
6155 if (!build_scop_iteration_domain (scop))
6158 add_conditions_to_constraints (scop);
6159 build_scop_canonical_schedules (scop);
6161 build_scop_data_accesses (scop);
6162 build_scop_dynamic_schedules (scop);
6164 if (dump_file && (dump_flags & TDF_DETAILS))
6166 int nbrefs = nb_data_refs_in_scop (scop);
6167 fprintf (dump_file, "\nnumber of data refs: %d\n", nbrefs);
/* Only regenerate GIMPLE through CLooG when a transform applied.  */
6170 if (graphite_apply_transformations (scop))
6171 transform_done = gloog (scop, find_transform (scop));
6172 #ifdef ENABLE_CHECKING
/* With checking enabled, exercise find_transform even on the
   untransformed path to catch CLooG/clast bugs.  */
6175 struct clast_stmt *stmt = find_transform (scop);
6176 cloog_clast_free (stmt);
/* Epilogue: clean the CFG when anything changed, then release all
   SCoP data and the copy tables (cloog_finalize etc. elided).  */
6183 cleanup_tree_cfg ();
6185 free_scops (current_scops);
6187 free_original_copy_tables ();
6190 #else /* If Cloog is not available: #ifndef HAVE_cloog. */
6193 graphite_transform_loops (void)
6195 sorry ("Graphite loop optimizations cannot be used");