2 Copyright (C) 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Georges-Andre Silber <Georges-Andre.Silber@ensmp.fr>
5 and Sebastian Pop <sebastian.pop@amd.com>.
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it
10 under the terms of the GNU General Public License as published by the
11 Free Software Foundation; either version 3, or (at your option) any
14 GCC is distributed in the hope that it will be useful, but WITHOUT
15 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 /* This pass performs loop distribution: it splits a loop into several
   new loops, each executing a subset of the statements of the original
   body, when the data dependences between those statements allow it
   (see the illustrative example after this comment).
40 This pass uses an RDG (Reduced Dependence Graph) built on top of the
41 data dependence relations. The RDG is then topologically sorted to
42 obtain a map of information producers/consumers based on which it
43 generates the new loops. */
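/* Illustrative example (not part of the original sources): assuming the
   two statements below are the only ones in the body, the loop

     for (i = 1; i < n; i++)
       {
         a[i] = b[i] + c;
         d[i] = a[i - 1] * e;
       }

   can be distributed into

     for (i = 1; i < n; i++)
       a[i] = b[i] + c;

     for (i = 1; i < n; i++)
       d[i] = a[i - 1] * e;

   because the only dependence between the two statements, the read of
   a[i - 1] in the second one, still flows from the first generated loop
   to the second.  */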
47 #include "coretypes.h"
50 #include "basic-block.h"
51 #include "tree-flow.h"
52 #include "tree-dump.h"
55 #include "tree-chrec.h"
56 #include "tree-data-ref.h"
57 #include "tree-scalar-evolution.h"
58 #include "tree-pass.h"
60 #include "langhooks.h"
61 #include "tree-vectorizer.h"
63 /* If bit I is not set, it means that the node at index I represents an
64 operation that has already been performed, and that should not be
65 performed again.  This is the subgraph of remaining important
66 computations that is passed to the DFS algorithm to avoid including
67 the same stores several times in different loops. */
68 static bitmap remaining_stmts;
70 /* A node of the RDG is marked in this bitmap when it has as a
71 predecessor a node that writes to memory. */
72 static bitmap upstream_mem_writes;
74 /* Update the PHI nodes of NEW_LOOP.  NEW_LOOP is a duplicate of ORIG_LOOP. */
78 update_phis_for_loop_copy (struct loop *orig_loop, struct loop *new_loop)
81 gimple_stmt_iterator si_new, si_orig;
82 edge orig_loop_latch = loop_latch_edge (orig_loop);
83 edge orig_entry_e = loop_preheader_edge (orig_loop);
84 edge new_loop_entry_e = loop_preheader_edge (new_loop);
86 /* Scan the phis in the headers of the old and new loops
87 (they are organized in exactly the same order). */
88 for (si_new = gsi_start_phis (new_loop->header),
89 si_orig = gsi_start_phis (orig_loop->header);
90 !gsi_end_p (si_new) && !gsi_end_p (si_orig);
91 gsi_next (&si_new), gsi_next (&si_orig))
94 source_location locus;
95 gimple phi_new = gsi_stmt (si_new);
96 gimple phi_orig = gsi_stmt (si_orig);
98 /* Add the first phi argument for the phi in NEW_LOOP (the one
99 associated with the entry of NEW_LOOP) */
100 def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_entry_e);
101 locus = gimple_phi_arg_location_from_edge (phi_orig, orig_entry_e);
102 add_phi_arg (phi_new, def, new_loop_entry_e, locus);
104 /* Add the second phi argument for the phi in NEW_LOOP (the one
105 associated with the latch of NEW_LOOP) */
106 def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);
107 locus = gimple_phi_arg_location_from_edge (phi_orig, orig_loop_latch);
109 if (TREE_CODE (def) == SSA_NAME)
111 new_ssa_name = get_current_def (def);
114 /* This only happens if there are no definitions inside the
115 loop.  Use the invariant in the new loop as is. */
119 /* Could be an integer. */
122 add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop), locus);
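/* Illustrative sketch (hypothetical SSA names, not from the sources):
   for an original header PHI

     i_1 = PHI <0 (preheader), i_7 (latch)>

   the function above gives the corresponding PHI of the copied loop the
   same two arguments: the value 0 on the new preheader edge, and
   get_current_def (i_7) on the new latch edge, since i_7 was renamed
   when the loop body was duplicated.  */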
126 /* Return a copy of LOOP placed before LOOP. */
129 copy_loop_before (struct loop *loop)
132 edge preheader = loop_preheader_edge (loop);
134 if (!single_exit (loop))
137 initialize_original_copy_tables ();
138 res = slpeel_tree_duplicate_loop_to_edge_cfg (loop, preheader);
139 free_original_copy_tables ();
144 update_phis_for_loop_copy (loop, res);
145 rename_variables_in_loop (res);
150 /* Creates an empty basic block after LOOP. */
153 create_bb_after_loop (struct loop *loop)
155 edge exit = single_exit (loop);
163 /* Generate code for PARTITION from the code in LOOP. The loop is
164 copied when COPY_P is true. All the statements not flagged in the
165 PARTITION bitmap are removed from the loop or from its copy. The
166 statements are indexed in sequence inside a basic block, and the
167 basic blocks of a loop are taken in dom order. Returns true when
168 the code gen succeeded. */
171 generate_loops_for_partition (struct loop *loop, bitmap partition, bool copy_p)
174 gimple_stmt_iterator bsi;
179 loop = copy_loop_before (loop);
180 create_preheader (loop, CP_SIMPLE_PREHEADERS);
181 create_bb_after_loop (loop);
187 /* Remove stmts not in the PARTITION bitmap.  The order in which we
188 visit the phi nodes and the statements is the same order that was used when the bits of the PARTITION bitmap were assigned. */
190 bbs = get_loop_body_in_dom_order (loop);
192 for (x = 0, i = 0; i < loop->num_nodes; i++)
194 basic_block bb = bbs[i];
196 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi);)
197 if (!bitmap_bit_p (partition, x++))
199 gimple phi = gsi_stmt (bsi);
200 if (!is_gimple_reg (gimple_phi_result (phi)))
201 mark_virtual_phi_result_for_renaming (phi);
202 remove_phi_node (&bsi, true);
207 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi);)
209 gimple stmt = gsi_stmt (bsi);
210 if (gimple_code (stmt) != GIMPLE_LABEL
211 && !bitmap_bit_p (partition, x++))
213 unlink_stmt_vdef (stmt);
214 gsi_remove (&bsi, true);
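/* Illustrative example (not from the sources): if PARTITION keeps only
   the bits of the statements forming a[i] = b[i], together with the
   loop's exit condition and induction variable update, the loops above
   leave those statements in place and unlink everything else, so the
   (possibly copied) loop ends up executing just that store.  */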
226 /* Build the size argument for a memset call. */
229 build_size_arg_loc (location_t loc, tree nb_iter, tree op,
230 gimple_seq *stmt_list)
233 tree x = size_binop_loc (loc, MULT_EXPR,
234 fold_convert_loc (loc, sizetype, nb_iter),
235 TYPE_SIZE_UNIT (TREE_TYPE (op)));
236 x = force_gimple_operand (x, &stmts, true, NULL);
237 gimple_seq_add_seq (stmt_list, stmts);
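/* Illustrative example (not from the sources): for a store to an array
   of 32-bit integers executed NB_ITER times, the size built above is
   the byte count

     nb_bytes = (size_t) nb_iter * sizeof (int)

   i.e. nb_iter * 4 on a typical target, gimplified into STMT_LIST so it
   can be passed to memset.  */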
242 /* Generate a call to memset. Return true when the operation succeeded. */
245 generate_memset_zero (gimple stmt, tree op0, tree nb_iter,
246 gimple_stmt_iterator bsi)
248 tree addr_base, nb_bytes;
250 gimple_seq stmt_list = NULL, stmts;
253 struct data_reference *dr = XCNEW (struct data_reference);
254 location_t loc = gimple_location (stmt);
258 if (!dr_analyze_innermost (dr))
261 /* Test for a positive stride, iterating over every element. */
262 if (integer_zerop (size_binop (MINUS_EXPR,
263 fold_convert (sizetype, DR_STEP (dr)),
264 TYPE_SIZE_UNIT (TREE_TYPE (op0)))))
266 addr_base = fold_convert_loc (loc, sizetype,
267 size_binop_loc (loc, PLUS_EXPR,
270 addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
271 TREE_TYPE (DR_BASE_ADDRESS (dr)),
272 DR_BASE_ADDRESS (dr), addr_base);
274 nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);
277 /* Test for a negative stride, iterating over every element. */
278 else if (integer_zerop (size_binop (PLUS_EXPR,
279 TYPE_SIZE_UNIT (TREE_TYPE (op0)),
280 fold_convert (sizetype, DR_STEP (dr)))))
282 nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);
284 addr_base = size_binop_loc (loc, PLUS_EXPR, DR_OFFSET (dr), DR_INIT (dr));
285 addr_base = fold_convert_loc (loc, sizetype, addr_base);
286 addr_base = size_binop_loc (loc, MINUS_EXPR, addr_base,
287 fold_convert_loc (loc, sizetype, nb_bytes));
288 addr_base = size_binop_loc (loc, PLUS_EXPR, addr_base,
289 TYPE_SIZE_UNIT (TREE_TYPE (op0)));
290 addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
291 TREE_TYPE (DR_BASE_ADDRESS (dr)),
292 DR_BASE_ADDRESS (dr), addr_base);
297 mem = force_gimple_operand (addr_base, &stmts, true, NULL);
298 gimple_seq_add_seq (&stmt_list, stmts);
300 fn = build_fold_addr_expr (implicit_built_in_decls [BUILT_IN_MEMSET]);
301 fn_call = gimple_build_call (fn, 3, mem, integer_zero_node, nb_bytes);
302 gimple_seq_add_stmt (&stmt_list, fn_call);
303 gsi_insert_seq_after (&bsi, stmt_list, GSI_CONTINUE_LINKING);
306 if (dump_file && (dump_flags & TDF_DETAILS))
307 fprintf (dump_file, "generated memset zero\n");
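/* Illustrative example (not part of the original sources): a partition
   reduced to the single store of

     for (i = 0; i < n; i++)
       p[i] = 0;

   where P is a char pointer (so the stride equals the element size), is
   replaced by the equivalent of

     memset (p, 0, n);

   inserted at BSI.  In the negative-stride case handled above, the
   address passed to memset is the lowest accessed address: base +
   offset + init - nb_bytes + one element.  */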
314 /* Propagate the phi nodes in basic block B to their uses and remove them. */
317 prop_phis (basic_block b)
319 gimple_stmt_iterator psi;
321 for (psi = gsi_start_phis (b); !gsi_end_p (psi); )
323 gimple phi = gsi_stmt (psi);
324 tree def = gimple_phi_result (phi);
326 if (!is_gimple_reg (def))
327 mark_virtual_phi_result_for_renaming (phi);
330 tree use = gimple_phi_arg_def (phi, 0);
331 gcc_assert (gimple_phi_num_args (phi) == 1);
332 replace_uses_by (def, use);
335 remove_phi_node (&psi, true);
339 /* Tries to generate a builtin function for the instructions of LOOP
340 pointed to by the bits set in PARTITION. Returns true when the
341 operation succeeded. */
344 generate_builtin (struct loop *loop, bitmap partition, bool copy_p)
351 gimple_stmt_iterator bsi;
352 tree nb_iter = number_of_exit_cond_executions (loop);
354 if (!nb_iter || nb_iter == chrec_dont_know)
357 bbs = get_loop_body_in_dom_order (loop);
359 for (i = 0; i < loop->num_nodes; i++)
361 basic_block bb = bbs[i];
363 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
366 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
368 gimple stmt = gsi_stmt (bsi);
370 if (bitmap_bit_p (partition, x++)
371 && is_gimple_assign (stmt)
372 && !is_gimple_reg (gimple_assign_lhs (stmt)))
374 /* Don't generate the builtins when there is more than one memory write. */
380 if (bb == loop->latch)
381 nb_iter = number_of_latch_executions (loop);
389 op0 = gimple_assign_lhs (write);
390 op1 = gimple_assign_rhs1 (write);
392 if (!(TREE_CODE (op0) == ARRAY_REF
393 || TREE_CODE (op0) == MEM_REF))
396 /* The new statements will be placed before LOOP. */
397 bsi = gsi_last_bb (loop_preheader_edge (loop)->src);
399 if (gimple_assign_rhs_code (write) == INTEGER_CST
400 && (integer_zerop (op1) || real_zerop (op1)))
401 res = generate_memset_zero (write, op0, nb_iter, bsi);
403 /* If this is the last partition for which we generate code, we have
404 to destroy the loop. */
407 unsigned nbbs = loop->num_nodes;
408 basic_block src = loop_preheader_edge (loop)->src;
409 basic_block dest = single_exit (loop)->dest;
411 make_edge (src, dest, EDGE_FALLTHRU);
412 cancel_loop_tree (loop);
414 for (i = 0; i < nbbs; i++)
415 delete_basic_block (bbs[i]);
417 set_immediate_dominator (CDI_DOMINATORS, dest,
418 recompute_dominator (CDI_DOMINATORS, dest));
426 /* Generates code for PARTITION. For simple loops, this function can
427 generate a built-in. */
430 generate_code_for_partition (struct loop *loop, bitmap partition, bool copy_p)
432 if (generate_builtin (loop, partition, copy_p))
435 return generate_loops_for_partition (loop, partition, copy_p);
439 /* Returns true if the node V of RDG cannot be recomputed. */
442 rdg_cannot_recompute_vertex_p (struct graph *rdg, int v)
444 if (RDG_MEM_WRITE_STMT (rdg, v))
450 /* Returns true when the vertex V has already been generated in the
451 current partition (V is in PROCESSED), or when V belongs to another
452 partition and cannot be recomputed (V is not in REMAINING_STMTS). */
455 already_processed_vertex_p (bitmap processed, int v)
457 return (bitmap_bit_p (processed, v)
458 || !bitmap_bit_p (remaining_stmts, v));
461 /* Returns NULL when there is no anti-dependence among the successors
462 of vertex V, otherwise returns the edge with the anti-dep. */
464 static struct graph_edge *
465 has_anti_dependence (struct vertex *v)
467 struct graph_edge *e;
470 for (e = v->succ; e; e = e->succ_next)
471 if (RDGE_TYPE (e) == anti_dd)
477 /* Returns true when the vertex V has a memory write node as a predecessor. */
480 predecessor_has_mem_write (struct graph *rdg, struct vertex *v)
482 struct graph_edge *e;
485 for (e = v->pred; e; e = e->pred_next)
486 if (bitmap_bit_p (upstream_mem_writes, e->src)
487 /* Don't consider flow channels: a write to memory followed
488 by a read from memory.  These channels allow the RDG to be
489 split into different partitions; see the example after this function. */
490 && !RDG_MEM_WRITE_STMT (rdg, e->src))
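/* Illustrative example (not from the sources): in

     a[i] = b[i] + 1;
     c[i] = a[i] * 2;

   the dependence from the store to A into the load of A is a flow
   channel; it is deliberately ignored by predecessor_has_mem_write so
   that the two statements can still be placed in different partitions,
   the array A itself carrying the data between the generated loops.  */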
496 /* Initializes the upstream_mem_writes bitmap following the
497 information from RDG. */
500 mark_nodes_having_upstream_mem_writes (struct graph *rdg)
503 bitmap seen = BITMAP_ALLOC (NULL);
505 for (v = rdg->n_vertices - 1; v >= 0; v--)
506 if (!bitmap_bit_p (seen, v))
509 VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);
511 graphds_dfs (rdg, &v, 1, &nodes, false, NULL);
513 FOR_EACH_VEC_ELT (int, nodes, i, x)
515 if (!bitmap_set_bit (seen, x))
518 if (RDG_MEM_WRITE_STMT (rdg, x)
519 || predecessor_has_mem_write (rdg, &(rdg->vertices[x]))
520 /* In an anti-dependence the read should occur before the
521 write; this is why both the read and the write should be
522 placed in the same partition (see the example after this function). */
523 || has_anti_dependence (&(rdg->vertices[x])))
525 bitmap_set_bit (upstream_mem_writes, x);
529 VEC_free (int, heap, nodes);
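/* Illustrative example (not from the sources): in

     t = a[i + 1];
     a[i] = t + b[i];

   the load of a[i + 1] happens before a later iteration stores to that
   element, an anti-dependence.  Splitting the load and the store into
   different loops could let all the stores execute before the loads,
   which is why such vertices are flagged above and later kept in the
   same partition.  */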
533 /* Returns true when vertex U has a memory write node as a predecessor. */
537 has_upstream_mem_writes (int u)
539 return bitmap_bit_p (upstream_mem_writes, u);
542 static void rdg_flag_vertex_and_dependent (struct graph *, int, bitmap, bitmap,
545 /* Flag all the uses of U. */
548 rdg_flag_all_uses (struct graph *rdg, int u, bitmap partition, bitmap loops,
549 bitmap processed, bool *part_has_writes)
551 struct graph_edge *e;
553 for (e = rdg->vertices[u].succ; e; e = e->succ_next)
554 if (!bitmap_bit_p (processed, e->dest))
556 rdg_flag_vertex_and_dependent (rdg, e->dest, partition, loops,
557 processed, part_has_writes);
558 rdg_flag_all_uses (rdg, e->dest, partition, loops, processed,
563 /* Flag the uses of U, stopping the traversal according to the
564 information recorded in upstream_mem_writes. */
567 rdg_flag_uses (struct graph *rdg, int u, bitmap partition, bitmap loops,
568 bitmap processed, bool *part_has_writes)
571 struct vertex *x = &(rdg->vertices[u]);
572 gimple stmt = RDGV_STMT (x);
573 struct graph_edge *anti_dep = has_anti_dependence (x);
575 /* Keep the destination of an anti-dependence in the same partition,
576 because it is a store to the exact same location.  Putting it in
577 another partition would be bad for cache locality. */
580 int v = anti_dep->dest;
582 if (!already_processed_vertex_p (processed, v))
583 rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
584 processed, part_has_writes);
587 if (gimple_code (stmt) != GIMPLE_PHI)
589 if ((use_p = gimple_vuse_op (stmt)) != NULL_USE_OPERAND_P)
591 tree use = USE_FROM_PTR (use_p);
593 if (TREE_CODE (use) == SSA_NAME)
595 gimple def_stmt = SSA_NAME_DEF_STMT (use);
596 int v = rdg_vertex_for_stmt (rdg, def_stmt);
599 && !already_processed_vertex_p (processed, v))
600 rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
601 processed, part_has_writes);
606 if (is_gimple_assign (stmt) && has_upstream_mem_writes (u))
608 tree op0 = gimple_assign_lhs (stmt);
610 /* Scalar channels don't have enough space for transmitting data
611 between tasks, unless we add more storage by privatizing. */
612 if (is_gimple_reg (op0))
615 imm_use_iterator iter;
617 FOR_EACH_IMM_USE_FAST (use_p, iter, op0)
619 int v = rdg_vertex_for_stmt (rdg, USE_STMT (use_p));
621 if (!already_processed_vertex_p (processed, v))
622 rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
623 processed, part_has_writes);
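/* Illustrative example (hypothetical SSA names, not from the sources):
   if a statement with upstream memory writes defines a scalar that is
   then stored,

     s_5 = a[i] + 1;
     b[i] = s_5;

   the use of s_5 is flagged into the same partition above: putting the
   definition and the use into different loops would require expanding
   s_5 into an array (privatization), which this pass does not do.  */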
629 /* Flag V from RDG as part of PARTITION, and also flag its loop number in LOOPS. */
633 rdg_flag_vertex (struct graph *rdg, int v, bitmap partition, bitmap loops,
634 bool *part_has_writes)
638 if (!bitmap_set_bit (partition, v))
641 loop = loop_containing_stmt (RDG_STMT (rdg, v));
642 bitmap_set_bit (loops, loop->num);
644 if (rdg_cannot_recompute_vertex_p (rdg, v))
646 *part_has_writes = true;
647 bitmap_clear_bit (remaining_stmts, v);
651 /* Flag in the bitmap PARTITION the vertex V and all its predecessors.
652 Also flag their loop number in LOOPS. */
655 rdg_flag_vertex_and_dependent (struct graph *rdg, int v, bitmap partition,
656 bitmap loops, bitmap processed,
657 bool *part_has_writes)
660 VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);
663 bitmap_set_bit (processed, v);
664 rdg_flag_uses (rdg, v, partition, loops, processed, part_has_writes);
665 graphds_dfs (rdg, &v, 1, &nodes, false, remaining_stmts);
666 rdg_flag_vertex (rdg, v, partition, loops, part_has_writes);
668 FOR_EACH_VEC_ELT (int, nodes, i, x)
669 if (!already_processed_vertex_p (processed, x))
670 rdg_flag_vertex_and_dependent (rdg, x, partition, loops, processed,
673 VEC_free (int, heap, nodes);
676 /* Initialize CONDS with all the condition statements from the basic blocks through which LOOP exits. */
680 collect_condition_stmts (struct loop *loop, VEC (gimple, heap) **conds)
684 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
686 FOR_EACH_VEC_ELT (edge, exits, i, e)
688 gimple cond = last_stmt (e->src);
691 VEC_safe_push (gimple, heap, *conds, cond);
694 VEC_free (edge, heap, exits);
697 /* Add to PARTITION all the exit condition statements for LOOPS
698 together with all their dependent statements determined from the RDG. */
702 rdg_flag_loop_exits (struct graph *rdg, bitmap loops, bitmap partition,
703 bitmap processed, bool *part_has_writes)
707 VEC (gimple, heap) *conds = VEC_alloc (gimple, heap, 3);
709 EXECUTE_IF_SET_IN_BITMAP (loops, 0, i, bi)
710 collect_condition_stmts (get_loop (i), &conds);
712 while (!VEC_empty (gimple, conds))
714 gimple cond = VEC_pop (gimple, conds);
715 int v = rdg_vertex_for_stmt (rdg, cond);
716 bitmap new_loops = BITMAP_ALLOC (NULL);
718 if (!already_processed_vertex_p (processed, v))
719 rdg_flag_vertex_and_dependent (rdg, v, partition, new_loops, processed,
722 EXECUTE_IF_SET_IN_BITMAP (new_loops, 0, i, bi)
723 if (bitmap_set_bit (loops, i))
724 collect_condition_stmts (get_loop (i), &conds);
726 BITMAP_FREE (new_loops);
730 /* Flag all the nodes of RDG containing memory accesses that could
731 potentially belong to arrays already accessed in the current PARTITION. */
735 rdg_flag_similar_memory_accesses (struct graph *rdg, bitmap partition,
736 bitmap loops, bitmap processed,
737 VEC (int, heap) **other_stores)
743 struct graph_edge *e;
745 EXECUTE_IF_SET_IN_BITMAP (partition, 0, i, ii)
746 if (RDG_MEM_WRITE_STMT (rdg, i)
747 || RDG_MEM_READS_STMT (rdg, i))
749 for (j = 0; j < rdg->n_vertices; j++)
750 if (!bitmap_bit_p (processed, j)
751 && (RDG_MEM_WRITE_STMT (rdg, j)
752 || RDG_MEM_READS_STMT (rdg, j))
753 && rdg_has_similar_memory_accesses (rdg, i, j))
755 /* Flag first the node J itself, and all the nodes that
756 are needed to compute J. */
757 rdg_flag_vertex_and_dependent (rdg, j, partition, loops,
760 /* When J is a read, we want to coalesce in the same
761 PARTITION all the nodes that are using J: this is
762 needed for better cache locality. */
763 rdg_flag_all_uses (rdg, j, partition, loops, processed, &foo);
765 /* Remove from OTHER_STORES the vertex that we flagged. */
766 if (RDG_MEM_WRITE_STMT (rdg, j))
767 FOR_EACH_VEC_ELT (int, *other_stores, k, kk)
770 VEC_unordered_remove (int, *other_stores, k);
775 /* If the node I has two uses, then keep these together in the same partition. */
777 for (n = 0, e = rdg->vertices[i].succ; e; e = e->succ_next, n++);
780 rdg_flag_all_uses (rdg, i, partition, loops, processed, &foo);
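/* Illustrative example (not from the sources): if the partition already
   contains the store a[i] = b[i] and another vertex of the RDG contains
   a[i + 4] = c[i], both access the same array A, so the second store
   and the statements needed to compute it are pulled into the same
   partition by the loop above, keeping all traffic to A in a single
   generated loop.  */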
784 /* Returns a bitmap in which all the statements needed for computing
785 the strongly connected component C of the RDG are flagged, also
786 including the loop exit conditions. */
789 build_rdg_partition_for_component (struct graph *rdg, rdgc c,
790 bool *part_has_writes,
791 VEC (int, heap) **other_stores)
794 bitmap partition = BITMAP_ALLOC (NULL);
795 bitmap loops = BITMAP_ALLOC (NULL);
796 bitmap processed = BITMAP_ALLOC (NULL);
798 FOR_EACH_VEC_ELT (int, c->vertices, i, v)
799 if (!already_processed_vertex_p (processed, v))
800 rdg_flag_vertex_and_dependent (rdg, v, partition, loops, processed,
803 /* Also iterate on the array of stores not in the starting vertices,
804 and determine those vertices that have some memory affinity with
805 the current nodes in the component: these are stores to the same
806 arrays, i.e. we're taking care of cache locality. */
807 rdg_flag_similar_memory_accesses (rdg, partition, loops, processed,
810 rdg_flag_loop_exits (rdg, loops, partition, processed, part_has_writes);
812 BITMAP_FREE (processed);
817 /* Free memory for COMPONENTS. */
820 free_rdg_components (VEC (rdgc, heap) *components)
825 FOR_EACH_VEC_ELT (rdgc, components, i, x)
827 VEC_free (int, heap, x->vertices);
832 /* Build the COMPONENTS vector with the strongly connected components
833 of RDG in which the STARTING_VERTICES occur. */
836 rdg_build_components (struct graph *rdg, VEC (int, heap) *starting_vertices,
837 VEC (rdgc, heap) **components)
840 bitmap saved_components = BITMAP_ALLOC (NULL);
841 int n_components = graphds_scc (rdg, NULL);
842 VEC (int, heap) **all_components = XNEWVEC (VEC (int, heap) *, n_components);
844 for (i = 0; i < n_components; i++)
845 all_components[i] = VEC_alloc (int, heap, 3);
847 for (i = 0; i < rdg->n_vertices; i++)
848 VEC_safe_push (int, heap, all_components[rdg->vertices[i].component], i);
850 FOR_EACH_VEC_ELT (int, starting_vertices, i, v)
852 int c = rdg->vertices[v].component;
854 if (bitmap_set_bit (saved_components, c))
856 rdgc x = XCNEW (struct rdg_component);
858 x->vertices = all_components[c];
860 VEC_safe_push (rdgc, heap, *components, x);
864 for (i = 0; i < n_components; i++)
865 if (!bitmap_bit_p (saved_components, i))
866 VEC_free (int, heap, all_components[i]);
868 free (all_components);
869 BITMAP_FREE (saved_components);
872 /* Aggregate several components into a useful partition that is
873 registered in the PARTITIONS vector. Partitions will be
874 distributed in different loops. */
877 rdg_build_partitions (struct graph *rdg, VEC (rdgc, heap) *components,
878 VEC (int, heap) **other_stores,
879 VEC (bitmap, heap) **partitions, bitmap processed)
883 bitmap partition = BITMAP_ALLOC (NULL);
885 FOR_EACH_VEC_ELT (rdgc, components, i, x)
888 bool part_has_writes = false;
889 int v = VEC_index (int, x->vertices, 0);
891 if (bitmap_bit_p (processed, v))
894 np = build_rdg_partition_for_component (rdg, x, &part_has_writes,
896 bitmap_ior_into (partition, np);
897 bitmap_ior_into (processed, np);
902 if (dump_file && (dump_flags & TDF_DETAILS))
904 fprintf (dump_file, "ldist useful partition:\n");
905 dump_bitmap (dump_file, partition);
908 VEC_safe_push (bitmap, heap, *partitions, partition);
909 partition = BITMAP_ALLOC (NULL);
913 /* Add the nodes from the RDG that were not marked as processed, and
914 that are used outside the current loop. These are scalar
915 computations that are not yet part of previous partitions. */
916 for (i = 0; i < rdg->n_vertices; i++)
917 if (!bitmap_bit_p (processed, i)
918 && rdg_defs_used_in_other_loops_p (rdg, i))
919 VEC_safe_push (int, heap, *other_stores, i);
921 /* If there are still statements left in the OTHER_STORES array,
922 create other components and partitions with these stores and
923 their dependences. */
924 if (VEC_length (int, *other_stores) > 0)
926 VEC (rdgc, heap) *comps = VEC_alloc (rdgc, heap, 3);
927 VEC (int, heap) *foo = VEC_alloc (int, heap, 3);
929 rdg_build_components (rdg, *other_stores, &comps);
930 rdg_build_partitions (rdg, comps, &foo, partitions, processed);
932 VEC_free (int, heap, foo);
933 free_rdg_components (comps);
936 /* If there is something left in the last partition, save it. */
937 if (bitmap_count_bits (partition) > 0)
938 VEC_safe_push (bitmap, heap, *partitions, partition);
940 BITMAP_FREE (partition);
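/* Illustrative example (not from the sources): for a loop whose body
   contains the two independent stores a[i] = b[i] and c[i] = d[i], this
   function typically builds two bitmaps, each holding one store plus
   the statements it depends on (induction variable update and exit
   condition); each bitmap then drives the generation of one distributed
   loop.  */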
943 /* Dump to FILE the PARTITIONS. */
946 dump_rdg_partitions (FILE *file, VEC (bitmap, heap) *partitions)
951 FOR_EACH_VEC_ELT (bitmap, partitions, i, partition)
952 debug_bitmap_file (file, partition);
955 /* Debug PARTITIONS. */
956 extern void debug_rdg_partitions (VEC (bitmap, heap) *);
959 debug_rdg_partitions (VEC (bitmap, heap) *partitions)
961 dump_rdg_partitions (stderr, partitions);
964 /* Returns the number of read and write operations in the RDG. */
967 number_of_rw_in_rdg (struct graph *rdg)
971 for (i = 0; i < rdg->n_vertices; i++)
973 if (RDG_MEM_WRITE_STMT (rdg, i))
976 if (RDG_MEM_READS_STMT (rdg, i))
983 /* Returns the number of read and write operations in a PARTITION of the RDG. */
987 number_of_rw_in_partition (struct graph *rdg, bitmap partition)
993 EXECUTE_IF_SET_IN_BITMAP (partition, 0, i, ii)
995 if (RDG_MEM_WRITE_STMT (rdg, i))
998 if (RDG_MEM_READS_STMT (rdg, i))
1005 /* Returns true when one of the PARTITIONS contains all the read or
1006 write operations of RDG. */
1009 partition_contains_all_rw (struct graph *rdg, VEC (bitmap, heap) *partitions)
1013 int nrw = number_of_rw_in_rdg (rdg);
1015 FOR_EACH_VEC_ELT (bitmap, partitions, i, partition)
1016 if (nrw == number_of_rw_in_partition (rdg, partition))
1022 /* Generate code from STARTING_VERTICES in RDG. Returns the number of
1023 distributed loops. */
1026 ldist_gen (struct loop *loop, struct graph *rdg,
1027 VEC (int, heap) *starting_vertices)
1030 VEC (rdgc, heap) *components = VEC_alloc (rdgc, heap, 3);
1031 VEC (bitmap, heap) *partitions = VEC_alloc (bitmap, heap, 3);
1032 VEC (int, heap) *other_stores = VEC_alloc (int, heap, 3);
1033 bitmap partition, processed = BITMAP_ALLOC (NULL);
1035 remaining_stmts = BITMAP_ALLOC (NULL);
1036 upstream_mem_writes = BITMAP_ALLOC (NULL);
1038 for (i = 0; i < rdg->n_vertices; i++)
1040 bitmap_set_bit (remaining_stmts, i);
1042 /* Save in OTHER_STORES all the memory writes that are not in
1043 STARTING_VERTICES. */
1044 if (RDG_MEM_WRITE_STMT (rdg, i))
1050 FOR_EACH_VEC_ELT (int, starting_vertices, j, v)
1058 VEC_safe_push (int, heap, other_stores, i);
1062 mark_nodes_having_upstream_mem_writes (rdg);
1063 rdg_build_components (rdg, starting_vertices, &components);
1064 rdg_build_partitions (rdg, components, &other_stores, &partitions,
1066 BITMAP_FREE (processed);
1067 nbp = VEC_length (bitmap, partitions);
1070 || partition_contains_all_rw (rdg, partitions))
1073 if (dump_file && (dump_flags & TDF_DETAILS))
1074 dump_rdg_partitions (dump_file, partitions);
1076 FOR_EACH_VEC_ELT (bitmap, partitions, i, partition)
1077 if (!generate_code_for_partition (loop, partition, i < nbp - 1))
1080 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1081 update_ssa (TODO_update_ssa_only_virtuals | TODO_update_ssa);
1085 BITMAP_FREE (remaining_stmts);
1086 BITMAP_FREE (upstream_mem_writes);
1088 FOR_EACH_VEC_ELT (bitmap, partitions, i, partition)
1089 BITMAP_FREE (partition);
1091 VEC_free (int, heap, other_stores);
1092 VEC_free (bitmap, heap, partitions);
1093 free_rdg_components (components);
1097 /* Distributes the code from LOOP in such a way that producer
1098 statements are placed before consumer statements. When STMTS is
1099 NULL, performs the maximal distribution; if STMTS is not NULL,
1100 tries to separate only these statements from the LOOP's body.
1101 Returns the number of distributed loops. */
1104 distribute_loop (struct loop *loop, VEC (gimple, heap) *stmts)
1110 VEC (int, heap) *vertices;
1112 if (loop->num_nodes > 2)
1114 if (dump_file && (dump_flags & TDF_DETAILS))
1116 "FIXME: Loop %d not distributed: it has more than two basic blocks.\n",
1122 rdg = build_rdg (loop);
1126 if (dump_file && (dump_flags & TDF_DETAILS))
1128 "FIXME: Loop %d not distributed: failed to build the RDG.\n",
1134 vertices = VEC_alloc (int, heap, 3);
1136 if (dump_file && (dump_flags & TDF_DETAILS))
1137 dump_rdg (dump_file, rdg);
1139 FOR_EACH_VEC_ELT (gimple, stmts, i, s)
1141 int v = rdg_vertex_for_stmt (rdg, s);
1145 VEC_safe_push (int, heap, vertices, v);
1147 if (dump_file && (dump_flags & TDF_DETAILS))
1149 "ldist asked to generate code for vertex %d\n", v);
1153 res = ldist_gen (loop, rdg, vertices);
1154 VEC_free (int, heap, vertices);
1160 /* Distribute all loops in the current function. */
1163 tree_loop_distribution (void)
1167 int nb_generated_loops = 0;
1169 FOR_EACH_LOOP (li, loop, 0)
1171 VEC (gimple, heap) *work_list = VEC_alloc (gimple, heap, 3);
1173 /* If both flag_tree_loop_distribute_patterns and
1174 flag_tree_loop_distribution are set, then only
1175 distribute_patterns is executed. */
1176 if (flag_tree_loop_distribute_patterns)
1178 /* With the following work list, we're asking distribute_loop to
1179 separate the stores of the form "A[i] = 0" from the rest of
1180 the loop. */
1181 stores_zero_from_loop (loop, &work_list);
1183 /* Do nothing if there are no patterns to be distributed. */
1184 if (VEC_length (gimple, work_list) > 0)
1185 nb_generated_loops = distribute_loop (loop, work_list);
1187 else if (flag_tree_loop_distribution)
1189 /* With the following work list, we're asking distribute_loop
1190 to separate all the stores of the loop: when the dependences
1191 allow it, this ends up with one store per generated loop. */
1193 stores_from_loop (loop, &work_list);
1195 /* A simple heuristic for cache locality is to not split
1196 stores to the same array. Without this call, an unrolled
1197 loop would be split into as many loops as the unroll factor,
1198 each loop storing in the same array. */
1199 remove_similar_memory_refs (&work_list);
1201 nb_generated_loops = distribute_loop (loop, work_list);
1204 if (dump_file && (dump_flags & TDF_DETAILS))
1206 if (nb_generated_loops > 1)
1207 fprintf (dump_file, "Loop %d distributed: split to %d loops.\n",
1208 loop->num, nb_generated_loops);
1210 fprintf (dump_file, "Loop %d is the same.\n", loop->num);
1213 verify_loop_structure ();
1215 VEC_free (gimple, heap, work_list);
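/* Illustrative example (not from the sources; flag names as documented
   for GCC): with -ftree-loop-distribute-patterns, a loop such as

     for (i = 0; i < n; i++)
       {
         a[i] = 0;
         sum += b[i];
       }

   has the zeroing store separated into its own loop, which can then be
   turned into a memset call; with -ftree-loop-distribution, the stores
   of a loop are separated into different loops when the dependences
   allow it.  */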
1222 gate_tree_loop_distribution (void)
1224 return flag_tree_loop_distribution
1225 || flag_tree_loop_distribute_patterns;
1228 struct gimple_opt_pass pass_loop_distribution =
1233 gate_tree_loop_distribution, /* gate */
1234 tree_loop_distribution, /* execute */
1237 0, /* static_pass_number */
1238 TV_TREE_LOOP_DISTRIBUTION, /* tv_id */
1239 PROP_cfg | PROP_ssa, /* properties_required */
1240 0, /* properties_provided */
1241 0, /* properties_destroyed */
1242 0, /* todo_flags_start */
1243 TODO_dump_func /* todo_flags_finish */