1 /* Loop transformation code generation
2 Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
3 Contributed by Daniel Berlin <dberlin@dberlin.org>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
24 #include "coretypes.h"
30 #include "basic-block.h"
31 #include "diagnostic.h"
32 #include "tree-flow.h"
33 #include "tree-dump.h"
38 #include "tree-chrec.h"
39 #include "tree-data-ref.h"
40 #include "tree-pass.h"
41 #include "tree-scalar-evolution.h"
46 /* This loop nest code generation is based on non-singular matrix
49 A little terminology and a general sketch of the algorithm. See "A singular
50 loop transformation framework based on non-singular matrices" by Wei Li and
51 Keshav Pingali for formal proofs that the various statements below are
54 A loop iteration space represents the points traversed by the loop. A point in the
55 iteration space can be represented by a vector of size <loop depth>. You can
56 therefore represent the iteration space as an integral combination of a set
59 A loop iteration space is dense if every integer point between the loop
60 bounds is a point in the iteration space. Every loop with a step of 1
61 therefore has a dense iteration space.
63 for i = 1 to 3, step 1 is a dense iteration space.
65 A loop iteration space is sparse if it is not dense. That is, the iteration
66 space skips integer points that are within the loop bounds.
68 for i = 1 to 3, step 2 is a sparse iteration space, because the integer point
71 Dense source spaces are easy to transform, because they don't skip any
72 points to begin with. Thus we can compute the exact bounds of the target
73 space using min/max and floor/ceil.
75 For a dense source space, we take the transformation matrix, decompose it
76 into a lower triangular part (H) and a unimodular part (U).
77 We then compute the auxiliary space from the unimodular part (source loop
78 nest . U = auxiliary space) , which has two important properties:
79 1. It traverses the iterations in the same lexicographic order as the source
81 2. It is a dense space when the source is a dense space (even if the target
82 space is going to be sparse).
84 Given the auxiliary space, we use the lower triangular part to compute the
85 bounds in the target space by simple matrix multiplication.
86 The gaps in the target space (IE the new loop step sizes) will be the
87 diagonals of the H matrix.
89 Sparse source spaces require another step, because you can't directly compute
90 the exact bounds of the auxiliary and target space from the sparse space.
91 Rather than try to come up with a separate algorithm to handle sparse source
92 spaces directly, we just find a legal transformation matrix that gives you
93 the sparse source space, from a dense space, and then transform the dense
96 For a regular sparse space, you can represent the source space as an integer
97 lattice, and the base space of that lattice will always be dense. Thus, we
98 effectively use the lattice to figure out the transformation from the lattice
99 base space, to the sparse iteration space (IE what transform was applied to
100 the dense space to make it sparse). We then compose this transform with the
101 transformation matrix specified by the user (since our matrix transformations
102 are closed under composition, this is okay). We can then use the base space
103 (which is dense) plus the composed transformation matrix, to compute the rest
104 of the transform using the dense space algorithm above.
106 In other words, our sparse source space (B) is decomposed into a dense base
107 space (A), and a matrix (L) that transforms A into B, such that A.L = B.
108 We then compute the composition of L and the user transformation matrix (T),
109 so that T is now a transform from A to the result, instead of from B to the
111 IE A.(LT) = result instead of B.T = result
112 Since A is now a dense source space, we can use the dense source space
113 algorithm above to compute the result of applying transform (LT) to A.
115 Fourier-Motzkin elimination is used to compute the bounds of the base space
118 static bool perfect_nestify (struct loop *, VEC(tree,heap) *,
119 VEC(tree,heap) *, VEC(int,heap) *,
121 /* Lattice stuff that is internal to the code generation algorithm. */
/* A lattice describes a sparse iteration space as an affine image of a
   dense base space: x = BASE * y + ORIGIN (+ invariant contributions).
   NOTE(review): this listing is elided — the actual field declarations
   for base, dimension and invariants are not visible here.  */
123 typedef struct lambda_lattice_s
125 /* Lattice base matrix. */
127 /* Lattice dimension. */
129 /* Origin vector for the coefficients. */
130 lambda_vector origin;
131 /* Origin matrix for the invariants. */
132 lambda_matrix origin_invariants;
133 /* Number of invariants. */
/* Accessors for the lattice fields; used throughout instead of direct
   member access, matching the LLE_*/LL_*/LN_* accessor convention.  */
137 #define LATTICE_BASE(T) ((T)->base)
138 #define LATTICE_DIMENSION(T) ((T)->dimension)
139 #define LATTICE_ORIGIN(T) ((T)->origin)
140 #define LATTICE_ORIGIN_INVARIANTS(T) ((T)->origin_invariants)
141 #define LATTICE_INVARIANTS(T) ((T)->invariants)
143 static bool lle_equal (lambda_linear_expression, lambda_linear_expression,
145 static lambda_lattice lambda_lattice_new (int, int);
146 static lambda_lattice lambda_lattice_compute_base (lambda_loopnest);
148 static tree find_induction_var_from_exit_cond (struct loop *);
149 static bool can_convert_to_perfect_nest (struct loop *);
151 /* Create a new lambda body vector. */
/* Allocate a new lambda body vector with SIZE coefficients.  The
   coefficient vector comes from lambda_vector_new and the denominator
   starts at 1 (i.e. an integral expression).
   NOTE(review): the return statement and closing brace are elided from
   this listing.  */
154 lambda_body_vector_new (int size)
156 lambda_body_vector ret;
158 ret = GGC_NEW (struct lambda_body_vector_s);
159 LBV_COEFFICIENTS (ret) = lambda_vector_new (size);
160 LBV_SIZE (ret) = size;
161 LBV_DENOMINATOR (ret) = 1;
165 /* Compute the new coefficients for the vector based on the
166 *inverse* of the transformation matrix. */
/* Compute the coefficients of body vector VECT under the *inverse* of
   TRANSFORM, returning them in a freshly allocated vector.  TRANSFORM
   must be square (asserted below); the new denominator is the product
   of VECT's and TRANSFORM's denominators.
   NOTE(review): the return of TEMP is elided from this listing.  */
169 lambda_body_vector_compute_new (lambda_trans_matrix transform,
170 lambda_body_vector vect)
172 lambda_body_vector temp;
175 /* Make sure the matrix is square. */
176 gcc_assert (LTM_ROWSIZE (transform) == LTM_COLSIZE (transform));
178 depth = LTM_ROWSIZE (transform);
180 temp = lambda_body_vector_new (depth);
181 LBV_DENOMINATOR (temp) =
182 LBV_DENOMINATOR (vect) * LTM_DENOMINATOR (transform);
/* New coefficients = old coefficients (row vector) times the matrix.  */
183 lambda_vector_matrix_mult (LBV_COEFFICIENTS (vect), depth,
184 LTM_MATRIX (transform), depth,
185 LBV_COEFFICIENTS (temp));
186 LBV_SIZE (temp) = LBV_SIZE (vect);
190 /* Print out a lambda body vector. */
/* Print the coefficients of body vector BODY to OUTFILE, delegating to
   print_lambda_vector.  */
193 print_lambda_body_vector (FILE * outfile, lambda_body_vector body)
195 print_lambda_vector (outfile, LBV_COEFFICIENTS (body), LBV_SIZE (body));
198 /* Return TRUE if two linear expressions are equal. */
/* Return TRUE iff linear expressions LLE1 and LLE2 are structurally equal:
   same constant, same denominator, same DEPTH loop coefficients and same
   INVARIANTS invariant coefficients.  NULL arguments compare unequal.
   NOTE(review): the return statements are elided from this listing, so the
   exact NULL-vs-NULL result cannot be confirmed here.  */
201 lle_equal (lambda_linear_expression lle1, lambda_linear_expression lle2,
202 int depth, int invariants)
206 if (lle1 == NULL || lle2 == NULL)
208 if (LLE_CONSTANT (lle1) != LLE_CONSTANT (lle2))
210 if (LLE_DENOMINATOR (lle1) != LLE_DENOMINATOR (lle2))
212 for (i = 0; i < depth; i++)
213 if (LLE_COEFFICIENTS (lle1)[i] != LLE_COEFFICIENTS (lle2)[i])
215 for (i = 0; i < invariants; i++)
216 if (LLE_INVARIANT_COEFFICIENTS (lle1)[i] !=
217 LLE_INVARIANT_COEFFICIENTS (lle2)[i])
222 /* Create a new linear expression with dimension DIM, and total number
223 of invariants INVARIANTS. */
225 lambda_linear_expression
226 lambda_linear_expression_new (int dim, int invariants)
228 lambda_linear_expression ret;
/* GGC_CNEW zero-initializes the whole structure; the explicit field
   assignments below set the non-zero defaults.  */
230 ret = GGC_CNEW (struct lambda_linear_expression_s);
232 LLE_COEFFICIENTS (ret) = lambda_vector_new (dim);
233 LLE_CONSTANT (ret) = 0;
234 LLE_INVARIANT_COEFFICIENTS (ret) = lambda_vector_new (invariants);
/* Denominator 1 means the expression is integral; NEXT is the chain link
   used for multiple lower/upper bounds.  */
235 LLE_DENOMINATOR (ret) = 1;
236 LLE_NEXT (ret) = NULL;
241 /* Print out a linear expression EXPR, with SIZE coefficients, to OUTFILE.
242 The starting letter used for variable names is START. */
/* Print EXPR's SIZE coefficients to OUTFILE as a human-readable linear
   expression; variables are named START, START+1, ... and coefficients
   of magnitude 1 are printed without the number.
   NOTE(review): the first-term/sign handling lines are partially elided
   from this listing.  */
245 print_linear_expression (FILE * outfile, lambda_vector expr, int size,
250 for (i = 0; i < size; i++)
257 fprintf (outfile, "-");
260 else if (expr[i] > 0)
261 fprintf (outfile, " + ");
263 fprintf (outfile, " - ");
264 if (abs (expr[i]) == 1)
265 fprintf (outfile, "%c", start + i);
267 fprintf (outfile, "%d%c", abs (expr[i]), start + i);
272 /* Print out a lambda linear expression structure, EXPR, to OUTFILE. The
273 depth/number of coefficients is given by DEPTH, the number of invariants is
274 given by INVARIANTS, and the character to start variable names with is given
/* Print the full linear expression EXPR (loop coefficients, constant,
   invariant coefficients, denominator) to OUTFILE.  DEPTH and INVARIANTS
   give the vector lengths; START is the first variable name.  */
278 print_lambda_linear_expression (FILE * outfile,
279 lambda_linear_expression expr,
280 int depth, int invariants, char start)
282 fprintf (outfile, "\tLinear expression: ");
283 print_linear_expression (outfile, LLE_COEFFICIENTS (expr), depth, start);
284 fprintf (outfile, " constant: %d ", LLE_CONSTANT (expr));
285 fprintf (outfile, " invariants: ");
/* The invariant variables get their own name sequence; the start letter
   argument for this call is elided from the listing.  */
286 print_linear_expression (outfile, LLE_INVARIANT_COEFFICIENTS (expr),
288 fprintf (outfile, " denominator: %d\n", LLE_DENOMINATOR (expr));
291 /* Print a lambda loop structure LOOP to OUTFILE. The depth/number of
292 coefficients is given by DEPTH, the number of invariants is
293 given by INVARIANTS, and the character to start variable names with is given
/* Print lambda loop LOOP to OUTFILE: its step, linear offset (if any),
   and the chains of lower and upper bound expressions.  DEPTH, INVARIANTS
   and START are forwarded to the expression printer.  */
297 print_lambda_loop (FILE * outfile, lambda_loop loop, int depth,
298 int invariants, char start)
301 lambda_linear_expression expr;
305 expr = LL_LINEAR_OFFSET (loop);
306 step = LL_STEP (loop);
307 fprintf (outfile, " step size = %d \n", step);
/* The linear offset is optional; the guard testing EXPR is elided from
   this listing.  */
311 fprintf (outfile, " linear offset: \n");
312 print_lambda_linear_expression (outfile, expr, depth, invariants,
/* Bounds are linked lists of expressions; print every entry.  */
316 fprintf (outfile, " lower bound: \n");
317 for (expr = LL_LOWER_BOUND (loop); expr != NULL; expr = LLE_NEXT (expr))
318 print_lambda_linear_expression (outfile, expr, depth, invariants, start);
319 fprintf (outfile, " upper bound: \n");
320 for (expr = LL_UPPER_BOUND (loop); expr != NULL; expr = LLE_NEXT (expr))
321 print_lambda_linear_expression (outfile, expr, depth, invariants, start);
324 /* Create a new loop nest structure with DEPTH loops, and INVARIANTS as the
325 number of invariants. */
/* Allocate a new loop nest of DEPTH loops with INVARIANTS invariants.
   The per-loop array is zero-initialized (GGC_CNEWVEC); individual loops
   are filled in later by the callers.  */
328 lambda_loopnest_new (int depth, int invariants)
331 ret = GGC_NEW (struct lambda_loopnest_s);
333 LN_LOOPS (ret) = GGC_CNEWVEC (lambda_loop, depth);
334 LN_DEPTH (ret) = depth;
335 LN_INVARIANTS (ret) = invariants;
340 /* Print a lambda loopnest structure, NEST, to OUTFILE. The starting
341 character to use for loop names is given by START. */
/* Print every loop of NEST to OUTFILE.  Loops are labelled START,
   START+1, ...; induction variables inside each loop always start
   at 'i'.  */
344 print_lambda_loopnest (FILE * outfile, lambda_loopnest nest, char start)
347 for (i = 0; i < LN_DEPTH (nest); i++)
349 fprintf (outfile, "Loop %c\n", start + i);
350 print_lambda_loop (outfile, LN_LOOPS (nest)[i], LN_DEPTH (nest),
351 LN_INVARIANTS (nest), 'i');
352 fprintf (outfile, "\n");
356 /* Allocate a new lattice structure of DEPTH x DEPTH, with INVARIANTS number
/* Allocate a DEPTH x DEPTH lattice with INVARIANTS invariants: a base
   matrix, an origin vector, and an origin-invariants matrix of size
   DEPTH x INVARIANTS.  */
359 static lambda_lattice
360 lambda_lattice_new (int depth, int invariants)
363 ret = GGC_NEW (struct lambda_lattice_s);
364 LATTICE_BASE (ret) = lambda_matrix_new (depth, depth);
365 LATTICE_ORIGIN (ret) = lambda_vector_new (depth);
366 LATTICE_ORIGIN_INVARIANTS (ret) = lambda_matrix_new (depth, invariants);
367 LATTICE_DIMENSION (ret) = depth;
368 LATTICE_INVARIANTS (ret) = invariants;
372 /* Compute the lattice base for NEST. The lattice base is essentially a
373 non-singular transform from a dense base space to a sparse iteration space.
374 We use it so that we don't have to specially handle the case of a sparse
375 iteration space in other parts of the algorithm. As a result, this routine
376 only does something interesting (IE produce a matrix that isn't the
377 identity matrix) if NEST is a sparse space. */
/* Compute the lattice base for NEST: the affine map from a dense base
   space onto NEST's (possibly sparse) iteration space.  For a loop with
   step 1 the corresponding row is the identity; otherwise the row is
   derived from the loop's (single, integral) lower bound expression.
   NOTE(review): several lines — including the step==1 test and the
   diagonal/zero assignments — are elided from this listing.  */
379 static lambda_lattice
380 lambda_lattice_compute_base (lambda_loopnest nest)
383 int depth, invariants;
388 lambda_linear_expression expression;
390 depth = LN_DEPTH (nest);
391 invariants = LN_INVARIANTS (nest);
393 ret = lambda_lattice_new (depth, invariants);
394 base = LATTICE_BASE (ret);
395 for (i = 0; i < depth; i++)
397 loop = LN_LOOPS (nest)[i];
399 step = LL_STEP (loop);
400 /* If we have a step of 1, then the base is one, and the
401 origin and invariant coefficients are 0. */
404 for (j = 0; j < depth; j++)
407 LATTICE_ORIGIN (ret)[i] = 0;
408 for (j = 0; j < invariants; j++)
409 LATTICE_ORIGIN_INVARIANTS (ret)[i][j] = 0;
413 /* Otherwise, we need the lower bound expression (which must
414 be an affine function) to determine the base. */
415 expression = LL_LOWER_BOUND (loop);
/* Exactly one integral lower bound is required in the sparse case.  */
416 gcc_assert (expression && !LLE_NEXT (expression)
417 && LLE_DENOMINATOR (expression) == 1);
419 /* The lower triangular portion of the base is going to be the
420 coefficient times the step */
421 for (j = 0; j < i; j++)
422 base[i][j] = LLE_COEFFICIENTS (expression)[j]
423 * LL_STEP (LN_LOOPS (nest)[j]);
/* Entries above the diagonal are cleared (assignment elided here).  */
425 for (j = i + 1; j < depth; j++)
428 /* Origin for this loop is the constant of the lower bound
430 LATTICE_ORIGIN (ret)[i] = LLE_CONSTANT (expression);
432 /* Coefficient for the invariants are equal to the invariant
433 coefficients in the expression. */
434 for (j = 0; j < invariants; j++)
435 LATTICE_ORIGIN_INVARIANTS (ret)[i][j] =
436 LLE_INVARIANT_COEFFICIENTS (expression)[j];
/* Compute the least common multiple of two numbers A and B.
   Divide by the gcd before multiplying so the intermediate value is
   abs (a) / gcd (a, b), avoiding the needless overflow risk of computing
   abs (a) * abs (b) first.  The result is unchanged since gcd (a, b)
   always divides a.  Behavior when both A and B are zero is unchanged
   (division by gcd (0, 0)); assumes gcd handles negative arguments as
   the original call did — TODO confirm against the gcd definition.  */

static int
least_common_multiple (int a, int b)
{
  return (abs (a) / gcd (a, b)) * abs (b);
}
450 /* Perform Fourier-Motzkin elimination to calculate the bounds of the
452 Fourier-Motzkin is a way of reducing systems of linear inequalities so that
453 it is easy to calculate the answer and bounds.
454 A sketch of how it works:
455 Given a system of linear inequalities, ai * xj >= bk, you can always
456 rewrite the constraints so they are all of the form
457 a <= x, or x <= b, or x >= constant for some x in x1 ... xj (and some b
458 in b1 ... bk, and some a in a1...ai)
459 You can then eliminate this x from the non-constant inequalities by
460 rewriting these as a <= b, x >= constant, and delete the x variable.
461 You can then repeat this for any remaining x variables, and then we have
462 an easy to use variable <= constant (or no variables at all) form that we
463 can construct our bounds from.
465 In our case, each time we eliminate, we construct part of the bound from
466 the ith variable, then delete the ith variable.
468 Remember the constant are in our vector a, our coefficient matrix is A,
469 and our invariant coefficient matrix is B.
471 SIZE is the size of the matrices being passed.
472 DEPTH is the loop nest depth.
473 INVARIANTS is the number of loop invariants.
474 A, B, and a are the coefficient matrix, invariant coefficient, and a
475 vector of constants, respectively. */
/* Perform Fourier-Motzkin elimination on the system A x <= a + B
   (SIZE inequalities over DEPTH variables with INVARIANTS invariants)
   and return the resulting loop nest.  Variables are eliminated from the
   innermost (depth-1) outward; each elimination step both records the
   current variable's lower/upper bound expressions and builds a reduced
   system in A1/B1/a1 with the variable removed.
   NOTE(review): this listing is elided — the inner A[j][i] < 0 test, the
   newsize bookkeeping, the pointer swaps of A/A1 etc., and the loop
   closing braces are not visible here.  */
477 static lambda_loopnest
478 compute_nest_using_fourier_motzkin (int size,
486 int multiple, f1, f2;
488 lambda_linear_expression expression;
490 lambda_loopnest auxillary_nest;
491 lambda_matrix swapmatrix, A1, B1;
492 lambda_vector swapvector, a1;
/* 128 matches the fixed constraint capacity used by the caller
   (lambda_compute_auxillary_space).  */
495 A1 = lambda_matrix_new (128, depth);
496 B1 = lambda_matrix_new (128, invariants);
497 a1 = lambda_vector_new (128);
499 auxillary_nest = lambda_loopnest_new (depth, invariants);
501 for (i = depth - 1; i >= 0; i--)
503 loop = lambda_loop_new ();
504 LN_LOOPS (auxillary_nest)[i] = loop;
507 for (j = 0; j < size; j++)
511 /* Any linear expression in the matrix with a coefficient less
512 than 0 becomes part of the new lower bound. */
513 expression = lambda_linear_expression_new (depth, invariants);
515 for (k = 0; k < i; k++)
516 LLE_COEFFICIENTS (expression)[k] = A[j][k];
518 for (k = 0; k < invariants; k++)
519 LLE_INVARIANT_COEFFICIENTS (expression)[k] = -1 * B[j][k];
/* Negating makes the denominator positive since A[j][i] < 0 here.  */
521 LLE_DENOMINATOR (expression) = -1 * A[j][i];
522 LLE_CONSTANT (expression) = -1 * a[j];
524 /* Ignore if identical to the existing lower bound. */
525 if (!lle_equal (LL_LOWER_BOUND (loop),
526 expression, depth, invariants))
528 LLE_NEXT (expression) = LL_LOWER_BOUND (loop);
529 LL_LOWER_BOUND (loop) = expression;
533 else if (A[j][i] > 0)
535 /* Any linear expression with a coefficient greater than 0
536 becomes part of the new upper bound. */
537 expression = lambda_linear_expression_new (depth, invariants);
538 for (k = 0; k < i; k++)
539 LLE_COEFFICIENTS (expression)[k] = -1 * A[j][k];
541 for (k = 0; k < invariants; k++)
542 LLE_INVARIANT_COEFFICIENTS (expression)[k] = B[j][k];
544 LLE_DENOMINATOR (expression) = A[j][i];
545 LLE_CONSTANT (expression) = a[j];
547 /* Ignore if identical to the existing upper bound. */
548 if (!lle_equal (LL_UPPER_BOUND (loop),
549 expression, depth, invariants))
551 LLE_NEXT (expression) = LL_UPPER_BOUND (loop);
552 LL_UPPER_BOUND (loop) = expression;
558 /* This portion creates a new system of linear inequalities by deleting
559 the i'th variable, reducing the system by one variable. */
561 for (j = 0; j < size; j++)
563 /* If the coefficient for the i'th variable is 0, then we can just
564 eliminate the variable straightaway. Otherwise, we have to
565 multiply through by the coefficients we are eliminating. */
568 lambda_vector_copy (A[j], A1[newsize], depth);
569 lambda_vector_copy (B[j], B1[newsize], invariants);
573 else if (A[j][i] > 0)
575 for (k = 0; k < size; k++)
/* Pair a positive-coefficient row j with each negative-coefficient
   row k (test elided); scale both so the i'th coefficients cancel.  */
579 multiple = least_common_multiple (A[j][i], A[k][i]);
580 f1 = multiple / A[j][i];
581 f2 = -1 * multiple / A[k][i];
583 lambda_vector_add_mc (A[j], f1, A[k], f2,
585 lambda_vector_add_mc (B[j], f1, B[k], f2,
586 B1[newsize], invariants);
587 a1[newsize] = f1 * a[j] + f2 * a[k];
609 return auxillary_nest;
612 /* Compute the loop bounds for the auxiliary space NEST.
613 Input system used is Ax <= b. TRANS is the unimodular transformation.
614 Given the original nest, this function will
615 1. Convert the nest into matrix form, which consists of a matrix for the
616 coefficients, a matrix for the
617 invariant coefficients, and a vector for the constants.
618 2. Use the matrix form to calculate the lattice base for the nest (which is
620 3. Compose the dense space transform with the user specified transform, to
621 get a transform we can easily calculate transformed bounds for.
622 4. Multiply the composed transformation matrix times the matrix form of the
624 5. Transform the newly created matrix (from step 4) back into a loop nest
625 using Fourier-Motzkin elimination to figure out the bounds. */
/* Compute the auxiliary-space loop bounds for NEST under unimodular
   transform TRANS.  Converts the nest's bounds into matrix form
   (A x <= a + B), rebases through the lattice so a sparse nest becomes
   a dense one, multiplies by the inverse of TRANS, and hands the result
   to Fourier-Motzkin elimination.
   NOTE(review): this listing is elided — size initialization/increments,
   some inner assignments and closing braces are not visible here.  */
627 static lambda_loopnest
628 lambda_compute_auxillary_space (lambda_loopnest nest,
629 lambda_trans_matrix trans)
631 lambda_matrix A, B, A1, B1;
633 lambda_matrix invertedtrans;
634 int depth, invariants, size;
637 lambda_linear_expression expression;
638 lambda_lattice lattice;
640 depth = LN_DEPTH (nest);
641 invariants = LN_INVARIANTS (nest);
643 /* Unfortunately, we can't know the number of constraints we'll have
644 ahead of time, but this should be enough even in ridiculous loop nest
645 cases. We must not go over this limit. */
646 A = lambda_matrix_new (128, depth);
647 B = lambda_matrix_new (128, invariants);
648 a = lambda_vector_new (128);
650 A1 = lambda_matrix_new (128, depth);
651 B1 = lambda_matrix_new (128, invariants);
652 a1 = lambda_vector_new (128);
654 /* Store the bounds in the equation matrix A, constant vector a, and
655 invariant matrix B, so that we have Ax <= a + B.
656 This requires a little equation rearranging so that everything is on the
657 correct side of the inequality. */
659 for (i = 0; i < depth; i++)
661 loop = LN_LOOPS (nest)[i];
663 /* First we do the lower bound. */
/* A negative step swaps the roles of the lower and upper bounds.  */
664 if (LL_STEP (loop) > 0)
665 expression = LL_LOWER_BOUND (loop);
667 expression = LL_UPPER_BOUND (loop);
669 for (; expression != NULL; expression = LLE_NEXT (expression))
671 /* Fill in the coefficient. */
672 for (j = 0; j < i; j++)
673 A[size][j] = LLE_COEFFICIENTS (expression)[j];
675 /* And the invariant coefficient. */
676 for (j = 0; j < invariants; j++)
677 B[size][j] = LLE_INVARIANT_COEFFICIENTS (expression)[j];
679 /* And the constant. */
680 a[size] = LLE_CONSTANT (expression);
682 /* Convert (2x+3y+2+b)/4 <= z to 2x+3y-4z <= -2-b. IE put all
683 constants and single variables on */
684 A[size][i] = -1 * LLE_DENOMINATOR (expression);
686 for (j = 0; j < invariants; j++)
690 /* Need to increase matrix sizes above. */
691 gcc_assert (size <= 127);
695 /* Then do the exact same thing for the upper bounds. */
696 if (LL_STEP (loop) > 0)
697 expression = LL_UPPER_BOUND (loop);
699 expression = LL_LOWER_BOUND (loop);
701 for (; expression != NULL; expression = LLE_NEXT (expression))
703 /* Fill in the coefficient. */
704 for (j = 0; j < i; j++)
705 A[size][j] = LLE_COEFFICIENTS (expression)[j];
707 /* And the invariant coefficient. */
708 for (j = 0; j < invariants; j++)
709 B[size][j] = LLE_INVARIANT_COEFFICIENTS (expression)[j];
711 /* And the constant. */
712 a[size] = LLE_CONSTANT (expression);
714 /* Convert z <= (2x+3y+2+b)/4 to -2x-3y+4z <= 2+b. */
715 for (j = 0; j < i; j++)
717 A[size][i] = LLE_DENOMINATOR (expression);
719 /* Need to increase matrix sizes above. */
720 gcc_assert (size <= 127);
725 /* Compute the lattice base x = base * y + origin, where y is the
727 lattice = lambda_lattice_compute_base (nest);
729 /* Ax <= a + B then becomes ALy <= a+B - A*origin. L is the lattice base */
/* A1 = A * L, the rebased coefficient matrix.  */
732 lambda_matrix_mult (A, LATTICE_BASE (lattice), A1, size, depth, depth);
734 /* a1 = a - A * origin constant. */
735 lambda_matrix_vector_mult (A, size, depth, LATTICE_ORIGIN (lattice), a1);
736 lambda_vector_add_mc (a, 1, a1, -1, a1, size);
738 /* B1 = B - A * origin invariant. */
739 lambda_matrix_mult (A, LATTICE_ORIGIN_INVARIANTS (lattice), B1, size, depth,
741 lambda_matrix_add_mc (B, 1, B1, -1, B1, size, invariants);
743 /* Now compute the auxiliary space bounds by first inverting U, multiplying
744 it by A1, then performing Fourier-Motzkin. */
746 invertedtrans = lambda_matrix_new (depth, depth);
748 /* Compute the inverse of U. */
749 lambda_matrix_inverse (LTM_MATRIX (trans),
750 invertedtrans, depth);
/* A = A1 * U^-1 (result written back into A).  */
753 lambda_matrix_mult (A1, invertedtrans, A, size, depth, depth);
755 return compute_nest_using_fourier_motzkin (size, depth, invariants,
759 /* Compute the loop bounds for the target space, using the bounds of
760 the auxiliary nest AUXILLARY_NEST, and the triangular matrix H.
761 The target space loop bounds are computed by multiplying the triangular
762 matrix H by the auxiliary nest, to get the new loop bounds. The sign of
763 the loop steps (positive or negative) is then used to swap the bounds if
764 the loop counts downwards.
765 Return the target loopnest. */
/* Compute the target-space loop nest from AUXILLARY_NEST and the lower
   triangular matrix H.  The new steps are H's diagonal entries; each
   bound of the auxiliary nest is mapped through H's inverse, reduced by
   its gcd, and chained onto the target loop.  STEPSIGNS records which
   loops run downward; for those the lower/upper bounds are swapped and
   the step negated at the end.
   NOTE(review): this listing is elided — several multiplier arguments,
   gcd2 contributions, closing braces and the final return of TARGET_NEST
   are not visible here.  */
767 static lambda_loopnest
768 lambda_compute_target_space (lambda_loopnest auxillary_nest,
769 lambda_trans_matrix H, lambda_vector stepsigns)
771 lambda_matrix inverse, H1;
772 int determinant, i, j;
776 lambda_loopnest target_nest;
777 int depth, invariants;
778 lambda_matrix target;
780 lambda_loop auxillary_loop, target_loop;
781 lambda_linear_expression expression, auxillary_expr, target_expr, tmp_expr;
783 depth = LN_DEPTH (auxillary_nest);
784 invariants = LN_INVARIANTS (auxillary_nest);
/* lambda_matrix_inverse returns the determinant, used below as the
   common denominator of the inverse's entries.  */
786 inverse = lambda_matrix_new (depth, depth);
787 determinant = lambda_matrix_inverse (LTM_MATRIX (H), inverse, depth);
789 /* H1 is H excluding its diagonal. */
790 H1 = lambda_matrix_new (depth, depth);
791 lambda_matrix_copy (LTM_MATRIX (H), H1, depth, depth);
793 for (i = 0; i < depth; i++)
796 /* Computes the linear offsets of the loop bounds. */
797 target = lambda_matrix_new (depth, depth);
798 lambda_matrix_mult (H1, inverse, target, depth, depth, depth);
800 target_nest = lambda_loopnest_new (depth, invariants);
802 for (i = 0; i < depth; i++)
805 /* Get a new loop structure. */
806 target_loop = lambda_loop_new ();
807 LN_LOOPS (target_nest)[i] = target_loop;
809 /* Computes the gcd of the coefficients of the linear part. */
810 gcd1 = lambda_vector_gcd (target[i], i);
812 /* Include the denominator in the GCD. */
813 gcd1 = gcd (gcd1, determinant);
815 /* Now divide through by the gcd. */
816 for (j = 0; j < i; j++)
817 target[i][j] = target[i][j] / gcd1;
819 expression = lambda_linear_expression_new (depth, invariants);
820 lambda_vector_copy (target[i], LLE_COEFFICIENTS (expression), depth);
821 LLE_DENOMINATOR (expression) = determinant / gcd1;
822 LLE_CONSTANT (expression) = 0;
823 lambda_vector_clear (LLE_INVARIANT_COEFFICIENTS (expression),
825 LL_LINEAR_OFFSET (target_loop) = expression;
828 /* For each loop, compute the new bounds from H. */
829 for (i = 0; i < depth; i++)
831 auxillary_loop = LN_LOOPS (auxillary_nest)[i];
832 target_loop = LN_LOOPS (target_nest)[i];
/* The new step for loop i is H's diagonal entry; FACTOR scales the
   transformed bounds accordingly.  */
833 LL_STEP (target_loop) = LTM_MATRIX (H)[i][i];
834 factor = LTM_MATRIX (H)[i][i];
836 /* First we do the lower bound. */
837 auxillary_expr = LL_LOWER_BOUND (auxillary_loop);
839 for (; auxillary_expr != NULL;
840 auxillary_expr = LLE_NEXT (auxillary_expr))
842 target_expr = lambda_linear_expression_new (depth, invariants);
843 lambda_vector_matrix_mult (LLE_COEFFICIENTS (auxillary_expr),
844 depth, inverse, depth,
845 LLE_COEFFICIENTS (target_expr));
846 lambda_vector_mult_const (LLE_COEFFICIENTS (target_expr),
847 LLE_COEFFICIENTS (target_expr), depth,
850 LLE_CONSTANT (target_expr) = LLE_CONSTANT (auxillary_expr) * factor;
851 lambda_vector_copy (LLE_INVARIANT_COEFFICIENTS (auxillary_expr),
852 LLE_INVARIANT_COEFFICIENTS (target_expr),
854 lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS (target_expr),
855 LLE_INVARIANT_COEFFICIENTS (target_expr),
857 LLE_DENOMINATOR (target_expr) = LLE_DENOMINATOR (auxillary_expr);
/* If the bound still references loop variables, fold the inverse's
   determinant into the constant, invariants and denominator.  */
859 if (!lambda_vector_zerop (LLE_COEFFICIENTS (target_expr), depth))
861 LLE_CONSTANT (target_expr) = LLE_CONSTANT (target_expr)
863 lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS
865 LLE_INVARIANT_COEFFICIENTS
866 (target_expr), invariants,
868 LLE_DENOMINATOR (target_expr) =
869 LLE_DENOMINATOR (target_expr) * determinant;
871 /* Find the gcd and divide by it here, rather than doing it
872 at the tree level. */
873 gcd1 = lambda_vector_gcd (LLE_COEFFICIENTS (target_expr), depth);
874 gcd2 = lambda_vector_gcd (LLE_INVARIANT_COEFFICIENTS (target_expr),
876 gcd1 = gcd (gcd1, gcd2);
877 gcd1 = gcd (gcd1, LLE_CONSTANT (target_expr));
878 gcd1 = gcd (gcd1, LLE_DENOMINATOR (target_expr));
879 for (j = 0; j < depth; j++)
880 LLE_COEFFICIENTS (target_expr)[j] /= gcd1;
881 for (j = 0; j < invariants; j++)
882 LLE_INVARIANT_COEFFICIENTS (target_expr)[j] /= gcd1;
883 LLE_CONSTANT (target_expr) /= gcd1;
884 LLE_DENOMINATOR (target_expr) /= gcd1;
885 /* Ignore if identical to existing bound. */
886 if (!lle_equal (LL_LOWER_BOUND (target_loop), target_expr, depth,
889 LLE_NEXT (target_expr) = LL_LOWER_BOUND (target_loop);
890 LL_LOWER_BOUND (target_loop) = target_expr;
893 /* Now do the upper bound. */
894 auxillary_expr = LL_UPPER_BOUND (auxillary_loop);
896 for (; auxillary_expr != NULL;
897 auxillary_expr = LLE_NEXT (auxillary_expr))
899 target_expr = lambda_linear_expression_new (depth, invariants);
900 lambda_vector_matrix_mult (LLE_COEFFICIENTS (auxillary_expr),
901 depth, inverse, depth,
902 LLE_COEFFICIENTS (target_expr));
903 lambda_vector_mult_const (LLE_COEFFICIENTS (target_expr),
904 LLE_COEFFICIENTS (target_expr), depth,
906 LLE_CONSTANT (target_expr) = LLE_CONSTANT (auxillary_expr) * factor;
907 lambda_vector_copy (LLE_INVARIANT_COEFFICIENTS (auxillary_expr),
908 LLE_INVARIANT_COEFFICIENTS (target_expr),
910 lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS (target_expr),
911 LLE_INVARIANT_COEFFICIENTS (target_expr),
913 LLE_DENOMINATOR (target_expr) = LLE_DENOMINATOR (auxillary_expr);
915 if (!lambda_vector_zerop (LLE_COEFFICIENTS (target_expr), depth))
917 LLE_CONSTANT (target_expr) = LLE_CONSTANT (target_expr)
919 lambda_vector_mult_const (LLE_INVARIANT_COEFFICIENTS
921 LLE_INVARIANT_COEFFICIENTS
922 (target_expr), invariants,
924 LLE_DENOMINATOR (target_expr) =
925 LLE_DENOMINATOR (target_expr) * determinant;
927 /* Find the gcd and divide by it here, instead of at the
929 gcd1 = lambda_vector_gcd (LLE_COEFFICIENTS (target_expr), depth);
930 gcd2 = lambda_vector_gcd (LLE_INVARIANT_COEFFICIENTS (target_expr),
932 gcd1 = gcd (gcd1, gcd2);
933 gcd1 = gcd (gcd1, LLE_CONSTANT (target_expr));
934 gcd1 = gcd (gcd1, LLE_DENOMINATOR (target_expr));
935 for (j = 0; j < depth; j++)
936 LLE_COEFFICIENTS (target_expr)[j] /= gcd1;
937 for (j = 0; j < invariants; j++)
938 LLE_INVARIANT_COEFFICIENTS (target_expr)[j] /= gcd1;
939 LLE_CONSTANT (target_expr) /= gcd1;
940 LLE_DENOMINATOR (target_expr) /= gcd1;
941 /* Ignore if equal to existing bound. */
942 if (!lle_equal (LL_UPPER_BOUND (target_loop), target_expr, depth,
945 LLE_NEXT (target_expr) = LL_UPPER_BOUND (target_loop);
946 LL_UPPER_BOUND (target_loop) = target_expr;
/* Final pass: loops with a negative step sign get their bounds swapped
   and the step negated so the loop counts downward correctly.  */
950 for (i = 0; i < depth; i++)
952 target_loop = LN_LOOPS (target_nest)[i];
953 /* If necessary, exchange the upper and lower bounds and negate
955 if (stepsigns[i] < 0)
957 LL_STEP (target_loop) *= -1;
958 tmp_expr = LL_LOWER_BOUND (target_loop);
959 LL_LOWER_BOUND (target_loop) = LL_UPPER_BOUND (target_loop);
960 LL_UPPER_BOUND (target_loop) = tmp_expr;
966 /* Compute the step signs of TRANS, using TRANS and stepsigns. Return the new
/* Compute the new loop step signs implied by TRANS, starting from the
   current STEPSIGNS, and return the new sign vector.  Works on a copy H
   of the transform, performing column operations (negate, exchange,
   reduce) while mirroring the exchanges in the sign vector.
   NOTE(review): this listing is elided — the ROW binding, the negativity
   test guarding the column negation, the TEMP swap setup and the return
   are not visible here.  */
970 lambda_compute_step_signs (lambda_trans_matrix trans, lambda_vector stepsigns)
972 lambda_matrix matrix, H;
974 lambda_vector newsteps;
975 int i, j, factor, minimum_column;
978 matrix = LTM_MATRIX (trans);
979 size = LTM_ROWSIZE (trans);
980 H = lambda_matrix_new (size, size);
982 newsteps = lambda_vector_new (size);
983 lambda_vector_copy (stepsigns, newsteps, size);
985 lambda_matrix_copy (matrix, H, size, size);
987 for (j = 0; j < size; j++)
991 for (i = j; i < size; i++)
993 lambda_matrix_col_negate (H, size, i);
/* Repeatedly reduce until row j has no nonzero entries past column j.  */
994 while (lambda_vector_first_nz (row, size, j + 1) < size)
996 minimum_column = lambda_vector_min_nz (row, size, j);
997 lambda_matrix_col_exchange (H, size, j, minimum_column);
/* Mirror the column exchange in the step-sign vector.  */
1000 newsteps[j] = newsteps[minimum_column];
1001 newsteps[minimum_column] = temp;
1003 for (i = j + 1; i < size; i++)
1005 factor = row[i] / row[j];
1006 lambda_matrix_col_add (H, size, j, i, -1 * factor);
1013 /* Transform NEST according to TRANS, and return the new loopnest.
1015 1. Computing a lattice base for the transformation
1016 2. Composing the dense base with the specified transformation (TRANS)
1017 3. Decomposing the combined transformation into a lower triangular portion,
1018 and a unimodular portion.
1019 4. Computing the auxiliary nest using the unimodular portion.
1020 5. Computing the target nest using the auxiliary nest and the lower
1021 triangular portion. */
/* Transform NEST according to TRANS and return the new loopnest.
   Pipeline: compute the lattice base, compose it with TRANS, take the
   Hermite normal form (H lower triangular, U unimodular), compute the
   auxiliary nest from U, the step signs from the composed transform,
   and the target nest from the auxiliary nest and H; finally fold the
   lattice origin into each target loop's linear offset.
   NOTE(review): this listing is elided — stepsign initialization values,
   some call arguments, closing braces and the final return are not
   visible here.  */
1024 lambda_loopnest_transform (lambda_loopnest nest, lambda_trans_matrix trans)
1026 lambda_loopnest auxillary_nest, target_nest;
1028 int depth, invariants;
1030 lambda_lattice lattice;
1031 lambda_trans_matrix trans1, H, U;
1033 lambda_linear_expression expression;
1034 lambda_vector origin;
1035 lambda_matrix origin_invariants;
1036 lambda_vector stepsigns;
1039 depth = LN_DEPTH (nest);
1040 invariants = LN_INVARIANTS (nest);
1042 /* Keep track of the signs of the loop steps. */
1043 stepsigns = lambda_vector_new (depth);
1044 for (i = 0; i < depth; i++)
1046 if (LL_STEP (LN_LOOPS (nest)[i]) > 0)
1052 /* Compute the lattice base. */
1053 lattice = lambda_lattice_compute_base (nest);
1054 trans1 = lambda_trans_matrix_new (depth, depth);
1056 /* Multiply the transformation matrix by the lattice base. */
1058 lambda_matrix_mult (LTM_MATRIX (trans), LATTICE_BASE (lattice),
1059 LTM_MATRIX (trans1), depth, depth, depth);
1061 /* Compute the Hermite normal form for the new transformation matrix. */
1062 H = lambda_trans_matrix_new (depth, depth);
1063 U = lambda_trans_matrix_new (depth, depth);
1064 lambda_matrix_hermite (LTM_MATRIX (trans1), depth, LTM_MATRIX (H),
1067 /* Compute the auxiliary loop nest's space from the unimodular
1069 auxillary_nest = lambda_compute_auxillary_space (nest, U);
1071 /* Compute the loop step signs from the old step signs and the
1072 transformation matrix. */
1073 stepsigns = lambda_compute_step_signs (trans1, stepsigns);
1075 /* Compute the target loop nest space from the auxiliary nest and
1076 the lower triangular matrix H. */
1077 target_nest = lambda_compute_target_space (auxillary_nest, H, stepsigns);
/* Map the lattice origin (and its invariant part) through TRANS so it
   can be added into each target loop's linear offset below.  */
1078 origin = lambda_vector_new (depth);
1079 origin_invariants = lambda_matrix_new (depth, invariants);
1080 lambda_matrix_vector_mult (LTM_MATRIX (trans), depth, depth,
1081 LATTICE_ORIGIN (lattice), origin);
1082 lambda_matrix_mult (LTM_MATRIX (trans), LATTICE_ORIGIN_INVARIANTS (lattice),
1083 origin_invariants, depth, depth, invariants);
1085 for (i = 0; i < depth; i++)
1087 loop = LN_LOOPS (target_nest)[i];
1088 expression = LL_LINEAR_OFFSET (loop);
1089 if (lambda_vector_zerop (LLE_COEFFICIENTS (expression), depth))
1092 f = LLE_DENOMINATOR (expression);
/* Scale by the offset's denominator so the added origin terms share
   the expression's denominator.  */
1094 LLE_CONSTANT (expression) += f * origin[i];
1096 for (j = 0; j < invariants; j++)
1097 LLE_INVARIANT_COEFFICIENTS (expression)[j] +=
1098 f * origin_invariants[i][j];
1105 /* Convert a gcc tree expression EXPR to a lambda linear expression, and
1106 return the new expression. DEPTH is the depth of the loopnest.
1107 OUTERINDUCTIONVARS is an array of the induction variables for outer loops
1108 in this nest. INVARIANTS is the array of invariants for the loop. EXTRA
1109 is the amount we have to add/subtract from the expression because of the
1110 type of comparison it is used in.
1110 Returns NULL when EXPR is not one of the forms handled below.  */
1112 static lambda_linear_expression
1113 gcc_tree_to_linear_expression (int depth, tree expr,
1114 VEC(tree,heap) *outerinductionvars,
1115 VEC(tree,heap) *invariants, int extra)
1117 lambda_linear_expression lle = NULL;
1118 switch (TREE_CODE (expr))
/* An integer constant becomes a pure constant term (plus EXTRA).  */
1122 lle = lambda_linear_expression_new (depth, 2 * depth);
1123 LLE_CONSTANT (lle) = TREE_INT_CST_LOW (expr);
1125 LLE_CONSTANT (lle) += extra;
1127 LLE_DENOMINATOR (lle) = 1;
/* An SSA name matching an outer induction variable gets a coefficient of 1
   in that loop's dimension.  Matching is by the underlying variable
   (SSA_NAME_VAR), not the exact SSA version.  */
1134 for (i = 0; VEC_iterate (tree, outerinductionvars, i, iv); i++)
1137 if (SSA_NAME_VAR (iv) == SSA_NAME_VAR (expr))
1139 lle = lambda_linear_expression_new (depth, 2 * depth);
1140 LLE_COEFFICIENTS (lle)[i] = 1;
1142 LLE_CONSTANT (lle) = extra;
1144 LLE_DENOMINATOR (lle) = 1;
/* Otherwise, an SSA name matching a known loop invariant gets a
   coefficient of 1 in that invariant's slot.  */
1147 for (i = 0; VEC_iterate (tree, invariants, i, invar); i++)
1150 if (SSA_NAME_VAR (invar) == SSA_NAME_VAR (expr))
1152 lle = lambda_linear_expression_new (depth, 2 * depth);
1153 LLE_INVARIANT_COEFFICIENTS (lle)[i] = 1;
1155 LLE_CONSTANT (lle) = extra;
1156 LLE_DENOMINATOR (lle) = 1;
1168 /* Return the depth of the loopnest NEST, i.e. the number of loops
   counted from NEST inward.  */
1171 depth_of_nest (struct loop *nest)
1183 /* Return true if OP is invariant in LOOP and all outer loops. */
1186 invariant_in_loop_and_outer_loops (struct loop *loop, tree op)
/* GIMPLE minimal invariants (constants, addresses of fixed objects) are
   invariant everywhere.  */
1188 if (is_gimple_min_invariant (op))
/* At depth 0 there is no enclosing loop left to check.  */
1190 if (loop_depth (loop) == 0)
1192 if (!expr_invariant_in_loop_p (loop, op))
/* Recurse outward through the enclosing loops.  */
1194 if (!invariant_in_loop_and_outer_loops (loop_outer (loop), op))
1199 /* Generate a lambda loop from a gcc loop LOOP. Return the new lambda loop,
1200 or NULL if it could not be converted.
1201 DEPTH is the depth of the loop.
1202 INVARIANTS is a pointer to the array of loop invariants.
1203 The induction variable for this loop should be stored in the parameter
1203 OURINDUCTIONVAR.
1205 OUTERINDUCTIONVARS is an array of induction variables for outer loops.
1205 LBOUNDVARS/UBOUNDVARS/STEPS collect, per loop, the lower-bound tree, the
1205 upper-bound tree and the integer step, for later use by perfect_nestify.  */
1208 gcc_loop_to_lambda_loop (struct loop *loop, int depth,
1209 VEC(tree,heap) ** invariants,
1210 tree * ourinductionvar,
1211 VEC(tree,heap) * outerinductionvars,
1212 VEC(tree,heap) ** lboundvars,
1213 VEC(tree,heap) ** uboundvars,
1214 VEC(int,heap) ** steps)
1218 tree access_fn, inductionvar;
1220 lambda_loop lloop = NULL;
1221 lambda_linear_expression lbound, ubound;
1225 tree lboundvar, uboundvar, uboundresult;
1227 /* Find out induction var and exit condition. */
1228 inductionvar = find_induction_var_from_exit_cond (loop);
1229 exit_cond = get_loop_exit_condition (loop);
1231 if (inductionvar == NULL || exit_cond == NULL)
1233 if (dump_file && (dump_flags & TDF_DETAILS))
1235 "Unable to convert loop: Cannot determine exit condition or induction variable for loop.\n");
1239 test = TREE_OPERAND (exit_cond, 0);
1241 if (SSA_NAME_DEF_STMT (inductionvar) == NULL_TREE)
1244 if (dump_file && (dump_flags & TDF_DETAILS))
1246 "Unable to convert loop: Cannot find PHI node for induction variable\n");
/* Walk from the induction variable's defining statement back to its PHI
   node, allowing one level of indirection through a single-use copy.  */
1251 phi = SSA_NAME_DEF_STMT (inductionvar);
1252 if (TREE_CODE (phi) != PHI_NODE)
1254 phi = SINGLE_SSA_TREE_OPERAND (phi, SSA_OP_USE);
1258 if (dump_file && (dump_flags & TDF_DETAILS))
1260 "Unable to convert loop: Cannot find PHI node for induction variable\n");
1265 phi = SSA_NAME_DEF_STMT (phi);
1266 if (TREE_CODE (phi) != PHI_NODE)
1269 if (dump_file && (dump_flags & TDF_DETAILS))
1271 "Unable to convert loop: Cannot find PHI node for induction variable\n");
1277 /* The induction variable name/version we want to put in the array is the
1278 result of the induction variable phi node. */
1279 *ourinductionvar = PHI_RESULT (phi);
1280 access_fn = instantiate_parameters
1281 (loop, analyze_scalar_evolution (loop, PHI_RESULT (phi)));
1282 if (access_fn == chrec_dont_know)
1284 if (dump_file && (dump_flags & TDF_DETAILS))
1286 "Unable to convert loop: Access function for induction variable phi is unknown\n");
/* The step must be a known compile-time integer constant.  */
1291 step = evolution_part_in_loop_num (access_fn, loop->num);
1292 if (!step || step == chrec_dont_know)
1294 if (dump_file && (dump_flags & TDF_DETAILS))
1296 "Unable to convert loop: Cannot determine step of loop.\n");
1300 if (TREE_CODE (step) != INTEGER_CST)
1303 if (dump_file && (dump_flags & TDF_DETAILS))
1305 "Unable to convert loop: Step of loop is not integer.\n");
1309 stepint = TREE_INT_CST_LOW (step);
1311 /* Only want phis for induction vars, which will have two
1311 arguments.  */
1313 if (PHI_NUM_ARGS (phi) != 2)
1315 if (dump_file && (dump_flags & TDF_DETAILS))
1317 "Unable to convert loop: PHI node for induction variable has >2 arguments\n");
1321 /* Another induction variable check. One argument's source should be
1322 in the loop, one outside the loop. */
1323 if (flow_bb_inside_loop_p (loop, PHI_ARG_EDGE (phi, 0)->src)
1324 && flow_bb_inside_loop_p (loop, PHI_ARG_EDGE (phi, 1)->src))
1327 if (dump_file && (dump_flags & TDF_DETAILS))
1329 "Unable to convert loop: PHI edges both inside loop, or both outside loop.\n");
/* The PHI argument coming from outside the loop is the initial (lower
   bound) value; pick whichever argument that is.  */
1334 if (flow_bb_inside_loop_p (loop, PHI_ARG_EDGE (phi, 0)->src))
1336 lboundvar = PHI_ARG_DEF (phi, 1);
1337 lbound = gcc_tree_to_linear_expression (depth, lboundvar,
1338 outerinductionvars, *invariants,
1343 lboundvar = PHI_ARG_DEF (phi, 0);
1344 lbound = gcc_tree_to_linear_expression (depth, lboundvar,
1345 outerinductionvars, *invariants,
1352 if (dump_file && (dump_flags & TDF_DETAILS))
1354 "Unable to convert loop: Cannot convert lower bound to linear expression\n");
1358 /* One part of the test may be a loop invariant tree. */
1359 VEC_reserve (tree, heap, *invariants, 1);
1360 if (TREE_CODE (TREE_OPERAND (test, 1)) == SSA_NAME
1361 && invariant_in_loop_and_outer_loops (loop, TREE_OPERAND (test, 1)))
1362 VEC_quick_push (tree, *invariants, TREE_OPERAND (test, 1));
1363 else if (TREE_CODE (TREE_OPERAND (test, 0)) == SSA_NAME
1364 && invariant_in_loop_and_outer_loops (loop, TREE_OPERAND (test, 0)))
1365 VEC_quick_push (tree, *invariants, TREE_OPERAND (test, 0));
1367 /* The non-induction variable part of the test is the upper bound variable.
1367 */
1369 if (TREE_OPERAND (test, 0) == inductionvar)
1370 uboundvar = TREE_OPERAND (test, 1);
1372 uboundvar = TREE_OPERAND (test, 0);
1375 /* We only size the vectors assuming we have, at max, 2 times as many
1376 invariants as we do loops (one for each bound).
1377 This is just an arbitrary number, but it has to be matched against the
1377 code below.  */
1379 gcc_assert (VEC_length (tree, *invariants) <= (unsigned int) (2 * depth));
1382 /* We might have some leftover. */
/* EXTRA adjusts the bound by one step depending on whether the comparison
   is strict (LT/GT/NE) or an equality (EQ).  */
1383 if (TREE_CODE (test) == LT_EXPR)
1384 extra = -1 * stepint;
1385 else if (TREE_CODE (test) == NE_EXPR)
1386 extra = -1 * stepint;
1387 else if (TREE_CODE (test) == GT_EXPR)
1388 extra = -1 * stepint;
1389 else if (TREE_CODE (test) == EQ_EXPR)
1390 extra = 1 * stepint;
1392 ubound = gcc_tree_to_linear_expression (depth, uboundvar,
1394 *invariants, extra);
/* Record uboundvar + extra as the effective upper bound tree.  */
1395 uboundresult = build2 (PLUS_EXPR, TREE_TYPE (uboundvar), uboundvar,
1396 build_int_cst (TREE_TYPE (uboundvar), extra));
1397 VEC_safe_push (tree, heap, *uboundvars, uboundresult);
1398 VEC_safe_push (tree, heap, *lboundvars, lboundvar);
1399 VEC_safe_push (int, heap, *steps, stepint);
1402 if (dump_file && (dump_flags & TDF_DETAILS))
1404 "Unable to convert loop: Cannot convert upper bound to linear expression\n");
/* All checks passed: assemble and return the lambda loop.  */
1408 lloop = lambda_loop_new ();
1409 LL_STEP (lloop) = stepint;
1410 LL_LOWER_BOUND (lloop) = lbound;
1411 LL_UPPER_BOUND (lloop) = ubound;
1415 /* Given a LOOP, find the induction variable it is testing against in the exit
1416 condition. Return the induction variable if found, NULL otherwise. */
1419 find_induction_var_from_exit_cond (struct loop *loop)
1421 tree expr = get_loop_exit_condition (loop);
1424 if (expr == NULL_TREE)
1426 if (TREE_CODE (expr) != COND_EXPR)
1428 test = TREE_OPERAND (expr, 0);
1429 if (!COMPARISON_CLASS_P (test))
1432 /* Find the side that is invariant in this loop. The ivar must be the other
1432 side.  */
1435 if (expr_invariant_in_loop_p (loop, TREE_OPERAND (test, 0)))
1436 ivarop = TREE_OPERAND (test, 1);
1437 else if (expr_invariant_in_loop_p (loop, TREE_OPERAND (test, 1)))
1438 ivarop = TREE_OPERAND (test, 0);
/* The candidate must be an SSA name to qualify as an induction variable.  */
1442 if (TREE_CODE (ivarop) != SSA_NAME)
/* Declare a heap-allocated VEC of lambda_loop pointers, used by
   gcc_loopnest_to_lambda_loopnest to collect converted loops.  */
1447 DEF_VEC_P(lambda_loop);
1448 DEF_VEC_ALLOC_P(lambda_loop,heap);
1450 /* Generate a lambda loopnest from a gcc loopnest LOOP_NEST.
1451 Return the new loop nest.
1452 INDUCTIONVARS is a pointer to an array of induction variables for the
1453 loopnest that will be filled in during this process.
1454 INVARIANTS is a pointer to an array of invariants that will be filled in
1455 during this process. */
1458 gcc_loopnest_to_lambda_loopnest (struct loop *loop_nest,
1459 VEC(tree,heap) **inductionvars,
1460 VEC(tree,heap) **invariants)
1462 lambda_loopnest ret = NULL;
1463 struct loop *temp = loop_nest;
1464 int depth = depth_of_nest (loop_nest);
1466 VEC(lambda_loop,heap) *loops = NULL;
1467 VEC(tree,heap) *uboundvars = NULL;
1468 VEC(tree,heap) *lboundvars = NULL;
1469 VEC(int,heap) *steps = NULL;
1470 lambda_loop newloop;
1471 tree inductionvar = NULL;
1472 bool perfect_nest = perfect_nest_p (loop_nest);
/* Give up early if the nest is imperfect and cannot be made perfect.  */
1474 if (!perfect_nest && !can_convert_to_perfect_nest (loop_nest))
/* Convert each loop of the nest, collecting its bounds and step for a
   possible later perfect_nestify.  */
1479 newloop = gcc_loop_to_lambda_loop (temp, depth, invariants,
1480 &inductionvar, *inductionvars,
1481 &lboundvars, &uboundvars,
1486 VEC_safe_push (tree, heap, *inductionvars, inductionvar);
1487 VEC_safe_push (lambda_loop, heap, loops, newloop);
/* If the nest was imperfect, try to rewrite it into a perfect nest using
   the collected bounds and steps.  */
1493 if (!perfect_nestify (loop_nest, lboundvars, uboundvars, steps,
1498 "Not a perfect loop nest and couldn't convert to one.\n");
1503 "Successfully converted loop nest to perfect loop nest.\n");
1506 ret = lambda_loopnest_new (depth, 2 * depth);
1508 for (i = 0; VEC_iterate (lambda_loop, loops, i, newloop); i++)
1509 LN_LOOPS (ret)[i] = newloop;
/* Free the temporary vectors; the lambda loops themselves now live in RET.  */
1512 VEC_free (lambda_loop, heap, loops);
1513 VEC_free (tree, heap, uboundvars);
1514 VEC_free (tree, heap, lboundvars);
1515 VEC_free (int, heap, steps);
1520 /* Convert a lambda body vector LBV to a gcc tree, and return the new tree.
1521 STMTS_TO_INSERT is a pointer to a tree where the statements we need to be
1522 inserted for us are stored. INDUCTION_VARS is the array of induction
1523 variables for the loop this LBV is from. TYPE is the tree type to use for
1524 the variables and trees involved. */
1527 lbv_to_gcc_expression (lambda_body_vector lbv,
1528 tree type, VEC(tree,heap) *induction_vars,
1529 tree *stmts_to_insert)
/* Build sum(coeff[i] * induction_vars[i]) as a tree expression.  */
1533 tree expr = build_linear_expr (type, LBV_COEFFICIENTS (lbv), induction_vars);
1535 k = LBV_DENOMINATOR (lbv);
1536 gcc_assert (k != 0);
/* Divide by the vector's denominator, rounding up.  */
1538 expr = fold_build2 (CEIL_DIV_EXPR, type, expr, build_int_cst (type, k));
/* Gimplify into a temporary; the statements that compute it are returned
   through STMTS_TO_INSERT for the caller to insert.  */
1540 resvar = create_tmp_var (type, "lbvtmp");
1541 add_referenced_var (resvar);
1542 return force_gimple_operand (fold (expr), stmts_to_insert, true, resvar);
1545 /* Convert a linear expression from coefficient and constant form to a
1545 gcc tree.
1547 Return the tree that represents the final value of the expression.
1548 LLE is the linear expression to convert.
1549 OFFSET is the linear offset to apply to the expression.
1550 TYPE is the tree type to use for the variables and math.
1551 INDUCTION_VARS is a vector of induction variables for the loops.
1552 INVARIANTS is a vector of the loop nest invariants.
1553 WRAP specifies what tree code to wrap the results in, if there is more than
1554 one (it is either MAX_EXPR, or MIN_EXPR).
1555 STMTS_TO_INSERT Is a pointer to the statement list we fill in with
1556 statements that need to be inserted for the linear expression. */
1559 lle_to_gcc_expression (lambda_linear_expression lle,
1560 lambda_linear_expression offset,
1562 VEC(tree,heap) *induction_vars,
1563 VEC(tree,heap) *invariants,
1564 enum tree_code wrap, tree *stmts_to_insert)
1568 tree expr = NULL_TREE;
1569 VEC(tree,heap) *results = NULL;
1571 gcc_assert (wrap == MAX_EXPR || wrap == MIN_EXPR);
1573 /* Build up the linear expressions.  An LLE may be a chain (LLE_NEXT);
   each element yields one candidate result.  */
1574 for (; lle != NULL; lle = LLE_NEXT (lle))
1576 expr = build_linear_expr (type, LLE_COEFFICIENTS (lle), induction_vars);
1577 expr = fold_build2 (PLUS_EXPR, type, expr,
1578 build_linear_expr (type,
1579 LLE_INVARIANT_COEFFICIENTS (lle),
/* Add the constant term of the expression and of the offset.  */
1582 k = LLE_CONSTANT (lle);
1584 expr = fold_build2 (PLUS_EXPR, type, expr, build_int_cst (type, k));
1586 k = LLE_CONSTANT (offset);
1588 expr = fold_build2 (PLUS_EXPR, type, expr, build_int_cst (type, k));
/* Divide by the denominator: round up for lower bounds (MAX_EXPR),
   down for upper bounds (MIN_EXPR).  */
1590 k = LLE_DENOMINATOR (lle);
1592 expr = fold_build2 (wrap == MAX_EXPR ? CEIL_DIV_EXPR : FLOOR_DIV_EXPR,
1593 type, expr, build_int_cst (type, k));
1596 VEC_safe_push (tree, heap, results, expr);
1601 /* We may need to wrap the results in a MAX_EXPR or MIN_EXPR. */
1602 if (VEC_length (tree, results) > 1)
1607 expr = VEC_index (tree, results, 0);
1608 for (i = 1; VEC_iterate (tree, results, i, op); i++)
1609 expr = fold_build2 (wrap, type, expr, op);
1612 VEC_free (tree, heap, results);
/* Gimplify into a fresh temporary; setup statements are handed back
   through STMTS_TO_INSERT.  */
1614 resvar = create_tmp_var (type, "lletmp");
1615 add_referenced_var (resvar);
1616 return force_gimple_operand (fold (expr), stmts_to_insert, true, resvar);
1619 /* Remove the induction variable defined at IV_STMT.  Recursively removes
   PHI arguments whose only remaining use is the IV statement itself.  */
1622 remove_iv (tree iv_stmt)
1624 if (TREE_CODE (iv_stmt) == PHI_NODE)
/* For a PHI, first clean up each SSA-name argument that is otherwise
   unused, then remove the PHI node itself.  */
1628 for (i = 0; i < PHI_NUM_ARGS (iv_stmt); i++)
1631 imm_use_iterator imm_iter;
1632 tree arg = PHI_ARG_DEF (iv_stmt, i);
1635 if (TREE_CODE (arg) != SSA_NAME)
/* Scan the immediate uses of ARG for any use outside IV_STMT.  */
1638 FOR_EACH_IMM_USE_STMT (stmt, imm_iter, arg)
1639 if (stmt != iv_stmt)
1643 remove_iv (SSA_NAME_DEF_STMT (arg));
1646 remove_phi_node (iv_stmt, NULL_TREE, true);
/* Non-PHI statement: delete it and release its definitions.  */
1650 block_stmt_iterator bsi = bsi_for_stmt (iv_stmt);
1652 bsi_remove (&bsi, true);
1653 release_defs (iv_stmt);
1658 /* Transform a lambda loopnest NEW_LOOPNEST, which had TRANSFORM applied to
1659 it, back into gcc code. This changes the
1660 loops, their induction variables, and their bodies, so that they
1661 match the transformed loopnest.
1662 OLD_LOOPNEST is the loopnest before we've replaced it with the new
1662 loopnest.
1664 OLD_IVS is a vector of induction variables from the old loopnest.
1665 INVARIANTS is a vector of loop invariants from the old loopnest.
1666 NEW_LOOPNEST is the new lambda loopnest to replace OLD_LOOPNEST with.
1667 TRANSFORM is the matrix transform that was applied to OLD_LOOPNEST to get
1667 NEW_LOOPNEST.  */
1671 lambda_loopnest_to_gcc_loopnest (struct loop *old_loopnest,
1672 VEC(tree,heap) *old_ivs,
1673 VEC(tree,heap) *invariants,
1674 lambda_loopnest new_loopnest,
1675 lambda_trans_matrix transform)
1680 VEC(tree,heap) *new_ivs = NULL;
1683 block_stmt_iterator bsi;
/* Old-IV uses in loop bodies are rewritten with the INVERSE transform,
   mapping new iteration coordinates back to old IV values.  */
1687 transform = lambda_trans_matrix_inverse (transform);
1688 fprintf (dump_file, "Inverse of transformation matrix:\n");
1689 print_lambda_trans_matrix (dump_file, transform);
1691 depth = depth_of_nest (old_loopnest);
1692 temp = old_loopnest;
/* Per-loop pass: rebuild each loop's IV, bounds and exit test from the
   corresponding lambda loop in NEW_LOOPNEST.  */
1696 lambda_loop newloop;
1699 tree ivvar, ivvarinced, exitcond, stmts;
1700 enum tree_code testtype;
1701 tree newupperbound, newlowerbound;
1702 lambda_linear_expression offset;
1707 oldiv = VEC_index (tree, old_ivs, i);
1708 type = TREE_TYPE (oldiv);
1710 /* First, build the new induction variable temporary */
1712 ivvar = create_tmp_var (type, "lnivtmp");
1713 add_referenced_var (ivvar);
1715 VEC_safe_push (tree, heap, new_ivs, ivvar);
1717 newloop = LN_LOOPS (new_loopnest)[i];
1719 /* Linear offset is a bit tricky to handle. Punt on the unhandled
1719 cases: only denominator 1 with all-zero coefficients is supported.  */
1721 offset = LL_LINEAR_OFFSET (newloop);
1723 gcc_assert (LLE_DENOMINATOR (offset) == 1 &&
1724 lambda_vector_zerop (LLE_COEFFICIENTS (offset), depth));
1726 /* Now build the new lower bounds, and insert the statements
1727 necessary to generate it on the loop preheader. */
1728 newlowerbound = lle_to_gcc_expression (LL_LOWER_BOUND (newloop),
1729 LL_LINEAR_OFFSET (newloop),
1732 invariants, MAX_EXPR, &stmts);
1736 bsi_insert_on_edge (loop_preheader_edge (temp), stmts);
1737 bsi_commit_edge_inserts ();
1739 /* Build the new upper bound and insert its statements in the
1740 basic block of the exit condition.  */
1741 newupperbound = lle_to_gcc_expression (LL_UPPER_BOUND (newloop),
1742 LL_LINEAR_OFFSET (newloop),
1745 invariants, MIN_EXPR, &stmts);
1746 exit = single_exit (temp);
1747 exitcond = get_loop_exit_condition (temp);
1748 bb = bb_for_stmt (exitcond);
1749 bsi = bsi_after_labels (bb);
1751 bsi_insert_before (&bsi, stmts, BSI_NEW_STMT);
1753 /* Create the new iv. */
1755 standard_iv_increment_position (temp, &bsi, &insert_after);
1756 create_iv (newlowerbound,
1757 build_int_cst (type, LL_STEP (newloop)),
1758 ivvar, temp, &bsi, insert_after, &ivvar,
1761 /* Unfortunately, the incremented ivvar that create_iv inserted may not
1762 dominate the block containing the exit condition.
1763 So we simply create our own incremented iv to use in the new exit
1764 test, and let redundancy elimination sort it out. */
1765 inc_stmt = build2 (PLUS_EXPR, type,
1766 ivvar, build_int_cst (type, LL_STEP (newloop)));
1767 inc_stmt = build_gimple_modify_stmt (SSA_NAME_VAR (ivvar), inc_stmt);
1768 ivvarinced = make_ssa_name (SSA_NAME_VAR (ivvar), inc_stmt);
1769 GIMPLE_STMT_OPERAND (inc_stmt, 0) = ivvarinced;
1770 bsi = bsi_for_stmt (exitcond);
1771 bsi_insert_before (&bsi, inc_stmt, BSI_SAME_STMT);
1773 /* Replace the exit condition with the new upper bound
1773 comparison.  */
1776 testtype = LL_STEP (newloop) >= 0 ? LE_EXPR : GE_EXPR;
1778 /* We want to build a conditional where true means exit the loop, and
1779 false means continue the loop.
1780 So swap the testtype if this isn't the way things are.*/
1782 if (exit->flags & EDGE_FALSE_VALUE)
1783 testtype = swap_tree_comparison (testtype);
1785 COND_EXPR_COND (exitcond) = build2 (testtype,
1787 newupperbound, ivvarinced);
1788 update_stmt (exitcond);
1789 VEC_replace (tree, new_ivs, i, ivvar);
1795 /* Rewrite uses of the old ivs so that they are now specified in terms of
1795 the new ivs.  */
1798 for (i = 0; VEC_iterate (tree, old_ivs, i, oldiv); i++)
1800 imm_use_iterator imm_iter;
1801 use_operand_p use_p;
1803 tree oldiv_stmt = SSA_NAME_DEF_STMT (oldiv);
/* The defining value is the PHI result for PHI nodes, otherwise the
   single SSA definition of the statement.  */
1806 if (TREE_CODE (oldiv_stmt) == PHI_NODE)
1807 oldiv_def = PHI_RESULT (oldiv_stmt);
1809 oldiv_def = SINGLE_SSA_TREE_OPERAND (oldiv_stmt, SSA_OP_DEF);
1810 gcc_assert (oldiv_def != NULL_TREE);
1812 FOR_EACH_IMM_USE_STMT (stmt, imm_iter, oldiv_def)
1815 lambda_body_vector lbv, newlbv;
1817 gcc_assert (TREE_CODE (stmt) != PHI_NODE);
1819 /* Compute the new expression for the induction
1819 variable: unit vector in dimension I, mapped through the inverse
1819 transform, then converted to a gcc expression.  */
1821 depth = VEC_length (tree, new_ivs);
1822 lbv = lambda_body_vector_new (depth);
1823 LBV_COEFFICIENTS (lbv)[i] = 1;
1825 newlbv = lambda_body_vector_compute_new (transform, lbv);
1827 newiv = lbv_to_gcc_expression (newlbv, TREE_TYPE (oldiv),
1831 bsi = bsi_for_stmt (stmt);
1832 bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
/* Replace every use of the old IV in this statement with NEWIV.  */
1835 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
1836 propagate_value (use_p, newiv);
1840 /* Remove the now unused induction variable. */
1841 remove_iv (oldiv_stmt);
1843 VEC_free (tree, heap, new_ivs);
1846 /* Return TRUE if this is not interesting statement from the perspective of
1847 determining if we have a perfect loop nest. */
1850 not_interesting_stmt (tree stmt)
1852 /* Note that COND_EXPR's aren't interesting because if they were exiting the
1853 loop, we would have already failed the number of exits tests. */
1854 if (TREE_CODE (stmt) == LABEL_EXPR
1855 || TREE_CODE (stmt) == GOTO_EXPR
1856 || TREE_CODE (stmt) == COND_EXPR)
1861 /* Return TRUE if PHI uses DEF for its in-the-loop edge for LOOP. */
1864 phi_loop_edge_uses_def (struct loop *loop, tree phi, tree def)
/* Check only the PHI arguments whose incoming edge originates inside LOOP.  */
1867 for (i = 0; i < PHI_NUM_ARGS (phi); i++)
1868 if (flow_bb_inside_loop_p (loop, PHI_ARG_EDGE (phi, i)->src))
1869 if (PHI_ARG_DEF (phi, i) == def)
1874 /* Return TRUE if STMT is a use of PHI_RESULT. */
1877 stmt_uses_phi_result (tree stmt, tree phi_result)
/* SINGLE_SSA_TREE_OPERAND yields the sole SSA use, or NULL if STMT has
   more than one use operand.  */
1879 tree use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1881 /* This is conservatively true, because we only want SIMPLE bumpers
1882 of the form x +- constant for our pass. */
1883 return (use == phi_result);
1886 /* STMT is a bumper stmt for LOOP if the version it defines is used in the
1887 in-loop-edge in a phi node, and the operand it uses is the result of that
1887 phi node, e.g.:  i_29 = i_3 + 1;
1890 i_3 = PHI (0, i_29); */
1893 stmt_is_bumper_for_loop (struct loop *loop, tree stmt)
1897 imm_use_iterator iter;
1898 use_operand_p use_p;
1900 def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF);
/* Look for a PHI node that receives DEF on its in-loop edge and whose
   result feeds back into STMT -- the classic IV increment cycle.  */
1904 FOR_EACH_IMM_USE_FAST (use_p, iter, def)
1906 use = USE_STMT (use_p);
1907 if (TREE_CODE (use) == PHI_NODE)
1909 if (phi_loop_edge_uses_def (loop, use, def))
1910 if (stmt_uses_phi_result (stmt, PHI_RESULT (use)))
1918 /* Return true if LOOP is a perfect loop nest.
1919 Perfect loop nests are those loop nests where all code occurs in the
1920 innermost loop body.
1921 If S is a program statement, then
1921 a nest with a statement between the loop headers
1930 is not a perfect loop nest because of S1.
1930 A nest with all statements in the innermost body
1938 is a perfect loop nest.
1940 Since we don't have high level loops anymore, we basically have to walk our
1941 statements and ignore those that are there because the loop needs them (IE
1942 the induction variable increment, and jump back to the top of the loop). */
1945 perfect_nest_p (struct loop *loop)
1953 bbs = get_loop_body (loop);
1954 exit_cond = get_loop_exit_condition (loop);
/* Examine only blocks belonging directly to LOOP (not to inner loops);
   any statement other than loop plumbing makes the nest imperfect.  */
1955 for (i = 0; i < loop->num_nodes; i++)
1957 if (bbs[i]->loop_father == loop)
1959 block_stmt_iterator bsi;
1960 for (bsi = bsi_start (bbs[i]); !bsi_end_p (bsi); bsi_next (&bsi))
1962 tree stmt = bsi_stmt (bsi);
1963 if (stmt == exit_cond
1964 || not_interesting_stmt (stmt)
1965 || stmt_is_bumper_for_loop (loop, stmt))
1973 /* See if the inner loops are perfectly nested as well. */
1975 return perfect_nest_p (loop->inner);
1979 /* Replace the USES of X in STMT, or uses with the same step as X with Y.
1980 YINIT is the initial value of Y, REPLACEMENTS is a hash table to
1981 avoid creating duplicate temporaries and FIRSTBSI is statement
1982 iterator where new temporaries should be inserted at the beginning
1983 of body basic block. */
1986 replace_uses_equiv_to_x_with_y (struct loop *loop, tree stmt, tree x,
1987 int xstep, tree y, tree yinit,
1988 htab_t replacements,
1989 block_stmt_iterator *firstbsi)
1992 use_operand_p use_p;
1994 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
1996 tree use = USE_FROM_PTR (use_p);
1997 tree step = NULL_TREE;
1998 tree scev, init, val, var, setstmt;
1999 struct tree_map *h, in;
2002 /* Replace uses of X with Y right away. */
/* For other uses, check via scalar evolution whether USE advances with
   the same integer step XSTEP inside LOOP.  */
2009 scev = instantiate_parameters (loop,
2010 analyze_scalar_evolution (loop, use));
2012 if (scev == NULL || scev == chrec_dont_know)
2015 step = evolution_part_in_loop_num (scev, loop->num);
2017 || step == chrec_dont_know
2018 || TREE_CODE (step) != INTEGER_CST
2019 || int_cst_value (step) != xstep)
2022 /* Use REPLACEMENTS hash table to cache already created
2022 temporaries, keyed by the use's pointer identity.  */
2024 in.hash = htab_hash_pointer (use);
2026 h = (struct tree_map *) htab_find_with_hash (replacements, &in, in.hash);
2029 SET_USE (use_p, h->to);
2033 /* USE which has the same step as X should be replaced
2034 with a temporary set to Y + YINIT - INIT. */
2035 init = initial_condition_in_loop_num (scev, loop->num);
2036 gcc_assert (init != NULL && init != chrec_dont_know);
2037 if (TREE_TYPE (use) == TREE_TYPE (y))
2039 val = fold_build2 (MINUS_EXPR, TREE_TYPE (y), init, yinit);
2040 val = fold_build2 (PLUS_EXPR, TREE_TYPE (y), y, val);
2043 /* If X has the same type as USE, the same step
2044 and same initial value, it can be replaced by Y. */
/* Mismatched types: compute (Y - YINIT) in Y's type, convert to USE's
   type, then add INIT.  */
2051 val = fold_build2 (MINUS_EXPR, TREE_TYPE (y), y, yinit);
2052 val = fold_convert (TREE_TYPE (use), val);
2053 val = fold_build2 (PLUS_EXPR, TREE_TYPE (use), val, init);
2056 /* Create a temporary variable and insert it at the beginning
2057 of the loop body basic block, right after the PHI node
2057 (inserted at *FIRSTBSI).  */
2059 var = create_tmp_var (TREE_TYPE (use), "perfecttmp");
2060 add_referenced_var (var);
2061 val = force_gimple_operand_bsi (firstbsi, val, false, NULL,
2062 true, BSI_SAME_STMT);
2063 setstmt = build_gimple_modify_stmt (var, val);
2064 var = make_ssa_name (var, setstmt);
2065 GIMPLE_STMT_OPERAND (setstmt, 0) = var;
2066 bsi_insert_before (firstbsi, setstmt, BSI_SAME_STMT);
2067 update_stmt (setstmt);
2068 SET_USE (use_p, var);
/* Record the replacement so future uses of the same value reuse VAR.  */
2069 h = GGC_NEW (struct tree_map);
2073 loc = htab_find_slot_with_hash (replacements, h, in.hash, INSERT);
2074 gcc_assert ((*(struct tree_map **)loc) == NULL);
2075 *(struct tree_map **) loc = h;
2079 /* Return true if STMT is an exit PHI for LOOP: a single-argument PHI
   node located in the loop's single exit destination block.  */
2082 exit_phi_for_loop_p (struct loop *loop, tree stmt)
2085 if (TREE_CODE (stmt) != PHI_NODE
2086 || PHI_NUM_ARGS (stmt) != 1
2087 || bb_for_stmt (stmt) != single_exit (loop)->dest)
2093 /* Return true if STMT can be put back into the loop INNER, by
2094 copying it to the beginning of that loop and changing the uses. */
2097 can_put_in_inner_loop (struct loop *inner, tree stmt)
2099 imm_use_iterator imm_iter;
2100 use_operand_p use_p;
2102 gcc_assert (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT);
/* The statement must have no virtual operands (no memory effects) and its
   RHS must be invariant in INNER, so copying it inside is safe.  */
2103 if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS)
2104 || !expr_invariant_in_loop_p (inner, GIMPLE_STMT_OPERAND (stmt, 1)))
/* Every use of the defined value must be inside INNER (or be INNER's
   exit PHI), otherwise the moved definition would not dominate it.  */
2107 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, GIMPLE_STMT_OPERAND (stmt, 0))
2109 if (!exit_phi_for_loop_p (inner, USE_STMT (use_p)))
2111 basic_block immbb = bb_for_stmt (USE_STMT (use_p));
2113 if (!flow_bb_inside_loop_p (inner, immbb))
2120 /* Return true if STMT can be put *after* the inner loop of LOOP. */
2122 can_put_after_inner_loop (struct loop *loop, tree stmt)
2124 imm_use_iterator imm_iter;
2125 use_operand_p use_p;
/* Statements with virtual operands (memory effects) cannot be moved.  */
2127 if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS))
/* Each use must either be LOOP's exit PHI, sit in a block not dominated
   by the inner loop's header, or be movable into the inner loop.  */
2130 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, GIMPLE_STMT_OPERAND (stmt, 0))
2132 if (!exit_phi_for_loop_p (loop, USE_STMT (use_p)))
2134 basic_block immbb = bb_for_stmt (USE_STMT (use_p));
2136 if (!dominated_by_p (CDI_DOMINATORS,
2138 loop->inner->header)
2139 && !can_put_in_inner_loop (loop->inner, stmt))
2148 /* Return TRUE if LOOP is an imperfect nest that we can convert to a
2149 perfect one. At the moment, we only handle imperfect nests of
2150 depth 2, where all of the statements occur after the inner loop. */
2153 can_convert_to_perfect_nest (struct loop *loop)
2156 tree exit_condition, phi;
2158 block_stmt_iterator bsi;
2159 basic_block exitdest;
2161 /* Can't handle triply nested+ loops yet. */
2162 if (!loop->inner || loop->inner->inner)
2165 bbs = get_loop_body (loop);
2166 exit_condition = get_loop_exit_condition (loop);
/* Walk every statement that belongs directly to the outer loop; each one
   must be loop plumbing, movable, or located entirely after the inner
   loop.  */
2167 for (i = 0; i < loop->num_nodes; i++)
2169 if (bbs[i]->loop_father == loop)
2171 for (bsi = bsi_start (bbs[i]); !bsi_end_p (bsi); bsi_next (&bsi))
2173 tree stmt = bsi_stmt (bsi);
2175 if (stmt == exit_condition
2176 || not_interesting_stmt (stmt)
2177 || stmt_is_bumper_for_loop (loop, stmt))
2180 /* If this is a scalar operation that can be put back
2181 into the inner loop, or after the inner loop, through
2182 copying, then do so. This works on the theory that
2183 any amount of scalar code we have to reduplicate
2184 into or after the loops is less expensive that the
2185 win we get from rearranging the memory walk
2186 the loop is doing so that it has better
2186 cache behavior.  */
2188 if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT)
2190 use_operand_p use_a, use_b;
2191 imm_use_iterator imm_iter;
2192 ssa_op_iter op_iter, op_iter1;
2193 tree op0 = GIMPLE_STMT_OPERAND (stmt, 0);
2194 tree scev = instantiate_parameters
2195 (loop, analyze_scalar_evolution (loop, op0));
2197 /* If the IV is simple, it can be duplicated. */
2198 if (!automatically_generated_chrec_p (scev))
2200 tree step = evolution_part_in_loop_num (scev, loop->num);
2201 if (step && step != chrec_dont_know
2202 && TREE_CODE (step) == INTEGER_CST)
2206 /* The statement should not define a variable used
2207 in the inner loop. */
2208 if (TREE_CODE (op0) == SSA_NAME)
2209 FOR_EACH_IMM_USE_FAST (use_a, imm_iter, op0)
2210 if (bb_for_stmt (USE_STMT (use_a))->loop_father
2214 FOR_EACH_SSA_USE_OPERAND (use_a, stmt, op_iter, SSA_OP_USE)
2216 tree node, op = USE_FROM_PTR (use_a);
2218 /* The variables should not be used in both loops. */
2219 FOR_EACH_IMM_USE_FAST (use_b, imm_iter, op)
2220 if (bb_for_stmt (USE_STMT (use_b))->loop_father
2224 /* The statement should not use the value of a
2225 scalar that was modified in the loop. */
2226 node = SSA_NAME_DEF_STMT (op);
2227 if (TREE_CODE (node) == PHI_NODE)
2228 FOR_EACH_PHI_ARG (use_b, node, op_iter1, SSA_OP_USE)
2230 tree arg = USE_FROM_PTR (use_b);
2232 if (TREE_CODE (arg) == SSA_NAME)
2234 tree arg_stmt = SSA_NAME_DEF_STMT (arg);
2236 if (bb_for_stmt (arg_stmt)
2237 && (bb_for_stmt (arg_stmt)->loop_father
2244 if (can_put_in_inner_loop (loop->inner, stmt)
2245 || can_put_after_inner_loop (loop, stmt))
2249 /* Otherwise, if the bb of a statement we care about isn't
2250 dominated by the header of the inner loop, then we can't
2251 handle this case right now. This test ensures that the
2252 statement comes completely *after* the inner loop. */
2253 if (!dominated_by_p (CDI_DOMINATORS,
2255 loop->inner->header))
2261 /* We also need to make sure the loop exit only has simple copy phis in it,
2262 otherwise we don't know how to transform it into a perfect nest right
2262 now.  */
2264 exitdest = single_exit (loop)->dest;
2266 for (phi = phi_nodes (exitdest); phi; phi = PHI_CHAIN (phi))
2267 if (PHI_NUM_ARGS (phi) != 1)
2278 /* Transform the loop nest into a perfect nest, if possible.
2279 LOOP is the loop nest to transform into a perfect nest
2280 LBOUNDS are the lower bounds for the loops to transform
2281 UBOUNDS are the upper bounds for the loops to transform
2282 STEPS is the STEPS for the loops to transform.
2283 LOOPIVS is the induction variables for the loops to transform.
2285 Basically, for the case of
2287 FOR (i = 0; i < 50; i++)
2289 FOR (j =0; j < 50; j++)
2296 This function will transform it into a perfect loop nest by splitting the
2297 outer loop into two loops, like so:
2299 FOR (i = 0; i < 50; i++)
2301 FOR (j = 0; j < 50; j++)
2307 FOR (i = 0; i < 50; i ++)
2312 Return FALSE if we can't make this loop into a perfect nest. */
2315 perfect_nestify (struct loop *loop,
2316 VEC(tree,heap) *lbounds,
2317 VEC(tree,heap) *ubounds,
2318 VEC(int,heap) *steps,
2319 VEC(tree,heap) *loopivs)
2322 tree exit_condition;
2324 basic_block preheaderbb, headerbb, bodybb, latchbb, olddest;
2326 block_stmt_iterator bsi, firstbsi;
2329 struct loop *newloop;
2333 tree oldivvar, ivvar, ivvarinced;
2334 VEC(tree,heap) *phis = NULL;
2335 htab_t replacements = NULL;
2337 /* Create the new loop. */
2338 olddest = single_exit (loop)->dest;
/* PREHEADERBB is inserted on what was the single exit edge of LOOP; it
   will serve as the preheader of the new, split-off loop built below.  */
2339 preheaderbb = split_edge (single_exit (loop));
2340 headerbb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
2342 /* Push the exit phi nodes that we are moving. */
2343 for (phi = phi_nodes (olddest); phi; phi = PHI_CHAIN (phi))
/* Two slots per phi: the result first, then its single argument.  */
2345 VEC_reserve (tree, heap, phis, 2);
2346 VEC_quick_push (tree, phis, PHI_RESULT (phi));
2347 VEC_quick_push (tree, phis, PHI_ARG_DEF (phi, 0));
2349 e = redirect_edge_and_branch (single_succ_edge (preheaderbb), headerbb);
2351 /* Remove the exit phis from the old basic block. */
2352 while (phi_nodes (olddest) != NULL)
2353 remove_phi_node (phi_nodes (olddest), NULL, false);
2355 /* and add them back to the new basic block. */
2356 while (VEC_length (tree, phis) != 0)
/* Pairs were pushed as (result, argument), so they pop off in reverse
   order: the argument (DEF) first, then the result (PHINAME).  */
2360 def = VEC_pop (tree, phis);
2361 phiname = VEC_pop (tree, phis);
2362 phi = create_phi_node (phiname, preheaderbb);
2363 add_phi_arg (phi, def, single_pred_edge (preheaderbb));
2365 flush_pending_stmts (e);
2366 VEC_free (tree, heap, phis);
/* Build the CFG skeleton of the new loop: header -> body -> latch ->
   header, where the body exits to OLDDEST on the FALSE edge of the
   exit test and continues to the latch on the TRUE edge.  */
2368 bodybb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
2369 latchbb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
2370 make_edge (headerbb, bodybb, EDGE_FALLTHRU);
2371 cond_stmt = build3 (COND_EXPR, void_type_node,
2372 build2 (NE_EXPR, boolean_type_node,
2375 NULL_TREE, NULL_TREE);
2376 bsi = bsi_start (bodybb);
2377 bsi_insert_after (&bsi, cond_stmt, BSI_NEW_STMT);
2378 e = make_edge (bodybb, olddest, EDGE_FALSE_VALUE);
2379 make_edge (bodybb, latchbb, EDGE_TRUE_VALUE);
2380 make_edge (latchbb, headerbb, EDGE_FALLTHRU);
2382 /* Update the loop structures. */
2383 newloop = duplicate_loop (loop, olddest->loop_father);
2384 newloop->header = headerbb;
2385 newloop->latch = latchbb;
2386 add_bb_to_loop (latchbb, newloop);
2387 add_bb_to_loop (bodybb, newloop);
2388 add_bb_to_loop (headerbb, newloop);
2389 set_immediate_dominator (CDI_DOMINATORS, bodybb, headerbb);
2390 set_immediate_dominator (CDI_DOMINATORS, headerbb, preheaderbb);
2391 set_immediate_dominator (CDI_DOMINATORS, preheaderbb,
2392 single_exit (loop)->src);
2393 set_immediate_dominator (CDI_DOMINATORS, latchbb, bodybb);
2394 set_immediate_dominator (CDI_DOMINATORS, olddest,
2395 recompute_dominator (CDI_DOMINATORS, olddest));
2396 /* Create the new iv. */
/* The new iv starts at the outermost lower bound (LBOUNDS[0]) and steps
   by the outermost step (STEPS[0]), mirroring the original outer loop.  */
2397 oldivvar = VEC_index (tree, loopivs, 0);
2398 ivvar = create_tmp_var (TREE_TYPE (oldivvar), "perfectiv");
2399 add_referenced_var (ivvar);
2400 standard_iv_increment_position (newloop, &bsi, &insert_after);
2401 create_iv (VEC_index (tree, lbounds, 0),
2402 build_int_cst (TREE_TYPE (oldivvar), VEC_index (int, steps, 0)),
2403 ivvar, newloop, &bsi, insert_after, &ivvar, &ivvarinced);
2405 /* Create the new upper bound. This may be not just a variable, so we copy
2406 it to one just in case. */
2408 exit_condition = get_loop_exit_condition (newloop);
2409 uboundvar = create_tmp_var (integer_type_node, "uboundvar");
2410 add_referenced_var (uboundvar);
2411 stmt = build_gimple_modify_stmt (uboundvar, VEC_index (tree, ubounds, 0));
2412 uboundvar = make_ssa_name (uboundvar, stmt);
2413 GIMPLE_STMT_OPERAND (stmt, 0) = uboundvar;
2416 bsi_insert_after (&bsi, stmt, BSI_SAME_STMT);
2418 bsi_insert_before (&bsi, stmt, BSI_SAME_STMT);
/* Rewrite the new loop's exit test to use the copied bound.
   NOTE(review): the GE_EXPR operands are not visible here -- presumably
   the incremented iv (IVVARINCED) vs. UBOUNDVAR; confirm in the full
   source.  */
2420 COND_EXPR_COND (exit_condition) = build2 (GE_EXPR,
2424 update_stmt (exit_condition);
/* Hash table handed to replace_uses_equiv_to_x_with_y below; keyed by
   tree_map_hash.  */
2425 replacements = htab_create_ggc (20, tree_map_hash,
2427 bbs = get_loop_body_in_dom_order (loop);
2428 /* Now move the statements, and replace the induction variable in the moved
2429 statements with the correct loop induction variable. */
2430 oldivvar = VEC_index (tree, loopivs, 0);
2431 firstbsi = bsi_start (bodybb);
/* Walk the old loop body in reverse dominator order.  */
2432 for (i = loop->num_nodes - 1; i >= 0 ; i--)
2434 block_stmt_iterator tobsi = bsi_last (bodybb);
2435 if (bbs[i]->loop_father == loop)
2437 /* If this is true, we are *before* the inner loop.
2438 If this isn't true, we are *after* it.
2440 The only time can_convert_to_perfect_nest returns true when we
2441 have statements before the inner loop is if they can be moved
2442 into the inner loop.
2444 The only time can_convert_to_perfect_nest returns true when we
2445 have statements after the inner loop is if they can be moved into
2446 the new split loop. */
/* BBS[I] dominates the inner loop's header, so its statements run
   before the inner loop; sink them to just after the inner header's
   labels.  */
2448 if (dominated_by_p (CDI_DOMINATORS, loop->inner->header, bbs[i]))
2450 block_stmt_iterator header_bsi
2451 = bsi_after_labels (loop->inner->header);
2453 for (bsi = bsi_start (bbs[i]); !bsi_end_p (bsi);)
2455 tree stmt = bsi_stmt (bsi);
/* The exit test, uninteresting statements, and the iv bump stay
   where they are.  */
2457 if (stmt == exit_condition
2458 || not_interesting_stmt (stmt)
2459 || stmt_is_bumper_for_loop (loop, stmt))
2465 bsi_move_before (&bsi, &header_bsi);
2470 /* Note that the bsi only needs to be explicitly incremented
2471 when we don't move something, since it is automatically
2472 incremented when we do. */
2473 for (bsi = bsi_start (bbs[i]); !bsi_end_p (bsi);)
2476 tree n, stmt = bsi_stmt (bsi);
2478 if (stmt == exit_condition
2479 || not_interesting_stmt (stmt)
2480 || stmt_is_bumper_for_loop (loop, stmt))
/* Rewrite uses of the old outer iv in terms of the new iv
   before moving the statement into the split loop's body.  */
2486 replace_uses_equiv_to_x_with_y
2487 (loop, stmt, oldivvar, VEC_index (int, steps, 0), ivvar,
2488 VEC_index (tree, lbounds, 0), replacements, &firstbsi);
2490 bsi_move_before (&bsi, &tobsi);
2492 /* If the statement has any virtual operands, they may
2493 need to be rewired because the original loop may
2494 still reference them. */
2495 FOR_EACH_SSA_TREE_OPERAND (n, stmt, i, SSA_OP_ALL_VIRTUALS)
2496 mark_sym_for_renaming (SSA_NAME_VAR (n));
/* All movable statements have been relocated; LOOP should now form a
   perfect nest.  */
2504 htab_delete (replacements);
2505 return perfect_nest_p (loop);
2508 /* Return true if TRANS is a legal transformation matrix that respects
2509 the dependence vectors in DISTS and DIRS. The conservative answer is false.
2512 "Wolfe proves that a unimodular transformation represented by the
2513 matrix T is legal when applied to a loop nest with a set of
2514 lexicographically non-negative distance vectors RDG if and only if
2515 for each vector d in RDG, (T.d >= 0) is lexicographically positive.
2516 i.e.: if and only if it transforms the lexicographically positive
2517 distance vectors to lexicographically positive vectors. Note that
2518 a unimodular matrix must transform the zero vector (and only it) to
2519 the zero vector." S.Muchnick. */
2522 lambda_transform_legal_p (lambda_trans_matrix trans,
2524 VEC (ddr_p, heap) *dependence_relations)
2527 lambda_vector distres;
2528 struct data_dependence_relation *ddr;
2530 gcc_assert (LTM_COLSIZE (trans) == nb_loops
2531 && LTM_ROWSIZE (trans) == nb_loops);
2533 /* When there is an unknown relation in the dependence_relations, we
2534 know that it is no worth looking at this loop nest: give up. */
2535 ddr = VEC_index (ddr_p, dependence_relations, 0);
2538 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
2541 distres = lambda_vector_new (nb_loops);
2543 /* For each distance vector in the dependence graph. */
2544 for (i = 0; VEC_iterate (ddr_p, dependence_relations, i, ddr); i++)
2546 /* Don't care about relations for which we know that there is no
2547 dependence, nor about read-read (aka. output-dependences):
2548 these data accesses can happen in any order. */
2549 if (DDR_ARE_DEPENDENT (ddr) == chrec_known
2550 || (DR_IS_READ (DDR_A (ddr)) && DR_IS_READ (DDR_B (ddr))))
2553 /* Conservatively answer: "this transformation is not valid". */
2554 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
2557 /* If the dependence could not be captured by a distance vector,
2558 conservatively answer that the transform is not valid. */
2559 if (DDR_NUM_DIST_VECTS (ddr) == 0)
2562 /* Compute trans.dist_vect */
2563 for (j = 0; j < DDR_NUM_DIST_VECTS (ddr); j++)
2565 lambda_matrix_vector_mult (LTM_MATRIX (trans), nb_loops, nb_loops,
2566 DDR_DIST_VECT (ddr, j), distres);
2568 if (!lambda_vector_lexico_pos (distres, nb_loops))