/* Returns true iff A divides B. */
static inline bool
-tree_fold_divides_p (tree a, tree b)
+tree_fold_divides_p (const_tree a, const_tree b)
{
gcc_assert (TREE_CODE (a) == INTEGER_CST);
gcc_assert (TREE_CODE (b) == INTEGER_CST);
/* Expresses EXP as VAR + OFF, where OFF is a constant.  The type of OFF
will be ssizetype. */
-static void
+void
split_constant_offset (tree exp, tree *var, tree *off)
{
tree type = TREE_TYPE (exp), otype;
{
split_constant_offset (poffset, &poffset, &off1);
off0 = size_binop (PLUS_EXPR, off0, off1);
- base = fold_build2 (PLUS_EXPR, TREE_TYPE (base),
- base,
- fold_convert (TREE_TYPE (base), poffset));
+ if (POINTER_TYPE_P (TREE_TYPE (base)))
+ base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base),
+ base, fold_convert (sizetype, poffset));
+ else
+ base = fold_build2 (PLUS_EXPR, TREE_TYPE (base), base,
+ fold_convert (TREE_TYPE (base), poffset));
}
- *var = fold_convert (type, base);
+ var0 = fold_convert (type, base);
+
+ /* If variable-length types are involved, punt: otherwise casts
+ might be converted into ARRAY_REFs in gimplify_conversion, and
+ to compute the element size of such an ARRAY_REF, TYPE_SIZE_UNIT
+ -- which possibly no longer appears in the current GIMPLE --
+ might resurface.  This could perhaps be handled by running
+ if (TREE_CODE (var0) == NOP_EXPR
+ || TREE_CODE (var0) == CONVERT_EXPR)
+ {
+ gimplify_conversion (&var0);
+ // Attempt to fill in any within var0 found ARRAY_REF's
+ // element size from corresponding op embedded ARRAY_REF,
+ // if unsuccessful, just punt.
+ } */
+ while (POINTER_TYPE_P (type))
+ type = TREE_TYPE (type);
+ if (int_size_in_bytes (type) < 0)
+ break;
+
+ *var = var0;
*off = off0;
return;
}
+ case SSA_NAME:
+ {
+ tree def_stmt = SSA_NAME_DEF_STMT (exp);
+ if (TREE_CODE (def_stmt) == GIMPLE_MODIFY_STMT)
+ {
+ tree def_stmt_rhs = GIMPLE_STMT_OPERAND (def_stmt, 1);
+
+ if (!TREE_SIDE_EFFECTS (def_stmt_rhs)
+ && EXPR_P (def_stmt_rhs)
+ && !REFERENCE_CLASS_P (def_stmt_rhs)
+ && !get_call_expr_in (def_stmt_rhs))
+ {
+ split_constant_offset (def_stmt_rhs, &var0, &off0);
+ var0 = fold_convert (type, var0);
+ *var = var0;
+ *off = off0;
+ return;
+ }
+ }
+ break;
+ }
+
default:
break;
}
&& affine_function_constant_p (fn));
}
+/* Returns a signed integer type with the largest precision from TA
+   and TB; when the precisions are equal, the signed variant of TB is
+   returned.  Callers use this to pick a common signed type in which
+   to fold the difference of two access functions.  */
+
+static tree
+signed_type_for_types (tree ta, tree tb)
+{
+  if (TYPE_PRECISION (ta) > TYPE_PRECISION (tb))
+    return signed_type_for (ta);
+  else
+    return signed_type_for (tb);
+}
+
/* Applies operation OP on affine functions FNA and FNB, and returns the
result. */
ret = VEC_alloc (tree, heap, m);
for (i = 0; i < n; i++)
- VEC_quick_push (tree, ret,
- fold_build2 (op, integer_type_node,
- VEC_index (tree, fna, i),
- VEC_index (tree, fnb, i)));
+ {
+ tree type = signed_type_for_types (TREE_TYPE (VEC_index (tree, fna, i)),
+ TREE_TYPE (VEC_index (tree, fnb, i)));
+
+ VEC_quick_push (tree, ret,
+ fold_build2 (op, type,
+ VEC_index (tree, fna, i),
+ VEC_index (tree, fnb, i)));
+ }
for (; VEC_iterate (tree, fna, i, coef); i++)
VEC_quick_push (tree, ret,
- fold_build2 (op, integer_type_node,
+ fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
coef, integer_zero_node));
for (; VEC_iterate (tree, fnb, i, coef); i++)
VEC_quick_push (tree, ret,
- fold_build2 (op, integer_type_node,
+ fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
integer_zero_node, coef));
return ret;
/* Returns true if the address of OBJ is invariant in LOOP. */
static bool
-object_address_invariant_in_loop_p (struct loop *loop, tree obj)
+object_address_invariant_in_loop_p (const struct loop *loop, const_tree obj)
{
while (handled_component_p (obj))
{
true otherwise. */
static bool
-dr_may_alias_p (struct data_reference *a, struct data_reference *b)
+dr_may_alias_p (const struct data_reference *a, const struct data_reference *b)
{
- tree addr_a = DR_BASE_ADDRESS (a);
- tree addr_b = DR_BASE_ADDRESS (b);
- tree type_a, type_b;
- tree decl_a = NULL_TREE, decl_b = NULL_TREE;
+ const_tree addr_a = DR_BASE_ADDRESS (a);
+ const_tree addr_b = DR_BASE_ADDRESS (b);
+ const_tree type_a, type_b;
+ const_tree decl_a = NULL_TREE, decl_b = NULL_TREE;
/* If the sets of virtual operands are disjoint, the memory references do not
alias. */
DDR_B (res) = b;
DDR_LOOP_NEST (res) = NULL;
DDR_REVERSED_P (res) = false;
+ DDR_SUBSCRIPTS (res) = NULL;
+ DDR_DIR_VECTS (res) = NULL;
+ DDR_DIST_VECTS (res) = NULL;
if (a == NULL || b == NULL)
{
DDR_SUBSCRIPTS (res) = VEC_alloc (subscript_p, heap, DR_NUM_DIMENSIONS (a));
DDR_LOOP_NEST (res) = loop_nest;
DDR_INNER_LOOP (res) = 0;
- DDR_DIR_VECTS (res) = NULL;
- DDR_DIST_VECTS (res) = NULL;
for (i = 0; i < DR_NUM_DIMENSIONS (a); i++)
{
DDR_ARE_DEPENDENT (ddr) = chrec;
free_subscripts (DDR_SUBSCRIPTS (ddr));
+ DDR_SUBSCRIPTS (ddr) = NULL;
}
/* The dependence relation DDR cannot be represented by a distance
variables, i.e., if the ZIV (Zero Index Variable) test is true. */
static inline bool
-ziv_subscript_p (tree chrec_a,
- tree chrec_b)
+ziv_subscript_p (const_tree chrec_a, const_tree chrec_b)
{
return (evolution_function_is_constant_p (chrec_a)
&& evolution_function_is_constant_p (chrec_b));
variable, i.e., if the SIV (Single Index Variable) test is true. */
static bool
-siv_subscript_p (tree chrec_a,
- tree chrec_b)
+siv_subscript_p (const_tree chrec_a, const_tree chrec_b)
{
if ((evolution_function_is_constant_p (chrec_a)
&& evolution_function_is_univariate_p (chrec_b))
conflict_function **overlaps_b,
tree *last_conflicts)
{
- tree difference;
+ tree type, difference;
dependence_stats.num_ziv++;
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "(analyze_ziv_subscript \n");
-
- chrec_a = chrec_convert (integer_type_node, chrec_a, NULL_TREE);
- chrec_b = chrec_convert (integer_type_node, chrec_b, NULL_TREE);
- difference = chrec_fold_minus (integer_type_node, chrec_a, chrec_b);
+
+ type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
+ chrec_a = chrec_convert (type, chrec_a, NULL_TREE);
+ chrec_b = chrec_convert (type, chrec_b, NULL_TREE);
+ difference = chrec_fold_minus (type, chrec_a, chrec_b);
switch (TREE_CODE (difference))
{
tree *last_conflicts)
{
bool value0, value1, value2;
- tree difference, tmp;
+ tree type, difference, tmp;
- chrec_a = chrec_convert (integer_type_node, chrec_a, NULL_TREE);
- chrec_b = chrec_convert (integer_type_node, chrec_b, NULL_TREE);
- difference = chrec_fold_minus
- (integer_type_node, initial_condition (chrec_b), chrec_a);
+ type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
+ chrec_a = chrec_convert (type, chrec_a, NULL_TREE);
+ chrec_b = chrec_convert (type, chrec_b, NULL_TREE);
+ difference = chrec_fold_minus (type, initial_condition (chrec_b), chrec_a);
if (!chrec_is_positive (initial_condition (difference), &value0))
{
struct loop *loop = get_chrec_loop (chrec_b);
*overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
- tmp = fold_build2 (EXACT_DIV_EXPR, integer_type_node,
- fold_build1 (ABS_EXPR,
- integer_type_node,
- difference),
+ tmp = fold_build2 (EXACT_DIV_EXPR, type,
+ fold_build1 (ABS_EXPR, type, difference),
CHREC_RIGHT (chrec_b));
*overlaps_b = conflict_fn (1, affine_fn_cst (tmp));
*last_conflicts = integer_one_node;
struct loop *loop = get_chrec_loop (chrec_b);
*overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
- tmp = fold_build2 (EXACT_DIV_EXPR,
- integer_type_node, difference,
+ tmp = fold_build2 (EXACT_DIV_EXPR, type, difference,
CHREC_RIGHT (chrec_b));
*overlaps_b = conflict_fn (1, affine_fn_cst (tmp));
*last_conflicts = integer_one_node;
/* Helper recursive function for initializing the matrix A. Returns
the initial value of CHREC. */
-static int
+static HOST_WIDE_INT
initialize_matrix_A (lambda_matrix A, tree chrec, unsigned index, int mult)
{
gcc_assert (chrec);
step_overlaps_a = step_b / gcd_steps_a_b;
step_overlaps_b = step_a / gcd_steps_a_b;
- tau2 = FLOOR_DIV (niter, step_overlaps_a);
- tau2 = MIN (tau2, FLOOR_DIV (niter, step_overlaps_b));
- last_conflict = tau2;
+ if (niter > 0)
+ {
+ tau2 = FLOOR_DIV (niter, step_overlaps_a);
+ tau2 = MIN (tau2, FLOOR_DIV (niter, step_overlaps_b));
+ last_conflict = tau2;
+ *last_conflicts = build_int_cst (NULL_TREE, last_conflict);
+ }
+ else
+ *last_conflicts = chrec_dont_know;
*overlaps_a = affine_fn_univar (integer_zero_node, dim,
build_int_cst (NULL_TREE,
*overlaps_b = affine_fn_univar (integer_zero_node, dim,
build_int_cst (NULL_TREE,
step_overlaps_b));
- *last_conflicts = build_int_cst (NULL_TREE, last_conflict);
}
else
{
unsigned nb_vars_a, nb_vars_b, dim;
HOST_WIDE_INT init_a, init_b, gamma, gcd_alpha_beta;
- HOST_WIDE_INT tau1, tau2;
lambda_matrix A, U, S;
if (eq_evolutions_p (chrec_a, chrec_b))
false);
niter_b = estimated_loop_iterations_int (get_chrec_loop (chrec_b),
false);
- if (niter_a < 0 || niter_b < 0)
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "affine-affine test failed: missing iteration counts.\n");
- *overlaps_a = conflict_fn_not_known ();
- *overlaps_b = conflict_fn_not_known ();
- *last_conflicts = chrec_dont_know;
- goto end_analyze_subs_aa;
- }
-
niter = MIN (niter_a, niter_b);
-
step_a = int_cst_value (CHREC_RIGHT (chrec_a));
step_b = int_cst_value (CHREC_RIGHT (chrec_b));
| x0 = i0 + i1 * t,
| y0 = j0 + j1 * t. */
-
- HOST_WIDE_INT i0, j0, i1, j1;
-
- /* X0 and Y0 are the first iterations for which there is a
- dependence. X0, Y0 are two solutions of the Diophantine
- equation: chrec_a (X0) = chrec_b (Y0). */
- HOST_WIDE_INT x0, y0;
- HOST_WIDE_INT niter, niter_a, niter_b;
-
- niter_a = estimated_loop_iterations_int (get_chrec_loop (chrec_a),
- false);
- niter_b = estimated_loop_iterations_int (get_chrec_loop (chrec_b),
- false);
-
- if (niter_a < 0 || niter_b < 0)
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "affine-affine test failed: missing iteration counts.\n");
- *overlaps_a = conflict_fn_not_known ();
- *overlaps_b = conflict_fn_not_known ();
- *last_conflicts = chrec_dont_know;
- goto end_analyze_subs_aa;
- }
-
- niter = MIN (niter_a, niter_b);
+ HOST_WIDE_INT i0, j0, i1, j1;
i0 = U[0][0] * gamma / gcd_alpha_beta;
j0 = U[0][1] * gamma / gcd_alpha_beta;
*overlaps_a = conflict_fn_no_dependence ();
*overlaps_b = conflict_fn_no_dependence ();
*last_conflicts = integer_zero_node;
+ goto end_analyze_subs_aa;
}
- else
+ if (i1 > 0 && j1 > 0)
{
- if (i1 > 0)
+ HOST_WIDE_INT niter_a = estimated_loop_iterations_int
+ (get_chrec_loop (chrec_a), false);
+ HOST_WIDE_INT niter_b = estimated_loop_iterations_int
+ (get_chrec_loop (chrec_b), false);
+ HOST_WIDE_INT niter = MIN (niter_a, niter_b);
+
+ /* (X0, Y0) is a solution of the Diophantine equation:
+ "chrec_a (X0) = chrec_b (Y0)". */
+ HOST_WIDE_INT tau1 = MAX (CEIL (-i0, i1),
+ CEIL (-j0, j1));
+ HOST_WIDE_INT x0 = i1 * tau1 + i0;
+ HOST_WIDE_INT y0 = j1 * tau1 + j0;
+
+ /* (X1, Y1) is the smallest positive solution of the equation
+ "chrec_a (X1) = chrec_b (Y1)", i.e. this is where the
+ first conflict occurs. */
+ HOST_WIDE_INT min_multiple = MIN (x0 / i1, y0 / j1);
+ HOST_WIDE_INT x1 = x0 - i1 * min_multiple;
+ HOST_WIDE_INT y1 = y0 - j1 * min_multiple;
+
+ if (niter > 0)
{
- tau1 = CEIL (-i0, i1);
- tau2 = FLOOR_DIV (niter - i0, i1);
+ HOST_WIDE_INT tau2 = MIN (FLOOR_DIV (niter - i0, i1),
+ FLOOR_DIV (niter - j0, j1));
+ HOST_WIDE_INT last_conflict = tau2 - (x1 - i0)/i1;
- if (j1 > 0)
+ /* If the overlap occurs outside of the bounds of the
+ loop, there is no dependence. */
+ if (x1 > niter || y1 > niter)
{
- int last_conflict, min_multiple;
- tau1 = MAX (tau1, CEIL (-j0, j1));
- tau2 = MIN (tau2, FLOOR_DIV (niter - j0, j1));
-
- x0 = i1 * tau1 + i0;
- y0 = j1 * tau1 + j0;
-
- /* At this point (x0, y0) is one of the
- solutions to the Diophantine equation. The
- next step has to compute the smallest
- positive solution: the first conflicts. */
- min_multiple = MIN (x0 / i1, y0 / j1);
- x0 -= i1 * min_multiple;
- y0 -= j1 * min_multiple;
-
- tau1 = (x0 - i0)/i1;
- last_conflict = tau2 - tau1;
-
- /* If the overlap occurs outside of the bounds of the
- loop, there is no dependence. */
- if (x0 > niter || y0 > niter)
- {
- *overlaps_a = conflict_fn_no_dependence ();
- *overlaps_b = conflict_fn_no_dependence ();
- *last_conflicts = integer_zero_node;
- }
- else
- {
- *overlaps_a
- = conflict_fn (1,
- affine_fn_univar (build_int_cst (NULL_TREE, x0),
- 1,
- build_int_cst (NULL_TREE, i1)));
- *overlaps_b
- = conflict_fn (1,
- affine_fn_univar (build_int_cst (NULL_TREE, y0),
- 1,
- build_int_cst (NULL_TREE, j1)));
- *last_conflicts = build_int_cst (NULL_TREE, last_conflict);
- }
+ *overlaps_a = conflict_fn_no_dependence ();
+ *overlaps_b = conflict_fn_no_dependence ();
+ *last_conflicts = integer_zero_node;
+ goto end_analyze_subs_aa;
}
else
- {
- /* FIXME: For the moment, the upper bound of the
- iteration domain for j is not checked. */
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
- *overlaps_a = conflict_fn_not_known ();
- *overlaps_b = conflict_fn_not_known ();
- *last_conflicts = chrec_dont_know;
- }
+ *last_conflicts = build_int_cst (NULL_TREE, last_conflict);
}
-
else
- {
- /* FIXME: For the moment, the upper bound of the
- iteration domain for i is not checked. */
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
- *overlaps_a = conflict_fn_not_known ();
- *overlaps_b = conflict_fn_not_known ();
- *last_conflicts = chrec_dont_know;
- }
+ *last_conflicts = chrec_dont_know;
+
+ *overlaps_a
+ = conflict_fn (1,
+ affine_fn_univar (build_int_cst (NULL_TREE, x1),
+ 1,
+ build_int_cst (NULL_TREE, i1)));
+ *overlaps_b
+ = conflict_fn (1,
+ affine_fn_univar (build_int_cst (NULL_TREE, y1),
+ 1,
+ build_int_cst (NULL_TREE, j1)));
+ }
+ else
+ {
+ /* FIXME: For the moment, the upper bound of the
+ iteration domain for i and j is not checked. */
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
+ *overlaps_a = conflict_fn_not_known ();
+ *overlaps_b = conflict_fn_not_known ();
+ *last_conflicts = chrec_dont_know;
}
}
else
*last_conflicts = chrec_dont_know;
}
}
-
else
{
if (dump_file && (dump_flags & TDF_DETAILS))
of CHREC does not divide CST, false otherwise. */
static bool
-gcd_of_steps_may_divide_p (tree chrec, tree cst)
+gcd_of_steps_may_divide_p (const_tree chrec, const_tree cst)
{
HOST_WIDE_INT cd = 0, val;
tree step;
variables. In the MIV case we have to solve a Diophantine
equation with 2*n variables (if the subscript uses n IVs).
*/
- tree difference;
+ tree type, difference;
+
dependence_stats.num_miv++;
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "(analyze_miv_subscript \n");
- chrec_a = chrec_convert (integer_type_node, chrec_a, NULL_TREE);
- chrec_b = chrec_convert (integer_type_node, chrec_b, NULL_TREE);
- difference = chrec_fold_minus (integer_type_node, chrec_a, chrec_b);
+ type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
+ chrec_a = chrec_convert (type, chrec_a, NULL_TREE);
+ chrec_b = chrec_convert (type, chrec_b, NULL_TREE);
+ difference = chrec_fold_minus (type, chrec_a, chrec_b);
if (eq_evolutions_p (chrec_a, chrec_b))
{
same access functions. */
static bool
-same_access_functions (struct data_dependence_relation *ddr)
+same_access_functions (const struct data_dependence_relation *ddr)
{
unsigned i;
/* Return true when the DDR contains only constant access functions. */
static bool
-constant_access_functions (struct data_dependence_relation *ddr)
+constant_access_functions (const struct data_dependence_relation *ddr)
{
unsigned i;
return true;
}
-
/* Helper function for the case where DDR_A and DDR_B are the same
- multivariate access function. */
+ multivariate access function with a constant step. For an example
+ see pr34635-1.c. */
static void
add_multivariate_self_dist (struct data_dependence_relation *ddr, tree c_2)
return;
}
- add_multivariate_self_dist (ddr, DR_ACCESS_FN (DDR_A (ddr), 0));
+ access_fun = DR_ACCESS_FN (DDR_A (ddr), 0);
+
+ if (TREE_CODE (CHREC_LEFT (access_fun)) == POLYNOMIAL_CHREC)
+ add_multivariate_self_dist (ddr, access_fun);
+ else
+ /* The evolution step is not constant: it varies in
+ the outer loop, so this cannot be represented by a
+ distance vector. For example in pr34635.c the
+ evolution is {0, +, {0, +, 4}_1}_2. */
+ DDR_AFFINE_P (ddr) = false;
+
return;
}
lambda_vector dist_v;
if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE)
- return true;
+ return false;
if (same_access_functions (ddr))
{
if (!lambda_vector_lexico_pos (dist_v, DDR_NB_LOOPS (ddr)))
{
lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
- subscript_dependence_tester_1 (ddr, DDR_B (ddr), DDR_A (ddr),
- loop_nest);
+ if (!subscript_dependence_tester_1 (ddr, DDR_B (ddr), DDR_A (ddr),
+ loop_nest))
+ return false;
compute_subscript_distance (ddr);
- build_classic_dist_vector_1 (ddr, DDR_B (ddr), DDR_A (ddr),
- save_v, &init_b, &index_carry);
+ if (!build_classic_dist_vector_1 (ddr, DDR_B (ddr), DDR_A (ddr),
+ save_v, &init_b, &index_carry))
+ return false;
save_dist_v (ddr, save_v);
DDR_REVERSED_P (ddr) = true;
{
lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr));
- save_dist_v (ddr, save_v);
if (DDR_NB_LOOPS (ddr) > 1)
{
lambda_vector opposite_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
- subscript_dependence_tester_1 (ddr, DDR_B (ddr), DDR_A (ddr),
- loop_nest);
+ if (!subscript_dependence_tester_1 (ddr, DDR_B (ddr),
+ DDR_A (ddr), loop_nest))
+ return false;
compute_subscript_distance (ddr);
- build_classic_dist_vector_1 (ddr, DDR_B (ddr), DDR_A (ddr),
- opposite_v, &init_b, &index_carry);
+ if (!build_classic_dist_vector_1 (ddr, DDR_B (ddr), DDR_A (ddr),
+ opposite_v, &init_b,
+ &index_carry))
+ return false;
+ save_dist_v (ddr, save_v);
add_outer_distances (ddr, dist_v, index_carry);
add_outer_distances (ddr, opposite_v, index_carry);
}
+ else
+ save_dist_v (ddr, save_v);
}
}
else
else
{
+ if (SUB_CONFLICTS_IN_A (subscript))
+ free_conflict_function (SUB_CONFLICTS_IN_A (subscript));
+ if (SUB_CONFLICTS_IN_B (subscript))
+ free_conflict_function (SUB_CONFLICTS_IN_B (subscript));
+
SUB_CONFLICTS_IN_A (subscript) = overlaps_a;
SUB_CONFLICTS_IN_B (subscript) = overlaps_b;
SUB_LAST_CONFLICT (subscript) = last_conflicts;
constant with respect to LOOP_NEST. */
static bool
-access_functions_are_affine_or_constant_p (struct data_reference *a,
- struct loop *loop_nest)
+access_functions_are_affine_or_constant_p (const struct data_reference *a,
+ const struct loop *loop_nest)
{
unsigned int i;
VEC(tree,heap) *fns = DR_ACCESS_FNS (a);
omega_pb pb, bool *maybe_dependent)
{
int eq;
- tree fun_a = chrec_convert (integer_type_node, access_fun_a, NULL_TREE);
- tree fun_b = chrec_convert (integer_type_node, access_fun_b, NULL_TREE);
- tree difference = chrec_fold_minus (integer_type_node, fun_a, fun_b);
+ tree type = signed_type_for_types (TREE_TYPE (access_fun_a),
+ TREE_TYPE (access_fun_b));
+ tree fun_a = chrec_convert (type, access_fun_a, NULL_TREE);
+ tree fun_b = chrec_convert (type, access_fun_b, NULL_TREE);
+ tree difference = chrec_fold_minus (type, fun_a, fun_b);
/* When the fun_a - fun_b is not constant, the dependence is not
captured by the classic distance vector representation. */
return true;
}
- fun_b = chrec_fold_multiply (integer_type_node, fun_b,
- integer_minus_one_node);
+ fun_b = chrec_fold_multiply (type, fun_b, integer_minus_one_node);
eq = omega_add_zero_eq (pb, omega_black);
if (!init_omega_eq_with_af (pb, eq, DDR_NB_LOOPS (ddr), fun_a, ddr)
for (i = 0; VEC_iterate (subscript_p, DDR_SUBSCRIPTS (ddr), i, subscript);
i++)
{
+ if (SUB_CONFLICTS_IN_A (subscript))
+ free_conflict_function (SUB_CONFLICTS_IN_A (subscript));
+ if (SUB_CONFLICTS_IN_B (subscript))
+ free_conflict_function (SUB_CONFLICTS_IN_B (subscript));
+
/* The accessed index overlaps for each iteration. */
SUB_CONFLICTS_IN_A (subscript)
- = conflict_fn (1, affine_fn_cst (integer_zero_node));
+ = conflict_fn (1, affine_fn_cst (integer_zero_node));
SUB_CONFLICTS_IN_B (subscript)
- = conflict_fn (1, affine_fn_cst (integer_zero_node));
+ = conflict_fn (1, affine_fn_cst (integer_zero_node));
SUB_LAST_CONFLICT (subscript) = chrec_dont_know;
}
op1 = &GIMPLE_STMT_OPERAND (stmt, 1);
if (DECL_P (*op1)
- || REFERENCE_CLASS_P (*op1))
+ || (REFERENCE_CLASS_P (*op1) && get_base_address (*op1)))
{
ref = VEC_safe_push (data_ref_loc, heap, *references, NULL);
ref->pos = op1;
}
if (DECL_P (*op0)
- || REFERENCE_CLASS_P (*op0))
+ || (REFERENCE_CLASS_P (*op0) && get_base_address (*op0)))
{
ref = VEC_safe_push (data_ref_loc, heap, *references, NULL);
ref->pos = op0;
op0 = &CALL_EXPR_ARG (call, i);
if (DECL_P (*op0)
- || REFERENCE_CLASS_P (*op0))
+ || (REFERENCE_CLASS_P (*op0) && get_base_address (*op0)))
{
ref = VEC_safe_push (data_ref_loc, heap, *references, NULL);
ref->pos = op0;
if (ddr == NULL)
return;
- if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE && DDR_SUBSCRIPTS (ddr))
+ if (DDR_SUBSCRIPTS (ddr))
free_subscripts (DDR_SUBSCRIPTS (ddr));
+ if (DDR_DIST_VECTS (ddr))
+ VEC_free (lambda_vector, heap, DDR_DIST_VECTS (ddr));
+ if (DDR_DIR_VECTS (ddr))
+ VEC_free (lambda_vector, heap, DDR_DIR_VECTS (ddr));
free (ddr);
}
/* Returns the index of STMT in RDG. */
static int
-find_vertex_for_stmt (struct graph *rdg, tree stmt)
+find_vertex_for_stmt (const struct graph *rdg, const_tree stmt)
{
int i;