#include "system.h"
#include "coretypes.h"
#include "tree.h"
-#include "gimple.h"
-#include "ggc.h"
-#include "toplev.h"
-#include "real.h"
+#include "diagnostic-core.h" /* For internal_error/fatal_error. */
#include "flags.h"
#include "gfortran.h"
#include "constructor.h"
#include "trans-const.h"
#include "dependency.h"
-static gfc_ss *gfc_walk_subexpr (gfc_ss *, gfc_expr *);
static bool gfc_get_array_constructor_size (mpz_t *, gfc_constructor_base);
/* The contents of this structure aren't actually used, just the address. */
field = TYPE_FIELDS (type);
gcc_assert (DATA_FIELD == 0);
- t = fold_build3 (COMPONENT_REF, TREE_TYPE (field), desc, field, NULL_TREE);
+ t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
+ field, NULL_TREE);
t = fold_convert (GFC_TYPE_ARRAY_DATAPTR_TYPE (type), t);
return t;
field = TYPE_FIELDS (type);
gcc_assert (DATA_FIELD == 0);
- t = fold_build3 (COMPONENT_REF, TREE_TYPE (field), desc, field, NULL_TREE);
+ t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
+ field, NULL_TREE);
gfc_add_modify (block, t, fold_convert (TREE_TYPE (field), value));
}
field = TYPE_FIELDS (type);
gcc_assert (DATA_FIELD == 0);
- t = fold_build3 (COMPONENT_REF, TREE_TYPE (field), desc, field, NULL_TREE);
+ t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
+ field, NULL_TREE);
return gfc_build_addr_expr (NULL_TREE, t);
}
field = gfc_advance_chain (TYPE_FIELDS (type), OFFSET_FIELD);
gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
- return fold_build3 (COMPONENT_REF, TREE_TYPE (field),
- desc, field, NULL_TREE);
+ return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
+ desc, field, NULL_TREE);
}
tree
field = gfc_advance_chain (TYPE_FIELDS (type), DTYPE_FIELD);
gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
- return fold_build3 (COMPONENT_REF, TREE_TYPE (field),
- desc, field, NULL_TREE);
+ return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
+ desc, field, NULL_TREE);
}
static tree
&& TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE
&& TREE_CODE (TREE_TYPE (TREE_TYPE (field))) == RECORD_TYPE);
- tmp = fold_build3 (COMPONENT_REF, TREE_TYPE (field),
- desc, field, NULL_TREE);
+ tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
+ desc, field, NULL_TREE);
tmp = gfc_build_array_ref (tmp, dim, NULL);
return tmp;
}
field = gfc_advance_chain (field, STRIDE_SUBFIELD);
gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
- tmp = fold_build3 (COMPONENT_REF, TREE_TYPE (field),
- tmp, field, NULL_TREE);
+ tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
+ tmp, field, NULL_TREE);
return tmp;
}
tree type = TREE_TYPE (desc);
gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
if (integer_zerop (dim)
- && GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ALLOCATABLE)
+ && (GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ALLOCATABLE
+ ||GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ASSUMED_SHAPE_CONT
+ ||GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_POINTER_CONT))
return gfc_index_one_node;
return gfc_conv_descriptor_stride (desc, dim);
field = gfc_advance_chain (field, LBOUND_SUBFIELD);
gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
- tmp = fold_build3 (COMPONENT_REF, TREE_TYPE (field),
- tmp, field, NULL_TREE);
+ tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
+ tmp, field, NULL_TREE);
return tmp;
}
field = gfc_advance_chain (field, UBOUND_SUBFIELD);
gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
- tmp = fold_build3 (COMPONENT_REF, TREE_TYPE (field),
- tmp, field, NULL_TREE);
+ tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
+ tmp, field, NULL_TREE);
return tmp;
}
}
+/* Modify a descriptor such that the lbound of a given dimension is the value
+ specified. This also updates ubound and offset accordingly. */
+
+void
+gfc_conv_shift_descriptor_lbound (stmtblock_t* block, tree desc,
+ int dim, tree new_lbound)
+{
+ tree offs, ubound, lbound, stride;
+ tree diff, offs_diff;
+
+ /* Work in the descriptor index type whatever the caller handed us. */
+ new_lbound = fold_convert (gfc_array_index_type, new_lbound);
+
+ /* Read the current offset, bounds and stride for dimension DIM. */
+ offs = gfc_conv_descriptor_offset_get (desc);
+ lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]);
+ ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]);
+ stride = gfc_conv_descriptor_stride_get (desc, gfc_rank_cst[dim]);
+
+ /* Get difference (new - old) by which to shift stuff. */
+ diff = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
+ new_lbound, lbound);
+
+ /* Shift ubound and offset accordingly. This has to be done before
+ updating the lbound, as they depend on the lbound expression! */
+ ubound = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ ubound, diff);
+ gfc_conv_descriptor_ubound_set (block, desc, gfc_rank_cst[dim], ubound);
+ /* offset -= diff * stride, so that the address computed for any given
+ index value is unchanged by the lbound shift. */
+ offs_diff = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ diff, stride);
+ offs = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
+ offs, offs_diff);
+ gfc_conv_descriptor_offset_set (block, desc, offs);
+
+ /* Finally set lbound to value we want. */
+ gfc_conv_descriptor_lbound_set (block, desc, gfc_rank_cst[dim], new_lbound);
+}
+
+
/* Cleanup those #defines. */
#undef DATA_FIELD
/* Free a gfc_ss chain. */
-static void
+void
gfc_free_ss_chain (gfc_ss * ss)
{
gfc_ss *next;
switch (ss->type)
{
case GFC_SS_SECTION:
- for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
+ for (n = 0; n < ss->data.info.dimen; n++)
{
- if (ss->data.info.subscript[n])
- gfc_free_ss_chain (ss->data.info.subscript[n]);
+ if (ss->data.info.subscript[ss->data.info.dim[n]])
+ gfc_free_ss_chain (ss->data.info.subscript[ss->data.info.dim[n]]);
}
break;
tree tmp;
if (as && as->type == AS_EXPLICIT)
- for (dim = 0; dim < se->loop->dimen; dim++)
+ for (n = 0; n < se->loop->dimen; n++)
{
- n = se->loop->order[dim];
+ dim = se->ss->data.info.dim[n];
+ gcc_assert (dim < as->rank);
+ gcc_assert (se->loop->dimen == as->rank);
if (se->loop->to[n] == NULL_TREE)
{
/* Evaluate the lower bound. */
upper = fold_convert (gfc_array_index_type, tmpse.expr);
/* Set the upper bound of the loop to UPPER - LOWER. */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, upper, lower);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, upper, lower);
tmp = gfc_evaluate_now (tmp, &se->pre);
se->loop->to[n] = tmp;
}
if (onstack)
{
/* Make a temporary variable to hold the data. */
- tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (nelem), nelem,
- gfc_index_one_node);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (nelem),
+ nelem, gfc_index_one_node);
tmp = build_range_type (gfc_array_index_type, gfc_index_zero_node,
tmp);
tmp = build_array_type (gfc_get_element_type (TREE_TYPE (desc)),
tmp = gfc_build_memcpy_call (packed, source_data, size);
gfc_add_expr_to_block (&do_copying, tmp);
- was_packed = fold_build2 (EQ_EXPR, boolean_type_node,
- packed, source_data);
+ was_packed = fold_build2_loc (input_location, EQ_EXPR,
+ boolean_type_node, packed,
+ source_data);
tmp = gfc_finish_block (&do_copying);
tmp = build3_v (COND_EXPR, was_packed, tmp,
build_empty_stmt (input_location));
}
+/* Get the array reference dimension corresponding to the given loop dimension.
+ It is different from the true array dimension given by the dim array in
+ the case of a partial array reference.
+ It is different from the loop dimension in the case of a transposed array.
+ */
+
+static int
+get_array_ref_dim (gfc_ss_info *info, int loop_dim)
+{
+ int n, array_dim, array_ref_dim;
+
+ array_ref_dim = 0;
+ array_dim = info->dim[loop_dim];
+
+ /* Our position in the array reference is the number of other loop
+ dimensions whose mapped array dimension precedes ours. */
+ for (n = 0; n < info->dimen; n++)
+ if (n != loop_dim && info->dim[n] < array_dim)
+ array_ref_dim++;
+
+ return array_ref_dim;
+}
+
+
/* Generate code to create and initialize the descriptor for a temporary
array. This is used for both temporaries needed by the scalarizer, and
functions returning arrays. Adjusts the loop variables to be
tree eltype, tree initial, bool dynamic,
bool dealloc, bool callee_alloc, locus * where)
{
+ tree from[GFC_MAX_DIMENSIONS], to[GFC_MAX_DIMENSIONS];
tree type;
tree desc;
tree tmp;
tree nelem;
tree cond;
tree or_expr;
- int n;
- int dim;
+ int n, dim, tmp_dim;
+
+ memset (from, 0, sizeof (from));
+ memset (to, 0, sizeof (to));
gcc_assert (info->dimen > 0);
+ gcc_assert (loop->dimen == info->dimen);
if (gfc_option.warn_array_temp && where)
gfc_warning ("Creating array temporary at %L", where);
/* Set the lower bound to zero. */
- for (dim = 0; dim < info->dimen; dim++)
+ for (n = 0; n < loop->dimen; n++)
{
- n = loop->order[dim];
+ dim = info->dim[n];
+
/* Callee allocated arrays may not have a known bound yet. */
if (loop->to[n])
- loop->to[n] = gfc_evaluate_now (fold_build2 (MINUS_EXPR,
- gfc_array_index_type,
- loop->to[n], loop->from[n]), pre);
+ loop->to[n] = gfc_evaluate_now (
+ fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[n], loop->from[n]),
+ pre);
loop->from[n] = gfc_index_zero_node;
+ /* We are constructing the temporary's descriptor based on the loop
+ dimensions. As the dimensions may be accessed in arbitrary order
+ (think of transpose) the size taken from the n'th loop may not map
+ to the n'th dimension of the array. We need to reconstruct loop infos
+ in the right order before using it to set the descriptor
+ bounds. */
+ tmp_dim = get_array_ref_dim (info, n);
+ from[tmp_dim] = loop->from[n];
+ to[tmp_dim] = loop->to[n];
+
info->delta[dim] = gfc_index_zero_node;
info->start[dim] = gfc_index_zero_node;
info->end[dim] = gfc_index_zero_node;
info->stride[dim] = gfc_index_one_node;
- info->dim[dim] = dim;
}
/* Initialize the descriptor. */
type =
- gfc_get_array_type_bounds (eltype, info->dimen, 0, loop->from, loop->to, 1,
+ gfc_get_array_type_bounds (eltype, info->dimen, 0, from, to, 1,
GFC_ARRAY_UNKNOWN, true);
desc = gfc_create_var (type, "atmp");
GFC_DECL_PACKED_ARRAY (desc) = 1;
or_expr = NULL_TREE;
- /* If there is at least one null loop->to[n], it is a callee allocated
+ /* If there is at least one null loop->to[n], it is a callee allocated
array. */
- for (n = 0; n < info->dimen; n++)
+ for (n = 0; n < loop->dimen; n++)
if (loop->to[n] == NULL_TREE)
{
size = NULL_TREE;
break;
}
- for (n = 0; n < info->dimen; n++)
- {
+ for (n = 0; n < loop->dimen; n++)
+ {
+ dim = info->dim[n];
+
if (size == NULL_TREE)
{
/* For a callee allocated array express the loop bounds in terms
of the descriptor fields. */
- tmp =
- fold_build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[n]),
- gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]));
+ tmp = fold_build2_loc (input_location,
+ MINUS_EXPR, gfc_array_index_type,
+ gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[n]),
+ gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]));
loop->to[n] = tmp;
continue;
}
gfc_conv_descriptor_lbound_set (pre, desc, gfc_rank_cst[n],
gfc_index_zero_node);
- gfc_conv_descriptor_ubound_set (pre, desc, gfc_rank_cst[n], loop->to[n]);
+ gfc_conv_descriptor_ubound_set (pre, desc, gfc_rank_cst[n],
+ to[n]);
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- loop->to[n], gfc_index_one_node);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ to[n], gfc_index_one_node);
/* Check whether the size for this dimension is negative. */
- cond = fold_build2 (LE_EXPR, boolean_type_node, tmp,
- gfc_index_zero_node);
+ cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, tmp,
+ gfc_index_zero_node);
cond = gfc_evaluate_now (cond, pre);
if (n == 0)
or_expr = cond;
else
- or_expr = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, or_expr, cond);
+ or_expr = fold_build2_loc (input_location, TRUTH_OR_EXPR,
+ boolean_type_node, or_expr, cond);
- size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
+ size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ size, tmp);
size = gfc_evaluate_now (size, pre);
}
{
/* If or_expr is true, then the extent in at least one
dimension is zero and the size is set to zero. */
- size = fold_build3 (COND_EXPR, gfc_array_index_type,
- or_expr, gfc_index_zero_node, size);
+ size = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type,
+ or_expr, gfc_index_zero_node, size);
nelem = size;
- size = fold_build2 (MULT_EXPR, gfc_array_index_type, size,
+ size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ size,
fold_convert (gfc_array_index_type,
TYPE_SIZE_UNIT (gfc_get_element_type (type))));
}
}
-/* Generate code to transpose array EXPR by creating a new descriptor
- in which the dimension specifications have been reversed. */
-
-void
-gfc_conv_array_transpose (gfc_se * se, gfc_expr * expr)
-{
- tree dest, src, dest_index, src_index;
- gfc_loopinfo *loop;
- gfc_ss_info *dest_info;
- gfc_ss *dest_ss, *src_ss;
- gfc_se src_se;
- int n;
-
- loop = se->loop;
-
- src_ss = gfc_walk_expr (expr);
- dest_ss = se->ss;
-
- dest_info = &dest_ss->data.info;
- gcc_assert (dest_info->dimen == 2);
-
- /* Get a descriptor for EXPR. */
- gfc_init_se (&src_se, NULL);
- gfc_conv_expr_descriptor (&src_se, expr, src_ss);
- gfc_add_block_to_block (&se->pre, &src_se.pre);
- gfc_add_block_to_block (&se->post, &src_se.post);
- src = src_se.expr;
-
- /* Allocate a new descriptor for the return value. */
- dest = gfc_create_var (TREE_TYPE (src), "atmp");
- dest_info->descriptor = dest;
- se->expr = dest;
-
- /* Copy across the dtype field. */
- gfc_add_modify (&se->pre,
- gfc_conv_descriptor_dtype (dest),
- gfc_conv_descriptor_dtype (src));
-
- /* Copy the dimension information, renumbering dimension 1 to 0 and
- 0 to 1. */
- for (n = 0; n < 2; n++)
- {
- dest_info->delta[n] = gfc_index_zero_node;
- dest_info->start[n] = gfc_index_zero_node;
- dest_info->end[n] = gfc_index_zero_node;
- dest_info->stride[n] = gfc_index_one_node;
- dest_info->dim[n] = n;
-
- dest_index = gfc_rank_cst[n];
- src_index = gfc_rank_cst[1 - n];
-
- gfc_conv_descriptor_stride_set (&se->pre, dest, dest_index,
- gfc_conv_descriptor_stride_get (src, src_index));
-
- gfc_conv_descriptor_lbound_set (&se->pre, dest, dest_index,
- gfc_conv_descriptor_lbound_get (src, src_index));
-
- gfc_conv_descriptor_ubound_set (&se->pre, dest, dest_index,
- gfc_conv_descriptor_ubound_get (src, src_index));
-
- if (!loop->to[n])
- {
- gcc_assert (integer_zerop (loop->from[n]));
- loop->to[n] =
- fold_build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_conv_descriptor_ubound_get (dest, dest_index),
- gfc_conv_descriptor_lbound_get (dest, dest_index));
- }
- }
-
- /* Copy the data pointer. */
- dest_info->data = gfc_conv_descriptor_data_get (src);
- gfc_conv_descriptor_data_set (&se->pre, dest, dest_info->data);
-
- /* Copy the offset. This is not changed by transposition; the top-left
- element is still at the same offset as before, except where the loop
- starts at zero. */
- if (!integer_zerop (loop->from[0]))
- dest_info->offset = gfc_conv_descriptor_offset_get (src);
- else
- dest_info->offset = gfc_index_zero_node;
-
- gfc_conv_descriptor_offset_set (&se->pre, dest,
- dest_info->offset);
-
- if (dest_info->dimen > loop->temp_dim)
- loop->temp_dim = dest_info->dimen;
-}
-
-
/* Return the number of iterations in a loop that starts at START,
ends at END, and has step STEP. */
tree type;
type = TREE_TYPE (step);
- tmp = fold_build2 (MINUS_EXPR, type, end, start);
- tmp = fold_build2 (FLOOR_DIV_EXPR, type, tmp, step);
- tmp = fold_build2 (PLUS_EXPR, type, tmp, build_int_cst (type, 1));
- tmp = fold_build2 (MAX_EXPR, type, tmp, build_int_cst (type, 0));
+ tmp = fold_build2_loc (input_location, MINUS_EXPR, type, end, start);
+ tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR, type, tmp, step);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, type, tmp,
+ build_int_cst (type, 1));
+ tmp = fold_build2_loc (input_location, MAX_EXPR, type, tmp,
+ build_int_cst (type, 0));
return fold_convert (gfc_array_index_type, tmp);
}
ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[0]);
/* Add EXTRA to the upper bound. */
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type, ubound, extra);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ ubound, extra);
gfc_conv_descriptor_ubound_set (pblock, desc, gfc_rank_cst[0], tmp);
/* Get the value of the current data pointer. */
/* Calculate the new array size. */
size = TYPE_SIZE_UNIT (gfc_get_element_type (TREE_TYPE (desc)));
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- ubound, gfc_index_one_node);
- arg1 = fold_build2 (MULT_EXPR, size_type_node,
- fold_convert (size_type_node, tmp),
- fold_convert (size_type_node, size));
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ ubound, gfc_index_one_node);
+ arg1 = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
+ fold_convert (size_type_node, tmp),
+ fold_convert (size_type_node, size));
/* Call the realloc() function. */
tmp = gfc_call_realloc (pblock, arg0, arg1);
esize = size_in_bytes (gfc_get_element_type (TREE_TYPE (desc)));
esize = fold_convert (gfc_charlen_type_node, esize);
- esize = fold_build2 (TRUNC_DIV_EXPR, gfc_charlen_type_node, esize,
+ esize = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
+ gfc_charlen_type_node, esize,
build_int_cst (gfc_charlen_type_node,
gfc_character_kinds[i].bit_size / 8));
{
/* Verify that all constructor elements are of the same
length. */
- tree cond = fold_build2 (NE_EXPR, boolean_type_node,
- first_len_val, se->string_length);
+ tree cond = fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node, first_len_val,
+ se->string_length);
gfc_trans_runtime_check
(true, false, cond, &se->pre, &expr->where,
"Different CHARACTER lengths (%ld/%ld) in array constructor",
{
tmp = gfc_get_iteration_count (loop.from[n], loop.to[n],
gfc_index_one_node);
- size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
+ size = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, size, tmp);
}
/* Grow the constructed array by SIZE elements. */
gcc_assert (se.ss == gfc_ss_terminator);
/* Increment the offset. */
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- *poffset, gfc_index_one_node);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ *poffset, gfc_index_one_node);
gfc_add_modify (&body, *poffset, tmp);
/* Finish the loop. */
gfc_trans_array_ctor_element (&body, desc, *poffset,
&se, c->expr);
- *poffset = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- *poffset, gfc_index_one_node);
+ *poffset = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ *poffset, gfc_index_one_node);
}
else
{
/* Collect multiple scalar constants into a constructor. */
- tree list;
+ VEC(constructor_elt,gc) *v = NULL;
tree init;
tree bound;
tree tmptype;
HOST_WIDE_INT idx = 0;
p = c;
- list = NULL_TREE;
/* Count the number of consecutive scalar constants. */
while (p && !(p->iterator
|| p->expr->expr_type != EXPR_CONSTANT))
(gfc_get_pchar_type (p->expr->ts.kind),
se.expr);
- list = tree_cons (build_int_cst (gfc_array_index_type,
- idx++), se.expr, list);
+ CONSTRUCTOR_APPEND_ELT (v,
+ build_int_cst (gfc_array_index_type,
+ idx++),
+ se.expr);
c = p;
p = gfc_constructor_next (p);
}
gfc_index_zero_node, bound);
tmptype = build_array_type (type, tmptype);
- init = build_constructor_from_list (tmptype, nreverse (list));
+ init = build_constructor (tmptype, v);
TREE_CONSTANT (init) = 1;
TREE_STATIC (init) = 1;
/* Create a static variable to hold the data. */
tmp, init, bound);
gfc_add_expr_to_block (&body, tmp);
- *poffset = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- *poffset,
+ *poffset = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, *poffset,
build_int_cst (gfc_array_index_type, n));
}
if (!INTEGER_CST_P (*poffset))
tmp2 = gfc_conv_mpz_to_tree (size, gfc_index_integer_kind);
/* Grow the array by TMP * TMP2 elements. */
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, tmp, tmp2);
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, tmp, tmp2);
gfc_grow_array (&implied_do_block, desc, tmp);
}
/* Generate the exit condition. Depending on the sign of
the step variable we have to generate the correct
comparison. */
- tmp = fold_build2 (GT_EXPR, boolean_type_node, step,
- build_int_cst (TREE_TYPE (step), 0));
- cond = fold_build3 (COND_EXPR, boolean_type_node, tmp,
- fold_build2 (GT_EXPR, boolean_type_node,
- shadow_loopvar, end),
- fold_build2 (LT_EXPR, boolean_type_node,
- shadow_loopvar, end));
+ tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ step, build_int_cst (TREE_TYPE (step), 0));
+ cond = fold_build3_loc (input_location, COND_EXPR,
+ boolean_type_node, tmp,
+ fold_build2_loc (input_location, GT_EXPR,
+ boolean_type_node, shadow_loopvar, end),
+ fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node, shadow_loopvar, end));
tmp = build1_v (GOTO_EXPR, exit_label);
TREE_USED (exit_label) = 1;
tmp = build3_v (COND_EXPR, cond, tmp,
gfc_add_expr_to_block (&body, loopbody);
/* Increase loop variable by step. */
- tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (shadow_loopvar), shadow_loopvar, step);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ TREE_TYPE (shadow_loopvar), shadow_loopvar,
+ step);
gfc_add_modify (&body, shadow_loopvar, tmp);
/* Finish the loop. */
tree
gfc_build_constant_array_constructor (gfc_expr * expr, tree type)
{
- tree tmptype, list, init, tmp;
+ tree tmptype, init, tmp;
HOST_WIDE_INT nelem;
gfc_constructor *c;
gfc_array_spec as;
gfc_se se;
int i;
+ VEC(constructor_elt,gc) *v = NULL;
/* First traverse the constructor list, converting the constants
to tree to build an initializer. */
nelem = 0;
- list = NULL_TREE;
c = gfc_constructor_first (expr->value.constructor);
while (c)
{
else if (POINTER_TYPE_P (type))
se.expr = gfc_build_addr_expr (gfc_get_pchar_type (c->expr->ts.kind),
se.expr);
- list = tree_cons (build_int_cst (gfc_array_index_type, nelem),
- se.expr, list);
+ CONSTRUCTOR_APPEND_ELT (v, build_int_cst (gfc_array_index_type, nelem),
+ se.expr);
c = gfc_constructor_next (c);
nelem++;
}
tmptype = gfc_get_nodesc_array_type (type, &as, PACKED_STATIC, true);
- init = build_constructor_from_list (tmptype, nreverse (list));
+ /* as is not needed anymore. */
+ for (i = 0; i < as.rank + as.corank; i++)
+ {
+ gfc_free_expr (as.lower[i]);
+ gfc_free_expr (as.upper[i]);
+ }
+
+ init = build_constructor (tmptype, v);
TREE_CONSTANT (init) = 1;
TREE_STATIC (init) = 1;
/* Only allow nonzero "from" in one-dimensional arrays. */
if (loop->dimen != 1)
return NULL_TREE;
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- loop->to[i], loop->from[i]);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[i], loop->from[i]);
}
else
tmp = loop->to[i];
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- tmp, gfc_index_one_node);
- size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ tmp, gfc_index_one_node);
+ size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ size, tmp);
}
return size;
tree offsetvar;
tree desc;
tree type;
+ tree tmp;
bool dynamic;
bool old_first_len, old_typespec_chararray_ctor;
tree old_first_len_val;
loop->from[n] = gfc_index_zero_node;
loop->to[n] = gfc_conv_mpz_to_tree (ss->expr->shape [n],
gfc_index_integer_kind);
- loop->to[n] = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- loop->to[n], gfc_index_one_node);
+ loop->to[n] = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[n], gfc_index_one_node);
}
}
}
}
+ if (TREE_CODE (loop->to[0]) == VAR_DECL)
+ dynamic = true;
+
gfc_trans_create_temp_array (&loop->pre, &loop->post, loop, &ss->data.info,
type, NULL_TREE, dynamic, true, false, where);
/* If the array grows dynamically, the upper bound of the loop variable
is determined by the array's final upper bound. */
if (dynamic)
- loop->to[0] = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[0]);
+ {
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ offsetvar, gfc_index_one_node);
+ tmp = gfc_evaluate_now (tmp, &loop->pre);
+ gfc_conv_descriptor_ubound_set (&loop->pre, desc, gfc_rank_cst[0], tmp);
+ if (loop->to[0] && TREE_CODE (loop->to[0]) == VAR_DECL)
+ gfc_add_modify (&loop->pre, loop->to[0], tmp);
+ else
+ loop->to[0] = tmp;
+ }
if (TREE_USED (offsetvar))
pushdecl (offsetvar);
else
gcc_assert (INTEGER_CST_P (offset));
+
#if 0
/* Disable bound checking for now because it's probably broken. */
if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
gfc_init_se (&se, NULL);
desc = info->subscript[dim]->data.info.descriptor;
zero = gfc_rank_cst[0];
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
gfc_conv_descriptor_ubound_get (desc, zero),
gfc_conv_descriptor_lbound_get (desc, zero));
tmp = gfc_evaluate_now (tmp, &loop->pre);
tmp = gfc_conv_array_offset (se.expr);
ss->data.info.offset = gfc_evaluate_now (tmp, block);
+
+ /* Make absolutely sure that the saved_offset is indeed saved
+ so that the variable is still accessible after the loops
+ are translated. */
+ ss->data.info.saved_offset = ss->data.info.offset;
}
}
gfc_init_block (&loop->pre);
gfc_init_block (&loop->post);
- /* Initially scalarize in order. */
+ /* Initially scalarize in order and default to no loop reversal. */
for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
- loop->order[n] = n;
+ {
+ loop->order[n] = n;
+ loop->reverse[n] = GFC_CANNOT_REVERSE;
+ }
loop->ss = gfc_ss_terminator;
}
asprintf (&msg, "Index '%%ld' of dimension %d "
"outside of expected range (%%ld:%%ld)", n+1);
- fault = fold_build2 (LT_EXPR, boolean_type_node, index, tmp_lo);
+ fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ index, tmp_lo);
gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
fold_convert (long_integer_type_node, index),
fold_convert (long_integer_type_node, tmp_lo),
fold_convert (long_integer_type_node, tmp_up));
- fault = fold_build2 (GT_EXPR, boolean_type_node, index, tmp_up);
+ fault = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ index, tmp_up);
gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
fold_convert (long_integer_type_node, index),
fold_convert (long_integer_type_node, tmp_lo),
asprintf (&msg, "Index '%%ld' of dimension %d "
"below lower bound of %%ld", n+1);
- fault = fold_build2 (LT_EXPR, boolean_type_node, index, tmp_lo);
+ fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ index, tmp_lo);
gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
fold_convert (long_integer_type_node, index),
fold_convert (long_integer_type_node, tmp_lo));
/* Return the offset for an index. Performs bound checking for elemental
- dimensions. Single element references are processed separately. */
+ dimensions. Single element references are processed separately.
+ DIM is the array dimension, I is the loop dimension. */
static tree
gfc_conv_array_index_offset (gfc_se * se, gfc_ss_info * info, int dim, int i,
desc = info->subscript[dim]->data.info.descriptor;
/* Get a zero-based index into the vector. */
- index = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- se->loop->loopvar[i], se->loop->from[i]);
+ index = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ se->loop->loopvar[i], se->loop->from[i]);
/* Multiply the index by the stride. */
- index = fold_build2 (MULT_EXPR, gfc_array_index_type,
- index, gfc_conv_array_stride (desc, 0));
+ index = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ index, gfc_conv_array_stride (desc, 0));
/* Read the vector to get an index into info->descriptor. */
data = build_fold_indirect_ref_loc (input_location,
/* Scalarized dimension. */
gcc_assert (info && se->loop);
- /* Multiply the loop variable by the stride and delta. */
+ /* Multiply the loop variable by the stride and delta. */
index = se->loop->loopvar[i];
- if (!integer_onep (info->stride[i]))
- index = fold_build2 (MULT_EXPR, gfc_array_index_type, index,
- info->stride[i]);
- if (!integer_zerop (info->delta[i]))
- index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index,
- info->delta[i]);
+ if (!integer_onep (info->stride[dim]))
+ index = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, index,
+ info->stride[dim]);
+ if (!integer_zerop (info->delta[dim]))
+ index = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, index,
+ info->delta[dim]);
break;
default:
/* Temporary array or derived type component. */
gcc_assert (se->loop);
index = se->loop->loopvar[se->loop->order[i]];
- if (!integer_zerop (info->delta[i]))
- index = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- index, info->delta[i]);
+ if (!integer_zerop (info->delta[dim]))
+ index = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, index, info->delta[dim]);
}
/* Multiply by the stride. */
if (!integer_onep (stride))
- index = fold_build2 (MULT_EXPR, gfc_array_index_type, index, stride);
+ index = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ index, stride);
return index;
}
/* Add the offset for this dimension to the stored offset for all other
dimensions. */
if (!integer_zerop (info->offset))
- index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, info->offset);
+ index = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ index, info->offset);
if (se->ss->expr && is_subref_array (se->ss->expr))
decl = se->ss->expr->symtree->n.sym->backend_decl;
{
se->string_length = se->ss->string_length;
gfc_conv_scalarized_array_ref (se, NULL);
+ gfc_advance_se_ss_chain (se);
}
tmp = tmpse.expr;
}
- cond = fold_build2 (LT_EXPR, boolean_type_node,
- indexse.expr, tmp);
+ cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ indexse.expr, tmp);
asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
"below lower bound of %%ld", n+1, sym->name);
gfc_trans_runtime_check (true, false, cond, &se->pre, where, msg,
tmp = tmpse.expr;
}
- cond = fold_build2 (GT_EXPR, boolean_type_node,
- indexse.expr, tmp);
+ cond = fold_build2_loc (input_location, GT_EXPR,
+ boolean_type_node, indexse.expr, tmp);
asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
"above upper bound of %%ld", n+1, sym->name);
gfc_trans_runtime_check (true, false, cond, &se->pre, where, msg,
/* Multiply the index by the stride. */
stride = gfc_conv_array_stride (se->expr, n);
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, indexse.expr,
- stride);
+ tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ indexse.expr, stride);
/* And add it to the total. */
- index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, tmp);
+ index = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, index, tmp);
}
tmp = gfc_conv_array_offset (se->expr);
if (!integer_zerop (tmp))
- index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, tmp);
+ index = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, index, tmp);
/* Access the calculated element. */
tmp = gfc_conv_array_data (se->expr);
stride);
gfc_add_block_to_block (pblock, &se.pre);
- info->offset = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- info->offset, index);
+ info->offset = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ info->offset, index);
info->offset = gfc_evaluate_now (info->offset, pblock);
}
-
- i = loop->order[0];
- stride = gfc_conv_array_stride (info->descriptor, info->dim[i]);
}
- else
- stride = gfc_conv_array_stride (info->descriptor, 0);
+
+ i = loop->order[0];
+ /* For the time being, the innermost loop is unconditionally on
+ the first dimension of the scalarization loop. */
+ gcc_assert (i == 0);
+ stride = gfc_conv_array_stride (info->descriptor, info->dim[i]);
/* Calculate the stride of the innermost loop. Hopefully this will
- allow the backend optimizers to do their stuff more effectively.
- */
+ allow the backend optimizers to do their stuff more effectively.
+ */
info->stride0 = gfc_evaluate_now (stride, pblock);
}
else
index = gfc_conv_array_index_offset (&se, info, info->dim[i], i,
ar, stride);
gfc_add_block_to_block (pblock, &se.pre);
- info->offset = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- info->offset, index);
+ info->offset = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, info->offset,
+ index);
info->offset = gfc_evaluate_now (info->offset, pblock);
}
loop->from[n]);
OMP_FOR_INIT (stmt) = init;
/* The exit condition. */
- TREE_VEC_ELT (cond, 0) = build2 (LE_EXPR, boolean_type_node,
- loop->loopvar[n], loop->to[n]);
+ TREE_VEC_ELT (cond, 0) = build2_loc (input_location, LE_EXPR,
+ boolean_type_node,
+ loop->loopvar[n], loop->to[n]);
+ SET_EXPR_LOCATION (TREE_VEC_ELT (cond, 0), input_location);
OMP_FOR_COND (stmt) = cond;
/* Increment the loopvar. */
- tmp = build2 (PLUS_EXPR, gfc_array_index_type,
- loop->loopvar[n], gfc_index_one_node);
- TREE_VEC_ELT (incr, 0) = fold_build2 (MODIFY_EXPR,
+ tmp = build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ loop->loopvar[n], gfc_index_one_node);
+ TREE_VEC_ELT (incr, 0) = fold_build2_loc (input_location, MODIFY_EXPR,
void_type_node, loop->loopvar[n], tmp);
OMP_FOR_INCR (stmt) = incr;
}
else
{
+ bool reverse_loop = (loop->reverse[n] == GFC_REVERSE_SET)
+ && (loop->temp_ss == NULL);
+
loopbody = gfc_finish_block (pbody);
+ if (reverse_loop)
+ {
+ tmp = loop->from[n];
+ loop->from[n] = loop->to[n];
+ loop->to[n] = tmp;
+ }
+
/* Initialize the loopvar. */
if (loop->loopvar[n] != loop->from[n])
gfc_add_modify (&loop->code[n], loop->loopvar[n], loop->from[n]);
gfc_init_block (&block);
/* The exit condition. */
- cond = fold_build2 (GT_EXPR, boolean_type_node,
- loop->loopvar[n], loop->to[n]);
+ cond = fold_build2_loc (input_location, reverse_loop ? LT_EXPR : GT_EXPR,
+ boolean_type_node, loop->loopvar[n], loop->to[n]);
tmp = build1_v (GOTO_EXPR, exit_label);
TREE_USED (exit_label) = 1;
tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location));
gfc_add_expr_to_block (&block, loopbody);
/* Increment the loopvar. */
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- loop->loopvar[n], gfc_index_one_node);
+ tmp = fold_build2_loc (input_location,
+ reverse_loop ? MINUS_EXPR : PLUS_EXPR,
+ gfc_array_index_type, loop->loopvar[n],
+ gfc_index_one_node);
+
gfc_add_modify (&block, loop->loopvar[n], tmp);
/* Build the loop. */
}
-/* Calculate the upper bound of an array section. */
-
-static tree
-gfc_conv_section_upper_bound (gfc_ss * ss, int n, stmtblock_t * pblock)
-{
- int dim;
- gfc_expr *end;
- tree desc;
- tree bound;
- gfc_se se;
- gfc_ss_info *info;
-
- gcc_assert (ss->type == GFC_SS_SECTION);
-
- info = &ss->data.info;
- dim = info->dim[n];
-
- if (info->ref->u.ar.dimen_type[dim] == DIMEN_VECTOR)
- /* We'll calculate the upper bound once we have access to the
- vector's descriptor. */
- return NULL;
-
- gcc_assert (info->ref->u.ar.dimen_type[dim] == DIMEN_RANGE);
- desc = info->descriptor;
- end = info->ref->u.ar.end[dim];
-
- if (end)
- {
- /* The upper bound was specified. */
- gfc_init_se (&se, NULL);
- gfc_conv_expr_type (&se, end, gfc_array_index_type);
- gfc_add_block_to_block (pblock, &se.pre);
- bound = se.expr;
- }
- else
- {
- /* No upper bound was specified, so use the bound of the array. */
- bound = gfc_conv_array_ubound (desc, dim);
- }
-
- return bound;
-}
-
-
/* Calculate the lower bound of an array section. */
static void
-gfc_conv_section_startstride (gfc_loopinfo * loop, gfc_ss * ss, int n)
+gfc_conv_section_startstride (gfc_loopinfo * loop, gfc_ss * ss, int dim)
{
gfc_expr *start;
gfc_expr *end;
tree desc;
gfc_se se;
gfc_ss_info *info;
- int dim;
gcc_assert (ss->type == GFC_SS_SECTION);
info = &ss->data.info;
- dim = info->dim[n];
if (info->ref->u.ar.dimen_type[dim] == DIMEN_VECTOR)
{
/* We use a zero-based index to access the vector. */
- info->start[n] = gfc_index_zero_node;
- info->end[n] = gfc_index_zero_node;
- info->stride[n] = gfc_index_one_node;
+ info->start[dim] = gfc_index_zero_node;
+ info->stride[dim] = gfc_index_one_node;
+ info->end[dim] = NULL;
return;
}
gfc_init_se (&se, NULL);
gfc_conv_expr_type (&se, start, gfc_array_index_type);
gfc_add_block_to_block (&loop->pre, &se.pre);
- info->start[n] = se.expr;
+ info->start[dim] = se.expr;
}
else
{
/* No lower bound specified so use the bound of the array. */
- info->start[n] = gfc_conv_array_lbound (desc, dim);
+ info->start[dim] = gfc_conv_array_lbound (desc, dim);
}
- info->start[n] = gfc_evaluate_now (info->start[n], &loop->pre);
+ info->start[dim] = gfc_evaluate_now (info->start[dim], &loop->pre);
/* Similarly calculate the end. Although this is not used in the
scalarizer, it is needed when checking bounds and where the end
gfc_init_se (&se, NULL);
gfc_conv_expr_type (&se, end, gfc_array_index_type);
gfc_add_block_to_block (&loop->pre, &se.pre);
- info->end[n] = se.expr;
+ info->end[dim] = se.expr;
}
else
{
/* No upper bound specified so use the bound of the array. */
- info->end[n] = gfc_conv_array_ubound (desc, dim);
+ info->end[dim] = gfc_conv_array_ubound (desc, dim);
}
- info->end[n] = gfc_evaluate_now (info->end[n], &loop->pre);
+ info->end[dim] = gfc_evaluate_now (info->end[dim], &loop->pre);
/* Calculate the stride. */
if (stride == NULL)
- info->stride[n] = gfc_index_one_node;
+ info->stride[dim] = gfc_index_one_node;
else
{
gfc_init_se (&se, NULL);
gfc_conv_expr_type (&se, stride, gfc_array_index_type);
gfc_add_block_to_block (&loop->pre, &se.pre);
- info->stride[n] = gfc_evaluate_now (se.expr, &loop->pre);
+ info->stride[dim] = gfc_evaluate_now (se.expr, &loop->pre);
}
}
gfc_conv_ss_descriptor (&loop->pre, ss, !loop->array_parameter);
for (n = 0; n < ss->data.info.dimen; n++)
- gfc_conv_section_startstride (loop, ss, n);
+ gfc_conv_section_startstride (loop, ss, ss->data.info.dim[n]);
break;
case GFC_SS_INTRINSIC:
if (ss->type != GFC_SS_SECTION)
continue;
+ /* Catch allocatable lhs in f2003. */
+ if (gfc_option.flag_realloc_lhs && ss->is_alloc_lhs)
+ continue;
+
gfc_start_block (&inner);
/* TODO: range checking for mapped dimensions. */
check_upper = true;
/* Zero stride is not allowed. */
- tmp = fold_build2 (EQ_EXPR, boolean_type_node, info->stride[n],
- gfc_index_zero_node);
+ tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ info->stride[dim], gfc_index_zero_node);
asprintf (&msg, "Zero stride is not allowed, for dimension %d "
- "of array '%s'", info->dim[n]+1,
- ss->expr->symtree->name);
+ "of array '%s'", dim + 1, ss->expr->symtree->name);
gfc_trans_runtime_check (true, false, tmp, &inner,
&ss->expr->where, msg);
gfc_free (msg);
desc = ss->data.info.descriptor;
/* This is the run-time equivalent of resolve.c's
- check_dimension(). The logical is more readable there
- than it is here, with all the trees. */
+ check_dimension(). The logic is more readable there
+ than it is here, with all the trees. */
lbound = gfc_conv_array_lbound (desc, dim);
- end = info->end[n];
+ end = info->end[dim];
if (check_upper)
ubound = gfc_conv_array_ubound (desc, dim);
else
ubound = NULL;
/* non_zerosized is true when the selected range is not
- empty. */
- stride_pos = fold_build2 (GT_EXPR, boolean_type_node,
- info->stride[n], gfc_index_zero_node);
- tmp = fold_build2 (LE_EXPR, boolean_type_node, info->start[n],
- end);
- stride_pos = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- stride_pos, tmp);
-
- stride_neg = fold_build2 (LT_EXPR, boolean_type_node,
- info->stride[n], gfc_index_zero_node);
- tmp = fold_build2 (GE_EXPR, boolean_type_node, info->start[n],
- end);
- stride_neg = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- stride_neg, tmp);
- non_zerosized = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
- stride_pos, stride_neg);
+ empty. */
+ stride_pos = fold_build2_loc (input_location, GT_EXPR,
+ boolean_type_node, info->stride[dim],
+ gfc_index_zero_node);
+ tmp = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
+ info->start[dim], end);
+ stride_pos = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node, stride_pos, tmp);
+
+ stride_neg = fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node,
+ info->stride[dim], gfc_index_zero_node);
+ tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ info->start[dim], end);
+ stride_neg = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node,
+ stride_neg, tmp);
+ non_zerosized = fold_build2_loc (input_location, TRUTH_OR_EXPR,
+ boolean_type_node,
+ stride_pos, stride_neg);
/* Check the start of the range against the lower and upper
bounds of the array, if the range is not empty.
error message. */
if (check_upper)
{
- tmp = fold_build2 (LT_EXPR, boolean_type_node,
- info->start[n], lbound);
- tmp = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- non_zerosized, tmp);
- tmp2 = fold_build2 (GT_EXPR, boolean_type_node,
- info->start[n], ubound);
- tmp2 = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- non_zerosized, tmp2);
+ tmp = fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node,
+ info->start[dim], lbound);
+ tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node,
+ non_zerosized, tmp);
+ tmp2 = fold_build2_loc (input_location, GT_EXPR,
+ boolean_type_node,
+ info->start[dim], ubound);
+ tmp2 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node,
+ non_zerosized, tmp2);
asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
- "outside of expected range (%%ld:%%ld)",
- info->dim[n]+1, ss->expr->symtree->name);
- gfc_trans_runtime_check (true, false, tmp, &inner,
+ "outside of expected range (%%ld:%%ld)",
+ dim + 1, ss->expr->symtree->name);
+ gfc_trans_runtime_check (true, false, tmp, &inner,
&ss->expr->where, msg,
- fold_convert (long_integer_type_node, info->start[n]),
- fold_convert (long_integer_type_node, lbound),
+ fold_convert (long_integer_type_node, info->start[dim]),
+ fold_convert (long_integer_type_node, lbound),
fold_convert (long_integer_type_node, ubound));
- gfc_trans_runtime_check (true, false, tmp2, &inner,
+ gfc_trans_runtime_check (true, false, tmp2, &inner,
&ss->expr->where, msg,
- fold_convert (long_integer_type_node, info->start[n]),
- fold_convert (long_integer_type_node, lbound),
+ fold_convert (long_integer_type_node, info->start[dim]),
+ fold_convert (long_integer_type_node, lbound),
fold_convert (long_integer_type_node, ubound));
gfc_free (msg);
}
else
{
- tmp = fold_build2 (LT_EXPR, boolean_type_node,
- info->start[n], lbound);
- tmp = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- non_zerosized, tmp);
+ tmp = fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node,
+ info->start[dim], lbound);
+ tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node, non_zerosized, tmp);
asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
- "below lower bound of %%ld",
- info->dim[n]+1, ss->expr->symtree->name);
- gfc_trans_runtime_check (true, false, tmp, &inner,
+ "below lower bound of %%ld",
+ dim + 1, ss->expr->symtree->name);
+ gfc_trans_runtime_check (true, false, tmp, &inner,
&ss->expr->where, msg,
- fold_convert (long_integer_type_node, info->start[n]),
+ fold_convert (long_integer_type_node, info->start[dim]),
fold_convert (long_integer_type_node, lbound));
gfc_free (msg);
}
necessarily "end" (think 0:5:3, which doesn't contain 5)
and check it against both lower and upper bounds. */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, end,
- info->start[n]);
- tmp = fold_build2 (TRUNC_MOD_EXPR, gfc_array_index_type, tmp,
- info->stride[n]);
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, end,
- tmp);
- tmp2 = fold_build2 (LT_EXPR, boolean_type_node, tmp, lbound);
- tmp2 = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- non_zerosized, tmp2);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, end,
+ info->start[dim]);
+ tmp = fold_build2_loc (input_location, TRUNC_MOD_EXPR,
+ gfc_array_index_type, tmp,
+ info->stride[dim]);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, end, tmp);
+ tmp2 = fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node, tmp, lbound);
+ tmp2 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node, non_zerosized, tmp2);
if (check_upper)
{
- tmp3 = fold_build2 (GT_EXPR, boolean_type_node, tmp, ubound);
- tmp3 = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- non_zerosized, tmp3);
+ tmp3 = fold_build2_loc (input_location, GT_EXPR,
+ boolean_type_node, tmp, ubound);
+ tmp3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node, non_zerosized, tmp3);
asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
- "outside of expected range (%%ld:%%ld)",
- info->dim[n]+1, ss->expr->symtree->name);
+ "outside of expected range (%%ld:%%ld)",
+ dim + 1, ss->expr->symtree->name);
gfc_trans_runtime_check (true, false, tmp2, &inner,
&ss->expr->where, msg,
fold_convert (long_integer_type_node, tmp),
else
{
asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
- "below lower bound of %%ld",
- info->dim[n]+1, ss->expr->symtree->name);
+ "below lower bound of %%ld",
+ dim + 1, ss->expr->symtree->name);
gfc_trans_runtime_check (true, false, tmp2, &inner,
&ss->expr->where, msg,
fold_convert (long_integer_type_node, tmp),
fold_convert (long_integer_type_node, lbound));
gfc_free (msg);
}
-
+
/* Check the section sizes match. */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, end,
- info->start[n]);
- tmp = fold_build2 (FLOOR_DIV_EXPR, gfc_array_index_type, tmp,
- info->stride[n]);
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, tmp);
- tmp = fold_build2 (MAX_EXPR, gfc_array_index_type, tmp,
- build_int_cst (gfc_array_index_type, 0));
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, end,
+ info->start[dim]);
+ tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR,
+ gfc_array_index_type, tmp,
+ info->stride[dim]);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ gfc_index_one_node, tmp);
+ tmp = fold_build2_loc (input_location, MAX_EXPR,
+ gfc_array_index_type, tmp,
+ build_int_cst (gfc_array_index_type, 0));
/* We remember the size of the first section, and check all the
- others against this. */
+ others against this. */
if (size[n])
{
- tmp3 = fold_build2 (NE_EXPR, boolean_type_node, tmp, size[n]);
+ tmp3 = fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node, tmp, size[n]);
asprintf (&msg, "Array bound mismatch for dimension %d "
"of array '%s' (%%ld/%%ld)",
- info->dim[n]+1, ss->expr->symtree->name);
+ dim + 1, ss->expr->symtree->name);
gfc_trans_runtime_check (true, false, tmp3, &inner,
&ss->expr->where, msg,
if (gfc_symbols_could_alias (lsym, rsym))
return 1;
- if (rsym->ts.type != BT_DERIVED
- && lsym->ts.type != BT_DERIVED)
+ if (rsym->ts.type != BT_DERIVED && rsym->ts.type != BT_CLASS
+ && lsym->ts.type != BT_DERIVED && lsym->ts.type != BT_CLASS)
return 0;
/* For derived types we must check all the component types. We can ignore
gfc_ref *lref;
gfc_ref *rref;
int nDepend = 0;
+ int i, j;
loop->temp_ss = NULL;
lref = dest->expr->ref;
rref = ss->expr->ref;
- nDepend = gfc_dep_resolver (lref, rref);
+ nDepend = gfc_dep_resolver (lref, rref, &loop->reverse[0]);
+
if (nDepend == 1)
break;
+
+ for (i = 0; i < dest->data.info.dimen; i++)
+ for (j = 0; j < ss->data.info.dimen; j++)
+ if (i != j
+ && dest->data.info.dim[i] == ss->data.info.dim[j])
+ {
+ /* If we don't access array elements in the same order,
+ there is a dependency. */
+ nDepend = 1;
+ goto temporary;
+ }
#if 0
/* TODO : loop shifting. */
if (nDepend == 1)
}
}
+temporary:
+
if (nDepend == 1)
{
tree base_type = gfc_typenode_for_spec (&dest->expr->ts);
void
gfc_conv_loop_setup (gfc_loopinfo * loop, locus * where)
{
- int n;
+ int n, dim, spec_dim;
gfc_ss_info *info;
gfc_ss_info *specinfo;
gfc_ss *ss;
loopspec[n] = NULL;
dynamic[n] = false;
/* We use one SS term, and use that to determine the bounds of the
- loop for this dimension. We try to pick the simplest term. */
+ loop for this dimension. We try to pick the simplest term. */
for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
{
+ if (ss->type == GFC_SS_SCALAR || ss->type == GFC_SS_REFERENCE)
+ continue;
+
+ info = &ss->data.info;
+ dim = info->dim[n];
+
+ if (loopspec[n] != NULL)
+ {
+ specinfo = &loopspec[n]->data.info;
+ spec_dim = specinfo->dim[n];
+ }
+ else
+ {
+ /* Silence uninitialized warnings. */
+ specinfo = NULL;
+ spec_dim = 0;
+ }
+
if (ss->shape)
{
+ gcc_assert (ss->shape[dim]);
/* The frontend has worked out the size for us. */
- if (!loopspec[n] || !loopspec[n]->shape
- || !integer_zerop (loopspec[n]->data.info.start[n]))
+ if (!loopspec[n]
+ || !loopspec[n]->shape
+ || !integer_zerop (specinfo->start[spec_dim]))
/* Prefer zero-based descriptors if possible. */
loopspec[n] = ss;
continue;
/* TODO: Pick the best bound if we have a choice between a
function and something else. */
- if (ss->type == GFC_SS_FUNCTION)
- {
- loopspec[n] = ss;
- continue;
- }
+ if (ss->type == GFC_SS_FUNCTION)
+ {
+ loopspec[n] = ss;
+ continue;
+ }
- if (ss->type != GFC_SS_SECTION)
+ /* Avoid using an allocatable lhs in an assignment, since
+ there might be a reallocation coming. */
+ if (loopspec[n] && ss->is_alloc_lhs)
continue;
- if (loopspec[n])
- specinfo = &loopspec[n]->data.info;
- else
- specinfo = NULL;
- info = &ss->data.info;
+ if (ss->type != GFC_SS_SECTION)
+ continue;
- if (!specinfo)
+ if (!loopspec[n])
loopspec[n] = ss;
/* Criteria for choosing a loop specifier (most important first):
doesn't need realloc
*/
else if (loopspec[n]->type == GFC_SS_CONSTRUCTOR && dynamic[n])
loopspec[n] = ss;
- else if (integer_onep (info->stride[n])
- && !integer_onep (specinfo->stride[n]))
+ else if (integer_onep (info->stride[dim])
+ && !integer_onep (specinfo->stride[spec_dim]))
loopspec[n] = ss;
- else if (INTEGER_CST_P (info->stride[n])
- && !INTEGER_CST_P (specinfo->stride[n]))
+ else if (INTEGER_CST_P (info->stride[dim])
+ && !INTEGER_CST_P (specinfo->stride[spec_dim]))
loopspec[n] = ss;
- else if (INTEGER_CST_P (info->start[n])
- && !INTEGER_CST_P (specinfo->start[n]))
+ else if (INTEGER_CST_P (info->start[dim])
+ && !INTEGER_CST_P (specinfo->start[spec_dim]))
loopspec[n] = ss;
/* We don't work out the upper bound.
else if (INTEGER_CST_P (info->finish[n])
gcc_assert (loopspec[n]);
info = &loopspec[n]->data.info;
+ dim = info->dim[n];
/* Set the extents of this range. */
cshape = loopspec[n]->shape;
- if (cshape && INTEGER_CST_P (info->start[n])
- && INTEGER_CST_P (info->stride[n]))
+ if (cshape && INTEGER_CST_P (info->start[dim])
+ && INTEGER_CST_P (info->stride[dim]))
{
- loop->from[n] = info->start[n];
- mpz_set (i, cshape[n]);
+ loop->from[n] = info->start[dim];
+ mpz_set (i, cshape[get_array_ref_dim (info, n)]);
mpz_sub_ui (i, i, 1);
/* To = from + (size - 1) * stride. */
tmp = gfc_conv_mpz_to_tree (i, gfc_index_integer_kind);
- if (!integer_onep (info->stride[n]))
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type,
- tmp, info->stride[n]);
- loop->to[n] = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- loop->from[n], tmp);
+ if (!integer_onep (info->stride[dim]))
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, tmp,
+ info->stride[dim]);
+ loop->to[n] = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ loop->from[n], tmp);
}
else
{
- loop->from[n] = info->start[n];
+ loop->from[n] = info->start[dim];
switch (loopspec[n]->type)
{
case GFC_SS_CONSTRUCTOR:
case GFC_SS_SECTION:
/* Use the end expression if it exists and is not constant,
so that it is only evaluated once. */
- if (info->end[n] && !INTEGER_CST_P (info->end[n]))
- loop->to[n] = info->end[n];
- else
- loop->to[n] = gfc_conv_section_upper_bound (loopspec[n], n,
- &loop->pre);
+ loop->to[n] = info->end[dim];
break;
- case GFC_SS_FUNCTION:
+ case GFC_SS_FUNCTION:
/* The loop bound will be set when we generate the call. */
- gcc_assert (loop->to[n] == NULL_TREE);
- break;
+ gcc_assert (loop->to[n] == NULL_TREE);
+ break;
default:
gcc_unreachable ();
}
/* Transform everything so we have a simple incrementing variable. */
- if (integer_onep (info->stride[n]))
- info->delta[n] = gfc_index_zero_node;
+ if (integer_onep (info->stride[dim]))
+ info->delta[dim] = gfc_index_zero_node;
else
{
/* Set the delta for this section. */
- info->delta[n] = gfc_evaluate_now (loop->from[n], &loop->pre);
+ info->delta[dim] = gfc_evaluate_now (loop->from[n], &loop->pre);
/* Number of iterations is (end - start + step) / step.
with start = 0, this simplifies to
last = end / step;
for (i = 0; i<=last; i++){...}; */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- loop->to[n], loop->from[n]);
- tmp = fold_build2 (FLOOR_DIV_EXPR, gfc_array_index_type,
- tmp, info->stride[n]);
- tmp = fold_build2 (MAX_EXPR, gfc_array_index_type, tmp,
- build_int_cst (gfc_array_index_type, -1));
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, loop->to[n],
+ loop->from[n]);
+ tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR,
+ gfc_array_index_type, tmp, info->stride[dim]);
+ tmp = fold_build2_loc (input_location, MAX_EXPR, gfc_array_index_type,
+ tmp, build_int_cst (gfc_array_index_type, -1));
loop->to[n] = gfc_evaluate_now (tmp, &loop->pre);
/* Make the loop variable start at 0. */
loop->from[n] = gfc_index_zero_node;
memset (&loop->temp_ss->data.info, 0, sizeof (gfc_ss_info));
loop->temp_ss->type = GFC_SS_SECTION;
loop->temp_ss->data.info.dimen = n;
+
+ gcc_assert (loop->temp_ss->data.info.dimen != 0);
+ for (n = 0; n < loop->temp_ss->data.info.dimen; n++)
+ loop->temp_ss->data.info.dim[n] = n;
+
gfc_trans_create_temp_array (&loop->pre, &loop->post, loop,
&loop->temp_ss->data.info, tmp, NULL_TREE,
false, true, false, where);
/* If we are specifying the range the delta is already set. */
if (loopspec[n] != ss)
{
+ dim = ss->data.info.dim[n];
+
/* Calculate the offset relative to the loop variable.
- First multiply by the stride. */
+ First multiply by the stride. */
tmp = loop->from[n];
- if (!integer_onep (info->stride[n]))
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type,
- tmp, info->stride[n]);
+ if (!integer_onep (info->stride[dim]))
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ tmp, info->stride[dim]);
/* Then subtract this from our starting value. */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- info->start[n], tmp);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ info->start[dim], tmp);
- info->delta[n] = gfc_evaluate_now (tmp, &loop->pre);
+ info->delta[dim] = gfc_evaluate_now (tmp, &loop->pre);
}
}
}
}
-/* Fills in an array descriptor, and returns the size of the array. The size
- will be a simple_val, ie a variable or a constant. Also calculates the
- offset of the base. Returns the size of the array.
+/* Calculate the size of a given array dimension from the bounds. This
+ is simply (ubound - lbound + 1) if this expression is positive
+ or 0 if it is negative (pick either one if it is zero). Optionally
+ (if or_expr is non-NULL) OR the "extent is non-positive" condition
+ into *or_expr; in that case *or_expr must already hold a valid
+ boolean expression on entry, since it is used as an operand below. */
+
+tree
+gfc_conv_array_extent_dim (tree lbound, tree ubound, tree* or_expr)
+{
+ tree res;
+ tree cond;
+
+ /* Calculate (ubound - lbound + 1). */
+ res = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
+ ubound, lbound);
+ res = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type, res,
+ gfc_index_one_node);
+
+ /* Check whether the size for this dimension is non-positive and
+ clamp it to zero in that case. */
+ cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, res,
+ gfc_index_zero_node);
+ res = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type, cond,
+ gfc_index_zero_node, res);
+
+ /* Accumulate the empty-extent condition for the caller. */
+ if (or_expr)
+ *or_expr = fold_build2_loc (input_location, TRUTH_OR_EXPR,
+ boolean_type_node, *or_expr, cond);
+
+ return res;
+}
+
+
+/* For an array descriptor DESC of rank RANK, get the total number of
+ elements. This is just the product of the extents along all
+ dimensions; dimensions with non-positive extent contribute a factor
+ of zero (see gfc_conv_array_extent_dim), so the result is zero for
+ any empty array. */
+
+tree
+gfc_conv_descriptor_size (tree desc, int rank)
+{
+ tree res;
+ int dim;
+
+ res = gfc_index_one_node;
+
+ for (dim = 0; dim < rank; ++dim)
+ {
+ tree lbound;
+ tree ubound;
+ tree extent;
+
+ lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]);
+ ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]);
+
+ /* NULL: no empty-extent condition needs to be accumulated here. */
+ extent = gfc_conv_array_extent_dim (lbound, ubound, NULL);
+ res = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ res, extent);
+ }
+
+ return res;
+}
+
+
+/* Helper function for marking a boolean expression tree as unlikely:
+ wraps COND in a call to __builtin_expect (cond, 0) so the branch is
+ predicted not taken. The conversions to and from
+ long_integer_type_node bridge the boolean operand to the builtin's
+ long arguments; the result is again of boolean_type_node. */
+
+static tree
+gfc_unlikely (tree cond)
+{
+ tree tmp;
+
+ cond = fold_convert (long_integer_type_node, cond);
+ tmp = build_zero_cst (long_integer_type_node);
+ cond = build_call_expr_loc (input_location,
+ built_in_decls[BUILT_IN_EXPECT], 2, cond, tmp);
+ cond = fold_convert (boolean_type_node, cond);
+ return cond;
+}
+
+/* Fills in an array descriptor, and returns the size of the array.
+ The size will be a simple_val, ie a variable or a constant. Also
+ calculates the offset of the base. The pointer argument overflow,
+ which should be of integer type, will increase in value if overflow
+ occurs during the size calculation. Returns the size of the array.
{
stride = 1;
offset = 0;
for (n = 0; n < rank; n++)
{
- a.lbound[n] = specified_lower_bound;
- offset = offset + a.lbond[n] * stride;
- size = 1 - lbound;
- a.ubound[n] = specified_upper_bound;
- a.stride[n] = stride;
- size = siz >= 0 ? ubound + size : 0; //size = ubound + 1 - lbound
- stride = stride * size;
+ a.lbound[n] = specified_lower_bound;
+ offset = offset + a.lbound[n] * stride;
+ size = 1 - lbound;
+ a.ubound[n] = specified_upper_bound;
+ a.stride[n] = stride;
+ size = ubound + size > 0 ? ubound + size : 0; //size = ubound + 1 - lbound
+ overflow += size == 0 ? 0: (MAX/size < stride ? 1: 0);
+ stride = stride * size;
}
+ element_size = sizeof (array element);
+ stride = (size_t) stride;
+ overflow += element_size == 0 ? 0: (MAX/element_size < stride ? 1: 0);
+ stride = stride * element_size;
return (stride);
} */
/*GCC ARRAYS*/
static tree
gfc_array_init_size (tree descriptor, int rank, int corank, tree * poffset,
gfc_expr ** lower, gfc_expr ** upper,
- stmtblock_t * pblock)
+ stmtblock_t * pblock, tree * overflow)
{
tree type;
tree tmp;
tree size;
tree offset;
tree stride;
- tree cond;
+ tree element_size;
tree or_expr;
tree thencase;
tree elsecase;
tmp = gfc_conv_descriptor_dtype (descriptor);
gfc_add_modify (pblock, tmp, gfc_get_dtype (TREE_TYPE (descriptor)));
- or_expr = NULL_TREE;
+ or_expr = boolean_false_node;
for (n = 0; n < rank; n++)
{
+ tree conv_lbound;
+ tree conv_ubound;
+
/* We have 3 possibilities for determining the size of the array:
- lower == NULL => lbound = 1, ubound = upper[n]
- upper[n] = NULL => lbound = 1, ubound = lower[n]
- upper[n] != NULL => lbound = lower[n], ubound = upper[n] */
+ lower == NULL => lbound = 1, ubound = upper[n]
+ upper[n] = NULL => lbound = 1, ubound = lower[n]
+ upper[n] != NULL => lbound = lower[n], ubound = upper[n] */
ubound = upper[n];
/* Set lower bound. */
else
{
gcc_assert (lower[n]);
- if (ubound)
- {
+ if (ubound)
+ {
gfc_conv_expr_type (&se, lower[n], gfc_array_index_type);
gfc_add_block_to_block (pblock, &se.pre);
- }
- else
- {
- se.expr = gfc_index_one_node;
- ubound = lower[n];
- }
+ }
+ else
+ {
+ se.expr = gfc_index_one_node;
+ ubound = lower[n];
+ }
}
gfc_conv_descriptor_lbound_set (pblock, descriptor, gfc_rank_cst[n],
se.expr);
+ conv_lbound = se.expr;
/* Work out the offset for this component. */
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, se.expr, stride);
- offset = fold_build2 (MINUS_EXPR, gfc_array_index_type, offset, tmp);
-
- /* Start the calculation for the size of this dimension. */
- size = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, se.expr);
+ tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ se.expr, stride);
+ offset = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, offset, tmp);
/* Set upper bound. */
gfc_init_se (&se, NULL);
gfc_conv_expr_type (&se, ubound, gfc_array_index_type);
gfc_add_block_to_block (pblock, &se.pre);
- gfc_conv_descriptor_ubound_set (pblock, descriptor, gfc_rank_cst[n], se.expr);
+ gfc_conv_descriptor_ubound_set (pblock, descriptor,
+ gfc_rank_cst[n], se.expr);
+ conv_ubound = se.expr;
/* Store the stride. */
- gfc_conv_descriptor_stride_set (pblock, descriptor, gfc_rank_cst[n], stride);
-
- /* Calculate the size of this dimension. */
- size = fold_build2 (PLUS_EXPR, gfc_array_index_type, se.expr, size);
-
- /* Check whether the size for this dimension is negative. */
- cond = fold_build2 (LE_EXPR, boolean_type_node, size,
- gfc_index_zero_node);
- if (n == 0)
- or_expr = cond;
- else
- or_expr = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, or_expr, cond);
-
- size = fold_build3 (COND_EXPR, gfc_array_index_type, cond,
- gfc_index_zero_node, size);
-
+ gfc_conv_descriptor_stride_set (pblock, descriptor,
+ gfc_rank_cst[n], stride);
+
+ /* Calculate size and check whether extent is negative. */
+ size = gfc_conv_array_extent_dim (conv_lbound, conv_ubound, &or_expr);
+ size = gfc_evaluate_now (size, pblock);
+
+ /* Check whether multiplying the stride by the number of
+ elements in this dimension would overflow. We must also check
+ whether the current dimension has zero size in order to avoid
+ division by zero.
+ */
+ tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
+ gfc_array_index_type,
+ fold_convert (gfc_array_index_type,
+ TYPE_MAX_VALUE (gfc_array_index_type)),
+ size);
+ tmp = fold_build3_loc
+ (input_location, COND_EXPR, integer_type_node,
+ gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node, tmp, stride)),
+ integer_one_node, integer_zero_node);
+ tmp = fold_build3_loc
+ (input_location, COND_EXPR, integer_type_node,
+ gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
+ boolean_type_node, size,
+ build_zero_cst (gfc_array_index_type))),
+ integer_zero_node, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
+ *overflow, tmp);
+ *overflow = gfc_evaluate_now (tmp, pblock);
+
/* Multiply the stride by the number of elements in this dimension. */
- stride = fold_build2 (MULT_EXPR, gfc_array_index_type, stride, size);
+ stride = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, stride, size);
stride = gfc_evaluate_now (stride, pblock);
}
}
else
{
- if (ubound || n == rank + corank - 1)
- {
+ if (ubound || n == rank + corank - 1)
+ {
gfc_conv_expr_type (&se, lower[n], gfc_array_index_type);
gfc_add_block_to_block (pblock, &se.pre);
- }
- else
- {
- se.expr = gfc_index_one_node;
- ubound = lower[n];
- }
+ }
+ else
+ {
+ se.expr = gfc_index_one_node;
+ ubound = lower[n];
+ }
}
gfc_conv_descriptor_lbound_set (pblock, descriptor, gfc_rank_cst[n],
se.expr);
gcc_assert (ubound);
gfc_conv_expr_type (&se, ubound, gfc_array_index_type);
gfc_add_block_to_block (pblock, &se.pre);
- gfc_conv_descriptor_ubound_set (pblock, descriptor, gfc_rank_cst[n], se.expr);
+ gfc_conv_descriptor_ubound_set (pblock, descriptor,
+ gfc_rank_cst[n], se.expr);
}
}
/* The stride is the number of elements in the array, so multiply by the
size of an element to get the total size. */
tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
- size = fold_build2 (MULT_EXPR, gfc_array_index_type, stride,
- fold_convert (gfc_array_index_type, tmp));
+ /* Convert to size_t. */
+ element_size = fold_convert (sizetype, tmp);
+ stride = fold_convert (sizetype, stride);
+
+ /* First check for overflow. Since an array of type character can
+ have zero element_size, we must check for that before
+ dividing. */
+ tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
+ sizetype,
+ TYPE_MAX_VALUE (sizetype), element_size);
+ tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node,
+ gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node, tmp,
+ stride)),
+ integer_one_node, integer_zero_node);
+ tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node,
+ gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
+ boolean_type_node,
+ element_size,
+ size_zero_node)),
+ integer_zero_node, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
+ *overflow, tmp);
+ *overflow = gfc_evaluate_now (tmp, pblock);
+
+ size = fold_build2_loc (input_location, MULT_EXPR, sizetype,
+ stride, element_size);
if (poffset != NULL)
{
var = gfc_create_var (TREE_TYPE (size), "size");
gfc_start_block (&thenblock);
- gfc_add_modify (&thenblock, var, gfc_index_zero_node);
+ gfc_add_modify (&thenblock, var, size_zero_node);
thencase = gfc_finish_block (&thenblock);
gfc_start_block (&elseblock);
tree pointer;
tree offset;
tree size;
+ tree msg;
+ tree error;
+ tree overflow; /* Boolean storing whether size calculation overflows. */
+ tree var_overflow;
+ tree cond;
+ stmtblock_t elseblock;
gfc_expr **lower;
gfc_expr **upper;
gfc_ref *ref, *prev_ref = NULL;
break;
}
+ overflow = integer_zero_node;
size = gfc_array_init_size (se->expr, ref->u.ar.as->rank,
ref->u.ar.as->corank, &offset, lower, upper,
- &se->pre);
+ &se->pre, &overflow);
+
+ var_overflow = gfc_create_var (integer_type_node, "overflow");
+ gfc_add_modify (&se->pre, var_overflow, overflow);
+
+ /* Generate the block of code handling overflow. */
+ msg = gfc_build_addr_expr (pchar_type_node, gfc_build_localized_cstring_const
+ ("Integer overflow when calculating the amount of "
+ "memory to allocate"));
+ error = build_call_expr_loc (input_location,
+ gfor_fndecl_runtime_error, 1, msg);
+
+ if (pstat != NULL_TREE && !integer_zerop (pstat))
+ {
+ /* Set the status variable if it's present. */
+ stmtblock_t set_status_block;
+ tree status_type = pstat ? TREE_TYPE (TREE_TYPE (pstat)) : NULL_TREE;
+ gfc_start_block (&set_status_block);
+ gfc_add_modify (&set_status_block,
+ fold_build1_loc (input_location, INDIRECT_REF,
+ status_type, pstat),
+ build_int_cst (status_type, LIBERROR_ALLOCATION));
+
+ tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ pstat, build_int_cst (TREE_TYPE (pstat), 0));
+ error = fold_build3_loc (input_location, COND_EXPR, void_type_node, tmp,
+ error, gfc_finish_block (&set_status_block));
+ }
+
+ gfc_start_block (&elseblock);
+
/* Allocate memory to store the data. */
pointer = gfc_conv_descriptor_data_get (se->expr);
STRIP_NOPS (pointer);
/* The allocate_array variants take the old pointer as first argument. */
if (allocatable_array)
- tmp = gfc_allocate_array_with_status (&se->pre, pointer, size, pstat, expr);
+ tmp = gfc_allocate_array_with_status (&elseblock, pointer, size, pstat, expr);
else
- tmp = gfc_allocate_with_status (&se->pre, size, pstat);
- tmp = fold_build2 (MODIFY_EXPR, void_type_node, pointer, tmp);
+ tmp = gfc_allocate_with_status (&elseblock, size, pstat);
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node, pointer,
+ tmp);
+
+ gfc_add_expr_to_block (&elseblock, tmp);
+
+ cond = gfc_unlikely (fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ var_overflow, integer_zero_node));
+ tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond,
+ error, gfc_finish_block (&elseblock));
+
gfc_add_expr_to_block (&se->pre, tmp);
gfc_conv_descriptor_offset_set (&se->pre, se->expr, offset);
- if (expr->ts.type == BT_DERIVED
+ if ((expr->ts.type == BT_DERIVED || expr->ts.type == BT_CLASS)
&& expr->ts.u.derived->attr.alloc_comp)
{
tmp = gfc_nullify_alloc_comp (expr->ts.u.derived, se->expr,
gfc_add_expr_to_block (&block, tmp);
/* Zero the data pointer. */
- tmp = fold_build2 (MODIFY_EXPR, void_type_node,
- var, build_int_cst (TREE_TYPE (var), 0));
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
+ var, build_int_cst (TREE_TYPE (var), 0));
gfc_add_expr_to_block (&block, tmp);
return gfc_finish_block (&block);
gfc_add_modify (pblock, ubound, se.expr);
}
/* The offset of this dimension. offset = offset - lbound * stride. */
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, lbound, size);
- offset = fold_build2 (MINUS_EXPR, gfc_array_index_type, offset, tmp);
+ tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ lbound, size);
+ offset = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
+ offset, tmp);
/* The size of this dimension, and the stride of the next. */
if (dim + 1 < as->rank)
if (ubound != NULL_TREE && !(stride && INTEGER_CST_P (stride)))
{
/* Calculate stride = size * (ubound + 1 - lbound). */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, lbound);
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type, ubound, tmp);
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ gfc_index_one_node, lbound);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, ubound, tmp);
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, size, tmp);
if (stride)
gfc_add_modify (pblock, stride, tmp);
else
/* Make sure that negative size arrays are translated
to being zero size. */
- tmp = fold_build2 (GE_EXPR, boolean_type_node,
- stride, gfc_index_zero_node);
- tmp = fold_build3 (COND_EXPR, gfc_array_index_type, tmp,
- stride, gfc_index_zero_node);
+ tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ stride, gfc_index_zero_node);
+ tmp = fold_build3_loc (input_location, COND_EXPR,
+ gfc_array_index_type, tmp,
+ stride, gfc_index_zero_node);
gfc_add_modify (pblock, stride, tmp);
}
/* Generate code to initialize/allocate an array variable. */
-tree
-gfc_trans_auto_array_allocation (tree decl, gfc_symbol * sym, tree fnbody)
+void
+gfc_trans_auto_array_allocation (tree decl, gfc_symbol * sym,
+ gfc_wrapped_block * block)
{
- stmtblock_t block;
+ stmtblock_t init;
tree type;
tree tmp;
tree size;
/* Do nothing for USEd variables. */
if (sym->attr.use_assoc)
- return fnbody;
+ return;
type = TREE_TYPE (decl);
gcc_assert (GFC_ARRAY_TYPE_P (type));
onstack = TREE_CODE (type) != POINTER_TYPE;
- gfc_start_block (&block);
+ gfc_start_block (&init);
/* Evaluate character string length. */
if (sym->ts.type == BT_CHARACTER
&& onstack && !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
{
- gfc_conv_string_length (sym->ts.u.cl, NULL, &block);
+ gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
- gfc_trans_vla_type_sizes (sym, &block);
+ gfc_trans_vla_type_sizes (sym, &init);
/* Emit a DECL_EXPR for this variable, which will cause the
gimplifier to allocate storage, and all that good stuff. */
- tmp = fold_build1 (DECL_EXPR, TREE_TYPE (decl), decl);
- gfc_add_expr_to_block (&block, tmp);
+ tmp = fold_build1_loc (input_location, DECL_EXPR, TREE_TYPE (decl), decl);
+ gfc_add_expr_to_block (&init, tmp);
}
if (onstack)
{
- gfc_add_expr_to_block (&block, fnbody);
- return gfc_finish_block (&block);
+ gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
+ return;
}
type = TREE_TYPE (type);
if (sym->ts.type == BT_CHARACTER
&& !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
- gfc_conv_string_length (sym->ts.u.cl, NULL, &block);
+ gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
- size = gfc_trans_array_bounds (type, sym, &offset, &block);
+ size = gfc_trans_array_bounds (type, sym, &offset, &init);
/* Don't actually allocate space for Cray Pointees. */
if (sym->attr.cray_pointee)
{
if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
- gfc_add_modify (&block, GFC_TYPE_ARRAY_OFFSET (type), offset);
- gfc_add_expr_to_block (&block, fnbody);
- return gfc_finish_block (&block);
+ gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
+
+ gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
+ return;
}
/* The size is the number of elements in the array, so multiply by the
size of an element to get the total size. */
tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
- size = fold_build2 (MULT_EXPR, gfc_array_index_type, size,
- fold_convert (gfc_array_index_type, tmp));
+ size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ size, fold_convert (gfc_array_index_type, tmp));
/* Allocate memory to hold the data. */
- tmp = gfc_call_malloc (&block, TREE_TYPE (decl), size);
- gfc_add_modify (&block, decl, tmp);
+ tmp = gfc_call_malloc (&init, TREE_TYPE (decl), size);
+ gfc_add_modify (&init, decl, tmp);
/* Set offset of the array. */
if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
- gfc_add_modify (&block, GFC_TYPE_ARRAY_OFFSET (type), offset);
-
+ gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
/* Automatic arrays should not have initializers. */
gcc_assert (!sym->value);
- gfc_add_expr_to_block (&block, fnbody);
-
/* Free the temporary. */
tmp = gfc_call_free (convert (pvoid_type_node, decl));
- gfc_add_expr_to_block (&block, tmp);
- return gfc_finish_block (&block);
+ gfc_add_init_cleanup (block, gfc_finish_block (&init), tmp);
}
/* Generate entry and exit code for g77 calling convention arrays. */
-tree
-gfc_trans_g77_array (gfc_symbol * sym, tree body)
+void
+gfc_trans_g77_array (gfc_symbol * sym, gfc_wrapped_block * block)
{
tree parm;
tree type;
locus loc;
tree offset;
tree tmp;
- tree stmt;
- stmtblock_t block;
+ tree stmt;
+ stmtblock_t init;
- gfc_get_backend_locus (&loc);
+ gfc_save_backend_locus (&loc);
gfc_set_backend_locus (&sym->declared_at);
/* Descriptor type. */
type = TREE_TYPE (parm);
gcc_assert (GFC_ARRAY_TYPE_P (type));
- gfc_start_block (&block);
+ gfc_start_block (&init);
if (sym->ts.type == BT_CHARACTER
&& TREE_CODE (sym->ts.u.cl->backend_decl) == VAR_DECL)
- gfc_conv_string_length (sym->ts.u.cl, NULL, &block);
+ gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
/* Evaluate the bounds of the array. */
- gfc_trans_array_bounds (type, sym, &offset, &block);
+ gfc_trans_array_bounds (type, sym, &offset, &init);
/* Set the offset. */
if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
- gfc_add_modify (&block, GFC_TYPE_ARRAY_OFFSET (type), offset);
+ gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
/* Set the pointer itself if we aren't using the parameter directly. */
if (TREE_CODE (parm) != PARM_DECL)
{
tmp = convert (TREE_TYPE (parm), GFC_DECL_SAVED_DESCRIPTOR (parm));
- gfc_add_modify (&block, parm, tmp);
+ gfc_add_modify (&init, parm, tmp);
}
- stmt = gfc_finish_block (&block);
+ stmt = gfc_finish_block (&init);
- gfc_set_backend_locus (&loc);
-
- gfc_start_block (&block);
+ gfc_restore_backend_locus (&loc);
/* Add the initialization code to the start of the function. */
stmt = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt (input_location));
}
- gfc_add_expr_to_block (&block, stmt);
- gfc_add_expr_to_block (&block, body);
-
- return gfc_finish_block (&block);
+ gfc_add_init_cleanup (block, stmt, NULL_TREE);
}
Code is also added to copy the data back at the end of the function.
*/
-tree
-gfc_trans_dummy_array_bias (gfc_symbol * sym, tree tmpdesc, tree body)
+void
+gfc_trans_dummy_array_bias (gfc_symbol * sym, tree tmpdesc,
+ gfc_wrapped_block * block)
{
tree size;
tree type;
tree offset;
locus loc;
- stmtblock_t block;
- stmtblock_t cleanup;
+ stmtblock_t init;
+ tree stmtInit, stmtCleanup;
tree lbound;
tree ubound;
tree dubound;
tree dlbound;
tree dumdesc;
tree tmp;
- tree stmt;
tree stride, stride2;
tree stmt_packed;
tree stmt_unpacked;
/* Do nothing for pointer and allocatable arrays. */
if (sym->attr.pointer || sym->attr.allocatable)
- return body;
+ return;
if (sym->attr.dummy && gfc_is_nodesc_array (sym))
- return gfc_trans_g77_array (sym, body);
+ {
+ gfc_trans_g77_array (sym, block);
+ return;
+ }
- gfc_get_backend_locus (&loc);
+ gfc_save_backend_locus (&loc);
gfc_set_backend_locus (&sym->declared_at);
/* Descriptor type. */
type = TREE_TYPE (tmpdesc);
gcc_assert (GFC_ARRAY_TYPE_P (type));
dumdesc = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
- dumdesc = build_fold_indirect_ref_loc (input_location,
- dumdesc);
- gfc_start_block (&block);
+ dumdesc = build_fold_indirect_ref_loc (input_location, dumdesc);
+ gfc_start_block (&init);
if (sym->ts.type == BT_CHARACTER
&& TREE_CODE (sym->ts.u.cl->backend_decl) == VAR_DECL)
- gfc_conv_string_length (sym->ts.u.cl, NULL, &block);
+ gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
checkparm = (sym->as->type == AS_EXPLICIT
&& (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS));
no_repack = !(GFC_DECL_PACKED_ARRAY (tmpdesc)
- || GFC_DECL_PARTIAL_PACKED_ARRAY (tmpdesc));
+ || GFC_DECL_PARTIAL_PACKED_ARRAY (tmpdesc));
if (GFC_DECL_PARTIAL_PACKED_ARRAY (tmpdesc))
{
/* For non-constant shape arrays we only check if the first dimension
- is contiguous. Repacking higher dimensions wouldn't gain us
- anything as we still don't know the array stride. */
+ is contiguous. Repacking higher dimensions wouldn't gain us
+ anything as we still don't know the array stride. */
partial = gfc_create_var (boolean_type_node, "partial");
TREE_USED (partial) = 1;
tmp = gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[0]);
- tmp = fold_build2 (EQ_EXPR, boolean_type_node, tmp, gfc_index_one_node);
- gfc_add_modify (&block, partial, tmp);
+ tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, tmp,
+ gfc_index_one_node);
+ gfc_add_modify (&init, partial, tmp);
}
else
- {
- partial = NULL_TREE;
- }
+ partial = NULL_TREE;
/* The naming of stmt_unpacked and stmt_packed may be counter-intuitive
here, however I think it does the right thing. */
{
/* Set the first stride. */
stride = gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[0]);
- stride = gfc_evaluate_now (stride, &block);
+ stride = gfc_evaluate_now (stride, &init);
- tmp = fold_build2 (EQ_EXPR, boolean_type_node,
- stride, gfc_index_zero_node);
- tmp = fold_build3 (COND_EXPR, gfc_array_index_type, tmp,
- gfc_index_one_node, stride);
+ tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ stride, gfc_index_zero_node);
+ tmp = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type,
+ tmp, gfc_index_one_node, stride);
stride = GFC_TYPE_ARRAY_STRIDE (type, 0);
- gfc_add_modify (&block, stride, tmp);
+ gfc_add_modify (&init, stride, tmp);
/* Allow the user to disable array repacking. */
stmt_unpacked = NULL_TREE;
if (stmt_packed != NULL_TREE && stmt_unpacked != NULL_TREE)
{
/* Don't repack unknown shape arrays when the first stride is 1. */
- tmp = fold_build3 (COND_EXPR, TREE_TYPE (stmt_packed),
- partial, stmt_packed, stmt_unpacked);
+ tmp = fold_build3_loc (input_location, COND_EXPR, TREE_TYPE (stmt_packed),
+ partial, stmt_packed, stmt_unpacked);
}
else
tmp = stmt_packed != NULL_TREE ? stmt_packed : stmt_unpacked;
- gfc_add_modify (&block, tmpdesc, fold_convert (type, tmp));
+ gfc_add_modify (&init, tmpdesc, fold_convert (type, tmp));
offset = gfc_index_zero_node;
size = gfc_index_one_node;
dlbound = gfc_conv_descriptor_lbound_get (dumdesc, gfc_rank_cst[n]);
}
else
- {
+ {
dubound = NULL_TREE;
dlbound = NULL_TREE;
- }
+ }
lbound = GFC_TYPE_ARRAY_LBOUND (type, n);
if (!INTEGER_CST_P (lbound))
- {
- gfc_init_se (&se, NULL);
- gfc_conv_expr_type (&se, sym->as->lower[n],
- gfc_array_index_type);
- gfc_add_block_to_block (&block, &se.pre);
- gfc_add_modify (&block, lbound, se.expr);
- }
+ {
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_type (&se, sym->as->lower[n],
+ gfc_array_index_type);
+ gfc_add_block_to_block (&init, &se.pre);
+ gfc_add_modify (&init, lbound, se.expr);
+ }
ubound = GFC_TYPE_ARRAY_UBOUND (type, n);
/* Set the desired upper bound. */
if (sym->as->upper[n])
{
/* We know what we want the upper bound to be. */
- if (!INTEGER_CST_P (ubound))
- {
+ if (!INTEGER_CST_P (ubound))
+ {
gfc_init_se (&se, NULL);
gfc_conv_expr_type (&se, sym->as->upper[n],
- gfc_array_index_type);
- gfc_add_block_to_block (&block, &se.pre);
- gfc_add_modify (&block, ubound, se.expr);
- }
+ gfc_array_index_type);
+ gfc_add_block_to_block (&init, &se.pre);
+ gfc_add_modify (&init, ubound, se.expr);
+ }
/* Check the sizes match. */
if (checkparm)
char * msg;
tree temp;
- temp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- ubound, lbound);
- temp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, temp);
-
- stride2 = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- dubound, dlbound);
- stride2 = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, stride2);
-
- tmp = fold_build2 (NE_EXPR, gfc_array_index_type, temp, stride2);
+ temp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, ubound, lbound);
+ temp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ gfc_index_one_node, temp);
+ stride2 = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, dubound,
+ dlbound);
+ stride2 = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ gfc_index_one_node, stride2);
+ tmp = fold_build2_loc (input_location, NE_EXPR,
+ gfc_array_index_type, temp, stride2);
asprintf (&msg, "Dimension %d of array '%s' has extent "
- "%%ld instead of %%ld", n+1, sym->name);
+ "%%ld instead of %%ld", n+1, sym->name);
- gfc_trans_runtime_check (true, false, tmp, &block, &loc, msg,
+ gfc_trans_runtime_check (true, false, tmp, &init, &loc, msg,
fold_convert (long_integer_type_node, temp),
fold_convert (long_integer_type_node, stride2));
{
/* For assumed shape arrays move the upper bound by the same amount
as the lower bound. */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- dubound, dlbound);
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type, tmp, lbound);
- gfc_add_modify (&block, ubound, tmp);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, dubound, dlbound);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, tmp, lbound);
+ gfc_add_modify (&init, ubound, tmp);
}
/* The offset of this dimension. offset = offset - lbound * stride. */
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, lbound, stride);
- offset = fold_build2 (MINUS_EXPR, gfc_array_index_type, offset, tmp);
+ tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ lbound, stride);
+ offset = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, offset, tmp);
/* The size of this dimension, and the stride of the next. */
if (n + 1 < sym->as->rank)
- {
- stride = GFC_TYPE_ARRAY_STRIDE (type, n + 1);
+ {
+ stride = GFC_TYPE_ARRAY_STRIDE (type, n + 1);
- if (no_repack || partial != NULL_TREE)
- {
- stmt_unpacked =
- gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[n+1]);
- }
+ if (no_repack || partial != NULL_TREE)
+ stmt_unpacked =
+ gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[n+1]);
- /* Figure out the stride if not a known constant. */
- if (!INTEGER_CST_P (stride))
- {
- if (no_repack)
- stmt_packed = NULL_TREE;
- else
- {
- /* Calculate stride = size * (ubound + 1 - lbound). */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, lbound);
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- ubound, tmp);
- size = fold_build2 (MULT_EXPR, gfc_array_index_type,
- size, tmp);
- stmt_packed = size;
- }
-
- /* Assign the stride. */
- if (stmt_packed != NULL_TREE && stmt_unpacked != NULL_TREE)
- tmp = fold_build3 (COND_EXPR, gfc_array_index_type, partial,
- stmt_unpacked, stmt_packed);
- else
- tmp = (stmt_packed != NULL_TREE) ? stmt_packed : stmt_unpacked;
- gfc_add_modify (&block, stride, tmp);
- }
- }
+ /* Figure out the stride if not a known constant. */
+ if (!INTEGER_CST_P (stride))
+ {
+ if (no_repack)
+ stmt_packed = NULL_TREE;
+ else
+ {
+ /* Calculate stride = size * (ubound + 1 - lbound). */
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ gfc_index_one_node, lbound);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, ubound, tmp);
+ size = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, size, tmp);
+ stmt_packed = size;
+ }
+
+ /* Assign the stride. */
+ if (stmt_packed != NULL_TREE && stmt_unpacked != NULL_TREE)
+ tmp = fold_build3_loc (input_location, COND_EXPR,
+ gfc_array_index_type, partial,
+ stmt_unpacked, stmt_packed);
+ else
+ tmp = (stmt_packed != NULL_TREE) ? stmt_packed : stmt_unpacked;
+ gfc_add_modify (&init, stride, tmp);
+ }
+ }
else
{
stride = GFC_TYPE_ARRAY_SIZE (type);
if (stride && !INTEGER_CST_P (stride))
{
/* Calculate size = stride * (ubound + 1 - lbound). */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, lbound);
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- ubound, tmp);
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type,
- GFC_TYPE_ARRAY_STRIDE (type, n), tmp);
- gfc_add_modify (&block, stride, tmp);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ gfc_index_one_node, lbound);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ ubound, tmp);
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ GFC_TYPE_ARRAY_STRIDE (type, n), tmp);
+ gfc_add_modify (&init, stride, tmp);
}
}
}
/* Set the offset. */
if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
- gfc_add_modify (&block, GFC_TYPE_ARRAY_OFFSET (type), offset);
+ gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
- gfc_trans_vla_type_sizes (sym, &block);
+ gfc_trans_vla_type_sizes (sym, &init);
- stmt = gfc_finish_block (&block);
-
- gfc_start_block (&block);
+ stmtInit = gfc_finish_block (&init);
/* Only do the entry/initialization code if the arg is present. */
dumdesc = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
if (optional_arg)
{
tmp = gfc_conv_expr_present (sym);
- stmt = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt (input_location));
+ stmtInit = build3_v (COND_EXPR, tmp, stmtInit,
+ build_empty_stmt (input_location));
}
- gfc_add_expr_to_block (&block, stmt);
-
- /* Add the main function body. */
- gfc_add_expr_to_block (&block, body);
/* Cleanup code. */
- if (!no_repack)
+ if (no_repack)
+ stmtCleanup = NULL_TREE;
+ else
{
+ stmtblock_t cleanup;
gfc_start_block (&cleanup);
-
+
if (sym->attr.intent != INTENT_IN)
{
/* Copy the data back. */
tmp = gfc_call_free (tmpdesc);
gfc_add_expr_to_block (&cleanup, tmp);
- stmt = gfc_finish_block (&cleanup);
+ stmtCleanup = gfc_finish_block (&cleanup);
/* Only do the cleanup if the array was repacked. */
- tmp = build_fold_indirect_ref_loc (input_location,
- dumdesc);
+ tmp = build_fold_indirect_ref_loc (input_location, dumdesc);
tmp = gfc_conv_descriptor_data_get (tmp);
- tmp = fold_build2 (NE_EXPR, boolean_type_node, tmp, tmpdesc);
- stmt = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt (input_location));
+ tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ tmp, tmpdesc);
+ stmtCleanup = build3_v (COND_EXPR, tmp, stmtCleanup,
+ build_empty_stmt (input_location));
if (optional_arg)
- {
- tmp = gfc_conv_expr_present (sym);
- stmt = build3_v (COND_EXPR, tmp, stmt,
- build_empty_stmt (input_location));
- }
- gfc_add_expr_to_block (&block, stmt);
+ {
+ tmp = gfc_conv_expr_present (sym);
+ stmtCleanup = build3_v (COND_EXPR, tmp, stmtCleanup,
+ build_empty_stmt (input_location));
+ }
}
+
/* We don't need to free any memory allocated by internal_pack as it will
be freed at the end of the function by pop_context. */
- return gfc_finish_block (&block);
+ gfc_add_init_cleanup (block, stmtInit, stmtCleanup);
+
+ gfc_restore_backend_locus (&loc);
}
case REF_COMPONENT:
field = ref->u.c.component->backend_decl;
gcc_assert (field && TREE_CODE (field) == FIELD_DECL);
- tmp = fold_build3 (COMPONENT_REF, TREE_TYPE (field),
- tmp, field, NULL_TREE);
+ tmp = fold_build3_loc (input_location, COMPONENT_REF,
+ TREE_TYPE (field),
+ tmp, field, NULL_TREE);
break;
case REF_SUBSTRING:
gfc_init_se (&start, NULL);
gfc_conv_expr_type (&start, ref->u.ar.as->lower[n], gfc_array_index_type);
jtmp = gfc_evaluate_now (start.expr, block);
- itmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, itmp, jtmp);
- itmp = fold_build2 (MULT_EXPR, gfc_array_index_type, itmp, stride);
- index = fold_build2 (PLUS_EXPR, gfc_array_index_type, itmp, index);
+ itmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, itmp, jtmp);
+ itmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, itmp, stride);
+ index = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, itmp, index);
index = gfc_evaluate_now (index, block);
/* Update the stride. */
gfc_init_se (&start, NULL);
gfc_conv_expr_type (&start, ref->u.ar.as->upper[n], gfc_array_index_type);
- itmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, start.expr, jtmp);
- itmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, itmp);
- stride = fold_build2 (MULT_EXPR, gfc_array_index_type, stride, itmp);
+ itmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, start.expr,
+ jtmp);
+ itmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ gfc_index_one_node, itmp);
+ stride = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, stride, itmp);
stride = gfc_evaluate_now (stride, block);
}
/* Add the string lengths and assign them to the expression
string length backend declaration. */
gfc_add_modify (&se->pre, expr->ts.u.cl->backend_decl,
- fold_build2 (PLUS_EXPR, gfc_charlen_type_node,
+ fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_charlen_type_node,
expr->value.op.op1->ts.u.cl->backend_decl,
expr->value.op.op2->ts.u.cl->backend_decl));
}
gfc_add_block_to_block (&se->pre, &tse.pre);
gfc_add_block_to_block (&se->post, &tse.post);
tse.expr = fold_convert (gfc_charlen_type_node, tse.expr);
- tse.expr = fold_build2 (MAX_EXPR, gfc_charlen_type_node, tse.expr,
- build_int_cst (gfc_charlen_type_node, 0));
+ tse.expr = fold_build2_loc (input_location, MAX_EXPR,
+ gfc_charlen_type_node, tse.expr,
+ build_int_cst (gfc_charlen_type_node, 0));
expr->ts.u.cl->backend_decl = tse.expr;
gfc_free_interface_mapping (&mapping);
break;
}
-
/* Convert an array for passing as an actual argument. Expressions and
vector subscripts are evaluated and stored in a temporary, which is then
passed. For whole arrays the descriptor is passed. For array sections
EXPR is the right-hand side of a pointer assignment and
se->expr is the descriptor for the previously-evaluated
left-hand side. The function creates an assignment from
- EXPR to se->expr. */
+ EXPR to se->expr.
+
+
+ The se->force_tmp flag disables the non-copying descriptor optimization
+ that is used for transpose. It may be used in cases where there is an
+ alias between the transpose argument and another argument in the same
+ function call. */
void
gfc_conv_expr_descriptor (gfc_se * se, gfc_expr * expr, gfc_ss * ss)
{
gfc_loopinfo loop;
- gfc_ss *secss;
gfc_ss_info *info;
int need_tmp;
int n;
tree offset;
int full;
bool subref_array_target = false;
+ gfc_expr *arg;
+ gcc_assert (ss != NULL);
gcc_assert (ss != gfc_ss_terminator);
/* Special case things we know we can pass easily. */
/* If we have a linear array section, we can pass it directly.
Otherwise we need to copy it into a temporary. */
- /* Find the SS for the array section. */
- secss = ss;
- while (secss != gfc_ss_terminator && secss->type != GFC_SS_SECTION)
- secss = secss->next;
-
- gcc_assert (secss != gfc_ss_terminator);
- info = &secss->data.info;
+ gcc_assert (ss->type == GFC_SS_SECTION);
+ gcc_assert (ss->expr == expr);
+ info = &ss->data.info;
/* Get the descriptor for the array. */
- gfc_conv_ss_descriptor (&se->pre, secss, 0);
+ gfc_conv_ss_descriptor (&se->pre, ss, 0);
desc = info->descriptor;
subref_array_target = se->direct_byref && is_subref_array (expr);
need_tmp = gfc_ref_needs_temporary_p (expr->ref)
&& !subref_array_target;
+ if (se->force_tmp)
+ need_tmp = 1;
+
if (need_tmp)
full = 0;
else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
full = gfc_full_array_ref_p (info->ref, NULL);
if (full)
+ for (n = 0; n < info->dimen; n++)
+ if (info->dim[n] != n)
+ {
+ full = 0;
+ break;
+ }
+
+ if (full)
{
- if (se->direct_byref)
+ if (se->direct_byref && !se->byref_noassign)
{
/* Copy the descriptor for pointer assignments. */
gfc_add_modify (&se->pre, se->expr, desc);
break;
case EXPR_FUNCTION:
+
+ /* We don't need to copy data in some cases. */
+ arg = gfc_get_noncopying_intrinsic_argument (expr);
+ if (arg)
+ {
+ /* This is a call to transpose... */
+ gcc_assert (expr->value.function.isym->id == GFC_ISYM_TRANSPOSE);
+ /* ... which has already been handled by the scalarizer, so
+ that we just need to get its argument's descriptor. */
+ gfc_conv_expr_descriptor (se, expr->value.function.actual->expr, ss);
+ return;
+ }
+
/* A transformational function return value will be a temporary
array descriptor. We still need to go through the scalarizer
to create the descriptor. Elemental functions ar handled as
arbitrary expressions, i.e. copy to a temporary. */
- secss = ss;
- /* Look for the SS for this function. */
- while (secss != gfc_ss_terminator
- && (secss->type != GFC_SS_FUNCTION || secss->expr != expr))
- secss = secss->next;
if (se->direct_byref)
{
- gcc_assert (secss != gfc_ss_terminator);
+ gcc_assert (ss->type == GFC_SS_FUNCTION && ss->expr == expr);
/* For pointer assignments pass the descriptor directly. */
- se->ss = secss;
+ if (se->ss == NULL)
+ se->ss = ss;
+ else
+ gcc_assert (se->ss == ss);
se->expr = gfc_build_addr_expr (NULL_TREE, se->expr);
gfc_conv_expr (se, expr);
return;
}
- if (secss == gfc_ss_terminator)
+ if (ss->expr != expr || ss->type != GFC_SS_FUNCTION)
{
- /* Elemental function. */
+ if (ss->expr != expr)
+ /* Elemental function. */
+ gcc_assert ((expr->value.function.esym != NULL
+ && expr->value.function.esym->attr.elemental)
+ || (expr->value.function.isym != NULL
+ && expr->value.function.isym->elemental));
+ else
+ gcc_assert (ss->type == GFC_SS_INTRINSIC);
+
need_tmp = 1;
if (expr->ts.type == BT_CHARACTER
&& expr->ts.u.cl->length->expr_type != EXPR_CONSTANT)
else
{
/* Transformational function. */
- info = &secss->data.info;
+ info = &ss->data.info;
need_tmp = 0;
}
break;
{
need_tmp = 0;
info = &ss->data.info;
- secss = ss;
}
else
{
need_tmp = 1;
- secss = NULL;
info = NULL;
}
break;
default:
/* Something complicated. Copy it into a temporary. */
need_tmp = 1;
- secss = NULL;
info = NULL;
break;
}
+ /* If we are creating a temporary, we don't need to bother about aliases
+ anymore. */
+ if (need_tmp)
+ se->force_tmp = 0;
+
gfc_init_loopinfo (&loop);
/* Associate the SS with the loop. */
gfc_trans_scalarizing_loops (&loop, &block);
desc = loop.temp_ss->data.info.descriptor;
-
- gcc_assert (is_gimple_lvalue (desc));
}
else if (expr->expr_type == EXPR_FUNCTION)
{
se->string_length = gfc_get_expr_charlen (expr);
desc = info->descriptor;
- gcc_assert (secss && secss != gfc_ss_terminator);
- if (se->direct_byref)
+ if (se->direct_byref && !se->byref_noassign)
{
/* For pointer assignments we fill in the destination. */
parm = se->expr;
}
offset = gfc_index_zero_node;
- dim = 0;
/* The following can be somewhat confusing. We have two
descriptors, a new one and the original array.
{parm, parmtype, dim} refer to the new one.
- {desc, type, n, secss, loop} refer to the original, which maybe
+ {desc, type, n, loop} refer to the original, which maybe
a descriptorless array.
The bounds of the scalarization are the bounds of the section.
We don't have to worry about numeric overflows when calculating
}
else
{
- /* Check we haven't somehow got out of sync. */
- gcc_assert (info->dim[dim] == n);
-
/* Evaluate and remember the start of the section. */
- start = info->start[dim];
+ start = info->start[n];
stride = gfc_evaluate_now (stride, &loop.pre);
}
tmp = gfc_conv_array_lbound (desc, n);
- tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), start, tmp);
-
- tmp = fold_build2 (MULT_EXPR, TREE_TYPE (tmp), tmp, stride);
- offset = fold_build2 (PLUS_EXPR, TREE_TYPE (tmp), offset, tmp);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (tmp),
+ start, tmp);
+ tmp = fold_build2_loc (input_location, MULT_EXPR, TREE_TYPE (tmp),
+ tmp, stride);
+ offset = fold_build2_loc (input_location, PLUS_EXPR, TREE_TYPE (tmp),
+ offset, tmp);
if (info->ref
&& info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
/* Vector subscripts need copying and are handled elsewhere. */
if (info->ref)
gcc_assert (info->ref->u.ar.dimen_type[n] == DIMEN_RANGE);
+
+ /* Look for the corresponding scalarizer dimension: dim. */
+ for (dim = 0; dim < ndim; dim++)
+ if (info->dim[dim] == n)
+ break;
+
+ /* Loop exited early: the DIM being looked for has been found. */
+ gcc_assert (dim < ndim);
/* Set the new lower bound. */
from = loop.from[dim];
|| info->ref->u.ar.type != AR_FULL)
&& !integer_onep (from))
{
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, from);
- to = fold_build2 (PLUS_EXPR, gfc_array_index_type, to, tmp);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, gfc_index_one_node,
+ from);
+ to = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, to, tmp);
from = gfc_index_one_node;
}
gfc_conv_descriptor_lbound_set (&loop.pre, parm,
/* Multiply the stride by the section stride to get the
total stride. */
- stride = fold_build2 (MULT_EXPR, gfc_array_index_type,
- stride, info->stride[dim]);
+ stride = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ stride, info->stride[n]);
if (se->direct_byref
- && info->ref
- && info->ref->u.ar.type != AR_FULL)
+ && info->ref
+ && info->ref->u.ar.type != AR_FULL)
{
- base = fold_build2 (MINUS_EXPR, TREE_TYPE (base),
- base, stride);
+ base = fold_build2_loc (input_location, MINUS_EXPR,
+ TREE_TYPE (base), base, stride);
}
else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
{
tmp = gfc_conv_array_lbound (desc, n);
- tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (base),
- tmp, loop.from[dim]);
- tmp = fold_build2 (MULT_EXPR, TREE_TYPE (base),
- tmp, gfc_conv_array_stride (desc, n));
- base = fold_build2 (PLUS_EXPR, TREE_TYPE (base),
- tmp, base);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ TREE_TYPE (base), tmp, loop.from[dim]);
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ TREE_TYPE (base), tmp,
+ gfc_conv_array_stride (desc, n));
+ base = fold_build2_loc (input_location, PLUS_EXPR,
+ TREE_TYPE (base), tmp, base);
}
/* Store the new stride. */
gfc_conv_descriptor_stride_set (&loop.pre, parm,
gfc_rank_cst[dim], stride);
-
- dim++;
}
if (se->data_not_needed)
desc = parm;
}
- if (!se->direct_byref)
+ if (!se->direct_byref || se->byref_noassign)
{
/* Get a pointer to the new descriptor. */
if (se->want_pointer)
tree ubound = gfc_conv_descriptor_ubound_get (desc, gfc_index_zero_node);
tree lbound = gfc_conv_descriptor_lbound_get (desc, gfc_index_zero_node);
- *size = fold_build2 (MINUS_EXPR, gfc_array_index_type, ubound, lbound);
- *size = fold_build2 (PLUS_EXPR, gfc_array_index_type, *size,
- gfc_index_one_node);
- *size = fold_build2 (MAX_EXPR, gfc_array_index_type, *size,
- gfc_index_zero_node);
+ *size = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, ubound, lbound);
+ *size = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ *size, gfc_index_one_node);
+ *size = fold_build2_loc (input_location, MAX_EXPR, gfc_array_index_type,
+ *size, gfc_index_zero_node);
}
elem = TYPE_SIZE_UNIT (gfc_get_element_type (TREE_TYPE (desc)));
- *size = fold_build2 (MULT_EXPR, gfc_array_index_type, *size,
- fold_convert (gfc_array_index_type, elem));
+ *size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ *size, fold_convert (gfc_array_index_type, elem));
}
/* Convert an array for passing as an actual parameter. */
ultimate_ptr_comp = false;
ultimate_alloc_comp = false;
+
for (ref = expr->ref; ref; ref = ref->next)
{
if (ref->next == NULL)
if (sym->ts.type == BT_CHARACTER)
se->string_length = sym->ts.u.cl->backend_decl;
- if (sym->ts.type == BT_DERIVED)
+ if (sym->ts.type == BT_DERIVED || sym->ts.type == BT_CLASS)
{
gfc_conv_expr_descriptor (se, expr, ss);
se->expr = gfc_conv_array_data (se->expr);
contiguous = g77 && !this_array_result && contiguous;
/* There is no need to pack and unpack the array, if it is contiguous
- and not deferred or assumed shape. */
+ and not a deferred- or assumed-shape array, or if it is simply
+ contiguous. */
no_pack = ((sym && sym->as
&& !sym->attr.pointer
&& sym->as->type != AS_DEFERRED
||
(ref && ref->u.ar.as
&& ref->u.ar.as->type != AS_DEFERRED
- && ref->u.ar.as->type != AS_ASSUMED_SHAPE));
+ && ref->u.ar.as->type != AS_ASSUMED_SHAPE)
+ ||
+ gfc_is_simply_contiguous (expr, false));
no_pack = contiguous && no_pack;
/* Deallocate the allocatable components of structures that are
not variable. */
- if (expr->ts.type == BT_DERIVED
+ if ((expr->ts.type == BT_DERIVED || expr->ts.type == BT_CLASS)
&& expr->ts.u.derived->attr.alloc_comp
&& expr->expr_type != EXPR_VARIABLE)
{
gfc_add_expr_to_block (&se->post, tmp);
}
- if (g77)
+ if (g77 || (fsym && fsym->attr.contiguous
+ && !gfc_is_simply_contiguous (expr, false)))
{
+ tree origptr = NULL_TREE;
+
desc = se->expr;
+
+ /* For contiguous arrays, save the original value of the descriptor. */
+ if (!g77)
+ {
+ origptr = gfc_create_var (pvoid_type_node, "origptr");
+ tmp = build_fold_indirect_ref_loc (input_location, desc);
+ tmp = gfc_conv_array_data (tmp);
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR,
+ TREE_TYPE (origptr), origptr,
+ fold_convert (TREE_TYPE (origptr), tmp));
+ gfc_add_expr_to_block (&se->pre, tmp);
+ }
+
/* Repack the array. */
if (gfc_option.warn_array_temp)
{
if (fsym && fsym->attr.optional && sym && sym->attr.optional)
{
tmp = gfc_conv_expr_present (sym);
- ptr = build3 (COND_EXPR, TREE_TYPE (se->expr), tmp,
- fold_convert (TREE_TYPE (se->expr), ptr),
+ ptr = build3_loc (input_location, COND_EXPR, TREE_TYPE (se->expr),
+ tmp, fold_convert (TREE_TYPE (se->expr), ptr),
fold_convert (TREE_TYPE (se->expr), null_pointer_node));
}
ptr = gfc_evaluate_now (ptr, &se->pre);
- se->expr = ptr;
+ /* Use the packed data for the actual argument, except for contiguous arrays,
+ where the descriptor's data component is set. */
+ if (g77)
+ se->expr = ptr;
+ else
+ {
+ tmp = build_fold_indirect_ref_loc (input_location, desc);
+ gfc_conv_descriptor_data_set (&se->pre, tmp, ptr);
+ }
if (gfc_option.rtcheck & GFC_RTCHECK_ARRAY_TEMPS)
{
tmp = build_fold_indirect_ref_loc (input_location,
desc);
tmp = gfc_conv_array_data (tmp);
- tmp = fold_build2 (NE_EXPR, boolean_type_node,
- fold_convert (TREE_TYPE (tmp), ptr), tmp);
+ tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ fold_convert (TREE_TYPE (tmp), ptr), tmp);
if (fsym && fsym->attr.optional && sym && sym->attr.optional)
- tmp = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- gfc_conv_expr_present (sym), tmp);
+ tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node,
+ gfc_conv_expr_present (sym), tmp);
gfc_trans_runtime_check (false, true, tmp, &se->pre,
&expr->where, msg);
tmp = build_fold_indirect_ref_loc (input_location,
desc);
tmp = gfc_conv_array_data (tmp);
- tmp = fold_build2 (NE_EXPR, boolean_type_node,
- fold_convert (TREE_TYPE (tmp), ptr), tmp);
+ tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ fold_convert (TREE_TYPE (tmp), ptr), tmp);
if (fsym && fsym->attr.optional && sym && sym->attr.optional)
- tmp = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- gfc_conv_expr_present (sym), tmp);
+ tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node,
+ gfc_conv_expr_present (sym), tmp);
tmp = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt (input_location));
gfc_add_block_to_block (&block, &se->post);
gfc_init_block (&se->post);
+
+ /* Reset the descriptor pointer. */
+ if (!g77)
+ {
+ tmp = build_fold_indirect_ref_loc (input_location, desc);
+ gfc_conv_descriptor_data_set (&se->post, tmp, origptr);
+ }
+
gfc_add_block_to_block (&se->post, &block);
}
}
gfc_add_expr_to_block (&block, tmp);
/* Zero the data pointer. */
- tmp = fold_build2 (MODIFY_EXPR, void_type_node,
- var, build_int_cst (TREE_TYPE (var), 0));
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
+ var, build_int_cst (TREE_TYPE (var), 0));
gfc_add_expr_to_block (&block, tmp);
return gfc_finish_block (&block);
idx = gfc_rank_cst[rank - 1];
nelems = gfc_conv_descriptor_ubound_get (decl, idx);
tmp = gfc_conv_descriptor_lbound_get (decl, idx);
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, nelems, tmp);
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- tmp, gfc_index_one_node);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
+ nelems, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ tmp, gfc_index_one_node);
tmp = gfc_evaluate_now (tmp, block);
nelems = gfc_conv_descriptor_stride_get (decl, idx);
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, nelems, tmp);
+ tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ nelems, tmp);
return gfc_evaluate_now (tmp, block);
}
If no_malloc is set, only the copy is done. */
static tree
-duplicate_allocatable(tree dest, tree src, tree type, int rank,
- bool no_malloc)
+duplicate_allocatable (tree dest, tree src, tree type, int rank,
+ bool no_malloc)
{
tree tmp;
tree size;
if (rank == 0)
{
tmp = null_pointer_node;
- tmp = fold_build2 (MODIFY_EXPR, type, dest, tmp);
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR, type, dest, tmp);
gfc_add_expr_to_block (&block, tmp);
null_data = gfc_finish_block (&block);
gfc_init_block (&block);
- size = TYPE_SIZE_UNIT (type);
+ size = TYPE_SIZE_UNIT (TREE_TYPE (type));
if (!no_malloc)
{
tmp = gfc_call_malloc (&block, type, size);
- tmp = fold_build2 (MODIFY_EXPR, void_type_node, dest,
- fold_convert (type, tmp));
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
+ dest, fold_convert (type, tmp));
gfc_add_expr_to_block (&block, tmp);
}
nelems = get_full_array_size (&block, src, rank);
tmp = fold_convert (gfc_array_index_type,
TYPE_SIZE_UNIT (gfc_get_element_type (type)));
- size = fold_build2 (MULT_EXPR, gfc_array_index_type, nelems, tmp);
+ size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ nelems, tmp);
if (!no_malloc)
{
tmp = TREE_TYPE (gfc_conv_descriptor_data_get (src));
null_cond = gfc_conv_descriptor_data_get (src);
null_cond = convert (pvoid_type_node, null_cond);
- null_cond = fold_build2 (NE_EXPR, boolean_type_node,
- null_cond, null_pointer_node);
+ null_cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ null_cond, null_pointer_node);
return build3_v (COND_EXPR, null_cond, tmp, null_data);
}
tree
gfc_duplicate_allocatable (tree dest, tree src, tree type, int rank)
{
- return duplicate_allocatable(dest, src, type, rank, false);
+ return duplicate_allocatable (dest, src, type, rank, false);
}
tree
gfc_copy_allocatable_data (tree dest, tree src, tree type, int rank)
{
- return duplicate_allocatable(dest, src, type, rank, true);
+ return duplicate_allocatable (dest, src, type, rank, true);
}
gfc_loopinfo loop;
stmtblock_t fnblock;
stmtblock_t loopbody;
+ tree decl_type;
tree tmp;
tree comp;
tree dcmp;
gfc_init_block (&fnblock);
- if (POINTER_TYPE_P (TREE_TYPE (decl)) && rank != 0)
+ decl_type = TREE_TYPE (decl);
+
+ if ((POINTER_TYPE_P (decl_type) && rank != 0)
+ || (TREE_CODE (decl_type) == REFERENCE_TYPE && rank == 0))
+
decl = build_fold_indirect_ref_loc (input_location,
decl);
+  /* Just in case it gets dereferenced. */
+ decl_type = TREE_TYPE (decl);
+
/* If this an array of derived types with allocatable components
build a loop and recursively call this function. */
- if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
- || GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (decl)))
+ if (TREE_CODE (decl_type) == ARRAY_TYPE
+ || GFC_DESCRIPTOR_TYPE_P (decl_type))
{
tmp = gfc_conv_array_data (decl);
var = build_fold_indirect_ref_loc (input_location,
tmp);
/* Get the number of elements - 1 and set the counter. */
- if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (decl)))
+ if (GFC_DESCRIPTOR_TYPE_P (decl_type))
{
/* Use the descriptor for an allocatable array. Since this
is a full array reference, we only need the descriptor
information from dimension = rank. */
tmp = get_full_array_size (&fnblock, decl, rank);
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- tmp, gfc_index_one_node);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, tmp,
+ gfc_index_one_node);
null_cond = gfc_conv_descriptor_data_get (decl);
- null_cond = fold_build2 (NE_EXPR, boolean_type_node, null_cond,
- build_int_cst (TREE_TYPE (null_cond), 0));
+ null_cond = fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node, null_cond,
+ build_int_cst (TREE_TYPE (null_cond), 0));
}
else
{
/* Otherwise use the TYPE_DOMAIN information. */
- tmp = array_type_nelts (TREE_TYPE (decl));
+ tmp = array_type_nelts (decl_type);
tmp = fold_convert (gfc_array_index_type, tmp);
}
{
if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (dest)))
{
- tmp = gfc_duplicate_allocatable (dest, decl, TREE_TYPE(decl), rank);
+ tmp = gfc_duplicate_allocatable (dest, decl, decl_type, rank);
gfc_add_expr_to_block (&fnblock, tmp);
}
tmp = build_fold_indirect_ref_loc (input_location,
act on a chain of components. */
for (c = der_type->components; c; c = c->next)
{
- bool cmp_has_alloc_comps = (c->ts.type == BT_DERIVED)
+ bool cmp_has_alloc_comps = (c->ts.type == BT_DERIVED
+ || c->ts.type == BT_CLASS)
&& c->ts.u.derived->attr.alloc_comp;
cdecl = c->backend_decl;
ctype = TREE_TYPE (cdecl);
switch (purpose)
{
case DEALLOCATE_ALLOC_COMP:
- /* Do not deallocate the components of ultimate pointer
- components. */
- if (cmp_has_alloc_comps && !c->attr.pointer)
- {
- comp = fold_build3 (COMPONENT_REF, ctype,
- decl, cdecl, NULL_TREE);
- rank = c->as ? c->as->rank : 0;
- tmp = structure_alloc_comps (c->ts.u.derived, comp, NULL_TREE,
- rank, purpose);
- gfc_add_expr_to_block (&fnblock, tmp);
- }
-
if (c->attr.allocatable && c->attr.dimension)
{
- comp = fold_build3 (COMPONENT_REF, ctype,
- decl, cdecl, NULL_TREE);
+ comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
+ decl, cdecl, NULL_TREE);
+ if (cmp_has_alloc_comps && !c->attr.pointer)
+ {
+ /* Do not deallocate the components of ultimate pointer
+ components. */
+ tmp = structure_alloc_comps (c->ts.u.derived, comp, NULL_TREE,
+ c->as->rank, purpose);
+ gfc_add_expr_to_block (&fnblock, tmp);
+ }
tmp = gfc_trans_dealloc_allocated (comp);
gfc_add_expr_to_block (&fnblock, tmp);
}
else if (c->attr.allocatable)
{
/* Allocatable scalar components. */
- comp = fold_build3 (COMPONENT_REF, ctype, decl, cdecl, NULL_TREE);
+ comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
+ decl, cdecl, NULL_TREE);
- tmp = gfc_deallocate_with_status (comp, NULL_TREE, true, NULL);
+ tmp = gfc_deallocate_scalar_with_status (comp, NULL, true, NULL,
+ c->ts);
gfc_add_expr_to_block (&fnblock, tmp);
- tmp = fold_build2 (MODIFY_EXPR, void_type_node, comp,
- build_int_cst (TREE_TYPE (comp), 0));
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR,
+ void_type_node, comp,
+ build_int_cst (TREE_TYPE (comp), 0));
gfc_add_expr_to_block (&fnblock, tmp);
}
- else if (c->ts.type == BT_CLASS
- && c->ts.u.derived->components->attr.allocatable)
+ else if (c->ts.type == BT_CLASS && CLASS_DATA (c)->attr.allocatable)
{
/* Allocatable scalar CLASS components. */
- comp = fold_build3 (COMPONENT_REF, ctype, decl, cdecl, NULL_TREE);
+ comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
+ decl, cdecl, NULL_TREE);
- /* Add reference to '$data' component. */
- tmp = c->ts.u.derived->components->backend_decl;
- comp = fold_build3 (COMPONENT_REF, TREE_TYPE (tmp),
- comp, tmp, NULL_TREE);
+ /* Add reference to '_data' component. */
+ tmp = CLASS_DATA (c)->backend_decl;
+ comp = fold_build3_loc (input_location, COMPONENT_REF,
+ TREE_TYPE (tmp), comp, tmp, NULL_TREE);
- tmp = gfc_deallocate_with_status (comp, NULL_TREE, true, NULL);
+ tmp = gfc_deallocate_scalar_with_status (comp, NULL, true, NULL,
+ CLASS_DATA (c)->ts);
gfc_add_expr_to_block (&fnblock, tmp);
- tmp = fold_build2 (MODIFY_EXPR, void_type_node, comp,
- build_int_cst (TREE_TYPE (comp), 0));
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR,
+ void_type_node, comp,
+ build_int_cst (TREE_TYPE (comp), 0));
gfc_add_expr_to_block (&fnblock, tmp);
}
break;
continue;
else if (c->attr.allocatable && c->attr.dimension)
{
- comp = fold_build3 (COMPONENT_REF, ctype,
- decl, cdecl, NULL_TREE);
+ comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
+ decl, cdecl, NULL_TREE);
gfc_conv_descriptor_data_set (&fnblock, comp, null_pointer_node);
}
else if (c->attr.allocatable)
{
/* Allocatable scalar components. */
- comp = fold_build3 (COMPONENT_REF, ctype, decl, cdecl, NULL_TREE);
- tmp = fold_build2 (MODIFY_EXPR, void_type_node, comp,
- build_int_cst (TREE_TYPE (comp), 0));
+ comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
+ decl, cdecl, NULL_TREE);
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR,
+ void_type_node, comp,
+ build_int_cst (TREE_TYPE (comp), 0));
gfc_add_expr_to_block (&fnblock, tmp);
}
- else if (c->ts.type == BT_CLASS
- && c->ts.u.derived->components->attr.allocatable)
+ else if (c->ts.type == BT_CLASS && CLASS_DATA (c)->attr.allocatable)
{
/* Allocatable scalar CLASS components. */
- comp = fold_build3 (COMPONENT_REF, ctype, decl, cdecl, NULL_TREE);
- /* Add reference to '$data' component. */
- tmp = c->ts.u.derived->components->backend_decl;
- comp = fold_build3 (COMPONENT_REF, TREE_TYPE (tmp),
- comp, tmp, NULL_TREE);
- tmp = fold_build2 (MODIFY_EXPR, void_type_node, comp,
- build_int_cst (TREE_TYPE (comp), 0));
+ comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
+ decl, cdecl, NULL_TREE);
+ /* Add reference to '_data' component. */
+ tmp = CLASS_DATA (c)->backend_decl;
+ comp = fold_build3_loc (input_location, COMPONENT_REF,
+ TREE_TYPE (tmp), comp, tmp, NULL_TREE);
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR,
+ void_type_node, comp,
+ build_int_cst (TREE_TYPE (comp), 0));
gfc_add_expr_to_block (&fnblock, tmp);
}
else if (cmp_has_alloc_comps)
{
- comp = fold_build3 (COMPONENT_REF, ctype,
- decl, cdecl, NULL_TREE);
+ comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
+ decl, cdecl, NULL_TREE);
rank = c->as ? c->as->rank : 0;
tmp = structure_alloc_comps (c->ts.u.derived, comp, NULL_TREE,
rank, purpose);
continue;
/* We need source and destination components. */
- comp = fold_build3 (COMPONENT_REF, ctype, decl, cdecl, NULL_TREE);
- dcmp = fold_build3 (COMPONENT_REF, ctype, dest, cdecl, NULL_TREE);
+ comp = fold_build3_loc (input_location, COMPONENT_REF, ctype, decl,
+ cdecl, NULL_TREE);
+ dcmp = fold_build3_loc (input_location, COMPONENT_REF, ctype, dest,
+ cdecl, NULL_TREE);
dcmp = fold_convert (TREE_TYPE (comp), dcmp);
if (c->attr.allocatable && !cmp_has_alloc_comps)
{
rank = c->as ? c->as->rank : 0;
- tmp = gfc_duplicate_allocatable(dcmp, comp, ctype, rank);
+ tmp = gfc_duplicate_allocatable (dcmp, comp, ctype, rank);
gfc_add_expr_to_block (&fnblock, tmp);
}
}
-/* Check for default initializer; sym->value is not enough as it is also
- set for EXPR_NULL of allocatables. */
+/* Returns the value of LBOUND for an expression. This could be broken out
+ from gfc_conv_intrinsic_bound but this seemed to be simpler. This is
+ called by gfc_alloc_allocatable_for_assignment. */
+static tree
+get_std_lbound (gfc_expr *expr, tree desc, int dim, bool assumed_size)
+{
+ tree lbound;
+ tree ubound;
+ tree stride;
+ tree cond, cond1, cond3, cond4;
+ tree tmp;
+ if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)))
+ {
+ tmp = gfc_rank_cst[dim];
+ lbound = gfc_conv_descriptor_lbound_get (desc, tmp);
+ ubound = gfc_conv_descriptor_ubound_get (desc, tmp);
+ stride = gfc_conv_descriptor_stride_get (desc, tmp);
+ cond1 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ ubound, lbound);
+ cond3 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ stride, gfc_index_zero_node);
+ cond3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node, cond3, cond1);
+ cond4 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ stride, gfc_index_zero_node);
+ if (assumed_size)
+ cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ tmp, build_int_cst (gfc_array_index_type,
+ expr->rank - 1));
+ else
+ cond = boolean_false_node;
+
+ cond1 = fold_build2_loc (input_location, TRUTH_OR_EXPR,
+ boolean_type_node, cond3, cond4);
+ cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
+ boolean_type_node, cond, cond1);
-static bool
-has_default_initializer (gfc_symbol *der)
+ return fold_build3_loc (input_location, COND_EXPR,
+ gfc_array_index_type, cond,
+ lbound, gfc_index_one_node);
+ }
+ else if (expr->expr_type == EXPR_VARIABLE)
+ {
+ tmp = TREE_TYPE (expr->symtree->n.sym->backend_decl);
+ return GFC_TYPE_ARRAY_LBOUND(tmp, dim);
+ }
+ else if (expr->expr_type == EXPR_FUNCTION)
+ {
+ /* A conversion function, so use the argument. */
+ expr = expr->value.function.actual->expr;
+ if (expr->expr_type != EXPR_VARIABLE)
+ return gfc_index_one_node;
+ desc = TREE_TYPE (expr->symtree->n.sym->backend_decl);
+ return get_std_lbound (expr, desc, dim, assumed_size);
+ }
+
+ return gfc_index_one_node;
+}
+
+
+/* Returns true if an expression represents an lhs that can be reallocated
+ on assignment. */
+
+bool
+gfc_is_reallocatable_lhs (gfc_expr *expr)
{
- gfc_component *c;
+ gfc_ref * ref;
+
+ if (!expr->ref)
+ return false;
+
+ /* An allocatable variable. */
+ if (expr->symtree->n.sym->attr.allocatable
+ && expr->ref
+ && expr->ref->type == REF_ARRAY
+ && expr->ref->u.ar.type == AR_FULL)
+ return true;
+
+ /* All that can be left are allocatable components. */
+ if ((expr->symtree->n.sym->ts.type != BT_DERIVED
+ && expr->symtree->n.sym->ts.type != BT_CLASS)
+ || !expr->symtree->n.sym->ts.u.derived->attr.alloc_comp)
+ return false;
- gcc_assert (der->attr.flavor == FL_DERIVED);
- for (c = der->components; c; c = c->next)
- if ((c->ts.type != BT_DERIVED && c->initializer)
- || (c->ts.type == BT_DERIVED
- && (!c->attr.pointer && has_default_initializer (c->ts.u.derived))))
+ /* Find a component ref followed by an array reference. */
+ for (ref = expr->ref; ref; ref = ref->next)
+ if (ref->next
+ && ref->type == REF_COMPONENT
+ && ref->next->type == REF_ARRAY
+ && !ref->next->next)
break;
- return c != NULL;
+ if (!ref)
+ return false;
+
+ /* Return true if valid reallocatable lhs. */
+ if (ref->u.c.component->attr.allocatable
+ && ref->next->u.ar.type == AR_FULL)
+ return true;
+
+ return false;
+}
+
+
+/* Allocate the lhs of an assignment to an allocatable array if it is
+   currently unallocated; otherwise reallocate it to match the rhs.  */
+
+tree
+gfc_alloc_allocatable_for_assignment (gfc_loopinfo *loop,
+ gfc_expr *expr1,
+ gfc_expr *expr2)
+{
+ stmtblock_t realloc_block;
+ stmtblock_t alloc_block;
+ stmtblock_t fblock;
+ gfc_ss *rss;
+ gfc_ss *lss;
+ tree realloc_expr;
+ tree alloc_expr;
+ tree size1;
+ tree size2;
+ tree array1;
+ tree cond;
+ tree tmp;
+ tree tmp2;
+ tree lbound;
+ tree ubound;
+ tree desc;
+ tree desc2;
+ tree offset;
+ tree jump_label1;
+ tree jump_label2;
+ tree neq_size;
+ tree lbd;
+ int n;
+ int dim;
+ gfc_array_spec * as;
+
+ /* x = f(...) with x allocatable. In this case, expr1 is the rhs.
+ Find the lhs expression in the loop chain and set expr1 and
+ expr2 accordingly. */
+ if (expr1->expr_type == EXPR_FUNCTION && expr2 == NULL)
+ {
+ expr2 = expr1;
+ /* Find the ss for the lhs. */
+ lss = loop->ss;
+ for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
+ if (lss->expr && lss->expr->expr_type == EXPR_VARIABLE)
+ break;
+ if (lss == gfc_ss_terminator)
+ return NULL_TREE;
+ expr1 = lss->expr;
+ }
+
+ /* Bail out if this is not a valid allocate on assignment. */
+ if (!gfc_is_reallocatable_lhs (expr1)
+ || (expr2 && !expr2->rank))
+ return NULL_TREE;
+
+ /* Find the ss for the lhs. */
+ lss = loop->ss;
+ for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
+ if (lss->expr == expr1)
+ break;
+
+ if (lss == gfc_ss_terminator)
+ return NULL_TREE;
+
+ /* Find an ss for the rhs. For operator expressions, we see the
+ ss's for the operands. Any one of these will do. */
+ rss = loop->ss;
+ for (; rss && rss != gfc_ss_terminator; rss = rss->loop_chain)
+ if (rss->expr != expr1 && rss != loop->temp_ss)
+ break;
+
+ if (expr2 && rss == gfc_ss_terminator)
+ return NULL_TREE;
+
+ gfc_start_block (&fblock);
+
+ /* Since the lhs is allocatable, this must be a descriptor type.
+ Get the data and array size. */
+ desc = lss->data.info.descriptor;
+ gcc_assert (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)));
+ array1 = gfc_conv_descriptor_data_get (desc);
+ size1 = gfc_conv_descriptor_size (desc, expr1->rank);
+
+ /* Get the rhs size. Fix both sizes. */
+ if (expr2)
+ desc2 = rss->data.info.descriptor;
+ else
+ desc2 = NULL_TREE;
+ size2 = gfc_index_one_node;
+ for (n = 0; n < expr2->rank; n++)
+ {
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[n], loop->from[n]);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ tmp, gfc_index_one_node);
+ size2 = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ tmp, size2);
+ }
+ size1 = gfc_evaluate_now (size1, &fblock);
+ size2 = gfc_evaluate_now (size2, &fblock);
+ cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ size1, size2);
+ neq_size = gfc_evaluate_now (cond, &fblock);
+
+ /* If the lhs is allocated and the lhs and rhs are equal length, jump
+ past the realloc/malloc. This allows F95 compliant expressions
+ to escape allocation on assignment. */
+ jump_label1 = gfc_build_label_decl (NULL_TREE);
+ jump_label2 = gfc_build_label_decl (NULL_TREE);
+
+ /* Allocate if data is NULL. */
+ cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ array1, build_int_cst (TREE_TYPE (array1), 0));
+ tmp = build3_v (COND_EXPR, cond,
+ build1_v (GOTO_EXPR, jump_label1),
+ build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ /* Reallocate if sizes are different. */
+ tmp = build3_v (COND_EXPR, neq_size,
+ build1_v (GOTO_EXPR, jump_label1),
+ build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ if (expr2 && expr2->expr_type == EXPR_FUNCTION
+ && expr2->value.function.isym
+ && expr2->value.function.isym->conversion)
+ {
+ /* For conversion functions, take the arg. */
+ gfc_expr *arg = expr2->value.function.actual->expr;
+ as = gfc_get_full_arrayspec_from_expr (arg);
+ }
+ else if (expr2)
+ as = gfc_get_full_arrayspec_from_expr (expr2);
+ else
+ as = NULL;
+
+ /* Reset the lhs bounds if any are different from the rhs. */
+ if (as && expr2->expr_type == EXPR_VARIABLE)
+ {
+ for (n = 0; n < expr1->rank; n++)
+ {
+ /* First check the lbounds. */
+ dim = rss->data.info.dim[n];
+ lbd = get_std_lbound (expr2, desc2, dim,
+ as->type == AS_ASSUMED_SIZE);
+ lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
+ cond = fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node, lbd, lbound);
+ tmp = build3_v (COND_EXPR, cond,
+ build1_v (GOTO_EXPR, jump_label1),
+ build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ /* Now check the shape. */
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[n], loop->from[n]);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ tmp, lbound);
+ ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[n]);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ tmp, ubound);
+ cond = fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node,
+ tmp, gfc_index_zero_node);
+ tmp = build3_v (COND_EXPR, cond,
+ build1_v (GOTO_EXPR, jump_label1),
+ build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&fblock, tmp);
+ }
+ }
+
+ /* Otherwise jump past the (re)alloc code. */
+ tmp = build1_v (GOTO_EXPR, jump_label2);
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ /* Add the label to start automatic (re)allocation. */
+ tmp = build1_v (LABEL_EXPR, jump_label1);
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ /* Now modify the lhs descriptor and the associated scalarizer
+ variables.
+ 7.4.1.3: If variable is or becomes an unallocated allocatable
+ variable, then it is allocated with each deferred type parameter
+     equal to the corresponding type parameters of expr, with the
+     shape of expr, and with each lower bound equal to the
+ corresponding element of LBOUND(expr). */
+ size1 = gfc_index_one_node;
+ offset = gfc_index_zero_node;
+
+ for (n = 0; n < expr2->rank; n++)
+ {
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[n], loop->from[n]);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ tmp, gfc_index_one_node);
+
+ lbound = gfc_index_one_node;
+ ubound = tmp;
+
+ if (as)
+ {
+ lbd = get_std_lbound (expr2, desc2, n,
+ as->type == AS_ASSUMED_SIZE);
+ ubound = fold_build2_loc (input_location,
+ MINUS_EXPR,
+ gfc_array_index_type,
+ ubound, lbound);
+ ubound = fold_build2_loc (input_location,
+ PLUS_EXPR,
+ gfc_array_index_type,
+ ubound, lbd);
+ lbound = lbd;
+ }
+
+ gfc_conv_descriptor_lbound_set (&fblock, desc,
+ gfc_rank_cst[n],
+ lbound);
+ gfc_conv_descriptor_ubound_set (&fblock, desc,
+ gfc_rank_cst[n],
+ ubound);
+ gfc_conv_descriptor_stride_set (&fblock, desc,
+ gfc_rank_cst[n],
+ size1);
+ lbound = gfc_conv_descriptor_lbound_get (desc,
+ gfc_rank_cst[n]);
+ tmp2 = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ lbound, size1);
+ offset = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ offset, tmp2);
+ size1 = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ tmp, size1);
+ }
+
+ /* Set the lhs descriptor and scalarizer offsets. For rank > 1,
+ the array offset is saved and the info.offset is used for a
+ running offset. Use the saved_offset instead. */
+ tmp = gfc_conv_descriptor_offset (desc);
+ gfc_add_modify (&fblock, tmp, offset);
+ if (lss->data.info.saved_offset
+ && TREE_CODE (lss->data.info.saved_offset) == VAR_DECL)
+ gfc_add_modify (&fblock, lss->data.info.saved_offset, tmp);
+
+ /* Now set the deltas for the lhs. */
+ for (n = 0; n < expr1->rank; n++)
+ {
+ tmp = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
+ dim = lss->data.info.dim[n];
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, tmp,
+ loop->from[dim]);
+ if (lss->data.info.delta[dim]
+ && TREE_CODE (lss->data.info.delta[dim]) == VAR_DECL)
+ gfc_add_modify (&fblock, lss->data.info.delta[dim], tmp);
+ }
+
+ /* Get the new lhs size in bytes. */
+ if (expr1->ts.type == BT_CHARACTER && expr1->ts.deferred)
+ {
+ tmp = expr2->ts.u.cl->backend_decl;
+ gcc_assert (expr1->ts.u.cl->backend_decl);
+ tmp = fold_convert (TREE_TYPE (expr1->ts.u.cl->backend_decl), tmp);
+ gfc_add_modify (&fblock, expr1->ts.u.cl->backend_decl, tmp);
+ }
+ else if (expr1->ts.type == BT_CHARACTER && expr1->ts.u.cl->backend_decl)
+ {
+ tmp = TYPE_SIZE_UNIT (TREE_TYPE (gfc_typenode_for_spec (&expr1->ts)));
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, tmp,
+ expr1->ts.u.cl->backend_decl);
+ }
+ else
+ tmp = TYPE_SIZE_UNIT (gfc_typenode_for_spec (&expr1->ts));
+ tmp = fold_convert (gfc_array_index_type, tmp);
+ size2 = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ tmp, size2);
+ size2 = fold_convert (size_type_node, size2);
+ size2 = gfc_evaluate_now (size2, &fblock);
+
+ /* Realloc expression. Note that the scalarizer uses desc.data
+ in the array reference - (*desc.data)[<element>]. */
+ gfc_init_block (&realloc_block);
+ tmp = build_call_expr_loc (input_location,
+ built_in_decls[BUILT_IN_REALLOC], 2,
+ fold_convert (pvoid_type_node, array1),
+ size2);
+ gfc_conv_descriptor_data_set (&realloc_block,
+ desc, tmp);
+ realloc_expr = gfc_finish_block (&realloc_block);
+
+ /* Only reallocate if sizes are different. */
+ tmp = build3_v (COND_EXPR, neq_size, realloc_expr,
+ build_empty_stmt (input_location));
+ realloc_expr = tmp;
+
+
+ /* Malloc expression. */
+ gfc_init_block (&alloc_block);
+ tmp = build_call_expr_loc (input_location,
+ built_in_decls[BUILT_IN_MALLOC], 1,
+ size2);
+ gfc_conv_descriptor_data_set (&alloc_block,
+ desc, tmp);
+ tmp = gfc_conv_descriptor_dtype (desc);
+ gfc_add_modify (&alloc_block, tmp, gfc_get_dtype (TREE_TYPE (desc)));
+ alloc_expr = gfc_finish_block (&alloc_block);
+
+ /* Malloc if not allocated; realloc otherwise. */
+ tmp = build_int_cst (TREE_TYPE (array1), 0);
+ cond = fold_build2_loc (input_location, EQ_EXPR,
+ boolean_type_node,
+ array1, tmp);
+ tmp = build3_v (COND_EXPR, cond, alloc_expr, realloc_expr);
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ /* Make sure that the scalarizer data pointer is updated. */
+ if (lss->data.info.data
+ && TREE_CODE (lss->data.info.data) == VAR_DECL)
+ {
+ tmp = gfc_conv_descriptor_data_get (desc);
+ gfc_add_modify (&fblock, lss->data.info.data, tmp);
+ }
+
+ /* Add the exit label. */
+ tmp = build1_v (LABEL_EXPR, jump_label2);
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ return gfc_finish_block (&fblock);
}
Do likewise, recursively if necessary, with the allocatable components of
derived types. */
-tree
-gfc_trans_deferred_array (gfc_symbol * sym, tree body)
+void
+gfc_trans_deferred_array (gfc_symbol * sym, gfc_wrapped_block * block)
{
tree type;
tree tmp;
tree descriptor;
- stmtblock_t fnblock;
+ stmtblock_t init;
+ stmtblock_t cleanup;
locus loc;
int rank;
bool sym_has_alloc_comp;
- sym_has_alloc_comp = (sym->ts.type == BT_DERIVED)
+ sym_has_alloc_comp = (sym->ts.type == BT_DERIVED
+ || sym->ts.type == BT_CLASS)
&& sym->ts.u.derived->attr.alloc_comp;
/* Make sure the frontend gets these right. */
if (!(sym->attr.pointer || sym->attr.allocatable || sym_has_alloc_comp))
- fatal_error ("Possible frontend bug: Deferred array size without pointer, "
+ fatal_error ("Possible front-end bug: Deferred array size without pointer, "
"allocatable attribute or derived type without allocatable "
"components.");
- gfc_init_block (&fnblock);
+ gfc_init_block (&init);
gcc_assert (TREE_CODE (sym->backend_decl) == VAR_DECL
|| TREE_CODE (sym->backend_decl) == PARM_DECL);
if (sym->ts.type == BT_CHARACTER
&& !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
{
- gfc_conv_string_length (sym->ts.u.cl, NULL, &fnblock);
- gfc_trans_vla_type_sizes (sym, &fnblock);
+ gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
+ gfc_trans_vla_type_sizes (sym, &init);
}
/* Dummy, use associated and result variables don't need anything special. */
if (sym->attr.dummy || sym->attr.use_assoc || sym->attr.result)
{
- gfc_add_expr_to_block (&fnblock, body);
-
- return gfc_finish_block (&fnblock);
+ gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
+ return;
}
- gfc_get_backend_locus (&loc);
+ gfc_save_backend_locus (&loc);
gfc_set_backend_locus (&sym->declared_at);
descriptor = sym->backend_decl;
{
/* SAVEd variables are not freed on exit. */
gfc_trans_static_array_pointer (sym);
- return body;
+
+ gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
+ gfc_restore_backend_locus (&loc);
+ return;
}
/* Get the descriptor type. */
if (!sym->attr.save
&& !(TREE_STATIC (sym->backend_decl) && sym->attr.is_main_program))
{
- if (sym->value == NULL || !has_default_initializer (sym->ts.u.derived))
+ if (sym->value == NULL
+ || !gfc_has_default_initializer (sym->ts.u.derived))
{
rank = sym->as ? sym->as->rank : 0;
- tmp = gfc_nullify_alloc_comp (sym->ts.u.derived, descriptor, rank);
- gfc_add_expr_to_block (&fnblock, tmp);
+ tmp = gfc_nullify_alloc_comp (sym->ts.u.derived,
+ descriptor, rank);
+ gfc_add_expr_to_block (&init, tmp);
}
else
- {
- tmp = gfc_init_default_dt (sym, NULL, false);
- gfc_add_expr_to_block (&fnblock, tmp);
- }
+ gfc_init_default_dt (sym, &init, false);
}
}
else if (!GFC_DESCRIPTOR_TYPE_P (type))
/* If the backend_decl is not a descriptor, we must have a pointer
to one. */
descriptor = build_fold_indirect_ref_loc (input_location,
- sym->backend_decl);
+ sym->backend_decl);
type = TREE_TYPE (descriptor);
}
/* NULLIFY the data pointer. */
if (GFC_DESCRIPTOR_TYPE_P (type) && !sym->attr.save)
- gfc_conv_descriptor_data_set (&fnblock, descriptor, null_pointer_node);
-
- gfc_add_expr_to_block (&fnblock, body);
+ gfc_conv_descriptor_data_set (&init, descriptor, null_pointer_node);
- gfc_set_backend_locus (&loc);
+ gfc_init_block (&cleanup);
+ gfc_restore_backend_locus (&loc);
/* Allocatable arrays need to be freed when they go out of scope.
The allocatable components of pointers must not be touched. */
int rank;
rank = sym->as ? sym->as->rank : 0;
tmp = gfc_deallocate_alloc_comp (sym->ts.u.derived, descriptor, rank);
- gfc_add_expr_to_block (&fnblock, tmp);
+ gfc_add_expr_to_block (&cleanup, tmp);
}
if (sym->attr.allocatable && sym->attr.dimension
&& !sym->attr.save && !sym->attr.result)
{
tmp = gfc_trans_dealloc_allocated (sym->backend_decl);
- gfc_add_expr_to_block (&fnblock, tmp);
+ gfc_add_expr_to_block (&cleanup, tmp);
}
- return gfc_finish_block (&fnblock);
+ gfc_add_init_cleanup (block, gfc_finish_block (&init),
+ gfc_finish_block (&cleanup));
}
/************ Expression Walking Functions ******************/
gfc_intrinsic_sym *isym;
gfc_symbol *sym;
gfc_component *comp = NULL;
+ int n;
isym = expr->value.function.isym;
newss->expr = expr;
newss->next = ss;
newss->data.info.dimen = expr->rank;
+ for (n = 0; n < newss->data.info.dimen; n++)
+ newss->data.info.dim[n] = n;
return newss;
}
/* Walk an expression. Add walked expressions to the head of the SS chain.
A wholly scalar expression will not be added. */
-static gfc_ss *
+gfc_ss *
gfc_walk_subexpr (gfc_ss * ss, gfc_expr * expr)
{
gfc_ss *head;