/* Array translation routines
- Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+ 2011
Free Software Foundation, Inc.
Contributed by Paul Brook <paul@nowt.org>
and Steven Bosscher <s.bosscher@student.tudelft.nl>
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "gimple.h"
#include "diagnostic-core.h" /* For internal_error/fatal_error. */
#include "flags.h"
#include "gfortran.h"
#include "trans-const.h"
#include "dependency.h"
-static gfc_ss *gfc_walk_subexpr (gfc_ss *, gfc_expr *);
static bool gfc_get_array_constructor_size (mpz_t *, gfc_constructor_base);
/* The contents of this structure aren't actually used, just the address. */
#define OFFSET_FIELD 1
#define DTYPE_FIELD 2
#define DIMENSION_FIELD 3
+#define CAF_TOKEN_FIELD 4
#define STRIDE_SUBFIELD 0
#define LBOUND_SUBFIELD 1
field = TYPE_FIELDS (type);
gcc_assert (DATA_FIELD == 0);
- t = fold_build3 (COMPONENT_REF, TREE_TYPE (field), desc, field, NULL_TREE);
+ t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
+ field, NULL_TREE);
t = fold_convert (GFC_TYPE_ARRAY_DATAPTR_TYPE (type), t);
return t;
field = TYPE_FIELDS (type);
gcc_assert (DATA_FIELD == 0);
- t = fold_build3 (COMPONENT_REF, TREE_TYPE (field), desc, field, NULL_TREE);
+ t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
+ field, NULL_TREE);
gfc_add_modify (block, t, fold_convert (TREE_TYPE (field), value));
}
field = TYPE_FIELDS (type);
gcc_assert (DATA_FIELD == 0);
- t = fold_build3 (COMPONENT_REF, TREE_TYPE (field), desc, field, NULL_TREE);
+ t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
+ field, NULL_TREE);
return gfc_build_addr_expr (NULL_TREE, t);
}
field = gfc_advance_chain (TYPE_FIELDS (type), OFFSET_FIELD);
gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
- return fold_build3 (COMPONENT_REF, TREE_TYPE (field),
- desc, field, NULL_TREE);
+ return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
+ desc, field, NULL_TREE);
}
tree
field = gfc_advance_chain (TYPE_FIELDS (type), DTYPE_FIELD);
gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
- return fold_build3 (COMPONENT_REF, TREE_TYPE (field),
- desc, field, NULL_TREE);
+ return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
+ desc, field, NULL_TREE);
}
static tree
&& TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE
&& TREE_CODE (TREE_TYPE (TREE_TYPE (field))) == RECORD_TYPE);
- tmp = fold_build3 (COMPONENT_REF, TREE_TYPE (field),
- desc, field, NULL_TREE);
+ tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
+ desc, field, NULL_TREE);
tmp = gfc_build_array_ref (tmp, dim, NULL);
return tmp;
}
+
+/* Return a tree referencing the coarray token component of the array
+   descriptor DESC.  Only valid for allocatable coarrays when the
+   library-based coarray implementation (-fcoarray=lib) is in use, as
+   asserted below.  */
+
+tree
+gfc_conv_descriptor_token (tree desc)
+{
+  tree type;
+  tree field;
+
+  type = TREE_TYPE (desc);
+  gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
+  gcc_assert (GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ALLOCATABLE);
+  gcc_assert (gfc_option.coarray == GFC_FCOARRAY_LIB);
+  /* The token lives at CAF_TOKEN_FIELD in the descriptor record and is
+     expected to have restricted-void-pointer type.  */
+  field = gfc_advance_chain (TYPE_FIELDS (type), CAF_TOKEN_FIELD);
+  gcc_assert (field != NULL_TREE && TREE_TYPE (field) == prvoid_type_node);
+
+  return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
+			  desc, field, NULL_TREE);
+}
+
+
static tree
gfc_conv_descriptor_stride (tree desc, tree dim)
{
field = gfc_advance_chain (field, STRIDE_SUBFIELD);
gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
- tmp = fold_build3 (COMPONENT_REF, TREE_TYPE (field),
- tmp, field, NULL_TREE);
+ tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
+ tmp, field, NULL_TREE);
return tmp;
}
field = gfc_advance_chain (field, LBOUND_SUBFIELD);
gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
- tmp = fold_build3 (COMPONENT_REF, TREE_TYPE (field),
- tmp, field, NULL_TREE);
+ tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
+ tmp, field, NULL_TREE);
return tmp;
}
field = gfc_advance_chain (field, UBOUND_SUBFIELD);
gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
- tmp = fold_build3 (COMPONENT_REF, TREE_TYPE (field),
- tmp, field, NULL_TREE);
+ tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
+ tmp, field, NULL_TREE);
return tmp;
}
}
+/* Modify a descriptor such that the lbound of a given dimension is the value
+   specified.  This also updates ubound and offset accordingly.
+   BLOCK receives the generated assignments, DESC is the array descriptor,
+   DIM is the (zero-based) dimension being shifted and NEW_LBOUND the new
+   lower bound (converted below to gfc_array_index_type).  */
+
+void
+gfc_conv_shift_descriptor_lbound (stmtblock_t* block, tree desc,
+				  int dim, tree new_lbound)
+{
+  tree offs, ubound, lbound, stride;
+  tree diff, offs_diff;
+
+  new_lbound = fold_convert (gfc_array_index_type, new_lbound);
+
+  /* Read the current descriptor fields for this dimension.  */
+  offs = gfc_conv_descriptor_offset_get (desc);
+  lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]);
+  ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]);
+  stride = gfc_conv_descriptor_stride_get (desc, gfc_rank_cst[dim]);
+
+  /* Get difference (new - old) by which to shift stuff.  */
+  diff = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
+			  new_lbound, lbound);
+
+  /* Shift ubound and offset accordingly.  This has to be done before
+     updating the lbound, as they depend on the lbound expression!  */
+  ubound = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+			    ubound, diff);
+  gfc_conv_descriptor_ubound_set (block, desc, gfc_rank_cst[dim], ubound);
+  /* offset -= diff * stride keeps element addresses unchanged under the
+     new bounds.  */
+  offs_diff = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+			       diff, stride);
+  offs = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
+			  offs, offs_diff);
+  gfc_conv_descriptor_offset_set (block, desc, offs);
+
+  /* Finally set lbound to value we want.  */
+  gfc_conv_descriptor_lbound_set (block, desc, gfc_rank_cst[dim], new_lbound);
+}
+
+
/* Cleanup those #defines. */
#undef DATA_FIELD
#undef OFFSET_FIELD
#undef DTYPE_FIELD
#undef DIMENSION_FIELD
+#undef CAF_TOKEN_FIELD
#undef STRIDE_SUBFIELD
#undef LBOUND_SUBFIELD
#undef UBOUND_SUBFIELD
/* Free a gfc_ss chain. */
-static void
+void
gfc_free_ss_chain (gfc_ss * ss)
{
gfc_ss *next;
break;
}
- gfc_free (ss);
+ free (ss);
+}
+
+
+/* Creates and initializes an array type gfc_ss struct.  NEXT is chained as
+   the following element, EXPR is the associated expression, DIMEN the number
+   of dimensions and TYPE the gfc_ss_type to set.  The dim[] map is
+   initialized to the identity permutation.  */
+
+gfc_ss *
+gfc_get_array_ss (gfc_ss *next, gfc_expr *expr, int dimen, gfc_ss_type type)
+{
+  gfc_ss *ss;
+  gfc_ss_info *info;
+  int i;
+
+  ss = gfc_get_ss ();
+  ss->next = next;
+  ss->type = type;
+  ss->expr = expr;
+  info = &ss->data.info;
+  info->dimen = dimen;
+  /* Identity mapping: loop dimension i corresponds to array dimension i.  */
+  for (i = 0; i < info->dimen; i++)
+    info->dim[i] = i;
+
+  return ss;
+}
+
+
+/* Creates and initializes a temporary type gfc_ss struct.  TYPE is the
+   element type of the temporary, STRING_LENGTH its character length (or
+   NULL_TREE) and DIMEN its rank.  The returned node terminates its own
+   chain (next == gfc_ss_terminator).  */
+
+gfc_ss *
+gfc_get_temp_ss (tree type, tree string_length, int dimen)
+{
+  gfc_ss *ss;
+
+  ss = gfc_get_ss ();
+  ss->next = gfc_ss_terminator;
+  ss->type = GFC_SS_TEMP;
+  ss->string_length = string_length;
+  ss->data.temp.dimen = dimen;
+  ss->data.temp.type = type;
+
+  return ss;
+}
+
+
+/* Creates and initializes a scalar type gfc_ss struct for expression EXPR,
+   chained in front of NEXT.  */
+
+gfc_ss *
+gfc_get_scalar_ss (gfc_ss *next, gfc_expr *expr)
+{
+  gfc_ss *ss;
+
+  ss = gfc_get_ss ();
+  ss->next = next;
+  ss->type = GFC_SS_SCALAR;
+  ss->expr = expr;
+
+  return ss;
}
tree tmp;
if (as && as->type == AS_EXPLICIT)
- for (dim = 0; dim < se->loop->dimen; dim++)
+ for (n = 0; n < se->loop->dimen; n++)
{
- n = se->loop->order[dim];
+ dim = se->ss->data.info.dim[n];
+ gcc_assert (dim < as->rank);
+ gcc_assert (se->loop->dimen == as->rank);
if (se->loop->to[n] == NULL_TREE)
{
/* Evaluate the lower bound. */
upper = fold_convert (gfc_array_index_type, tmpse.expr);
/* Set the upper bound of the loop to UPPER - LOWER. */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, upper, lower);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, upper, lower);
tmp = gfc_evaluate_now (tmp, &se->pre);
se->loop->to[n] = tmp;
}
{
/* Allocate the temporary. */
onstack = !dynamic && initial == NULL_TREE
- && gfc_can_put_var_on_stack (size);
+ && (gfc_option.flag_stack_arrays
+ || gfc_can_put_var_on_stack (size));
if (onstack)
{
/* Make a temporary variable to hold the data. */
- tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (nelem), nelem,
- gfc_index_one_node);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (nelem),
+ nelem, gfc_index_one_node);
+ tmp = gfc_evaluate_now (tmp, pre);
tmp = build_range_type (gfc_array_index_type, gfc_index_zero_node,
tmp);
tmp = build_array_type (gfc_get_element_type (TREE_TYPE (desc)),
tmp);
tmp = gfc_create_var (tmp, "A");
+ /* If we're here only because of -fstack-arrays we have to
+ emit a DECL_EXPR to make the gimplifier emit alloca calls. */
+ if (!gfc_can_put_var_on_stack (size))
+ gfc_add_expr_to_block (pre,
+ fold_build1_loc (input_location,
+ DECL_EXPR, TREE_TYPE (tmp),
+ tmp));
tmp = gfc_build_addr_expr (NULL_TREE, tmp);
gfc_conv_descriptor_data_set (pre, desc, tmp);
}
tmp = gfc_build_memcpy_call (packed, source_data, size);
gfc_add_expr_to_block (&do_copying, tmp);
- was_packed = fold_build2 (EQ_EXPR, boolean_type_node,
- packed, source_data);
+ was_packed = fold_build2_loc (input_location, EQ_EXPR,
+ boolean_type_node, packed,
+ source_data);
tmp = gfc_finish_block (&do_copying);
tmp = build3_v (COND_EXPR, was_packed, tmp,
build_empty_stmt (input_location));
}
+/* Get the array reference dimension corresponding to the given loop dimension.
+   It is different from the true array dimension given by the dim array in
+   the case of a partial array reference.
+   It is different from the loop dimension in the case of a transposed array.
+   */
+
+static int
+get_array_ref_dim (gfc_ss_info *info, int loop_dim)
+{
+  int n, array_dim, array_ref_dim;
+
+  array_ref_dim = 0;
+  array_dim = info->dim[loop_dim];
+
+  /* The reference dimension is the number of loop dimensions whose mapped
+     array dimension is smaller than ours, i.e. the rank of ARRAY_DIM among
+     the dimensions actually scalarized.  */
+  for (n = 0; n < info->dimen; n++)
+    if (n != loop_dim && info->dim[n] < array_dim)
+      array_ref_dim++;
+
+  return array_ref_dim;
+}
+
+
/* Generate code to create and initialize the descriptor for a temporary
array. This is used for both temporaries needed by the scalarizer, and
functions returning arrays. Adjusts the loop variables to be
tree eltype, tree initial, bool dynamic,
bool dealloc, bool callee_alloc, locus * where)
{
+ tree from[GFC_MAX_DIMENSIONS], to[GFC_MAX_DIMENSIONS];
tree type;
tree desc;
tree tmp;
tree nelem;
tree cond;
tree or_expr;
- int n;
- int dim;
+ int n, dim, tmp_dim;
+
+ memset (from, 0, sizeof (from));
+ memset (to, 0, sizeof (to));
gcc_assert (info->dimen > 0);
+ gcc_assert (loop->dimen == info->dimen);
if (gfc_option.warn_array_temp && where)
gfc_warning ("Creating array temporary at %L", where);
/* Set the lower bound to zero. */
- for (dim = 0; dim < info->dimen; dim++)
+ for (n = 0; n < loop->dimen; n++)
{
- n = loop->order[dim];
+ dim = info->dim[n];
+
/* Callee allocated arrays may not have a known bound yet. */
if (loop->to[n])
- loop->to[n] = gfc_evaluate_now (fold_build2 (MINUS_EXPR,
- gfc_array_index_type,
- loop->to[n], loop->from[n]), pre);
+ loop->to[n] = gfc_evaluate_now (
+ fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[n], loop->from[n]),
+ pre);
loop->from[n] = gfc_index_zero_node;
+ /* We are constructing the temporary's descriptor based on the loop
+ dimensions. As the dimensions may be accessed in arbitrary order
+ (think of transpose) the size taken from the n'th loop may not map
+ to the n'th dimension of the array. We need to reconstruct loop infos
+ in the right order before using it to set the descriptor
+ bounds. */
+ tmp_dim = get_array_ref_dim (info, n);
+ from[tmp_dim] = loop->from[n];
+ to[tmp_dim] = loop->to[n];
+
info->delta[dim] = gfc_index_zero_node;
info->start[dim] = gfc_index_zero_node;
info->end[dim] = gfc_index_zero_node;
info->stride[dim] = gfc_index_one_node;
- info->dim[dim] = dim;
}
/* Initialize the descriptor. */
type =
- gfc_get_array_type_bounds (eltype, info->dimen, 0, loop->from, loop->to, 1,
+ gfc_get_array_type_bounds (eltype, info->dimen, 0, from, to, 1,
GFC_ARRAY_UNKNOWN, true);
desc = gfc_create_var (type, "atmp");
GFC_DECL_PACKED_ARRAY (desc) = 1;
or_expr = NULL_TREE;
- /* If there is at least one null loop->to[n], it is a callee allocated
+ /* If there is at least one null loop->to[n], it is a callee allocated
array. */
- for (n = 0; n < info->dimen; n++)
+ for (n = 0; n < loop->dimen; n++)
if (loop->to[n] == NULL_TREE)
{
size = NULL_TREE;
break;
}
- for (n = 0; n < info->dimen; n++)
- {
+ for (n = 0; n < loop->dimen; n++)
+ {
dim = info->dim[n];
if (size == NULL_TREE)
{
/* For a callee allocated array express the loop bounds in terms
of the descriptor fields. */
- tmp = fold_build2 (
+ tmp = fold_build2_loc (input_location,
MINUS_EXPR, gfc_array_index_type,
gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]),
gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]));
}
/* Store the stride and bound components in the descriptor. */
- gfc_conv_descriptor_stride_set (pre, desc, gfc_rank_cst[dim], size);
+ gfc_conv_descriptor_stride_set (pre, desc, gfc_rank_cst[n], size);
- gfc_conv_descriptor_lbound_set (pre, desc, gfc_rank_cst[dim],
+ gfc_conv_descriptor_lbound_set (pre, desc, gfc_rank_cst[n],
gfc_index_zero_node);
- gfc_conv_descriptor_ubound_set (pre, desc, gfc_rank_cst[dim],
- loop->to[n]);
+ gfc_conv_descriptor_ubound_set (pre, desc, gfc_rank_cst[n],
+ to[n]);
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- loop->to[n], gfc_index_one_node);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ to[n], gfc_index_one_node);
/* Check whether the size for this dimension is negative. */
- cond = fold_build2 (LE_EXPR, boolean_type_node, tmp,
- gfc_index_zero_node);
+ cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, tmp,
+ gfc_index_zero_node);
cond = gfc_evaluate_now (cond, pre);
if (n == 0)
or_expr = cond;
else
- or_expr = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, or_expr, cond);
+ or_expr = fold_build2_loc (input_location, TRUTH_OR_EXPR,
+ boolean_type_node, or_expr, cond);
- size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
+ size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ size, tmp);
size = gfc_evaluate_now (size, pre);
}
{
/* If or_expr is true, then the extent in at least one
dimension is zero and the size is set to zero. */
- size = fold_build3 (COND_EXPR, gfc_array_index_type,
- or_expr, gfc_index_zero_node, size);
+ size = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type,
+ or_expr, gfc_index_zero_node, size);
nelem = size;
- size = fold_build2 (MULT_EXPR, gfc_array_index_type, size,
+ size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ size,
fold_convert (gfc_array_index_type,
TYPE_SIZE_UNIT (gfc_get_element_type (type))));
}
}
-/* Generate code to transpose array EXPR by creating a new descriptor
- in which the dimension specifications have been reversed. */
-
-void
-gfc_conv_array_transpose (gfc_se * se, gfc_expr * expr)
-{
- tree dest, src, dest_index, src_index;
- gfc_loopinfo *loop;
- gfc_ss_info *dest_info;
- gfc_ss *dest_ss, *src_ss;
- gfc_se src_se;
- int n;
-
- loop = se->loop;
-
- src_ss = gfc_walk_expr (expr);
- dest_ss = se->ss;
-
- dest_info = &dest_ss->data.info;
- gcc_assert (dest_info->dimen == 2);
-
- /* Get a descriptor for EXPR. */
- gfc_init_se (&src_se, NULL);
- gfc_conv_expr_descriptor (&src_se, expr, src_ss);
- gfc_add_block_to_block (&se->pre, &src_se.pre);
- gfc_add_block_to_block (&se->post, &src_se.post);
- src = src_se.expr;
-
- /* Allocate a new descriptor for the return value. */
- dest = gfc_create_var (TREE_TYPE (src), "atmp");
- dest_info->descriptor = dest;
- se->expr = dest;
-
- /* Copy across the dtype field. */
- gfc_add_modify (&se->pre,
- gfc_conv_descriptor_dtype (dest),
- gfc_conv_descriptor_dtype (src));
-
- /* Copy the dimension information, renumbering dimension 1 to 0 and
- 0 to 1. */
- for (n = 0; n < 2; n++)
- {
- dest_info->delta[n] = gfc_index_zero_node;
- dest_info->start[n] = gfc_index_zero_node;
- dest_info->end[n] = gfc_index_zero_node;
- dest_info->stride[n] = gfc_index_one_node;
- dest_info->dim[n] = n;
-
- dest_index = gfc_rank_cst[n];
- src_index = gfc_rank_cst[1 - n];
-
- gfc_conv_descriptor_stride_set (&se->pre, dest, dest_index,
- gfc_conv_descriptor_stride_get (src, src_index));
-
- gfc_conv_descriptor_lbound_set (&se->pre, dest, dest_index,
- gfc_conv_descriptor_lbound_get (src, src_index));
-
- gfc_conv_descriptor_ubound_set (&se->pre, dest, dest_index,
- gfc_conv_descriptor_ubound_get (src, src_index));
-
- if (!loop->to[n])
- {
- gcc_assert (integer_zerop (loop->from[n]));
- loop->to[n] =
- fold_build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_conv_descriptor_ubound_get (dest, dest_index),
- gfc_conv_descriptor_lbound_get (dest, dest_index));
- }
- }
-
- /* Copy the data pointer. */
- dest_info->data = gfc_conv_descriptor_data_get (src);
- gfc_conv_descriptor_data_set (&se->pre, dest, dest_info->data);
-
- /* Copy the offset. This is not changed by transposition; the top-left
- element is still at the same offset as before, except where the loop
- starts at zero. */
- if (!integer_zerop (loop->from[0]))
- dest_info->offset = gfc_conv_descriptor_offset_get (src);
- else
- dest_info->offset = gfc_index_zero_node;
-
- gfc_conv_descriptor_offset_set (&se->pre, dest,
- dest_info->offset);
-
- if (dest_info->dimen > loop->temp_dim)
- loop->temp_dim = dest_info->dimen;
-}
-
-
/* Return the number of iterations in a loop that starts at START,
ends at END, and has step STEP. */
tree type;
type = TREE_TYPE (step);
- tmp = fold_build2 (MINUS_EXPR, type, end, start);
- tmp = fold_build2 (FLOOR_DIV_EXPR, type, tmp, step);
- tmp = fold_build2 (PLUS_EXPR, type, tmp, build_int_cst (type, 1));
- tmp = fold_build2 (MAX_EXPR, type, tmp, build_int_cst (type, 0));
+ tmp = fold_build2_loc (input_location, MINUS_EXPR, type, end, start);
+ tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR, type, tmp, step);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, type, tmp,
+ build_int_cst (type, 1));
+ tmp = fold_build2_loc (input_location, MAX_EXPR, type, tmp,
+ build_int_cst (type, 0));
return fold_convert (gfc_array_index_type, tmp);
}
ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[0]);
/* Add EXTRA to the upper bound. */
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type, ubound, extra);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ ubound, extra);
gfc_conv_descriptor_ubound_set (pblock, desc, gfc_rank_cst[0], tmp);
/* Get the value of the current data pointer. */
/* Calculate the new array size. */
size = TYPE_SIZE_UNIT (gfc_get_element_type (TREE_TYPE (desc)));
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- ubound, gfc_index_one_node);
- arg1 = fold_build2 (MULT_EXPR, size_type_node,
- fold_convert (size_type_node, tmp),
- fold_convert (size_type_node, size));
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ ubound, gfc_index_one_node);
+ arg1 = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
+ fold_convert (size_type_node, tmp),
+ fold_convert (size_type_node, size));
/* Call the realloc() function. */
tmp = gfc_call_realloc (pblock, arg0, arg1);
esize = size_in_bytes (gfc_get_element_type (TREE_TYPE (desc)));
esize = fold_convert (gfc_charlen_type_node, esize);
- esize = fold_build2 (TRUNC_DIV_EXPR, gfc_charlen_type_node, esize,
+ esize = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
+ gfc_charlen_type_node, esize,
build_int_cst (gfc_charlen_type_node,
gfc_character_kinds[i].bit_size / 8));
{
/* Verify that all constructor elements are of the same
length. */
- tree cond = fold_build2 (NE_EXPR, boolean_type_node,
- first_len_val, se->string_length);
+ tree cond = fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node, first_len_val,
+ se->string_length);
gfc_trans_runtime_check
(true, false, cond, &se->pre, &expr->where,
"Different CHARACTER lengths (%ld/%ld) in array constructor",
{
tmp = gfc_get_iteration_count (loop.from[n], loop.to[n],
gfc_index_one_node);
- size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
+ size = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, size, tmp);
}
/* Grow the constructed array by SIZE elements. */
gcc_assert (se.ss == gfc_ss_terminator);
/* Increment the offset. */
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- *poffset, gfc_index_one_node);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ *poffset, gfc_index_one_node);
gfc_add_modify (&body, *poffset, tmp);
/* Finish the loop. */
gfc_trans_array_ctor_element (&body, desc, *poffset,
&se, c->expr);
- *poffset = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- *poffset, gfc_index_one_node);
+ *poffset = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ *poffset, gfc_index_one_node);
}
else
{
p = gfc_constructor_next (p);
}
- bound = build_int_cst (NULL_TREE, n - 1);
+ bound = size_int (n - 1);
/* Create an array type to hold them. */
tmptype = build_range_type (gfc_array_index_type,
gfc_index_zero_node, bound);
init = gfc_build_addr_expr (NULL_TREE, init);
size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (type));
- bound = build_int_cst (NULL_TREE, n * size);
+ bound = build_int_cst (size_type_node, n * size);
tmp = build_call_expr_loc (input_location,
- built_in_decls[BUILT_IN_MEMCPY], 3,
- tmp, init, bound);
+ builtin_decl_explicit (BUILT_IN_MEMCPY),
+ 3, tmp, init, bound);
gfc_add_expr_to_block (&body, tmp);
- *poffset = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- *poffset,
+ *poffset = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, *poffset,
build_int_cst (gfc_array_index_type, n));
}
if (!INTEGER_CST_P (*poffset))
tmp2 = gfc_conv_mpz_to_tree (size, gfc_index_integer_kind);
/* Grow the array by TMP * TMP2 elements. */
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, tmp, tmp2);
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, tmp, tmp2);
gfc_grow_array (&implied_do_block, desc, tmp);
}
/* Generate the exit condition. Depending on the sign of
the step variable we have to generate the correct
comparison. */
- tmp = fold_build2 (GT_EXPR, boolean_type_node, step,
- build_int_cst (TREE_TYPE (step), 0));
- cond = fold_build3 (COND_EXPR, boolean_type_node, tmp,
- fold_build2 (GT_EXPR, boolean_type_node,
- shadow_loopvar, end),
- fold_build2 (LT_EXPR, boolean_type_node,
- shadow_loopvar, end));
+ tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ step, build_int_cst (TREE_TYPE (step), 0));
+ cond = fold_build3_loc (input_location, COND_EXPR,
+ boolean_type_node, tmp,
+ fold_build2_loc (input_location, GT_EXPR,
+ boolean_type_node, shadow_loopvar, end),
+ fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node, shadow_loopvar, end));
tmp = build1_v (GOTO_EXPR, exit_label);
TREE_USED (exit_label) = 1;
tmp = build3_v (COND_EXPR, cond, tmp,
gfc_add_expr_to_block (&body, loopbody);
/* Increase loop variable by step. */
- tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (shadow_loopvar), shadow_loopvar, step);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ TREE_TYPE (shadow_loopvar), shadow_loopvar,
+ step);
gfc_add_modify (&body, shadow_loopvar, tmp);
/* Finish the loop. */
}
+/* A catch-all to obtain the string length for anything that is not a
+   substring of non-constant length, a constant, array or variable.
+   BLOCK receives any code needed for the evaluation; on return *LEN holds
+   the (possibly newly evaluated) string length of E.  */
+
+static void
+get_array_ctor_all_strlen (stmtblock_t *block, gfc_expr *e, tree *len)
+{
+  gfc_se se;
+  gfc_ss *ss;
+
+  /* Don't bother if we already know the length is a constant.  */
+  if (*len && INTEGER_CST_P (*len))
+    return;
+
+  if (!e->ref && e->ts.u.cl && e->ts.u.cl->length
+	&& e->ts.u.cl->length->expr_type == EXPR_CONSTANT)
+    {
+      /* This is easy.  */
+      gfc_conv_const_charlen (e->ts.u.cl);
+      *len = e->ts.u.cl->backend_decl;
+    }
+  else
+    {
+      /* Otherwise, be brutal even if inefficient.  */
+      ss = gfc_walk_expr (e);
+      gfc_init_se (&se, NULL);
+
+      /* No function call, in case of side effects.  */
+      se.no_function_call = 1;
+      if (ss == gfc_ss_terminator)
+	gfc_conv_expr (&se, e);
+      else
+	gfc_conv_expr_descriptor (&se, e, ss);
+
+      /* Fix the value.  */
+      *len = gfc_evaluate_now (se.string_length, &se.pre);
+
+      gfc_add_block_to_block (block, &se.pre);
+      gfc_add_block_to_block (block, &se.post);
+
+      /* Cache the result so later queries see a constant-ish decl.  */
+      e->ts.u.cl->backend_decl = *len;
+    }
+}
+
+
/* Figure out the string length of a variable reference expression.
Used by get_array_ctor_strlen. */
static void
-get_array_ctor_var_strlen (gfc_expr * expr, tree * len)
+get_array_ctor_var_strlen (stmtblock_t *block, gfc_expr * expr, tree * len)
{
gfc_ref *ref;
gfc_typespec *ts;
case REF_SUBSTRING:
if (ref->u.ss.start->expr_type != EXPR_CONSTANT
|| ref->u.ss.end->expr_type != EXPR_CONSTANT)
- break;
+ {
+ /* Note that this might evaluate expr. */
+ get_array_ctor_all_strlen (block, expr, len);
+ return;
+ }
mpz_init_set_ui (char_len, 1);
mpz_add (char_len, char_len, ref->u.ss.end->value.integer);
mpz_sub (char_len, char_len, ref->u.ss.start->value.integer);
return;
default:
- /* TODO: Substrings are tricky because we can't evaluate the
- expression more than once. For now we just give up, and hope
- we can figure it out elsewhere. */
- return;
+ gcc_unreachable ();
}
}
}
-/* A catch-all to obtain the string length for anything that is not a
- constant, array or variable. */
-static void
-get_array_ctor_all_strlen (stmtblock_t *block, gfc_expr *e, tree *len)
-{
- gfc_se se;
- gfc_ss *ss;
-
- /* Don't bother if we already know the length is a constant. */
- if (*len && INTEGER_CST_P (*len))
- return;
-
- if (!e->ref && e->ts.u.cl && e->ts.u.cl->length
- && e->ts.u.cl->length->expr_type == EXPR_CONSTANT)
- {
- /* This is easy. */
- gfc_conv_const_charlen (e->ts.u.cl);
- *len = e->ts.u.cl->backend_decl;
- }
- else
- {
- /* Otherwise, be brutal even if inefficient. */
- ss = gfc_walk_expr (e);
- gfc_init_se (&se, NULL);
-
- /* No function call, in case of side effects. */
- se.no_function_call = 1;
- if (ss == gfc_ss_terminator)
- gfc_conv_expr (&se, e);
- else
- gfc_conv_expr_descriptor (&se, e, ss);
-
- /* Fix the value. */
- *len = gfc_evaluate_now (se.string_length, &se.pre);
-
- gfc_add_block_to_block (block, &se.pre);
- gfc_add_block_to_block (block, &se.post);
-
- e->ts.u.cl->backend_decl = *len;
- }
-}
-
-
/* Figure out the string length of a character array constructor.
If len is NULL, don't calculate the length; this happens for recursive calls
when a sub-array-constructor is an element but not at the first position,
case EXPR_VARIABLE:
is_const = false;
if (len)
- get_array_ctor_var_strlen (c->expr, len);
+ get_array_ctor_var_strlen (block, c->expr, len);
break;
default:
tmptype = gfc_get_nodesc_array_type (type, &as, PACKED_STATIC, true);
+ /* as is not needed anymore. */
+ for (i = 0; i < as.rank + as.corank; i++)
+ {
+ gfc_free_expr (as.lower[i]);
+ gfc_free_expr (as.upper[i]);
+ }
+
init = build_constructor (tmptype, v);
TREE_CONSTANT (init) = 1;
info->start[i] = gfc_index_zero_node;
info->end[i] = gfc_index_zero_node;
info->stride[i] = gfc_index_one_node;
- info->dim[i] = i;
}
if (info->dimen > loop->temp_dim)
/* Only allow nonzero "from" in one-dimensional arrays. */
if (loop->dimen != 1)
return NULL_TREE;
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- loop->to[i], loop->from[i]);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[i], loop->from[i]);
}
else
tmp = loop->to[i];
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- tmp, gfc_index_one_node);
- size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ tmp, gfc_index_one_node);
+ size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ size, tmp);
}
return size;
tree offsetvar;
tree desc;
tree type;
+ tree tmp;
bool dynamic;
bool old_first_len, old_typespec_chararray_ctor;
tree old_first_len_val;
first_len = true;
}
- ss->data.info.dimen = loop->dimen;
+ gcc_assert (ss->data.info.dimen == loop->dimen);
c = ss->expr->value.constructor;
if (ss->expr->ts.type == BT_CHARACTER)
loop->from[n] = gfc_index_zero_node;
loop->to[n] = gfc_conv_mpz_to_tree (ss->expr->shape [n],
gfc_index_integer_kind);
- loop->to[n] = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- loop->to[n], gfc_index_one_node);
+ loop->to[n] = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[n], gfc_index_one_node);
}
}
}
}
+ if (TREE_CODE (loop->to[0]) == VAR_DECL)
+ dynamic = true;
+
gfc_trans_create_temp_array (&loop->pre, &loop->post, loop, &ss->data.info,
type, NULL_TREE, dynamic, true, false, where);
/* If the array grows dynamically, the upper bound of the loop variable
is determined by the array's final upper bound. */
if (dynamic)
- loop->to[0] = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[0]);
+ {
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ offsetvar, gfc_index_one_node);
+ tmp = gfc_evaluate_now (tmp, &loop->pre);
+ gfc_conv_descriptor_ubound_set (&loop->pre, desc, gfc_rank_cst[0], tmp);
+ if (loop->to[0] && TREE_CODE (loop->to[0]) == VAR_DECL)
+ gfc_add_modify (&loop->pre, loop->to[0], tmp);
+ else
+ loop->to[0] = tmp;
+ }
if (TREE_USED (offsetvar))
pushdecl (offsetvar);
else
gcc_assert (INTEGER_CST_P (offset));
+
#if 0
/* Disable bound checking for now because it's probably broken. */
if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
gfc_init_se (&se, NULL);
desc = info->subscript[dim]->data.info.descriptor;
zero = gfc_rank_cst[0];
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
gfc_conv_descriptor_ubound_get (desc, zero),
gfc_conv_descriptor_lbound_get (desc, zero));
tmp = gfc_evaluate_now (tmp, &loop->pre);
tmp = gfc_conv_array_offset (se.expr);
ss->data.info.offset = gfc_evaluate_now (tmp, block);
+
+ /* Make absolutely sure that the saved_offset is indeed saved
+ so that the variable is still accessible after the loops
+ are translated. */
+ ss->data.info.saved_offset = ss->data.info.offset;
}
}
for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
{
loop->order[n] = n;
- loop->reverse[n] = GFC_CANNOT_REVERSE;
+ loop->reverse[n] = GFC_INHIBIT_REVERSE;
}
loop->ss = gfc_ss_terminator;
asprintf (&msg, "Index '%%ld' of dimension %d "
"outside of expected range (%%ld:%%ld)", n+1);
- fault = fold_build2 (LT_EXPR, boolean_type_node, index, tmp_lo);
+ fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ index, tmp_lo);
gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
fold_convert (long_integer_type_node, index),
fold_convert (long_integer_type_node, tmp_lo),
fold_convert (long_integer_type_node, tmp_up));
- fault = fold_build2 (GT_EXPR, boolean_type_node, index, tmp_up);
+ fault = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ index, tmp_up);
gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
fold_convert (long_integer_type_node, index),
fold_convert (long_integer_type_node, tmp_lo),
fold_convert (long_integer_type_node, tmp_up));
- gfc_free (msg);
+ free (msg);
}
else
{
asprintf (&msg, "Index '%%ld' of dimension %d "
"below lower bound of %%ld", n+1);
- fault = fold_build2 (LT_EXPR, boolean_type_node, index, tmp_lo);
+ fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ index, tmp_lo);
gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
fold_convert (long_integer_type_node, index),
fold_convert (long_integer_type_node, tmp_lo));
- gfc_free (msg);
+ free (msg);
}
return index;
gcc_assert (ar->type != AR_ELEMENT);
switch (ar->dimen_type[dim])
{
+ case DIMEN_THIS_IMAGE:
+ gcc_unreachable ();
+ break;
case DIMEN_ELEMENT:
/* Elemental dimension. */
gcc_assert (info->subscript[dim]
desc = info->subscript[dim]->data.info.descriptor;
/* Get a zero-based index into the vector. */
- index = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- se->loop->loopvar[i], se->loop->from[i]);
+ index = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ se->loop->loopvar[i], se->loop->from[i]);
/* Multiply the index by the stride. */
- index = fold_build2 (MULT_EXPR, gfc_array_index_type,
- index, gfc_conv_array_stride (desc, 0));
+ index = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ index, gfc_conv_array_stride (desc, 0));
/* Read the vector to get an index into info->descriptor. */
data = build_fold_indirect_ref_loc (input_location,
/* Multiply the loop variable by the stride and delta. */
index = se->loop->loopvar[i];
if (!integer_onep (info->stride[dim]))
- index = fold_build2 (MULT_EXPR, gfc_array_index_type, index,
- info->stride[dim]);
+ index = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, index,
+ info->stride[dim]);
if (!integer_zerop (info->delta[dim]))
- index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index,
- info->delta[dim]);
+ index = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, index,
+ info->delta[dim]);
break;
default:
/* Temporary array or derived type component. */
gcc_assert (se->loop);
index = se->loop->loopvar[se->loop->order[i]];
+
+ /* Pointer functions can have stride[0] different from unity.
+ Use the stride returned by the function call and stored in
+ the descriptor for the temporary. */
+ if (se->ss && se->ss->type == GFC_SS_FUNCTION
+ && se->ss->expr
+ && se->ss->expr->symtree
+ && se->ss->expr->symtree->n.sym->result
+ && se->ss->expr->symtree->n.sym->result->attr.pointer)
+ stride = gfc_conv_descriptor_stride_get (info->descriptor,
+ gfc_rank_cst[dim]);
+
if (!integer_zerop (info->delta[dim]))
- index = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- index, info->delta[dim]);
+ index = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, index, info->delta[dim]);
}
/* Multiply by the stride. */
if (!integer_onep (stride))
- index = fold_build2 (MULT_EXPR, gfc_array_index_type, index, stride);
+ index = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ index, stride);
return index;
}
/* Add the offset for this dimension to the stored offset for all other
dimensions. */
if (!integer_zerop (info->offset))
- index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, info->offset);
+ index = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ index, info->offset);
if (se->ss->expr && is_subref_array (se->ss->expr))
decl = se->ss->expr->symtree->n.sym->backend_decl;
{
se->string_length = se->ss->string_length;
gfc_conv_scalarized_array_ref (se, NULL);
+ gfc_advance_se_ss_chain (se);
}
+/* Add T to the offset pair *OFFSET, *CST_OFFSET.  Compile-time constant
+   summands are accumulated separately in *CST_OFFSET (folded immediately
+   with int_const_binop) so that after all dimensions are processed a
+   single constant remains; every non-constant summand is chained onto
+   *OFFSET as a PLUS_EXPR.  */
+
+static void
+add_to_offset (tree *cst_offset, tree *offset, tree t)
+{
+ if (TREE_CODE (t) == INTEGER_CST)
+ *cst_offset = int_const_binop (PLUS_EXPR, *cst_offset, t);
+ else
+ {
+ /* Avoid emitting "0 + T": start the chain with T itself.  */
+ if (!integer_zerop (*offset))
+ *offset = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, *offset, t);
+ else
+ *offset = t;
+ }
+}
/* Build an array reference. se->expr already holds the array descriptor.
This should be either a variable, indirect variable reference or component
locus * where)
{
int n;
- tree index;
+ tree offset, cst_offset;
tree tmp;
tree stride;
gfc_se indexse;
gfc_se tmpse;
if (ar->dimen == 0)
- return;
+ {
+ gcc_assert (ar->codimen);
+
+ if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (se->expr)))
+ se->expr = build_fold_indirect_ref (gfc_conv_array_data (se->expr));
+ else
+ {
+ if (GFC_ARRAY_TYPE_P (TREE_TYPE (se->expr))
+ && TREE_CODE (TREE_TYPE (se->expr)) == POINTER_TYPE)
+ se->expr = build_fold_indirect_ref_loc (input_location, se->expr);
+
+ /* Use the actual tree type and not the wrapped coarray. */
+ if (!se->want_pointer)
+ se->expr = fold_convert (TYPE_MAIN_VARIANT (TREE_TYPE (se->expr)),
+ se->expr);
+ }
+
+ return;
+ }
/* Handle scalarized references separately. */
if (ar->type != AR_ELEMENT)
return;
}
- index = gfc_index_zero_node;
+ cst_offset = offset = gfc_index_zero_node;
+ add_to_offset (&cst_offset, &offset, gfc_conv_array_offset (se->expr));
- /* Calculate the offsets from all the dimensions. */
- for (n = 0; n < ar->dimen; n++)
+ /* Calculate the offsets from all the dimensions. Make sure to associate
+ the final offset so that we form a chain of loop invariant summands. */
+ for (n = ar->dimen - 1; n >= 0; n--)
{
/* Calculate the index for this dimension. */
gfc_init_se (&indexse, se);
tmp = tmpse.expr;
}
- cond = fold_build2 (LT_EXPR, boolean_type_node,
- indexse.expr, tmp);
+ cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ indexse.expr, tmp);
asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
"below lower bound of %%ld", n+1, sym->name);
gfc_trans_runtime_check (true, false, cond, &se->pre, where, msg,
fold_convert (long_integer_type_node,
indexse.expr),
fold_convert (long_integer_type_node, tmp));
- gfc_free (msg);
+ free (msg);
/* Upper bound, but not for the last dimension of assumed-size
arrays. */
tmp = tmpse.expr;
}
- cond = fold_build2 (GT_EXPR, boolean_type_node,
- indexse.expr, tmp);
+ cond = fold_build2_loc (input_location, GT_EXPR,
+ boolean_type_node, indexse.expr, tmp);
asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
"above upper bound of %%ld", n+1, sym->name);
gfc_trans_runtime_check (true, false, cond, &se->pre, where, msg,
fold_convert (long_integer_type_node,
indexse.expr),
fold_convert (long_integer_type_node, tmp));
- gfc_free (msg);
+ free (msg);
}
}
/* Multiply the index by the stride. */
stride = gfc_conv_array_stride (se->expr, n);
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, indexse.expr,
- stride);
+ tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ indexse.expr, stride);
/* And add it to the total. */
- index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, tmp);
+ add_to_offset (&cst_offset, &offset, tmp);
}
- tmp = gfc_conv_array_offset (se->expr);
- if (!integer_zerop (tmp))
- index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, tmp);
+ if (!integer_zerop (cst_offset))
+ offset = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, offset, cst_offset);
/* Access the calculated element. */
tmp = gfc_conv_array_data (se->expr);
tmp = build_fold_indirect_ref (tmp);
- se->expr = gfc_build_array_ref (tmp, index, sym->backend_decl);
+ se->expr = gfc_build_array_ref (tmp, offset, sym->backend_decl);
}
gfc_ss_info *info;
gfc_ss *ss;
gfc_se se;
+ gfc_array_ref *ar;
int i;
/* This code will be executed before entering the scalarization loop
if (dim >= info->dimen)
continue;
+ if (info->ref)
+ {
+ ar = &info->ref->u.ar;
+ i = loop->order[dim + 1];
+ }
+ else
+ {
+ ar = NULL;
+ i = dim + 1;
+ }
+
+
if (dim == info->dimen - 1)
{
/* For the outermost loop calculate the offset due to any
base offset of the array. */
if (info->ref)
{
- for (i = 0; i < info->ref->u.ar.dimen; i++)
+ for (i = 0; i < ar->dimen; i++)
{
- if (info->ref->u.ar.dimen_type[i] != DIMEN_ELEMENT)
+ if (ar->dimen_type[i] != DIMEN_ELEMENT)
continue;
gfc_init_se (&se, NULL);
se.expr = info->descriptor;
stride = gfc_conv_array_stride (info->descriptor, i);
index = gfc_conv_array_index_offset (&se, info, i, -1,
- &info->ref->u.ar,
- stride);
+ ar, stride);
gfc_add_block_to_block (pblock, &se.pre);
- info->offset = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- info->offset, index);
+ info->offset = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ info->offset, index);
info->offset = gfc_evaluate_now (info->offset, pblock);
}
-
- i = loop->order[0];
- stride = gfc_conv_array_stride (info->descriptor, info->dim[i]);
}
- else
- stride = gfc_conv_array_stride (info->descriptor, 0);
+
+ i = loop->order[0];
+ /* For the time being, the innermost loop is unconditionally on
+ the first dimension of the scalarization loop. */
+ gcc_assert (i == 0);
+ stride = gfc_conv_array_stride (info->descriptor, info->dim[i]);
/* Calculate the stride of the innermost loop. Hopefully this will
- allow the backend optimizers to do their stuff more effectively.
- */
+ allow the backend optimizers to do their stuff more effectively.
+ */
info->stride0 = gfc_evaluate_now (stride, pblock);
}
else
{
/* Add the offset for the previous loop dimension. */
- gfc_array_ref *ar;
-
- if (info->ref)
- {
- ar = &info->ref->u.ar;
- i = loop->order[dim + 1];
- }
- else
- {
- ar = NULL;
- i = dim + 1;
- }
-
gfc_init_se (&se, NULL);
se.loop = loop;
se.expr = info->descriptor;
index = gfc_conv_array_index_offset (&se, info, info->dim[i], i,
ar, stride);
gfc_add_block_to_block (pblock, &se.pre);
- info->offset = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- info->offset, index);
+ info->offset = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, info->offset,
+ index);
info->offset = gfc_evaluate_now (info->offset, pblock);
}
loop->from[n]);
OMP_FOR_INIT (stmt) = init;
/* The exit condition. */
- TREE_VEC_ELT (cond, 0) = build2 (LE_EXPR, boolean_type_node,
- loop->loopvar[n], loop->to[n]);
+ TREE_VEC_ELT (cond, 0) = build2_loc (input_location, LE_EXPR,
+ boolean_type_node,
+ loop->loopvar[n], loop->to[n]);
+ SET_EXPR_LOCATION (TREE_VEC_ELT (cond, 0), input_location);
OMP_FOR_COND (stmt) = cond;
/* Increment the loopvar. */
- tmp = build2 (PLUS_EXPR, gfc_array_index_type,
- loop->loopvar[n], gfc_index_one_node);
- TREE_VEC_ELT (incr, 0) = fold_build2 (MODIFY_EXPR,
+ tmp = build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ loop->loopvar[n], gfc_index_one_node);
+ TREE_VEC_ELT (incr, 0) = fold_build2_loc (input_location, MODIFY_EXPR,
void_type_node, loop->loopvar[n], tmp);
OMP_FOR_INCR (stmt) = incr;
gfc_init_block (&block);
/* The exit condition. */
- cond = fold_build2 (reverse_loop ? LT_EXPR : GT_EXPR,
+ cond = fold_build2_loc (input_location, reverse_loop ? LT_EXPR : GT_EXPR,
boolean_type_node, loop->loopvar[n], loop->to[n]);
tmp = build1_v (GOTO_EXPR, exit_label);
TREE_USED (exit_label) = 1;
gfc_add_expr_to_block (&block, loopbody);
/* Increment the loopvar. */
- tmp = fold_build2 (reverse_loop ? MINUS_EXPR : PLUS_EXPR,
- gfc_array_index_type, loop->loopvar[n],
- gfc_index_one_node);
+ tmp = fold_build2_loc (input_location,
+ reverse_loop ? MINUS_EXPR : PLUS_EXPR,
+ gfc_array_index_type, loop->loopvar[n],
+ gfc_index_one_node);
gfc_add_modify (&block, loop->loopvar[n], tmp);
}
+/* Precalculate (either lower or upper) bound of an array section.
+ BLOCK: Block in which the (pre)calculation code will go.
+ BOUNDS[DIM]: Where the bound value will be stored once evaluated.
+ VALUES[DIM]: Specified bound (NULL <=> unspecified).
+ DESC: Array descriptor from which the bound will be picked if unspecified
+ (either lower or upper bound according to LBOUND).
+ The result is always forced into a temporary via gfc_evaluate_now so
+ that later references see a stable, side-effect-free value. */
+
+static void
+evaluate_bound (stmtblock_t *block, tree *bounds, gfc_expr ** values,
+ tree desc, int dim, bool lbound)
+{
+ gfc_se se;
+ gfc_expr * input_val = values[dim];
+ tree *output = &bounds[dim];
+
+
+ if (input_val)
+ {
+ /* Specified section bound: translate the front-end expression and
+ emit any preparatory code into BLOCK. */
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_type (&se, input_val, gfc_array_index_type);
+ gfc_add_block_to_block (block, &se.pre);
+ *output = se.expr;
+ }
+ else
+ {
+ /* No specific bound specified so use the bound of the array. */
+ *output = lbound ? gfc_conv_array_lbound (desc, dim) :
+ gfc_conv_array_ubound (desc, dim);
+ }
+ /* Evaluate once now; the value may be used repeatedly (e.g. by the
+ scalarizer and by bounds checking). */
+ *output = gfc_evaluate_now (*output, block);
+}
+
+
/* Calculate the lower bound of an array section. */
static void
gfc_conv_section_startstride (gfc_loopinfo * loop, gfc_ss * ss, int dim)
{
- gfc_expr *start;
- gfc_expr *end;
- gfc_expr *stride;
+ gfc_expr *stride = NULL;
tree desc;
gfc_se se;
gfc_ss_info *info;
+ gfc_array_ref *ar;
gcc_assert (ss->type == GFC_SS_SECTION);
info = &ss->data.info;
+ ar = &info->ref->u.ar;
- if (info->ref->u.ar.dimen_type[dim] == DIMEN_VECTOR)
+ if (ar->dimen_type[dim] == DIMEN_VECTOR)
{
/* We use a zero-based index to access the vector. */
info->start[dim] = gfc_index_zero_node;
- info->stride[dim] = gfc_index_one_node;
info->end[dim] = NULL;
+ info->stride[dim] = gfc_index_one_node;
return;
}
- gcc_assert (info->ref->u.ar.dimen_type[dim] == DIMEN_RANGE);
+ gcc_assert (ar->dimen_type[dim] == DIMEN_RANGE
+ || ar->dimen_type[dim] == DIMEN_THIS_IMAGE);
desc = info->descriptor;
- start = info->ref->u.ar.start[dim];
- end = info->ref->u.ar.end[dim];
- stride = info->ref->u.ar.stride[dim];
+ stride = ar->stride[dim];
/* Calculate the start of the range. For vector subscripts this will
be the range of the vector. */
- if (start)
- {
- /* Specified section start. */
- gfc_init_se (&se, NULL);
- gfc_conv_expr_type (&se, start, gfc_array_index_type);
- gfc_add_block_to_block (&loop->pre, &se.pre);
- info->start[dim] = se.expr;
- }
- else
- {
- /* No lower bound specified so use the bound of the array. */
- info->start[dim] = gfc_conv_array_lbound (desc, dim);
- }
- info->start[dim] = gfc_evaluate_now (info->start[dim], &loop->pre);
+ evaluate_bound (&loop->pre, info->start, ar->start, desc, dim, true);
/* Similarly calculate the end. Although this is not used in the
scalarizer, it is needed when checking bounds and where the end
is an expression with side-effects. */
- if (end)
- {
- /* Specified section start. */
- gfc_init_se (&se, NULL);
- gfc_conv_expr_type (&se, end, gfc_array_index_type);
- gfc_add_block_to_block (&loop->pre, &se.pre);
- info->end[dim] = se.expr;
- }
- else
- {
- /* No upper bound specified so use the bound of the array. */
- info->end[dim] = gfc_conv_array_ubound (desc, dim);
- }
- info->end[dim] = gfc_evaluate_now (info->end[dim], &loop->pre);
+ evaluate_bound (&loop->pre, info->end, ar->end, desc, dim, false);
/* Calculate the stride. */
if (stride == NULL)
loop->dimen = 0;
/* Determine the rank of the loop. */
- for (ss = loop->ss;
- ss != gfc_ss_terminator && loop->dimen == 0; ss = ss->loop_chain)
+ for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
{
switch (ss->type)
{
case GFC_SS_FUNCTION:
case GFC_SS_COMPONENT:
loop->dimen = ss->data.info.dimen;
- break;
+ goto done;
/* As usual, lbound and ubound are exceptions!. */
case GFC_SS_INTRINSIC:
{
case GFC_ISYM_LBOUND:
case GFC_ISYM_UBOUND:
+ case GFC_ISYM_LCOBOUND:
+ case GFC_ISYM_UCOBOUND:
+ case GFC_ISYM_THIS_IMAGE:
loop->dimen = ss->data.info.dimen;
+ goto done;
default:
break;
/* We should have determined the rank of the expression by now. If
not, that's bad news. */
- gcc_assert (loop->dimen != 0);
+ gcc_unreachable ();
+done:
/* Loop over all the SS in the chain. */
for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
{
/* Fall through to supply start and stride. */
case GFC_ISYM_LBOUND:
case GFC_ISYM_UBOUND:
+ case GFC_ISYM_LCOBOUND:
+ case GFC_ISYM_UCOBOUND:
+ case GFC_ISYM_THIS_IMAGE:
break;
+
default:
continue;
}
if (ss->type != GFC_SS_SECTION)
continue;
+ /* Catch allocatable lhs in f2003. */
+ if (gfc_option.flag_realloc_lhs && ss->is_alloc_lhs)
+ continue;
+
gfc_start_block (&inner);
/* TODO: range checking for mapped dimensions. */
check_upper = true;
/* Zero stride is not allowed. */
- tmp = fold_build2 (EQ_EXPR, boolean_type_node, info->stride[dim],
- gfc_index_zero_node);
+ tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ info->stride[dim], gfc_index_zero_node);
asprintf (&msg, "Zero stride is not allowed, for dimension %d "
"of array '%s'", dim + 1, ss->expr->symtree->name);
gfc_trans_runtime_check (true, false, tmp, &inner,
&ss->expr->where, msg);
- gfc_free (msg);
+ free (msg);
desc = ss->data.info.descriptor;
/* non_zerosized is true when the selected range is not
empty. */
- stride_pos = fold_build2 (GT_EXPR, boolean_type_node,
- info->stride[dim], gfc_index_zero_node);
- tmp = fold_build2 (LE_EXPR, boolean_type_node, info->start[dim],
- end);
- stride_pos = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- stride_pos, tmp);
-
- stride_neg = fold_build2 (LT_EXPR, boolean_type_node,
- info->stride[dim], gfc_index_zero_node);
- tmp = fold_build2 (GE_EXPR, boolean_type_node, info->start[dim],
- end);
- stride_neg = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- stride_neg, tmp);
- non_zerosized = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
- stride_pos, stride_neg);
+ stride_pos = fold_build2_loc (input_location, GT_EXPR,
+ boolean_type_node, info->stride[dim],
+ gfc_index_zero_node);
+ tmp = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
+ info->start[dim], end);
+ stride_pos = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node, stride_pos, tmp);
+
+ stride_neg = fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node,
+ info->stride[dim], gfc_index_zero_node);
+ tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ info->start[dim], end);
+ stride_neg = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node,
+ stride_neg, tmp);
+ non_zerosized = fold_build2_loc (input_location, TRUTH_OR_EXPR,
+ boolean_type_node,
+ stride_pos, stride_neg);
/* Check the start of the range against the lower and upper
bounds of the array, if the range is not empty.
error message. */
if (check_upper)
{
- tmp = fold_build2 (LT_EXPR, boolean_type_node,
- info->start[dim], lbound);
- tmp = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- non_zerosized, tmp);
- tmp2 = fold_build2 (GT_EXPR, boolean_type_node,
- info->start[dim], ubound);
- tmp2 = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- non_zerosized, tmp2);
+ tmp = fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node,
+ info->start[dim], lbound);
+ tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node,
+ non_zerosized, tmp);
+ tmp2 = fold_build2_loc (input_location, GT_EXPR,
+ boolean_type_node,
+ info->start[dim], ubound);
+ tmp2 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node,
+ non_zerosized, tmp2);
asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
"outside of expected range (%%ld:%%ld)",
dim + 1, ss->expr->symtree->name);
fold_convert (long_integer_type_node, info->start[dim]),
fold_convert (long_integer_type_node, lbound),
fold_convert (long_integer_type_node, ubound));
- gfc_free (msg);
+ free (msg);
}
else
{
- tmp = fold_build2 (LT_EXPR, boolean_type_node,
- info->start[dim], lbound);
- tmp = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- non_zerosized, tmp);
+ tmp = fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node,
+ info->start[dim], lbound);
+ tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node, non_zerosized, tmp);
asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
"below lower bound of %%ld",
dim + 1, ss->expr->symtree->name);
&ss->expr->where, msg,
fold_convert (long_integer_type_node, info->start[dim]),
fold_convert (long_integer_type_node, lbound));
- gfc_free (msg);
+ free (msg);
}
/* Compute the last element of the range, which is not
necessarily "end" (think 0:5:3, which doesn't contain 5)
and check it against both lower and upper bounds. */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, end,
- info->start[dim]);
- tmp = fold_build2 (TRUNC_MOD_EXPR, gfc_array_index_type, tmp,
- info->stride[dim]);
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, end,
- tmp);
- tmp2 = fold_build2 (LT_EXPR, boolean_type_node, tmp, lbound);
- tmp2 = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- non_zerosized, tmp2);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, end,
+ info->start[dim]);
+ tmp = fold_build2_loc (input_location, TRUNC_MOD_EXPR,
+ gfc_array_index_type, tmp,
+ info->stride[dim]);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, end, tmp);
+ tmp2 = fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node, tmp, lbound);
+ tmp2 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node, non_zerosized, tmp2);
if (check_upper)
{
- tmp3 = fold_build2 (GT_EXPR, boolean_type_node, tmp, ubound);
- tmp3 = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- non_zerosized, tmp3);
+ tmp3 = fold_build2_loc (input_location, GT_EXPR,
+ boolean_type_node, tmp, ubound);
+ tmp3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node, non_zerosized, tmp3);
asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
"outside of expected range (%%ld:%%ld)",
dim + 1, ss->expr->symtree->name);
fold_convert (long_integer_type_node, tmp),
fold_convert (long_integer_type_node, ubound),
fold_convert (long_integer_type_node, lbound));
- gfc_free (msg);
+ free (msg);
}
else
{
&ss->expr->where, msg,
fold_convert (long_integer_type_node, tmp),
fold_convert (long_integer_type_node, lbound));
- gfc_free (msg);
+ free (msg);
}
/* Check the section sizes match. */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, end,
- info->start[dim]);
- tmp = fold_build2 (FLOOR_DIV_EXPR, gfc_array_index_type, tmp,
- info->stride[dim]);
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, tmp);
- tmp = fold_build2 (MAX_EXPR, gfc_array_index_type, tmp,
- build_int_cst (gfc_array_index_type, 0));
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, end,
+ info->start[dim]);
+ tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR,
+ gfc_array_index_type, tmp,
+ info->stride[dim]);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ gfc_index_one_node, tmp);
+ tmp = fold_build2_loc (input_location, MAX_EXPR,
+ gfc_array_index_type, tmp,
+ build_int_cst (gfc_array_index_type, 0));
/* We remember the size of the first section, and check all the
others against this. */
if (size[n])
{
- tmp3 = fold_build2 (NE_EXPR, boolean_type_node, tmp, size[n]);
+ tmp3 = fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node, tmp, size[n]);
asprintf (&msg, "Array bound mismatch for dimension %d "
"of array '%s' (%%ld/%%ld)",
dim + 1, ss->expr->symtree->name);
fold_convert (long_integer_type_node, tmp),
fold_convert (long_integer_type_node, size[n]));
- gfc_free (msg);
+ free (msg);
}
else
size[n] = gfc_evaluate_now (tmp, &inner);
}
}
+/* Return true if both symbols could refer to the same data object. Does
+ not take account of aliasing due to equivalence statements.
+ LSYM_POINTER/LSYM_TARGET (and the RSYM_* counterparts) carry the
+ accumulated POINTER/TARGET attributes of the reference chain leading
+ to each symbol, not just the symbol's own attributes, so component
+ references can be folded in by the caller. */
+
+static int
+symbols_could_alias (gfc_symbol *lsym, gfc_symbol *rsym, bool lsym_pointer,
+ bool lsym_target, bool rsym_pointer, bool rsym_target)
+{
+ /* Aliasing isn't possible if the symbols have different base types. */
+ if (gfc_compare_types (&lsym->ts, &rsym->ts) == 0)
+ return 0;
+
+ /* Pointers can point to other pointers and target objects. */
+
+ if ((lsym_pointer && (rsym_pointer || rsym_target))
+ || (rsym_pointer && (lsym_pointer || lsym_target)))
+ return 1;
+
+ /* Special case: Argument association, cf. F90 12.4.1.6, F2003 12.4.1.7
+ and F2008 12.5.2.13 items 3b and 4b. The pointer case (a) is already
+ checked above. */
+ if (lsym_target && rsym_target
+ && ((lsym->attr.dummy && !lsym->attr.contiguous
+ && (!lsym->attr.dimension || lsym->as->type == AS_ASSUMED_SHAPE))
+ || (rsym->attr.dummy && !rsym->attr.contiguous
+ && (!rsym->attr.dimension
+ || rsym->as->type == AS_ASSUMED_SHAPE))))
+ return 1;
+
+ return 0;
+}
+
/* Return true if the two SS could be aliased, i.e. both point to the same data
object. */
gfc_ref *rref;
gfc_symbol *lsym;
gfc_symbol *rsym;
+ bool lsym_pointer, lsym_target, rsym_pointer, rsym_target;
lsym = lss->expr->symtree->n.sym;
rsym = rss->expr->symtree->n.sym;
- if (gfc_symbols_could_alias (lsym, rsym))
+
+ lsym_pointer = lsym->attr.pointer;
+ lsym_target = lsym->attr.target;
+ rsym_pointer = rsym->attr.pointer;
+ rsym_target = rsym->attr.target;
+
+ if (symbols_could_alias (lsym, rsym, lsym_pointer, lsym_target,
+ rsym_pointer, rsym_target))
return 1;
- if (rsym->ts.type != BT_DERIVED
- && lsym->ts.type != BT_DERIVED)
+ if (rsym->ts.type != BT_DERIVED && rsym->ts.type != BT_CLASS
+ && lsym->ts.type != BT_DERIVED && lsym->ts.type != BT_CLASS)
return 0;
/* For derived types we must check all the component types. We can ignore
if (lref->type != REF_COMPONENT)
continue;
- if (gfc_symbols_could_alias (lref->u.c.sym, rsym))
+ lsym_pointer = lsym_pointer || lref->u.c.sym->attr.pointer;
+ lsym_target = lsym_target || lref->u.c.sym->attr.target;
+
+ if (symbols_could_alias (lref->u.c.sym, rsym, lsym_pointer, lsym_target,
+ rsym_pointer, rsym_target))
return 1;
+ if ((lsym_pointer && (rsym_pointer || rsym_target))
+ || (rsym_pointer && (lsym_pointer || lsym_target)))
+ {
+ if (gfc_compare_types (&lref->u.c.component->ts,
+ &rsym->ts))
+ return 1;
+ }
+
for (rref = rss->expr->ref; rref != rss->data.info.ref;
rref = rref->next)
{
if (rref->type != REF_COMPONENT)
continue;
- if (gfc_symbols_could_alias (lref->u.c.sym, rref->u.c.sym))
+ rsym_pointer = rsym_pointer || rref->u.c.sym->attr.pointer;
+ rsym_target = lsym_target || rref->u.c.sym->attr.target;
+
+ if (symbols_could_alias (lref->u.c.sym, rref->u.c.sym,
+ lsym_pointer, lsym_target,
+ rsym_pointer, rsym_target))
return 1;
+
+ if ((lsym_pointer && (rsym_pointer || rsym_target))
+ || (rsym_pointer && (lsym_pointer || lsym_target)))
+ {
+ if (gfc_compare_types (&lref->u.c.component->ts,
+ &rref->u.c.sym->ts))
+ return 1;
+ if (gfc_compare_types (&lref->u.c.sym->ts,
+ &rref->u.c.component->ts))
+ return 1;
+ if (gfc_compare_types (&lref->u.c.component->ts,
+ &rref->u.c.component->ts))
+ return 1;
+ }
}
}
+ lsym_pointer = lsym->attr.pointer;
+ lsym_target = lsym->attr.target;
+ lsym_pointer = lsym->attr.pointer;
+ lsym_target = lsym->attr.target;
+
for (rref = rss->expr->ref; rref != rss->data.info.ref; rref = rref->next)
{
if (rref->type != REF_COMPONENT)
break;
- if (gfc_symbols_could_alias (rref->u.c.sym, lsym))
+ rsym_pointer = rsym_pointer || rref->u.c.sym->attr.pointer;
+ rsym_target = lsym_target || rref->u.c.sym->attr.target;
+
+ if (symbols_could_alias (rref->u.c.sym, lsym,
+ lsym_pointer, lsym_target,
+ rsym_pointer, rsym_target))
return 1;
+
+ if ((lsym_pointer && (rsym_pointer || rsym_target))
+ || (rsym_pointer && (lsym_pointer || lsym_target)))
+ {
+ if (gfc_compare_types (&lsym->ts, &rref->u.c.component->ts))
+ return 1;
+ }
}
return 0;
gfc_ref *lref;
gfc_ref *rref;
int nDepend = 0;
+ int i, j;
loop->temp_ss = NULL;
if (nDepend == 1)
break;
+
+ for (i = 0; i < dest->data.info.dimen; i++)
+ for (j = 0; j < ss->data.info.dimen; j++)
+ if (i != j
+ && dest->data.info.dim[i] == ss->data.info.dim[j])
+ {
+ /* If we don't access array elements in the same order,
+ there is a dependency. */
+ nDepend = 1;
+ goto temporary;
+ }
#if 0
/* TODO : loop shifting. */
if (nDepend == 1)
}
}
+temporary:
+
if (nDepend == 1)
{
tree base_type = gfc_typenode_for_spec (&dest->expr->ts);
if (GFC_ARRAY_TYPE_P (base_type)
|| GFC_DESCRIPTOR_TYPE_P (base_type))
base_type = gfc_get_element_type (base_type);
- loop->temp_ss = gfc_get_ss ();
- loop->temp_ss->type = GFC_SS_TEMP;
- loop->temp_ss->data.temp.type = base_type;
- loop->temp_ss->string_length = dest->string_length;
- loop->temp_ss->data.temp.dimen = loop->dimen;
- loop->temp_ss->next = gfc_ss_terminator;
+ loop->temp_ss = gfc_get_temp_ss (base_type, dest->string_length,
+ loop->dimen);
gfc_add_ss_to_loop (loop, loop->temp_ss);
}
else
continue;
}
+ /* Avoid using an allocatable lhs in an assignment, since
+ there might be a reallocation coming. */
+ if (loopspec[n] && ss->is_alloc_lhs)
+ continue;
+
if (ss->type != GFC_SS_SECTION)
continue;
known lower bound
known upper bound
*/
- else if (loopspec[n]->type == GFC_SS_CONSTRUCTOR && dynamic[n])
+ else if ((loopspec[n]->type == GFC_SS_CONSTRUCTOR && dynamic[n])
+ || n >= loop->dimen)
loopspec[n] = ss;
else if (integer_onep (info->stride[dim])
&& !integer_onep (specinfo->stride[spec_dim]))
&& INTEGER_CST_P (info->stride[dim]))
{
loop->from[n] = info->start[dim];
- mpz_set (i, cshape[n]);
+ mpz_set (i, cshape[get_array_ref_dim (info, n)]);
mpz_sub_ui (i, i, 1);
/* To = from + (size - 1) * stride. */
tmp = gfc_conv_mpz_to_tree (i, gfc_index_integer_kind);
if (!integer_onep (info->stride[dim]))
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type,
- tmp, info->stride[dim]);
- loop->to[n] = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- loop->from[n], tmp);
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, tmp,
+ info->stride[dim]);
+ loop->to[n] = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ loop->from[n], tmp);
}
else
{
}
/* Transform everything so we have a simple incrementing variable. */
- if (integer_onep (info->stride[dim]))
+ if (n < loop->dimen && integer_onep (info->stride[dim]))
info->delta[dim] = gfc_index_zero_node;
- else
+ else if (n < loop->dimen)
{
/* Set the delta for this section. */
info->delta[dim] = gfc_evaluate_now (loop->from[n], &loop->pre);
with start = 0, this simplifies to
last = end / step;
for (i = 0; i<=last; i++){...}; */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- loop->to[n], loop->from[n]);
- tmp = fold_build2 (FLOOR_DIV_EXPR, gfc_array_index_type,
- tmp, info->stride[dim]);
- tmp = fold_build2 (MAX_EXPR, gfc_array_index_type, tmp,
- build_int_cst (gfc_array_index_type, -1));
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, loop->to[n],
+ loop->from[n]);
+ tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR,
+ gfc_array_index_type, tmp, info->stride[dim]);
+ tmp = fold_build2_loc (input_location, MAX_EXPR, gfc_array_index_type,
+ tmp, build_int_cst (gfc_array_index_type, -1));
loop->to[n] = gfc_evaluate_now (tmp, &loop->pre);
/* Make the loop variable start at 0. */
loop->from[n] = gfc_index_zero_node;
memset (&loop->temp_ss->data.info, 0, sizeof (gfc_ss_info));
loop->temp_ss->type = GFC_SS_SECTION;
loop->temp_ss->data.info.dimen = n;
+
+ gcc_assert (loop->temp_ss->data.info.dimen != 0);
+ for (n = 0; n < loop->temp_ss->data.info.dimen; n++)
+ loop->temp_ss->data.info.dim[n] = n;
+
gfc_trans_create_temp_array (&loop->pre, &loop->post, loop,
&loop->temp_ss->data.info, tmp, NULL_TREE,
false, true, false, where);
First multiply by the stride. */
tmp = loop->from[n];
if (!integer_onep (info->stride[dim]))
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type,
- tmp, info->stride[dim]);
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ tmp, info->stride[dim]);
/* Then subtract this from our starting value. */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- info->start[dim], tmp);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ info->start[dim], tmp);
info->delta[dim] = gfc_evaluate_now (tmp, &loop->pre);
}
}
-/* Fills in an array descriptor, and returns the size of the array. The size
- will be a simple_val, ie a variable or a constant. Also calculates the
- offset of the base. Returns the size of the array.
+/* Calculate the size of a given array dimension from the bounds. This
+ is simply (ubound - lbound + 1) if this expression is positive
+ or 0 if it is negative (pick either one if it is zero). Optionally
+ (if or_expr is present) OR the (expression != 0) condition to it. */
+
+tree
+gfc_conv_array_extent_dim (tree lbound, tree ubound, tree* or_expr)
+{
+ tree res;
+ tree cond;
+
+ /* Calculate (ubound - lbound + 1). */
+ res = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
+ ubound, lbound);
+ res = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type, res,
+ gfc_index_one_node);
+
+ /* Check whether the size for this dimension is non-positive (an empty
+ extent) and clamp it to zero in that case. */
+ cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, res,
+ gfc_index_zero_node);
+ res = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type, cond,
+ gfc_index_zero_node, res);
+
+ /* OR the "this extent is empty" condition into *OR_EXPR, so the caller
+ can detect whether any dimension of the array is zero-sized. */
+ if (or_expr)
+ *or_expr = fold_build2_loc (input_location, TRUTH_OR_EXPR,
+ boolean_type_node, *or_expr, cond);
+
+ return res;
+}
+
+
+/* For an array descriptor, get the total number of elements. This is just
+ the product of the extents along from_dim to to_dim. */
+
+static tree
+gfc_conv_descriptor_size_1 (tree desc, int from_dim, int to_dim)
+{
+ tree res;
+ int dim;
+
+ /* Product accumulator; an empty dimension range yields 1. */
+ res = gfc_index_one_node;
+
+ /* Half-open range: dimensions FROM_DIM .. TO_DIM-1 of the descriptor. */
+ for (dim = from_dim; dim < to_dim; ++dim)
+ {
+ tree lbound;
+ tree ubound;
+ tree extent;
+
+ lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]);
+ ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]);
+
+ /* Extent is (ubound - lbound + 1), clamped to zero if negative. */
+ extent = gfc_conv_array_extent_dim (lbound, ubound, NULL);
+ res = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ res, extent);
+ }
+
+ return res;
+}
+
+
+/* Full size of an array. */
+
+tree
+gfc_conv_descriptor_size (tree desc, int rank)
+{
+ /* Total element count: product of the extents of dimensions 0..rank-1. */
+ return gfc_conv_descriptor_size_1 (desc, 0, rank);
+}
+
+
+/* Size of a coarray for all dimensions but the last. */
+
+tree
+gfc_conv_descriptor_cosize (tree desc, int rank, int corank)
+{
+ /* Product of the extents of codimensions rank .. rank+corank-2. The
+ last codimension is deliberately excluded -- presumably because its
+ extent is not fixed by the declared cobounds (NOTE(review): confirm
+ against the coarray image-count semantics). */
+ return gfc_conv_descriptor_size_1 (desc, rank, rank + corank - 1);
+}
+
+
+/* Fills in an array descriptor, and returns the size of the array.
+ The size will be a simple_val, ie a variable or a constant. Also
+ calculates the offset of the base. The pointer argument overflow,
+ which should be of integer type, will increase in value if overflow
+ occurs during the size calculation. Returns the size of the array.
{
stride = 1;
offset = 0;
for (n = 0; n < rank; n++)
{
- a.lbound[n] = specified_lower_bound;
- offset = offset + a.lbond[n] * stride;
- size = 1 - lbound;
- a.ubound[n] = specified_upper_bound;
- a.stride[n] = stride;
- size = siz >= 0 ? ubound + size : 0; //size = ubound + 1 - lbound
- stride = stride * size;
+ a.lbound[n] = specified_lower_bound;
+ offset = offset + a.lbound[n] * stride;
+ size = 1 - lbound;
+ a.ubound[n] = specified_upper_bound;
+ a.stride[n] = stride;
+ size = size >= 0 ? ubound + size : 0; //size = ubound + 1 - lbound
+ overflow += size == 0 ? 0: (MAX/size < stride ? 1: 0);
+ stride = stride * size;
}
+ for (n = rank; n < rank+corank; n++)
+ (Set lcobound/ucobound as above.)
+ element_size = sizeof (array element);
+ if (!rank)
+ return element_size
+ stride = (size_t) stride;
+ overflow += element_size == 0 ? 0: (MAX/element_size < stride ? 1: 0);
+ stride = stride * element_size;
return (stride);
} */
/*GCC ARRAYS*/
static tree
gfc_array_init_size (tree descriptor, int rank, int corank, tree * poffset,
- gfc_expr ** lower, gfc_expr ** upper,
- stmtblock_t * pblock)
+ gfc_expr ** lower, gfc_expr ** upper, stmtblock_t * pblock,
+ stmtblock_t * descriptor_block, tree * overflow)
{
tree type;
tree tmp;
tree size;
tree offset;
tree stride;
- tree cond;
+ tree element_size;
tree or_expr;
tree thencase;
tree elsecase;
+ tree cond;
tree var;
stmtblock_t thenblock;
stmtblock_t elseblock;
/* Set the dtype. */
tmp = gfc_conv_descriptor_dtype (descriptor);
- gfc_add_modify (pblock, tmp, gfc_get_dtype (TREE_TYPE (descriptor)));
+ gfc_add_modify (descriptor_block, tmp, gfc_get_dtype (TREE_TYPE (descriptor)));
- or_expr = NULL_TREE;
+ or_expr = boolean_false_node;
for (n = 0; n < rank; n++)
{
+ tree conv_lbound;
+ tree conv_ubound;
+
/* We have 3 possibilities for determining the size of the array:
- lower == NULL => lbound = 1, ubound = upper[n]
- upper[n] = NULL => lbound = 1, ubound = lower[n]
- upper[n] != NULL => lbound = lower[n], ubound = upper[n] */
+ lower == NULL => lbound = 1, ubound = upper[n]
+ upper[n] = NULL => lbound = 1, ubound = lower[n]
+ upper[n] != NULL => lbound = lower[n], ubound = upper[n] */
ubound = upper[n];
/* Set lower bound. */
else
{
gcc_assert (lower[n]);
- if (ubound)
- {
+ if (ubound)
+ {
gfc_conv_expr_type (&se, lower[n], gfc_array_index_type);
gfc_add_block_to_block (pblock, &se.pre);
- }
- else
- {
- se.expr = gfc_index_one_node;
- ubound = lower[n];
- }
+ }
+ else
+ {
+ se.expr = gfc_index_one_node;
+ ubound = lower[n];
+ }
}
- gfc_conv_descriptor_lbound_set (pblock, descriptor, gfc_rank_cst[n],
- se.expr);
+ gfc_conv_descriptor_lbound_set (descriptor_block, descriptor,
+ gfc_rank_cst[n], se.expr);
+ conv_lbound = se.expr;
/* Work out the offset for this component. */
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, se.expr, stride);
- offset = fold_build2 (MINUS_EXPR, gfc_array_index_type, offset, tmp);
-
- /* Start the calculation for the size of this dimension. */
- size = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, se.expr);
+ tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ se.expr, stride);
+ offset = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, offset, tmp);
/* Set upper bound. */
gfc_init_se (&se, NULL);
gfc_conv_expr_type (&se, ubound, gfc_array_index_type);
gfc_add_block_to_block (pblock, &se.pre);
- gfc_conv_descriptor_ubound_set (pblock, descriptor, gfc_rank_cst[n], se.expr);
+ gfc_conv_descriptor_ubound_set (descriptor_block, descriptor,
+ gfc_rank_cst[n], se.expr);
+ conv_ubound = se.expr;
/* Store the stride. */
- gfc_conv_descriptor_stride_set (pblock, descriptor, gfc_rank_cst[n], stride);
-
- /* Calculate the size of this dimension. */
- size = fold_build2 (PLUS_EXPR, gfc_array_index_type, se.expr, size);
-
- /* Check whether the size for this dimension is negative. */
- cond = fold_build2 (LE_EXPR, boolean_type_node, size,
- gfc_index_zero_node);
- if (n == 0)
- or_expr = cond;
- else
- or_expr = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, or_expr, cond);
-
- size = fold_build3 (COND_EXPR, gfc_array_index_type, cond,
- gfc_index_zero_node, size);
-
+ gfc_conv_descriptor_stride_set (descriptor_block, descriptor,
+ gfc_rank_cst[n], stride);
+
+ /* Calculate size and check whether extent is negative. */
+ size = gfc_conv_array_extent_dim (conv_lbound, conv_ubound, &or_expr);
+ size = gfc_evaluate_now (size, pblock);
+
+ /* Check whether multiplying the stride by the number of
+ elements in this dimension would overflow. We must also check
+ whether the current dimension has zero size in order to avoid
+ division by zero.
+ */
+ tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
+ gfc_array_index_type,
+ fold_convert (gfc_array_index_type,
+ TYPE_MAX_VALUE (gfc_array_index_type)),
+ size);
+ cond = gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node, tmp, stride));
+ tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
+ integer_one_node, integer_zero_node);
+ cond = gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
+ boolean_type_node, size,
+ gfc_index_zero_node));
+ tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
+ integer_zero_node, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
+ *overflow, tmp);
+ *overflow = gfc_evaluate_now (tmp, pblock);
+
/* Multiply the stride by the number of elements in this dimension. */
- stride = fold_build2 (MULT_EXPR, gfc_array_index_type, stride, size);
+ stride = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, stride, size);
stride = gfc_evaluate_now (stride, pblock);
}
}
else
{
- if (ubound || n == rank + corank - 1)
- {
+ if (ubound || n == rank + corank - 1)
+ {
gfc_conv_expr_type (&se, lower[n], gfc_array_index_type);
gfc_add_block_to_block (pblock, &se.pre);
- }
- else
- {
- se.expr = gfc_index_one_node;
- ubound = lower[n];
- }
+ }
+ else
+ {
+ se.expr = gfc_index_one_node;
+ ubound = lower[n];
+ }
}
- gfc_conv_descriptor_lbound_set (pblock, descriptor, gfc_rank_cst[n],
- se.expr);
+ gfc_conv_descriptor_lbound_set (descriptor_block, descriptor,
+ gfc_rank_cst[n], se.expr);
if (n < rank + corank - 1)
{
gcc_assert (ubound);
gfc_conv_expr_type (&se, ubound, gfc_array_index_type);
gfc_add_block_to_block (pblock, &se.pre);
- gfc_conv_descriptor_ubound_set (pblock, descriptor, gfc_rank_cst[n], se.expr);
+ gfc_conv_descriptor_ubound_set (descriptor_block, descriptor,
+ gfc_rank_cst[n], se.expr);
}
}
/* The stride is the number of elements in the array, so multiply by the
size of an element to get the total size. */
tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
- size = fold_build2 (MULT_EXPR, gfc_array_index_type, stride,
- fold_convert (gfc_array_index_type, tmp));
+ /* Convert to size_t. */
+ element_size = fold_convert (size_type_node, tmp);
+
+ if (rank == 0)
+ return element_size;
+
+ stride = fold_convert (size_type_node, stride);
+
+ /* First check for overflow. Since an array of type character can
+ have zero element_size, we must check for that before
+ dividing. */
+ tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
+ size_type_node,
+ TYPE_MAX_VALUE (size_type_node), element_size);
+ cond = gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node, tmp, stride));
+ tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
+ integer_one_node, integer_zero_node);
+ cond = gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
+ boolean_type_node, element_size,
+ build_int_cst (size_type_node, 0)));
+ tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
+ integer_zero_node, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
+ *overflow, tmp);
+ *overflow = gfc_evaluate_now (tmp, pblock);
+
+ size = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
+ stride, element_size);
if (poffset != NULL)
{
if (integer_zerop (or_expr))
return size;
if (integer_onep (or_expr))
- return gfc_index_zero_node;
+ return build_int_cst (size_type_node, 0);
var = gfc_create_var (TREE_TYPE (size), "size");
gfc_start_block (&thenblock);
- gfc_add_modify (&thenblock, var, gfc_index_zero_node);
+ gfc_add_modify (&thenblock, var, build_int_cst (size_type_node, 0));
thencase = gfc_finish_block (&thenblock);
gfc_start_block (&elseblock);
/*GCC ARRAYS*/
bool
-gfc_array_allocate (gfc_se * se, gfc_expr * expr, tree pstat)
+gfc_array_allocate (gfc_se * se, gfc_expr * expr, tree status, tree errmsg,
+ tree errlen)
{
tree tmp;
tree pointer;
- tree offset;
+ tree offset = NULL_TREE;
+ tree token = NULL_TREE;
tree size;
+ tree msg;
+ tree error = NULL_TREE;
+ tree overflow; /* Boolean storing whether size calculation overflows. */
+ tree var_overflow = NULL_TREE;
+ tree cond;
+ tree set_descriptor;
+ stmtblock_t set_descriptor_block;
+ stmtblock_t elseblock;
gfc_expr **lower;
gfc_expr **upper;
gfc_ref *ref, *prev_ref = NULL;
- bool allocatable_array, coarray;
+ bool allocatable, coarray, dimension;
ref = expr->ref;
if (!prev_ref)
{
- allocatable_array = expr->symtree->n.sym->attr.allocatable;
+ allocatable = expr->symtree->n.sym->attr.allocatable;
coarray = expr->symtree->n.sym->attr.codimension;
+ dimension = expr->symtree->n.sym->attr.dimension;
}
else
{
- allocatable_array = prev_ref->u.c.component->attr.allocatable;
+ allocatable = prev_ref->u.c.component->attr.allocatable;
coarray = prev_ref->u.c.component->attr.codimension;
+ dimension = prev_ref->u.c.component->attr.dimension;
}
- /* Return if this is a scalar coarray. */
- if ((!prev_ref && !expr->symtree->n.sym->attr.dimension)
- || (prev_ref && !prev_ref->u.c.component->attr.dimension))
- {
- gcc_assert (coarray);
- return false;
- }
+ if (!dimension)
+ gcc_assert (coarray);
/* Figure out the size of the array. */
switch (ref->u.ar.type)
break;
}
+ overflow = integer_zero_node;
+
+ gfc_init_block (&set_descriptor_block);
size = gfc_array_init_size (se->expr, ref->u.ar.as->rank,
ref->u.ar.as->corank, &offset, lower, upper,
- &se->pre);
+ &se->pre, &set_descriptor_block, &overflow);
+
+ if (dimension)
+ {
+
+ var_overflow = gfc_create_var (integer_type_node, "overflow");
+ gfc_add_modify (&se->pre, var_overflow, overflow);
+
+ /* Generate the block of code handling overflow. */
+ msg = gfc_build_addr_expr (pchar_type_node,
+ gfc_build_localized_cstring_const
+ ("Integer overflow when calculating the amount of "
+ "memory to allocate"));
+ error = build_call_expr_loc (input_location, gfor_fndecl_runtime_error,
+ 1, msg);
+ }
+
+ if (status != NULL_TREE)
+ {
+ tree status_type = TREE_TYPE (status);
+ stmtblock_t set_status_block;
+
+ gfc_start_block (&set_status_block);
+ gfc_add_modify (&set_status_block, status,
+ build_int_cst (status_type, LIBERROR_ALLOCATION));
+ error = gfc_finish_block (&set_status_block);
+ }
+
+ gfc_start_block (&elseblock);
/* Allocate memory to store the data. */
pointer = gfc_conv_descriptor_data_get (se->expr);
STRIP_NOPS (pointer);
- /* The allocate_array variants take the old pointer as first argument. */
- if (allocatable_array)
- tmp = gfc_allocate_array_with_status (&se->pre, pointer, size, pstat, expr);
+ if (coarray && gfc_option.coarray == GFC_FCOARRAY_LIB)
+ token = gfc_build_addr_expr (NULL_TREE,
+ gfc_conv_descriptor_token (se->expr));
+
+ /* The allocatable variant takes the old pointer as first argument. */
+ if (allocatable)
+ gfc_allocate_allocatable (&elseblock, pointer, size, token,
+ status, errmsg, errlen, expr);
+ else
+ gfc_allocate_using_malloc (&elseblock, pointer, size, status);
+
+ if (dimension)
+ {
+ cond = gfc_unlikely (fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node, var_overflow, integer_zero_node));
+ tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond,
+ error, gfc_finish_block (&elseblock));
+ }
else
- tmp = gfc_allocate_with_status (&se->pre, size, pstat);
- tmp = fold_build2 (MODIFY_EXPR, void_type_node, pointer, tmp);
+ tmp = gfc_finish_block (&elseblock);
+
gfc_add_expr_to_block (&se->pre, tmp);
- gfc_conv_descriptor_offset_set (&se->pre, se->expr, offset);
+ /* Update the array descriptors. */
+ if (dimension)
+ gfc_conv_descriptor_offset_set (&set_descriptor_block, se->expr, offset);
+
+ set_descriptor = gfc_finish_block (&set_descriptor_block);
+ if (status != NULL_TREE)
+ {
+ cond = fold_build2_loc (input_location, EQ_EXPR,
+ boolean_type_node, status,
+ build_int_cst (TREE_TYPE (status), 0));
+ gfc_add_expr_to_block (&se->pre,
+ fold_build3_loc (input_location, COND_EXPR, void_type_node,
+ gfc_likely (cond), set_descriptor,
+ build_empty_stmt (input_location)));
+ }
+ else
+ gfc_add_expr_to_block (&se->pre, set_descriptor);
- if (expr->ts.type == BT_DERIVED
+ if ((expr->ts.type == BT_DERIVED || expr->ts.type == BT_CLASS)
&& expr->ts.u.derived->attr.alloc_comp)
{
tmp = gfc_nullify_alloc_comp (expr->ts.u.derived, se->expr,
gfc_add_expr_to_block (&block, tmp);
/* Zero the data pointer. */
- tmp = fold_build2 (MODIFY_EXPR, void_type_node,
- var, build_int_cst (TREE_TYPE (var), 0));
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
+ var, build_int_cst (TREE_TYPE (var), 0));
gfc_add_expr_to_block (&block, tmp);
return gfc_finish_block (&block);
gfc_se se;
HOST_WIDE_INT hi;
unsigned HOST_WIDE_INT lo;
- tree index;
+ tree index, range;
VEC(constructor_elt,gc) *v = NULL;
switch (expr->expr_type)
else
index = NULL_TREE;
+ if (mpz_cmp_si (c->repeat, 1) > 0)
+ {
+ tree tmp1, tmp2;
+ mpz_t maxval;
+
+ mpz_init (maxval);
+ mpz_add (maxval, c->offset, c->repeat);
+ mpz_sub_ui (maxval, maxval, 1);
+ tmp2 = gfc_conv_mpz_to_tree (maxval, gfc_index_integer_kind);
+ if (mpz_cmp_si (c->offset, 0) != 0)
+ {
+ mpz_add_ui (maxval, c->offset, 1);
+ tmp1 = gfc_conv_mpz_to_tree (maxval, gfc_index_integer_kind);
+ }
+ else
+ tmp1 = gfc_conv_mpz_to_tree (c->offset, gfc_index_integer_kind);
+
+ range = fold_build2 (RANGE_EXPR, gfc_array_index_type, tmp1, tmp2);
+ mpz_clear (maxval);
+ }
+ else
+ range = NULL;
+
gfc_init_se (&se, NULL);
switch (c->expr->expr_type)
{
case EXPR_CONSTANT:
gfc_conv_constant (&se, c->expr);
- CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
break;
case EXPR_STRUCTURE:
gfc_conv_structure (&se, c->expr, 1);
- CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
break;
-
default:
/* Catch those occasional beasts that do not simplify
for one reason or another, assuming that if they are
standard defying the frontend will catch them. */
gfc_conv_expr (&se, c->expr);
- CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
break;
}
+
+ if (range == NULL_TREE)
+ CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
+ else
+ {
+ if (index != NULL_TREE)
+ CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
+ CONSTRUCTOR_APPEND_ELT (v, range, se.expr);
+ }
}
break;
}
+/* Generate code to evaluate non-constant coarray cobounds. */
+
+void
+gfc_trans_array_cobounds (tree type, stmtblock_t * pblock,
+ const gfc_symbol *sym)
+{
+ int dim;
+ tree ubound;
+ tree lbound;
+ gfc_se se;
+ gfc_array_spec *as;
+
+ as = sym->as;
+
+ /* Only the codimensions are processed here: dimensions as->rank ..
+ as->rank + as->corank - 1 of the array spec. The ordinary array
+ bounds are handled separately. */
+ for (dim = as->rank; dim < as->rank + as->corank; dim++)
+ {
+ /* Evaluate non-constant cobound expressions into PBLOCK; bounds
+ already folded to integer constants need no runtime code. */
+ lbound = GFC_TYPE_ARRAY_LBOUND (type, dim);
+ if (as->lower[dim] && !INTEGER_CST_P (lbound))
+ {
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_type (&se, as->lower[dim], gfc_array_index_type);
+ gfc_add_block_to_block (pblock, &se.pre);
+ gfc_add_modify (pblock, lbound, se.expr);
+ }
+ ubound = GFC_TYPE_ARRAY_UBOUND (type, dim);
+ if (as->upper[dim] && !INTEGER_CST_P (ubound))
+ {
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_type (&se, as->upper[dim], gfc_array_index_type);
+ gfc_add_block_to_block (pblock, &se.pre);
+ gfc_add_modify (pblock, ubound, se.expr);
+ }
+ }
+}
+
+
/* Generate code to evaluate non-constant array bounds. Sets *poffset and
returns the size (in elements) of the array. */
gfc_add_modify (pblock, ubound, se.expr);
}
/* The offset of this dimension. offset = offset - lbound * stride. */
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, lbound, size);
- offset = fold_build2 (MINUS_EXPR, gfc_array_index_type, offset, tmp);
+ tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ lbound, size);
+ offset = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
+ offset, tmp);
/* The size of this dimension, and the stride of the next. */
if (dim + 1 < as->rank)
if (ubound != NULL_TREE && !(stride && INTEGER_CST_P (stride)))
{
/* Calculate stride = size * (ubound + 1 - lbound). */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, lbound);
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type, ubound, tmp);
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ gfc_index_one_node, lbound);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, ubound, tmp);
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, size, tmp);
if (stride)
gfc_add_modify (pblock, stride, tmp);
else
/* Make sure that negative size arrays are translated
to being zero size. */
- tmp = fold_build2 (GE_EXPR, boolean_type_node,
- stride, gfc_index_zero_node);
- tmp = fold_build3 (COND_EXPR, gfc_array_index_type, tmp,
- stride, gfc_index_zero_node);
+ tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ stride, gfc_index_zero_node);
+ tmp = fold_build3_loc (input_location, COND_EXPR,
+ gfc_array_index_type, tmp,
+ stride, gfc_index_zero_node);
gfc_add_modify (pblock, stride, tmp);
}
size = stride;
}
+ gfc_trans_array_cobounds (type, pblock, sym);
gfc_trans_vla_type_sizes (sym, pblock);
*poffset = offset;
{
stmtblock_t init;
tree type;
- tree tmp;
+ tree tmp = NULL_TREE;
tree size;
tree offset;
+ tree space;
+ tree inittree;
bool onstack;
gcc_assert (!(sym->attr.pointer || sym->attr.allocatable));
gcc_assert (GFC_ARRAY_TYPE_P (type));
onstack = TREE_CODE (type) != POINTER_TYPE;
- gfc_start_block (&init);
+ gfc_init_block (&init);
/* Evaluate character string length. */
if (sym->ts.type == BT_CHARACTER
/* Emit a DECL_EXPR for this variable, which will cause the
gimplifier to allocate storage, and all that good stuff. */
- tmp = fold_build1 (DECL_EXPR, TREE_TYPE (decl), decl);
+ tmp = fold_build1_loc (input_location, DECL_EXPR, TREE_TYPE (decl), decl);
gfc_add_expr_to_block (&init, tmp);
}
return;
}
- /* The size is the number of elements in the array, so multiply by the
- size of an element to get the total size. */
- tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
- size = fold_build2 (MULT_EXPR, gfc_array_index_type, size,
- fold_convert (gfc_array_index_type, tmp));
+ if (gfc_option.flag_stack_arrays)
+ {
+ gcc_assert (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE);
+ space = build_decl (sym->declared_at.lb->location,
+ VAR_DECL, create_tmp_var_name ("A"),
+ TREE_TYPE (TREE_TYPE (decl)));
+ gfc_trans_vla_type_sizes (sym, &init);
+ }
+ else
+ {
+ /* The size is the number of elements in the array, so multiply by the
+ size of an element to get the total size. */
+ tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
+ size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ size, fold_convert (gfc_array_index_type, tmp));
- /* Allocate memory to hold the data. */
- tmp = gfc_call_malloc (&init, TREE_TYPE (decl), size);
- gfc_add_modify (&init, decl, tmp);
+ /* Allocate memory to hold the data. */
+ tmp = gfc_call_malloc (&init, TREE_TYPE (decl), size);
+ gfc_add_modify (&init, decl, tmp);
+
+ /* Free the temporary. */
+ tmp = gfc_call_free (convert (pvoid_type_node, decl));
+ space = NULL_TREE;
+ }
/* Set offset of the array. */
if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
/* Automatic arrays should not have initializers. */
gcc_assert (!sym->value);
- /* Free the temporary. */
- tmp = gfc_call_free (convert (pvoid_type_node, decl));
+ inittree = gfc_finish_block (&init);
- gfc_add_init_cleanup (block, gfc_finish_block (&init), tmp);
+ if (space)
+ {
+ tree addr;
+ pushdecl (space);
+
+ /* Don't create new scope, emit the DECL_EXPR in exactly the scope
+ where also space is located. */
+ gfc_init_block (&init);
+ tmp = fold_build1_loc (input_location, DECL_EXPR,
+ TREE_TYPE (space), space);
+ gfc_add_expr_to_block (&init, tmp);
+ addr = fold_build1_loc (sym->declared_at.lb->location,
+ ADDR_EXPR, TREE_TYPE (decl), space);
+ gfc_add_modify (&init, decl, addr);
+ gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
+ tmp = NULL_TREE;
+ }
+ gfc_add_init_cleanup (block, inittree, tmp);
}
locus loc;
tree offset;
tree tmp;
- tree stmt;
+ tree stmt;
stmtblock_t init;
- gfc_get_backend_locus (&loc);
+ gfc_save_backend_locus (&loc);
gfc_set_backend_locus (&sym->declared_at);
/* Descriptor type. */
}
stmt = gfc_finish_block (&init);
- gfc_set_backend_locus (&loc);
+ gfc_restore_backend_locus (&loc);
/* Add the initialization code to the start of the function. */
return;
}
- gfc_get_backend_locus (&loc);
+ gfc_save_backend_locus (&loc);
gfc_set_backend_locus (&sym->declared_at);
/* Descriptor type. */
partial = gfc_create_var (boolean_type_node, "partial");
TREE_USED (partial) = 1;
tmp = gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[0]);
- tmp = fold_build2 (EQ_EXPR, boolean_type_node, tmp, gfc_index_one_node);
+ tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, tmp,
+ gfc_index_one_node);
gfc_add_modify (&init, partial, tmp);
}
else
stride = gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[0]);
stride = gfc_evaluate_now (stride, &init);
- tmp = fold_build2 (EQ_EXPR, boolean_type_node,
- stride, gfc_index_zero_node);
- tmp = fold_build3 (COND_EXPR, gfc_array_index_type, tmp,
- gfc_index_one_node, stride);
+ tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ stride, gfc_index_zero_node);
+ tmp = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type,
+ tmp, gfc_index_one_node, stride);
stride = GFC_TYPE_ARRAY_STRIDE (type, 0);
gfc_add_modify (&init, stride, tmp);
if (stmt_packed != NULL_TREE && stmt_unpacked != NULL_TREE)
{
/* Don't repack unknown shape arrays when the first stride is 1. */
- tmp = fold_build3 (COND_EXPR, TREE_TYPE (stmt_packed),
- partial, stmt_packed, stmt_unpacked);
+ tmp = fold_build3_loc (input_location, COND_EXPR, TREE_TYPE (stmt_packed),
+ partial, stmt_packed, stmt_unpacked);
}
else
tmp = stmt_packed != NULL_TREE ? stmt_packed : stmt_unpacked;
char * msg;
tree temp;
- temp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- ubound, lbound);
- temp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, temp);
-
- stride2 = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- dubound, dlbound);
- stride2 = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, stride2);
-
- tmp = fold_build2 (NE_EXPR, gfc_array_index_type, temp, stride2);
+ temp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, ubound, lbound);
+ temp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ gfc_index_one_node, temp);
+ stride2 = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, dubound,
+ dlbound);
+ stride2 = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ gfc_index_one_node, stride2);
+ tmp = fold_build2_loc (input_location, NE_EXPR,
+ gfc_array_index_type, temp, stride2);
asprintf (&msg, "Dimension %d of array '%s' has extent "
"%%ld instead of %%ld", n+1, sym->name);
fold_convert (long_integer_type_node, temp),
fold_convert (long_integer_type_node, stride2));
- gfc_free (msg);
+ free (msg);
}
}
else
{
/* For assumed shape arrays move the upper bound by the same amount
as the lower bound. */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- dubound, dlbound);
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type, tmp, lbound);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, dubound, dlbound);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, tmp, lbound);
gfc_add_modify (&init, ubound, tmp);
}
/* The offset of this dimension. offset = offset - lbound * stride. */
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, lbound, stride);
- offset = fold_build2 (MINUS_EXPR, gfc_array_index_type, offset, tmp);
+ tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ lbound, stride);
+ offset = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, offset, tmp);
/* The size of this dimension, and the stride of the next. */
if (n + 1 < sym->as->rank)
else
{
/* Calculate stride = size * (ubound + 1 - lbound). */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, lbound);
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- ubound, tmp);
- size = fold_build2 (MULT_EXPR, gfc_array_index_type,
- size, tmp);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ gfc_index_one_node, lbound);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, ubound, tmp);
+ size = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, size, tmp);
stmt_packed = size;
}
/* Assign the stride. */
if (stmt_packed != NULL_TREE && stmt_unpacked != NULL_TREE)
- tmp = fold_build3 (COND_EXPR, gfc_array_index_type, partial,
- stmt_unpacked, stmt_packed);
+ tmp = fold_build3_loc (input_location, COND_EXPR,
+ gfc_array_index_type, partial,
+ stmt_unpacked, stmt_packed);
else
tmp = (stmt_packed != NULL_TREE) ? stmt_packed : stmt_unpacked;
gfc_add_modify (&init, stride, tmp);
if (stride && !INTEGER_CST_P (stride))
{
/* Calculate size = stride * (ubound + 1 - lbound). */
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, lbound);
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- ubound, tmp);
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type,
- GFC_TYPE_ARRAY_STRIDE (type, n), tmp);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ gfc_index_one_node, lbound);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ ubound, tmp);
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ GFC_TYPE_ARRAY_STRIDE (type, n), tmp);
gfc_add_modify (&init, stride, tmp);
}
}
}
+ gfc_trans_array_cobounds (type, &init, sym);
+
/* Set the offset. */
if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
/* Only do the cleanup if the array was repacked. */
tmp = build_fold_indirect_ref_loc (input_location, dumdesc);
tmp = gfc_conv_descriptor_data_get (tmp);
- tmp = fold_build2 (NE_EXPR, boolean_type_node, tmp, tmpdesc);
+ tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ tmp, tmpdesc);
stmtCleanup = build3_v (COND_EXPR, tmp, stmtCleanup,
build_empty_stmt (input_location));
/* We don't need to free any memory allocated by internal_pack as it will
be freed at the end of the function by pop_context. */
gfc_add_init_cleanup (block, stmtInit, stmtCleanup);
+
+ gfc_restore_backend_locus (&loc);
}
case REF_COMPONENT:
field = ref->u.c.component->backend_decl;
gcc_assert (field && TREE_CODE (field) == FIELD_DECL);
- tmp = fold_build3 (COMPONENT_REF, TREE_TYPE (field),
- tmp, field, NULL_TREE);
+ tmp = fold_build3_loc (input_location, COMPONENT_REF,
+ TREE_TYPE (field),
+ tmp, field, NULL_TREE);
break;
case REF_SUBSTRING:
gfc_init_se (&start, NULL);
gfc_conv_expr_type (&start, ref->u.ar.as->lower[n], gfc_array_index_type);
jtmp = gfc_evaluate_now (start.expr, block);
- itmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, itmp, jtmp);
- itmp = fold_build2 (MULT_EXPR, gfc_array_index_type, itmp, stride);
- index = fold_build2 (PLUS_EXPR, gfc_array_index_type, itmp, index);
+ itmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, itmp, jtmp);
+ itmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, itmp, stride);
+ index = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, itmp, index);
index = gfc_evaluate_now (index, block);
/* Update the stride. */
gfc_init_se (&start, NULL);
gfc_conv_expr_type (&start, ref->u.ar.as->upper[n], gfc_array_index_type);
- itmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, start.expr, jtmp);
- itmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, itmp);
- stride = fold_build2 (MULT_EXPR, gfc_array_index_type, stride, itmp);
+ itmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, start.expr,
+ jtmp);
+ itmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ gfc_index_one_node, itmp);
+ stride = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, stride, itmp);
stride = gfc_evaluate_now (stride, block);
}
/* Add the string lengths and assign them to the expression
string length backend declaration. */
gfc_add_modify (&se->pre, expr->ts.u.cl->backend_decl,
- fold_build2 (PLUS_EXPR, gfc_charlen_type_node,
+ fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_charlen_type_node,
expr->value.op.op1->ts.u.cl->backend_decl,
expr->value.op.op2->ts.u.cl->backend_decl));
}
gfc_add_block_to_block (&se->pre, &tse.pre);
gfc_add_block_to_block (&se->post, &tse.post);
tse.expr = fold_convert (gfc_charlen_type_node, tse.expr);
- tse.expr = fold_build2 (MAX_EXPR, gfc_charlen_type_node, tse.expr,
- build_int_cst (gfc_charlen_type_node, 0));
+ tse.expr = fold_build2_loc (input_location, MAX_EXPR,
+ gfc_charlen_type_node, tse.expr,
+ build_int_cst (gfc_charlen_type_node, 0));
expr->ts.u.cl->backend_decl = tse.expr;
gfc_free_interface_mapping (&mapping);
break;
}
}
-
+/* Helper function to check dimensions: return true if the scalarizer
+   dimensions recorded in INFO are in their natural order, i.e.
+   info->dim[n] == n for every n up to info->dimen.  Returns false for
+   permuted sections (e.g. the result of TRANSPOSE), which need the
+   general per-dimension handling.  */
+static bool
+dim_ok (gfc_ss_info *info)
+{
+ int n;
+ for (n = 0; n < info->dimen; n++)
+ if (info->dim[n] != n)
+ return false;
+ return true;
+}
/* Convert an array for passing as an actual argument. Expressions and
vector subscripts are evaluated and stored in a temporary, which is then
EXPR is the right-hand side of a pointer assignment and
se->expr is the descriptor for the previously-evaluated
left-hand side. The function creates an assignment from
- EXPR to se->expr. */
+ EXPR to se->expr.
+
+
+ The se->force_tmp flag disables the non-copying descriptor optimization
+ that is used for transpose. It may be used in cases where there is an
+ alias between the transpose argument and another argument in the same
+ function call. */
void
gfc_conv_expr_descriptor (gfc_se * se, gfc_expr * expr, gfc_ss * ss)
{
gfc_loopinfo loop;
- gfc_ss *secss;
gfc_ss_info *info;
int need_tmp;
int n;
tree offset;
int full;
bool subref_array_target = false;
+ gfc_expr *arg;
+ gcc_assert (ss != NULL);
gcc_assert (ss != gfc_ss_terminator);
/* Special case things we know we can pass easily. */
/* If we have a linear array section, we can pass it directly.
Otherwise we need to copy it into a temporary. */
- /* Find the SS for the array section. */
- secss = ss;
- while (secss != gfc_ss_terminator && secss->type != GFC_SS_SECTION)
- secss = secss->next;
-
- gcc_assert (secss != gfc_ss_terminator);
- info = &secss->data.info;
+ gcc_assert (ss->type == GFC_SS_SECTION);
+ gcc_assert (ss->expr == expr);
+ info = &ss->data.info;
/* Get the descriptor for the array. */
- gfc_conv_ss_descriptor (&se->pre, secss, 0);
+ gfc_conv_ss_descriptor (&se->pre, ss, 0);
desc = info->descriptor;
subref_array_target = se->direct_byref && is_subref_array (expr);
need_tmp = gfc_ref_needs_temporary_p (expr->ref)
&& !subref_array_target;
+ if (se->force_tmp)
+ need_tmp = 1;
+
if (need_tmp)
full = 0;
else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
else
full = gfc_full_array_ref_p (info->ref, NULL);
- if (full)
+ if (full && dim_ok (info))
{
- if (se->direct_byref)
+ if (se->direct_byref && !se->byref_noassign)
{
/* Copy the descriptor for pointer assignments. */
gfc_add_modify (&se->pre, se->expr, desc);
break;
case EXPR_FUNCTION:
+
+ /* We don't need to copy data in some cases. */
+ arg = gfc_get_noncopying_intrinsic_argument (expr);
+ if (arg)
+ {
+ /* This is a call to transpose... */
+ gcc_assert (expr->value.function.isym->id == GFC_ISYM_TRANSPOSE);
+ /* ... which has already been handled by the scalarizer, so
+ that we just need to get its argument's descriptor. */
+ gfc_conv_expr_descriptor (se, expr->value.function.actual->expr, ss);
+ return;
+ }
+
/* A transformational function return value will be a temporary
array descriptor. We still need to go through the scalarizer
to create the descriptor. Elemental functions ar handled as
arbitrary expressions, i.e. copy to a temporary. */
- secss = ss;
- /* Look for the SS for this function. */
- while (secss != gfc_ss_terminator
- && (secss->type != GFC_SS_FUNCTION || secss->expr != expr))
- secss = secss->next;
if (se->direct_byref)
{
- gcc_assert (secss != gfc_ss_terminator);
+ gcc_assert (ss->type == GFC_SS_FUNCTION && ss->expr == expr);
/* For pointer assignments pass the descriptor directly. */
- se->ss = secss;
+ if (se->ss == NULL)
+ se->ss = ss;
+ else
+ gcc_assert (se->ss == ss);
se->expr = gfc_build_addr_expr (NULL_TREE, se->expr);
gfc_conv_expr (se, expr);
return;
}
- if (secss == gfc_ss_terminator)
+ if (ss->expr != expr || ss->type != GFC_SS_FUNCTION)
{
- /* Elemental function. */
+ if (ss->expr != expr)
+ /* Elemental function. */
+ gcc_assert ((expr->value.function.esym != NULL
+ && expr->value.function.esym->attr.elemental)
+ || (expr->value.function.isym != NULL
+ && expr->value.function.isym->elemental));
+ else
+ gcc_assert (ss->type == GFC_SS_INTRINSIC);
+
need_tmp = 1;
if (expr->ts.type == BT_CHARACTER
&& expr->ts.u.cl->length->expr_type != EXPR_CONSTANT)
else
{
/* Transformational function. */
- info = &secss->data.info;
+ info = &ss->data.info;
need_tmp = 0;
}
break;
{
need_tmp = 0;
info = &ss->data.info;
- secss = ss;
}
else
{
need_tmp = 1;
- secss = NULL;
info = NULL;
}
break;
default:
/* Something complicated. Copy it into a temporary. */
need_tmp = 1;
- secss = NULL;
info = NULL;
break;
}
+ /* If we are creating a temporary, we don't need to bother about aliases
+ anymore. */
+ if (need_tmp)
+ se->force_tmp = 0;
+
gfc_init_loopinfo (&loop);
/* Associate the SS with the loop. */
if (need_tmp)
{
- /* Tell the scalarizer to make a temporary. */
- loop.temp_ss = gfc_get_ss ();
- loop.temp_ss->type = GFC_SS_TEMP;
- loop.temp_ss->next = gfc_ss_terminator;
-
- if (expr->ts.type == BT_CHARACTER
- && !expr->ts.u.cl->backend_decl)
+ if (expr->ts.type == BT_CHARACTER && !expr->ts.u.cl->backend_decl)
get_array_charlen (expr, se);
- loop.temp_ss->data.temp.type = gfc_typenode_for_spec (&expr->ts);
-
- if (expr->ts.type == BT_CHARACTER)
- loop.temp_ss->string_length = expr->ts.u.cl->backend_decl;
- else
- loop.temp_ss->string_length = NULL;
+ /* Tell the scalarizer to make a temporary. */
+ loop.temp_ss = gfc_get_temp_ss (gfc_typenode_for_spec (&expr->ts),
+ ((expr->ts.type == BT_CHARACTER)
+ ? expr->ts.u.cl->backend_decl
+ : NULL),
+ loop.dimen);
se->string_length = loop.temp_ss->string_length;
- loop.temp_ss->data.temp.dimen = loop.dimen;
+ gcc_assert (loop.temp_ss->data.temp.dimen == loop.dimen);
gfc_add_ss_to_loop (&loop, loop.temp_ss);
}
lse.string_length = rse.string_length;
tmp = gfc_trans_scalar_assign (&lse, &rse, expr->ts, true,
- expr->expr_type == EXPR_VARIABLE, true);
+ expr->expr_type == EXPR_VARIABLE
+ || expr->expr_type == EXPR_ARRAY, true);
gfc_add_expr_to_block (&block, tmp);
/* Finish the copying loops. */
desc = loop.temp_ss->data.info.descriptor;
}
- else if (expr->expr_type == EXPR_FUNCTION)
+ else if (expr->expr_type == EXPR_FUNCTION && dim_ok (info))
{
desc = info->descriptor;
se->string_length = ss->string_length;
limits will be the limits of the section.
A function may decide to repack the array to speed up access, but
we're not bothered about that here. */
- int dim, ndim;
+ int dim, ndim, codim;
tree parm;
tree parmtype;
tree stride;
tree to;
tree base;
+ ndim = info->ref ? info->ref->u.ar.dimen : info->dimen;
+
+ if (se->want_coarray)
+ {
+ gfc_array_ref *ar = &info->ref->u.ar;
+
+ codim = gfc_get_corank (expr);
+ for (n = 0; n < codim - 1; n++)
+ {
+ /* Make sure we are not lost somehow. */
+ gcc_assert (ar->dimen_type[n + ndim] == DIMEN_THIS_IMAGE);
+
+ /* Make sure the call to gfc_conv_section_startstride won't
+ generate unnecessary code to calculate stride. */
+ gcc_assert (ar->stride[n + ndim] == NULL);
+
+ gfc_conv_section_startstride (&loop, ss, n + ndim);
+ loop.from[n + loop.dimen] = info->start[n + ndim];
+ loop.to[n + loop.dimen] = info->end[n + ndim];
+ }
+
+ gcc_assert (n == codim - 1);
+ evaluate_bound (&loop.pre, info->start, ar->start,
+ info->descriptor, n + ndim, true);
+ loop.from[n + loop.dimen] = info->start[n + ndim];
+ }
+ else
+ codim = 0;
+
/* Set the string_length for a character array. */
if (expr->ts.type == BT_CHARACTER)
se->string_length = gfc_get_expr_charlen (expr);
desc = info->descriptor;
- gcc_assert (secss && secss != gfc_ss_terminator);
- if (se->direct_byref)
+ if (se->direct_byref && !se->byref_noassign)
{
/* For pointer assignments we fill in the destination. */
parm = se->expr;
{
/* Otherwise make a new one. */
parmtype = gfc_get_element_type (TREE_TYPE (desc));
- parmtype = gfc_get_array_type_bounds (parmtype, loop.dimen, 0,
+ parmtype = gfc_get_array_type_bounds (parmtype, loop.dimen, codim,
loop.from, loop.to, 0,
GFC_ARRAY_UNKNOWN, false);
parm = gfc_create_var (parmtype, "parm");
}
offset = gfc_index_zero_node;
- dim = 0;
/* The following can be somewhat confusing. We have two
descriptors, a new one and the original array.
{parm, parmtype, dim} refer to the new one.
- {desc, type, n, secss, loop} refer to the original, which maybe
+ {desc, type, n, loop} refer to the original, which maybe
a descriptorless array.
The bounds of the scalarization are the bounds of the section.
We don't have to worry about numeric overflows when calculating
else
base = NULL_TREE;
- ndim = info->ref ? info->ref->u.ar.dimen : info->dimen;
for (n = 0; n < ndim; n++)
{
stride = gfc_conv_array_stride (desc, n);
}
else
{
- /* Check we haven't somehow got out of sync. */
- gcc_assert (info->dim[dim] == n);
-
/* Evaluate and remember the start of the section. */
start = info->start[n];
stride = gfc_evaluate_now (stride, &loop.pre);
}
tmp = gfc_conv_array_lbound (desc, n);
- tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), start, tmp);
-
- tmp = fold_build2 (MULT_EXPR, TREE_TYPE (tmp), tmp, stride);
- offset = fold_build2 (PLUS_EXPR, TREE_TYPE (tmp), offset, tmp);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (tmp),
+ start, tmp);
+ tmp = fold_build2_loc (input_location, MULT_EXPR, TREE_TYPE (tmp),
+ tmp, stride);
+ offset = fold_build2_loc (input_location, PLUS_EXPR, TREE_TYPE (tmp),
+ offset, tmp);
if (info->ref
&& info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
/* Vector subscripts need copying and are handled elsewhere. */
if (info->ref)
gcc_assert (info->ref->u.ar.dimen_type[n] == DIMEN_RANGE);
+
+ /* Look for the corresponding scalarizer dimension: dim.  */
+ for (dim = 0; dim < ndim; dim++)
+ if (info->dim[dim] == n)
+ break;
+
+ /* Loop exited early: the DIM being looked for has been found.  */
+ gcc_assert (dim < ndim);
/* Set the new lower bound. */
from = loop.from[dim];
|| info->ref->u.ar.type != AR_FULL)
&& !integer_onep (from))
{
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, from);
- to = fold_build2 (PLUS_EXPR, gfc_array_index_type, to, tmp);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, gfc_index_one_node,
+ from);
+ to = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, to, tmp);
from = gfc_index_one_node;
}
gfc_conv_descriptor_lbound_set (&loop.pre, parm,
/* Multiply the stride by the section stride to get the
total stride. */
- stride = fold_build2 (MULT_EXPR, gfc_array_index_type,
- stride, info->stride[n]);
+ stride = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ stride, info->stride[n]);
if (se->direct_byref
&& info->ref
&& info->ref->u.ar.type != AR_FULL)
{
- base = fold_build2 (MINUS_EXPR, TREE_TYPE (base),
- base, stride);
+ base = fold_build2_loc (input_location, MINUS_EXPR,
+ TREE_TYPE (base), base, stride);
}
else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
{
tmp = gfc_conv_array_lbound (desc, n);
- tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (base),
- tmp, loop.from[dim]);
- tmp = fold_build2 (MULT_EXPR, TREE_TYPE (base),
- tmp, gfc_conv_array_stride (desc, n));
- base = fold_build2 (PLUS_EXPR, TREE_TYPE (base),
- tmp, base);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ TREE_TYPE (base), tmp, loop.from[dim]);
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ TREE_TYPE (base), tmp,
+ gfc_conv_array_stride (desc, n));
+ base = fold_build2_loc (input_location, PLUS_EXPR,
+ TREE_TYPE (base), tmp, base);
}
/* Store the new stride. */
gfc_conv_descriptor_stride_set (&loop.pre, parm,
gfc_rank_cst[dim], stride);
+ }
- dim++;
+ for (n = loop.dimen; n < loop.dimen + codim; n++)
+ {
+ from = loop.from[n];
+ to = loop.to[n];
+ gfc_conv_descriptor_lbound_set (&loop.pre, parm,
+ gfc_rank_cst[n], from);
+ if (n < loop.dimen + codim - 1)
+ gfc_conv_descriptor_ubound_set (&loop.pre, parm,
+ gfc_rank_cst[n], to);
}
if (se->data_not_needed)
desc = parm;
}
- if (!se->direct_byref)
+ if (!se->direct_byref || se->byref_noassign)
{
/* Get a pointer to the new descriptor. */
if (se->want_pointer)
tree ubound = gfc_conv_descriptor_ubound_get (desc, gfc_index_zero_node);
tree lbound = gfc_conv_descriptor_lbound_get (desc, gfc_index_zero_node);
- *size = fold_build2 (MINUS_EXPR, gfc_array_index_type, ubound, lbound);
- *size = fold_build2 (PLUS_EXPR, gfc_array_index_type, *size,
- gfc_index_one_node);
- *size = fold_build2 (MAX_EXPR, gfc_array_index_type, *size,
- gfc_index_zero_node);
+ *size = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, ubound, lbound);
+ *size = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ *size, gfc_index_one_node);
+ *size = fold_build2_loc (input_location, MAX_EXPR, gfc_array_index_type,
+ *size, gfc_index_zero_node);
}
elem = TYPE_SIZE_UNIT (gfc_get_element_type (TREE_TYPE (desc)));
- *size = fold_build2 (MULT_EXPR, gfc_array_index_type, *size,
- fold_convert (gfc_array_index_type, elem));
+ *size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ *size, fold_convert (gfc_array_index_type, elem));
}
/* Convert an array for passing as an actual parameter. */
if (sym->ts.type == BT_CHARACTER)
se->string_length = sym->ts.u.cl->backend_decl;
- if (sym->ts.type == BT_DERIVED)
+ if (sym->ts.type == BT_DERIVED || sym->ts.type == BT_CLASS)
{
gfc_conv_expr_descriptor (se, expr, ss);
se->expr = gfc_conv_array_data (se->expr);
/* Deallocate the allocatable components of structures that are
not variable. */
- if (expr->ts.type == BT_DERIVED
+ if ((expr->ts.type == BT_DERIVED || expr->ts.type == BT_CLASS)
&& expr->ts.u.derived->attr.alloc_comp
&& expr->expr_type != EXPR_VARIABLE)
{
- tmp = build_fold_indirect_ref_loc (input_location,
- se->expr);
+ tmp = build_fold_indirect_ref_loc (input_location, se->expr);
tmp = gfc_deallocate_alloc_comp (expr->ts.u.derived, tmp, expr->rank);
- gfc_add_expr_to_block (&se->post, tmp);
+
+ /* The components shall be deallocated before their containing entity. */
+ gfc_prepend_expr_to_block (&se->post, tmp);
}
if (g77 || (fsym && fsym->attr.contiguous
origptr = gfc_create_var (pvoid_type_node, "origptr");
tmp = build_fold_indirect_ref_loc (input_location, desc);
tmp = gfc_conv_array_data (tmp);
- tmp = fold_build2 (MODIFY_EXPR, TREE_TYPE (origptr), origptr,
- fold_convert (TREE_TYPE (origptr), tmp));
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR,
+ TREE_TYPE (origptr), origptr,
+ fold_convert (TREE_TYPE (origptr), tmp));
gfc_add_expr_to_block (&se->pre, tmp);
}
if (fsym && fsym->attr.optional && sym && sym->attr.optional)
{
tmp = gfc_conv_expr_present (sym);
- ptr = build3 (COND_EXPR, TREE_TYPE (se->expr), tmp,
- fold_convert (TREE_TYPE (se->expr), ptr),
+ ptr = build3_loc (input_location, COND_EXPR, TREE_TYPE (se->expr),
+ tmp, fold_convert (TREE_TYPE (se->expr), ptr),
fold_convert (TREE_TYPE (se->expr), null_pointer_node));
}
tmp = build_fold_indirect_ref_loc (input_location,
desc);
tmp = gfc_conv_array_data (tmp);
- tmp = fold_build2 (NE_EXPR, boolean_type_node,
- fold_convert (TREE_TYPE (tmp), ptr), tmp);
+ tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ fold_convert (TREE_TYPE (tmp), ptr), tmp);
if (fsym && fsym->attr.optional && sym && sym->attr.optional)
- tmp = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- gfc_conv_expr_present (sym), tmp);
+ tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node,
+ gfc_conv_expr_present (sym), tmp);
gfc_trans_runtime_check (false, true, tmp, &se->pre,
&expr->where, msg);
- gfc_free (msg);
+ free (msg);
}
gfc_start_block (&block);
tmp = build_fold_indirect_ref_loc (input_location,
desc);
tmp = gfc_conv_array_data (tmp);
- tmp = fold_build2 (NE_EXPR, boolean_type_node,
- fold_convert (TREE_TYPE (tmp), ptr), tmp);
+ tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ fold_convert (TREE_TYPE (tmp), ptr), tmp);
if (fsym && fsym->attr.optional && sym && sym->attr.optional)
- tmp = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
- gfc_conv_expr_present (sym), tmp);
+ tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node,
+ gfc_conv_expr_present (sym), tmp);
tmp = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt (input_location));
gfc_add_expr_to_block (&block, tmp);
/* Zero the data pointer. */
- tmp = fold_build2 (MODIFY_EXPR, void_type_node,
- var, build_int_cst (TREE_TYPE (var), 0));
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
+ var, build_int_cst (TREE_TYPE (var), 0));
gfc_add_expr_to_block (&block, tmp);
return gfc_finish_block (&block);
idx = gfc_rank_cst[rank - 1];
nelems = gfc_conv_descriptor_ubound_get (decl, idx);
tmp = gfc_conv_descriptor_lbound_get (decl, idx);
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, nelems, tmp);
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- tmp, gfc_index_one_node);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
+ nelems, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ tmp, gfc_index_one_node);
tmp = gfc_evaluate_now (tmp, block);
nelems = gfc_conv_descriptor_stride_get (decl, idx);
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, nelems, tmp);
+ tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ nelems, tmp);
return gfc_evaluate_now (tmp, block);
}
If no_malloc is set, only the copy is done. */
static tree
-duplicate_allocatable(tree dest, tree src, tree type, int rank,
- bool no_malloc)
+duplicate_allocatable (tree dest, tree src, tree type, int rank,
+ bool no_malloc)
{
tree tmp;
tree size;
if (rank == 0)
{
tmp = null_pointer_node;
- tmp = fold_build2 (MODIFY_EXPR, type, dest, tmp);
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR, type, dest, tmp);
gfc_add_expr_to_block (&block, tmp);
null_data = gfc_finish_block (&block);
gfc_init_block (&block);
- size = TYPE_SIZE_UNIT (type);
+ size = TYPE_SIZE_UNIT (TREE_TYPE (type));
if (!no_malloc)
{
tmp = gfc_call_malloc (&block, type, size);
- tmp = fold_build2 (MODIFY_EXPR, void_type_node, dest,
- fold_convert (type, tmp));
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
+ dest, fold_convert (type, tmp));
gfc_add_expr_to_block (&block, tmp);
}
- tmp = built_in_decls[BUILT_IN_MEMCPY];
+ tmp = builtin_decl_explicit (BUILT_IN_MEMCPY);
tmp = build_call_expr_loc (input_location, tmp, 3,
dest, src, size);
}
nelems = get_full_array_size (&block, src, rank);
tmp = fold_convert (gfc_array_index_type,
TYPE_SIZE_UNIT (gfc_get_element_type (type)));
- size = fold_build2 (MULT_EXPR, gfc_array_index_type, nelems, tmp);
+ size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ nelems, tmp);
if (!no_malloc)
{
tmp = TREE_TYPE (gfc_conv_descriptor_data_get (src));
/* We know the temporary and the value will be the same length,
so can use memcpy. */
- tmp = built_in_decls[BUILT_IN_MEMCPY];
+ tmp = builtin_decl_explicit (BUILT_IN_MEMCPY);
tmp = build_call_expr_loc (input_location,
tmp, 3, gfc_conv_descriptor_data_get (dest),
gfc_conv_descriptor_data_get (src), size);
null_cond = gfc_conv_descriptor_data_get (src);
null_cond = convert (pvoid_type_node, null_cond);
- null_cond = fold_build2 (NE_EXPR, boolean_type_node,
- null_cond, null_pointer_node);
+ null_cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ null_cond, null_pointer_node);
return build3_v (COND_EXPR, null_cond, tmp, null_data);
}
+/* Allocate the destination and copy an allocatable entity from SRC to
+   DEST: thin wrapper around duplicate_allocatable with
+   no_malloc == false, so the malloc is performed before the copy.  */
tree
gfc_duplicate_allocatable (tree dest, tree src, tree type, int rank)
{
- return duplicate_allocatable(dest, src, type, rank, false);
+ return duplicate_allocatable (dest, src, type, rank, false);
}
+/* Copy an allocatable entity's payload from SRC to an already-allocated
+   DEST: thin wrapper around duplicate_allocatable with
+   no_malloc == true, so only the copy is done (no allocation).  */
tree
gfc_copy_allocatable_data (tree dest, tree src, tree type, int rank)
{
- return duplicate_allocatable(dest, src, type, rank, true);
+ return duplicate_allocatable (dest, src, type, rank, true);
}
is a full array reference, we only need the descriptor
information from dimension = rank. */
tmp = get_full_array_size (&fnblock, decl, rank);
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- tmp, gfc_index_one_node);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, tmp,
+ gfc_index_one_node);
null_cond = gfc_conv_descriptor_data_get (decl);
- null_cond = fold_build2 (NE_EXPR, boolean_type_node, null_cond,
- build_int_cst (TREE_TYPE (null_cond), 0));
+ null_cond = fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node, null_cond,
+ build_int_cst (TREE_TYPE (null_cond), 0));
}
else
{
act on a chain of components. */
for (c = der_type->components; c; c = c->next)
{
- bool cmp_has_alloc_comps = (c->ts.type == BT_DERIVED)
+ bool cmp_has_alloc_comps = (c->ts.type == BT_DERIVED
+ || c->ts.type == BT_CLASS)
&& c->ts.u.derived->attr.alloc_comp;
cdecl = c->backend_decl;
ctype = TREE_TYPE (cdecl);
switch (purpose)
{
case DEALLOCATE_ALLOC_COMP:
- /* Do not deallocate the components of ultimate pointer
- components. */
if (cmp_has_alloc_comps && !c->attr.pointer)
{
- comp = fold_build3 (COMPONENT_REF, ctype,
- decl, cdecl, NULL_TREE);
+ /* Do not deallocate the components of ultimate pointer
+ components. */
+ comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
+ decl, cdecl, NULL_TREE);
rank = c->as ? c->as->rank : 0;
tmp = structure_alloc_comps (c->ts.u.derived, comp, NULL_TREE,
rank, purpose);
gfc_add_expr_to_block (&fnblock, tmp);
}
- if (c->attr.allocatable && c->attr.dimension)
+ if (c->attr.allocatable
+ && (c->attr.dimension || c->attr.codimension))
{
- comp = fold_build3 (COMPONENT_REF, ctype,
- decl, cdecl, NULL_TREE);
+ comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
+ decl, cdecl, NULL_TREE);
tmp = gfc_trans_dealloc_allocated (comp);
gfc_add_expr_to_block (&fnblock, tmp);
}
else if (c->attr.allocatable)
{
/* Allocatable scalar components. */
- comp = fold_build3 (COMPONENT_REF, ctype, decl, cdecl, NULL_TREE);
+ comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
+ decl, cdecl, NULL_TREE);
- tmp = gfc_deallocate_with_status (comp, NULL_TREE, true, NULL);
+ tmp = gfc_deallocate_scalar_with_status (comp, NULL, true, NULL,
+ c->ts);
gfc_add_expr_to_block (&fnblock, tmp);
- tmp = fold_build2 (MODIFY_EXPR, void_type_node, comp,
- build_int_cst (TREE_TYPE (comp), 0));
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR,
+ void_type_node, comp,
+ build_int_cst (TREE_TYPE (comp), 0));
gfc_add_expr_to_block (&fnblock, tmp);
}
else if (c->ts.type == BT_CLASS && CLASS_DATA (c)->attr.allocatable)
{
/* Allocatable scalar CLASS components. */
- comp = fold_build3 (COMPONENT_REF, ctype, decl, cdecl, NULL_TREE);
+ comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
+ decl, cdecl, NULL_TREE);
- /* Add reference to '$data' component. */
+ /* Add reference to '_data' component. */
tmp = CLASS_DATA (c)->backend_decl;
- comp = fold_build3 (COMPONENT_REF, TREE_TYPE (tmp),
- comp, tmp, NULL_TREE);
+ comp = fold_build3_loc (input_location, COMPONENT_REF,
+ TREE_TYPE (tmp), comp, tmp, NULL_TREE);
- tmp = gfc_deallocate_with_status (comp, NULL_TREE, true, NULL);
+ tmp = gfc_deallocate_scalar_with_status (comp, NULL, true, NULL,
+ CLASS_DATA (c)->ts);
gfc_add_expr_to_block (&fnblock, tmp);
- tmp = fold_build2 (MODIFY_EXPR, void_type_node, comp,
- build_int_cst (TREE_TYPE (comp), 0));
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR,
+ void_type_node, comp,
+ build_int_cst (TREE_TYPE (comp), 0));
gfc_add_expr_to_block (&fnblock, tmp);
}
break;
case NULLIFY_ALLOC_COMP:
if (c->attr.pointer)
continue;
- else if (c->attr.allocatable && c->attr.dimension)
+ else if (c->attr.allocatable
+ && (c->attr.dimension|| c->attr.codimension))
{
- comp = fold_build3 (COMPONENT_REF, ctype,
- decl, cdecl, NULL_TREE);
+ comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
+ decl, cdecl, NULL_TREE);
gfc_conv_descriptor_data_set (&fnblock, comp, null_pointer_node);
}
else if (c->attr.allocatable)
{
/* Allocatable scalar components. */
- comp = fold_build3 (COMPONENT_REF, ctype, decl, cdecl, NULL_TREE);
- tmp = fold_build2 (MODIFY_EXPR, void_type_node, comp,
- build_int_cst (TREE_TYPE (comp), 0));
+ comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
+ decl, cdecl, NULL_TREE);
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR,
+ void_type_node, comp,
+ build_int_cst (TREE_TYPE (comp), 0));
gfc_add_expr_to_block (&fnblock, tmp);
}
else if (c->ts.type == BT_CLASS && CLASS_DATA (c)->attr.allocatable)
{
/* Allocatable scalar CLASS components. */
- comp = fold_build3 (COMPONENT_REF, ctype, decl, cdecl, NULL_TREE);
- /* Add reference to '$data' component. */
+ comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
+ decl, cdecl, NULL_TREE);
+ /* Add reference to '_data' component. */
tmp = CLASS_DATA (c)->backend_decl;
- comp = fold_build3 (COMPONENT_REF, TREE_TYPE (tmp),
- comp, tmp, NULL_TREE);
- tmp = fold_build2 (MODIFY_EXPR, void_type_node, comp,
- build_int_cst (TREE_TYPE (comp), 0));
+ comp = fold_build3_loc (input_location, COMPONENT_REF,
+ TREE_TYPE (tmp), comp, tmp, NULL_TREE);
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR,
+ void_type_node, comp,
+ build_int_cst (TREE_TYPE (comp), 0));
gfc_add_expr_to_block (&fnblock, tmp);
}
else if (cmp_has_alloc_comps)
{
- comp = fold_build3 (COMPONENT_REF, ctype,
- decl, cdecl, NULL_TREE);
+ comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
+ decl, cdecl, NULL_TREE);
rank = c->as ? c->as->rank : 0;
tmp = structure_alloc_comps (c->ts.u.derived, comp, NULL_TREE,
rank, purpose);
continue;
/* We need source and destination components. */
- comp = fold_build3 (COMPONENT_REF, ctype, decl, cdecl, NULL_TREE);
- dcmp = fold_build3 (COMPONENT_REF, ctype, dest, cdecl, NULL_TREE);
+ comp = fold_build3_loc (input_location, COMPONENT_REF, ctype, decl,
+ cdecl, NULL_TREE);
+ dcmp = fold_build3_loc (input_location, COMPONENT_REF, ctype, dest,
+ cdecl, NULL_TREE);
dcmp = fold_convert (TREE_TYPE (comp), dcmp);
if (c->attr.allocatable && !cmp_has_alloc_comps)
{
rank = c->as ? c->as->rank : 0;
- tmp = gfc_duplicate_allocatable(dcmp, comp, ctype, rank);
+ tmp = gfc_duplicate_allocatable (dcmp, comp, ctype, rank);
gfc_add_expr_to_block (&fnblock, tmp);
}
}
+/* Returns the value of LBOUND for an expression. This could be broken out
+ from gfc_conv_intrinsic_bound but this seemed to be simpler. This is
+ called by gfc_alloc_allocatable_for_assignment. */
+static tree
+get_std_lbound (gfc_expr *expr, tree desc, int dim, bool assumed_size)
+{
+ tree lbound;
+ tree ubound;
+ tree stride;
+ tree cond, cond1, cond3, cond4;
+ tree tmp;
+ gfc_ref *ref;
+
+ /* Array descriptor: return the stored lower bound, except that an
+ empty extent (non-negative stride with ubound < lbound) yields
+ LBOUND = 1. The last dimension of an assumed-size array always
+ uses the stored bound. */
+ if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)))
+ {
+ tmp = gfc_rank_cst[dim];
+ lbound = gfc_conv_descriptor_lbound_get (desc, tmp);
+ ubound = gfc_conv_descriptor_ubound_get (desc, tmp);
+ stride = gfc_conv_descriptor_stride_get (desc, tmp);
+ cond1 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ ubound, lbound);
+ cond3 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ stride, gfc_index_zero_node);
+ cond3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node, cond3, cond1);
+ cond4 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ stride, gfc_index_zero_node);
+ if (assumed_size)
+ cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ tmp, build_int_cst (gfc_array_index_type,
+ expr->rank - 1));
+ else
+ cond = boolean_false_node;
+
+ cond1 = fold_build2_loc (input_location, TRUTH_OR_EXPR,
+ boolean_type_node, cond3, cond4);
+ cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
+ boolean_type_node, cond, cond1);
+
+ return fold_build3_loc (input_location, COND_EXPR,
+ gfc_array_index_type, cond,
+ lbound, gfc_index_one_node);
+ }
+ else if (expr->expr_type == EXPR_VARIABLE)
+ {
+ tmp = TREE_TYPE (expr->symtree->n.sym->backend_decl);
+ /* Walk to the type of the last full-array component reference,
+ if any, so the bound comes from the component's array type. */
+ for (ref = expr->ref; ref; ref = ref->next)
+ {
+ if (ref->type == REF_COMPONENT
+ && ref->u.c.component->as
+ && ref->next
+ && ref->next->u.ar.type == AR_FULL)
+ tmp = TREE_TYPE (ref->u.c.component->backend_decl);
+ }
+ return GFC_TYPE_ARRAY_LBOUND(tmp, dim);
+ }
+ else if (expr->expr_type == EXPR_FUNCTION)
+ {
+ /* A conversion function, so use the argument. */
+ expr = expr->value.function.actual->expr;
+ if (expr->expr_type != EXPR_VARIABLE)
+ return gfc_index_one_node;
+ desc = TREE_TYPE (expr->symtree->n.sym->backend_decl);
+ return get_std_lbound (expr, desc, dim, assumed_size);
+ }
+
+ /* Fall back to the default lower bound of 1. */
+ return gfc_index_one_node;
+}
+
+
+/* Returns true if an expression represents an lhs that can be reallocated
+ on assignment. */
+
+bool
+gfc_is_reallocatable_lhs (gfc_expr *expr)
+{
+ gfc_ref * ref;
+
+ /* A bare symbol with no reference cannot be a full-array lhs. */
+ if (!expr->ref)
+ return false;
+
+ /* An allocatable variable. */
+ if (expr->symtree->n.sym->attr.allocatable
+ && expr->ref
+ && expr->ref->type == REF_ARRAY
+ && expr->ref->u.ar.type == AR_FULL)
+ return true;
+
+ /* All that can be left are allocatable components. */
+ if ((expr->symtree->n.sym->ts.type != BT_DERIVED
+ && expr->symtree->n.sym->ts.type != BT_CLASS)
+ || !expr->symtree->n.sym->ts.u.derived->attr.alloc_comp)
+ return false;
+
+ /* Find a component ref followed by a trailing array reference. */
+ for (ref = expr->ref; ref; ref = ref->next)
+ if (ref->next
+ && ref->type == REF_COMPONENT
+ && ref->next->type == REF_ARRAY
+ && !ref->next->next)
+ break;
+
+ if (!ref)
+ return false;
+
+ /* Return true if valid reallocatable lhs. */
+ if (ref->u.c.component->attr.allocatable
+ && ref->next->u.ar.type == AR_FULL)
+ return true;
+
+ return false;
+}
+
+
+/* Allocate the lhs of an assignment to an allocatable array, otherwise
+ reallocate it. Returns the generated code block, or NULL_TREE if no
+ (re)allocation is required or possible. */
+
+tree
+gfc_alloc_allocatable_for_assignment (gfc_loopinfo *loop,
+ gfc_expr *expr1,
+ gfc_expr *expr2)
+{
+ stmtblock_t realloc_block;
+ stmtblock_t alloc_block;
+ stmtblock_t fblock;
+ gfc_ss *rss;
+ gfc_ss *lss;
+ tree realloc_expr;
+ tree alloc_expr;
+ tree size1;
+ tree size2;
+ tree array1;
+ tree cond;
+ tree tmp;
+ tree tmp2;
+ tree lbound;
+ tree ubound;
+ tree desc;
+ tree desc2;
+ tree offset;
+ tree jump_label1;
+ tree jump_label2;
+ tree neq_size;
+ tree lbd;
+ int n;
+ int dim;
+ gfc_array_spec * as;
+
+ /* x = f(...) with x allocatable. In this case, expr1 is the rhs.
+ Find the lhs expression in the loop chain and set expr1 and
+ expr2 accordingly. */
+ if (expr1->expr_type == EXPR_FUNCTION && expr2 == NULL)
+ {
+ expr2 = expr1;
+ /* Find the ss for the lhs. */
+ lss = loop->ss;
+ for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
+ if (lss->expr && lss->expr->expr_type == EXPR_VARIABLE)
+ break;
+ if (lss == gfc_ss_terminator)
+ return NULL_TREE;
+ expr1 = lss->expr;
+ }
+
+ /* Bail out if this is not a valid allocate on assignment. */
+ if (!gfc_is_reallocatable_lhs (expr1)
+ || (expr2 && !expr2->rank))
+ return NULL_TREE;
+
+ /* Find the ss for the lhs. */
+ lss = loop->ss;
+ for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
+ if (lss->expr == expr1)
+ break;
+
+ if (lss == gfc_ss_terminator)
+ return NULL_TREE;
+
+ /* Find an ss for the rhs. For operator expressions, we see the
+ ss's for the operands. Any one of these will do. */
+ rss = loop->ss;
+ for (; rss && rss != gfc_ss_terminator; rss = rss->loop_chain)
+ if (rss->expr != expr1 && rss != loop->temp_ss)
+ break;
+
+ if (expr2 && rss == gfc_ss_terminator)
+ return NULL_TREE;
+
+ gfc_start_block (&fblock);
+
+ /* Since the lhs is allocatable, this must be a descriptor type.
+ Get the data and array size. */
+ desc = lss->data.info.descriptor;
+ gcc_assert (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)));
+ array1 = gfc_conv_descriptor_data_get (desc);
+
+ /* 7.4.1.3 "If variable is an allocated allocatable variable, it is
+ deallocated if expr is an array of different shape or any of the
+ corresponding length type parameter values of variable and expr
+ differ." This assures F95 compatibility. */
+ jump_label1 = gfc_build_label_decl (NULL_TREE);
+ jump_label2 = gfc_build_label_decl (NULL_TREE);
+
+ /* Allocate if data is NULL. */
+ cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ array1, build_int_cst (TREE_TYPE (array1), 0));
+ tmp = build3_v (COND_EXPR, cond,
+ build1_v (GOTO_EXPR, jump_label1),
+ build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ /* Get arrayspec if expr is a full array. */
+ if (expr2 && expr2->expr_type == EXPR_FUNCTION
+ && expr2->value.function.isym
+ && expr2->value.function.isym->conversion)
+ {
+ /* For conversion functions, take the arg. */
+ gfc_expr *arg = expr2->value.function.actual->expr;
+ as = gfc_get_full_arrayspec_from_expr (arg);
+ }
+ else if (expr2)
+ as = gfc_get_full_arrayspec_from_expr (expr2);
+ else
+ as = NULL;
+
+ /* If the lhs shape is not the same as the rhs jump to setting the
+ bounds and doing the reallocation....... */
+ for (n = 0; n < expr1->rank; n++)
+ {
+ /* Check the shape. */
+ lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
+ ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[n]);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[n], loop->from[n]);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ tmp, lbound);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ tmp, ubound);
+ cond = fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node,
+ tmp, gfc_index_zero_node);
+ tmp = build3_v (COND_EXPR, cond,
+ build1_v (GOTO_EXPR, jump_label1),
+ build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&fblock, tmp);
+ }
+
+ /* ....else jump past the (re)alloc code. */
+ tmp = build1_v (GOTO_EXPR, jump_label2);
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ /* Add the label to start automatic (re)allocation. */
+ tmp = build1_v (LABEL_EXPR, jump_label1);
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ size1 = gfc_conv_descriptor_size (desc, expr1->rank);
+
+ /* Get the rhs size. Fix both sizes. */
+ if (expr2)
+ desc2 = rss->data.info.descriptor;
+ else
+ desc2 = NULL_TREE;
+ size2 = gfc_index_one_node;
+ /* NOTE(review): the guards above only dereference expr2 conditionally,
+ which suggests expr2 may be NULL on this path, yet this loop (and the
+ bounds loop below) read expr2->rank unconditionally — confirm callers
+ guarantee a non-NULL expr2 by this point. */
+ for (n = 0; n < expr2->rank; n++)
+ {
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[n], loop->from[n]);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ tmp, gfc_index_one_node);
+ size2 = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ tmp, size2);
+ }
+
+ size1 = gfc_evaluate_now (size1, &fblock);
+ size2 = gfc_evaluate_now (size2, &fblock);
+
+ cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ size1, size2);
+ neq_size = gfc_evaluate_now (cond, &fblock);
+
+
+ /* Now modify the lhs descriptor and the associated scalarizer
+ variables. F2003 7.4.1.3: "If variable is or becomes an
+ unallocated allocatable variable, then it is allocated with each
+ deferred type parameter equal to the corresponding type parameters
+ of expr , with the shape of expr , and with each lower bound equal
+ to the corresponding element of LBOUND(expr)."
+ Reuse size1 to keep a dimension-by-dimension track of the
+ stride of the new array. */
+ size1 = gfc_index_one_node;
+ offset = gfc_index_zero_node;
+
+ for (n = 0; n < expr2->rank; n++)
+ {
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[n], loop->from[n]);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ tmp, gfc_index_one_node);
+
+ lbound = gfc_index_one_node;
+ ubound = tmp;
+
+ if (as)
+ {
+ /* Shift the bounds so the lower bound matches LBOUND of the
+ rhs, per F2003 7.4.1.3 quoted above. */
+ lbd = get_std_lbound (expr2, desc2, n,
+ as->type == AS_ASSUMED_SIZE);
+ ubound = fold_build2_loc (input_location,
+ MINUS_EXPR,
+ gfc_array_index_type,
+ ubound, lbound);
+ ubound = fold_build2_loc (input_location,
+ PLUS_EXPR,
+ gfc_array_index_type,
+ ubound, lbd);
+ lbound = lbd;
+ }
+
+ gfc_conv_descriptor_lbound_set (&fblock, desc,
+ gfc_rank_cst[n],
+ lbound);
+ gfc_conv_descriptor_ubound_set (&fblock, desc,
+ gfc_rank_cst[n],
+ ubound);
+ gfc_conv_descriptor_stride_set (&fblock, desc,
+ gfc_rank_cst[n],
+ size1);
+ lbound = gfc_conv_descriptor_lbound_get (desc,
+ gfc_rank_cst[n]);
+ tmp2 = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ lbound, size1);
+ offset = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ offset, tmp2);
+ size1 = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ tmp, size1);
+ }
+
+ /* Set the lhs descriptor and scalarizer offsets. For rank > 1,
+ the array offset is saved and the info.offset is used for a
+ running offset. Use the saved_offset instead. */
+ tmp = gfc_conv_descriptor_offset (desc);
+ gfc_add_modify (&fblock, tmp, offset);
+ if (lss->data.info.saved_offset
+ && TREE_CODE (lss->data.info.saved_offset) == VAR_DECL)
+ gfc_add_modify (&fblock, lss->data.info.saved_offset, tmp);
+
+ /* Now set the deltas for the lhs. */
+ for (n = 0; n < expr1->rank; n++)
+ {
+ tmp = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
+ dim = lss->data.info.dim[n];
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, tmp,
+ loop->from[dim]);
+ if (lss->data.info.delta[dim]
+ && TREE_CODE (lss->data.info.delta[dim]) == VAR_DECL)
+ gfc_add_modify (&fblock, lss->data.info.delta[dim], tmp);
+ }
+
+ /* Get the new lhs size in bytes. */
+ if (expr1->ts.type == BT_CHARACTER && expr1->ts.deferred)
+ {
+ /* Deferred character length: copy the rhs length to the lhs. */
+ tmp = expr2->ts.u.cl->backend_decl;
+ gcc_assert (expr1->ts.u.cl->backend_decl);
+ tmp = fold_convert (TREE_TYPE (expr1->ts.u.cl->backend_decl), tmp);
+ gfc_add_modify (&fblock, expr1->ts.u.cl->backend_decl, tmp);
+ }
+ else if (expr1->ts.type == BT_CHARACTER && expr1->ts.u.cl->backend_decl)
+ {
+ tmp = TYPE_SIZE_UNIT (TREE_TYPE (gfc_typenode_for_spec (&expr1->ts)));
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, tmp,
+ expr1->ts.u.cl->backend_decl);
+ }
+ else
+ tmp = TYPE_SIZE_UNIT (gfc_typenode_for_spec (&expr1->ts));
+ tmp = fold_convert (gfc_array_index_type, tmp);
+ size2 = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ tmp, size2);
+ size2 = fold_convert (size_type_node, size2);
+ size2 = gfc_evaluate_now (size2, &fblock);
+
+ /* Realloc expression. Note that the scalarizer uses desc.data
+ in the array reference - (*desc.data)[<element>].
+ NOTE(review): the realloc (and malloc below) result is not checked
+ for NULL — confirm that a hard crash on allocation failure is the
+ intended policy here. */
+ gfc_init_block (&realloc_block);
+ tmp = build_call_expr_loc (input_location,
+ builtin_decl_explicit (BUILT_IN_REALLOC), 2,
+ fold_convert (pvoid_type_node, array1),
+ size2);
+ gfc_conv_descriptor_data_set (&realloc_block,
+ desc, tmp);
+ realloc_expr = gfc_finish_block (&realloc_block);
+
+ /* Only reallocate if sizes are different. */
+ tmp = build3_v (COND_EXPR, neq_size, realloc_expr,
+ build_empty_stmt (input_location));
+ realloc_expr = tmp;
+
+
+ /* Malloc expression. */
+ gfc_init_block (&alloc_block);
+ tmp = build_call_expr_loc (input_location,
+ builtin_decl_explicit (BUILT_IN_MALLOC),
+ 1, size2);
+ gfc_conv_descriptor_data_set (&alloc_block,
+ desc, tmp);
+ /* A freshly malloc'd descriptor needs its dtype set; the realloc
+ branch keeps the existing one. */
+ tmp = gfc_conv_descriptor_dtype (desc);
+ gfc_add_modify (&alloc_block, tmp, gfc_get_dtype (TREE_TYPE (desc)));
+ alloc_expr = gfc_finish_block (&alloc_block);
+
+ /* Malloc if not allocated; realloc otherwise. */
+ tmp = build_int_cst (TREE_TYPE (array1), 0);
+ cond = fold_build2_loc (input_location, EQ_EXPR,
+ boolean_type_node,
+ array1, tmp);
+ tmp = build3_v (COND_EXPR, cond, alloc_expr, realloc_expr);
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ /* Make sure that the scalarizer data pointer is updated. */
+ if (lss->data.info.data
+ && TREE_CODE (lss->data.info.data) == VAR_DECL)
+ {
+ tmp = gfc_conv_descriptor_data_get (desc);
+ gfc_add_modify (&fblock, lss->data.info.data, tmp);
+ }
+
+ /* Add the exit label. */
+ tmp = build1_v (LABEL_EXPR, jump_label2);
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ return gfc_finish_block (&fblock);
+}
+
+
/* NULLIFY an allocatable/pointer array on function entry, free it on exit.
Do likewise, recursively if necessary, with the allocatable components of
derived types. */
int rank;
bool sym_has_alloc_comp;
- sym_has_alloc_comp = (sym->ts.type == BT_DERIVED)
+ sym_has_alloc_comp = (sym->ts.type == BT_DERIVED
+ || sym->ts.type == BT_CLASS)
&& sym->ts.u.derived->attr.alloc_comp;
/* Make sure the frontend gets these right. */
if (!(sym->attr.pointer || sym->attr.allocatable || sym_has_alloc_comp))
- fatal_error ("Possible frontend bug: Deferred array size without pointer, "
+ fatal_error ("Possible front-end bug: Deferred array size without pointer, "
"allocatable attribute or derived type without allocatable "
"components.");
+ gfc_save_backend_locus (&loc);
+ gfc_set_backend_locus (&sym->declared_at);
gfc_init_block (&init);
gcc_assert (TREE_CODE (sym->backend_decl) == VAR_DECL
if (sym->attr.dummy || sym->attr.use_assoc || sym->attr.result)
{
gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
+ gfc_restore_backend_locus (&loc);
return;
}
- gfc_get_backend_locus (&loc);
- gfc_set_backend_locus (&sym->declared_at);
descriptor = sym->backend_decl;
/* Although static, derived types with default initializers and
gfc_trans_static_array_pointer (sym);
gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
+ gfc_restore_backend_locus (&loc);
return;
}
if (GFC_DESCRIPTOR_TYPE_P (type) && !sym->attr.save)
gfc_conv_descriptor_data_set (&init, descriptor, null_pointer_node);
+ gfc_restore_backend_locus (&loc);
gfc_init_block (&cleanup);
- gfc_set_backend_locus (&loc);
/* Allocatable arrays need to be freed when they go out of scope.
The allocatable components of pointers must not be touched. */
gfc_add_expr_to_block (&cleanup, tmp);
}
- if (sym->attr.allocatable && sym->attr.dimension
+ if (sym->attr.allocatable && (sym->attr.dimension || sym->attr.codimension)
&& !sym->attr.save && !sym->attr.result)
{
tmp = gfc_trans_dealloc_allocated (sym->backend_decl);
gfc_walk_variable_expr (gfc_ss * ss, gfc_expr * expr)
{
gfc_ref *ref;
- gfc_array_ref *ar;
- gfc_ss *newss;
- int n;
for (ref = expr->ref; ref; ref = ref->next)
if (ref->type == REF_ARRAY && ref->u.ar.type != AR_ELEMENT)
break;
+ /* Delegate to the new, reusable walker from the first non-elemental
+ array reference onwards. */
+ return gfc_walk_array_ref (ss, expr, ref);
+}
+
+
+gfc_ss *
+gfc_walk_array_ref (gfc_ss * ss, gfc_expr * expr, gfc_ref * ref)
+{
+ gfc_array_ref *ar;
+ gfc_ss *newss;
+ int n;
+
for (; ref; ref = ref->next)
{
if (ref->type == REF_SUBSTRING)
{
- newss = gfc_get_ss ();
- newss->type = GFC_SS_SCALAR;
- newss->expr = ref->u.ss.start;
- newss->next = ss;
- ss = newss;
-
- newss = gfc_get_ss ();
- newss->type = GFC_SS_SCALAR;
- newss->expr = ref->u.ss.end;
- newss->next = ss;
- ss = newss;
+ ss = gfc_get_scalar_ss (ss, ref->u.ss.start);
+ ss = gfc_get_scalar_ss (ss, ref->u.ss.end);
}
/* We're only interested in array sections from now on. */
ar = &ref->u.ar;
- if (ar->as->rank == 0)
- {
- /* Scalar coarray. */
- continue;
- }
-
switch (ar->type)
{
case AR_ELEMENT:
- for (n = 0; n < ar->dimen; n++)
- {
- newss = gfc_get_ss ();
- newss->type = GFC_SS_SCALAR;
- newss->expr = ar->start[n];
- newss->next = ss;
- ss = newss;
- }
+ for (n = ar->dimen + ar->codimen - 1; n >= 0; n--)
+ ss = gfc_get_scalar_ss (ss, ar->start[n]);
break;
case AR_FULL:
- newss = gfc_get_ss ();
- newss->type = GFC_SS_SECTION;
- newss->expr = expr;
- newss->next = ss;
- newss->data.info.dimen = ar->as->rank;
+ newss = gfc_get_array_ss (ss, expr, ar->as->rank, GFC_SS_SECTION);
newss->data.info.ref = ref;
/* Make sure array is the same as array(:,:), this way
ar->dimen = ar->as->rank;
for (n = 0; n < ar->dimen; n++)
{
- newss->data.info.dim[n] = n;
ar->dimen_type[n] = DIMEN_RANGE;
gcc_assert (ar->start[n] == NULL);
break;
case AR_SECTION:
- newss = gfc_get_ss ();
- newss->type = GFC_SS_SECTION;
- newss->expr = expr;
- newss->next = ss;
- newss->data.info.dimen = 0;
+ newss = gfc_get_array_ss (ss, expr, 0, GFC_SS_SECTION);
newss->data.info.ref = ref;
- /* We add SS chains for all the subscripts in the section. */
+ /* We add SS chains for all the subscripts in the section. */
for (n = 0; n < ar->dimen; n++)
{
gfc_ss *indexss;
case DIMEN_ELEMENT:
/* Add SS for elemental (scalar) subscripts. */
gcc_assert (ar->start[n]);
- indexss = gfc_get_ss ();
- indexss->type = GFC_SS_SCALAR;
- indexss->expr = ar->start[n];
- indexss->next = gfc_ss_terminator;
+ indexss = gfc_get_scalar_ss (gfc_ss_terminator, ar->start[n]);
indexss->loop_chain = gfc_ss_terminator;
newss->data.info.subscript[n] = indexss;
break;
case DIMEN_VECTOR:
/* Create a GFC_SS_VECTOR index in which we can store
the vector's descriptor. */
- indexss = gfc_get_ss ();
- indexss->type = GFC_SS_VECTOR;
- indexss->expr = ar->start[n];
- indexss->next = gfc_ss_terminator;
+ indexss = gfc_get_array_ss (gfc_ss_terminator, ar->start[n],
+ 1, GFC_SS_VECTOR);
indexss->loop_chain = gfc_ss_terminator;
newss->data.info.subscript[n] = indexss;
newss->data.info.dim[newss->data.info.dimen] = n;
gcc_unreachable ();
}
}
- /* We should have at least one non-elemental dimension. */
- gcc_assert (newss->data.info.dimen > 0);
+ /* We should have at least one non-elemental dimension,
+ unless we are creating a descriptor for a (scalar) coarray. */
+ gcc_assert (newss->data.info.dimen > 0
+ || newss->data.info.ref->u.ar.as->corank > 0);
ss = newss;
break;
{
gfc_ss *head;
gfc_ss *head2;
- gfc_ss *newss;
head = gfc_walk_subexpr (ss, expr->value.op.op1);
if (expr->value.op.op2 == NULL)
/* One of the operands needs scalarization, the other is scalar.
Create a gfc_ss for the scalar expression. */
- newss = gfc_get_ss ();
- newss->type = GFC_SS_SCALAR;
if (head == ss)
{
/* First operand is scalar. We build the chain in reverse order, so
head = head->next;
/* Check we haven't somehow broken the chain. */
gcc_assert (head);
- newss->next = ss;
- head->next = newss;
- newss->expr = expr->value.op.op1;
+ head->next = gfc_get_scalar_ss (ss, expr->value.op.op1);
}
else /* head2 == head */
{
gcc_assert (head2 == head);
/* Second operand is scalar. */
- newss->next = head2;
- head2 = newss;
- newss->expr = expr->value.op.op2;
+ head2 = gfc_get_scalar_ss (head2, expr->value.op.op2);
}
return head2;
if (newss == head)
{
/* Scalar argument. */
- newss = gfc_get_ss ();
+ gcc_assert (type == GFC_SS_SCALAR || type == GFC_SS_REFERENCE);
+ newss = gfc_get_scalar_ss (head, arg->expr);
newss->type = type;
- newss->expr = arg->expr;
- newss->next = head;
}
else
scalar = 0;
static gfc_ss *
gfc_walk_function_expr (gfc_ss * ss, gfc_expr * expr)
{
- gfc_ss *newss;
gfc_intrinsic_sym *isym;
gfc_symbol *sym;
gfc_component *comp = NULL;
gfc_is_proc_ptr_comp (expr, &comp);
if ((!comp && gfc_return_by_reference (sym) && sym->result->attr.dimension)
|| (comp && comp->attr.dimension))
- {
- newss = gfc_get_ss ();
- newss->type = GFC_SS_FUNCTION;
- newss->expr = expr;
- newss->next = ss;
- newss->data.info.dimen = expr->rank;
- return newss;
- }
+ return gfc_get_array_ss (ss, expr, expr->rank, GFC_SS_FUNCTION);
/* Walk the parameters of an elemental function. For now we always pass
by reference. */
static gfc_ss *
gfc_walk_array_constructor (gfc_ss * ss, gfc_expr * expr)
{
- gfc_ss *newss;
- int n;
-
- newss = gfc_get_ss ();
- newss->type = GFC_SS_CONSTRUCTOR;
- newss->expr = expr;
- newss->next = ss;
- newss->data.info.dimen = expr->rank;
- for (n = 0; n < expr->rank; n++)
- newss->data.info.dim[n] = n;
-
- return newss;
+ /* gfc_get_array_ss presumably sets up dimen and the identity dim[]
+ mapping that the removed code built by hand — matching the
+ similar replacements elsewhere in this patch. */
+ return gfc_get_array_ss (ss, expr, expr->rank, GFC_SS_CONSTRUCTOR);
}
/* Walk an expression. Add walked expressions to the head of the SS chain.
A wholly scalar expression will not be added. */
-static gfc_ss *
+gfc_ss *
gfc_walk_subexpr (gfc_ss * ss, gfc_expr * expr)
{
gfc_ss *head;