/* Array translation routines
- Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+ 2011
Free Software Foundation, Inc.
Contributed by Paul Brook <paul@nowt.org>
and Steven Bosscher <s.bosscher@student.tudelft.nl>
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "gimple.h"
#include "diagnostic-core.h" /* For internal_error/fatal_error. */
#include "flags.h"
#include "gfortran.h"
#include "trans-const.h"
#include "dependency.h"
-static gfc_ss *gfc_walk_subexpr (gfc_ss *, gfc_expr *);
static bool gfc_get_array_constructor_size (mpz_t *, gfc_constructor_base);
/* The contents of this structure aren't actually used, just the address. */
#define OFFSET_FIELD 1
#define DTYPE_FIELD 2
#define DIMENSION_FIELD 3
+#define CAF_TOKEN_FIELD 4
#define STRIDE_SUBFIELD 0
#define LBOUND_SUBFIELD 1
return tmp;
}
+
+/* Return the caf_token field of the array descriptor DESC.  The token
+   is an opaque pointer (prvoid_type_node) reserved for the coarray
+   runtime; it is only present for allocatable coarrays compiled with
+   -fcoarray=lib, which the assertions below enforce.  */
+
+tree
+gfc_conv_descriptor_token (tree desc)
+{
+  tree type;
+  tree field;
+
+  type = TREE_TYPE (desc);
+  gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
+  gcc_assert (GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ALLOCATABLE);
+  gcc_assert (gfc_option.coarray == GFC_FCOARRAY_LIB);
+  /* The token lives at field index CAF_TOKEN_FIELD of the descriptor.  */
+  field = gfc_advance_chain (TYPE_FIELDS (type), CAF_TOKEN_FIELD);
+  gcc_assert (field != NULL_TREE && TREE_TYPE (field) == prvoid_type_node);
+
+  return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
+ desc, field, NULL_TREE);
+}
+
+
static tree
gfc_conv_descriptor_stride (tree desc, tree dim)
{
#undef OFFSET_FIELD
#undef DTYPE_FIELD
#undef DIMENSION_FIELD
+#undef CAF_TOKEN_FIELD
#undef STRIDE_SUBFIELD
#undef LBOUND_SUBFIELD
#undef UBOUND_SUBFIELD
/* Free a gfc_ss chain. */
-static void
+void
gfc_free_ss_chain (gfc_ss * ss)
{
gfc_ss *next;
break;
}
- gfc_free (ss);
+ free (ss);
+}
+
+
+/* Create and initialize an array-type gfc_ss struct of kind TYPE for
+   scalarizing EXPR over DIMEN dimensions, chained in front of NEXT.
+   The dimension mapping is initialized to the identity
+   (dim[i] == i).  */
+
+gfc_ss *
+gfc_get_array_ss (gfc_ss *next, gfc_expr *expr, int dimen, gfc_ss_type type)
+{
+  gfc_ss *ss;
+  gfc_ss_info *info;
+  int i;
+
+  ss = gfc_get_ss ();
+  ss->next = next;
+  ss->type = type;
+  ss->expr = expr;
+  info = &ss->data.info;
+  info->dimen = dimen;
+  for (i = 0; i < info->dimen; i++)
+    info->dim[i] = i;
+
+  return ss;
+}
+
+
+/* Create and initialize a GFC_SS_TEMP gfc_ss struct describing a
+   temporary with element type TYPE, character length STRING_LENGTH
+   (NULL_TREE for non-character temporaries) and rank DIMEN.  Unlike
+   gfc_get_array_ss, the returned node always terminates its chain.  */
+
+gfc_ss *
+gfc_get_temp_ss (tree type, tree string_length, int dimen)
+{
+  gfc_ss *ss;
+
+  ss = gfc_get_ss ();
+  ss->next = gfc_ss_terminator;
+  ss->type = GFC_SS_TEMP;
+  ss->string_length = string_length;
+  ss->data.temp.dimen = dimen;
+  ss->data.temp.type = type;
+
+  return ss;
+}
+
+
+/* Create and initialize a GFC_SS_SCALAR gfc_ss struct for the scalar
+   expression EXPR, chained in front of NEXT.  */
+
+gfc_ss *
+gfc_get_scalar_ss (gfc_ss *next, gfc_expr *expr)
+{
+  gfc_ss *ss;
+
+  ss = gfc_get_ss ();
+  ss->next = next;
+  ss->type = GFC_SS_SCALAR;
+  ss->expr = expr;
+
+  return ss;
}
{
/* Allocate the temporary. */
onstack = !dynamic && initial == NULL_TREE
- && gfc_can_put_var_on_stack (size);
+ && (gfc_option.flag_stack_arrays
+ || gfc_can_put_var_on_stack (size));
if (onstack)
{
/* Make a temporary variable to hold the data. */
tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (nelem),
nelem, gfc_index_one_node);
+ tmp = gfc_evaluate_now (tmp, pre);
tmp = build_range_type (gfc_array_index_type, gfc_index_zero_node,
tmp);
tmp = build_array_type (gfc_get_element_type (TREE_TYPE (desc)),
tmp);
tmp = gfc_create_var (tmp, "A");
+ /* If we're here only because of -fstack-arrays we have to
+ emit a DECL_EXPR to make the gimplifier emit alloca calls. */
+ if (!gfc_can_put_var_on_stack (size))
+ gfc_add_expr_to_block (pre,
+ fold_build1_loc (input_location,
+ DECL_EXPR, TREE_TYPE (tmp),
+ tmp));
tmp = gfc_build_addr_expr (NULL_TREE, tmp);
gfc_conv_descriptor_data_set (pre, desc, tmp);
}
array_dim = info->dim[loop_dim];
for (n = 0; n < info->dimen; n++)
- if (n != loop_dim && info->dim[n] < array_dim)
+ if (info->dim[n] < array_dim)
array_ref_dim++;
return array_ref_dim;
of the descriptor fields. */
tmp = fold_build2_loc (input_location,
MINUS_EXPR, gfc_array_index_type,
- gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[n]),
- gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]));
+ gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]),
+ gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]));
loop->to[n] = tmp;
continue;
}
}
-/* Generate code to transpose array EXPR by creating a new descriptor
- in which the dimension specifications have been reversed. */
-
-void
-gfc_conv_array_transpose (gfc_se * se, gfc_expr * expr)
-{
- tree dest, src, dest_index, src_index;
- gfc_loopinfo *loop;
- gfc_ss_info *dest_info;
- gfc_ss *dest_ss, *src_ss;
- gfc_se src_se;
- int n;
-
- loop = se->loop;
-
- src_ss = gfc_walk_expr (expr);
- dest_ss = se->ss;
-
- dest_info = &dest_ss->data.info;
- gcc_assert (dest_info->dimen == 2);
-
- /* Get a descriptor for EXPR. */
- gfc_init_se (&src_se, NULL);
- gfc_conv_expr_descriptor (&src_se, expr, src_ss);
- gfc_add_block_to_block (&se->pre, &src_se.pre);
- gfc_add_block_to_block (&se->post, &src_se.post);
- src = src_se.expr;
-
- /* Allocate a new descriptor for the return value. */
- dest = gfc_create_var (TREE_TYPE (src), "transp");
- dest_info->descriptor = dest;
- se->expr = dest;
-
- /* Copy across the dtype field. */
- gfc_add_modify (&se->pre,
- gfc_conv_descriptor_dtype (dest),
- gfc_conv_descriptor_dtype (src));
-
- /* Copy the dimension information, renumbering dimension 1 to 0 and
- 0 to 1. */
- for (n = 0; n < 2; n++)
- {
- dest_info->delta[n] = gfc_index_zero_node;
- dest_info->start[n] = gfc_index_zero_node;
- dest_info->end[n] = gfc_index_zero_node;
- dest_info->stride[n] = gfc_index_one_node;
- dest_info->dim[n] = n;
-
- dest_index = gfc_rank_cst[n];
- src_index = gfc_rank_cst[1 - n];
-
- gfc_conv_descriptor_stride_set (&se->pre, dest, dest_index,
- gfc_conv_descriptor_stride_get (src, src_index));
-
- gfc_conv_descriptor_lbound_set (&se->pre, dest, dest_index,
- gfc_conv_descriptor_lbound_get (src, src_index));
-
- gfc_conv_descriptor_ubound_set (&se->pre, dest, dest_index,
- gfc_conv_descriptor_ubound_get (src, src_index));
-
- if (!loop->to[n])
- {
- gcc_assert (integer_zerop (loop->from[n]));
- loop->to[n] =
- fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
- gfc_conv_descriptor_ubound_get (dest, dest_index),
- gfc_conv_descriptor_lbound_get (dest, dest_index));
- }
- }
-
- /* Copy the data pointer. */
- dest_info->data = gfc_conv_descriptor_data_get (src);
- gfc_conv_descriptor_data_set (&se->pre, dest, dest_info->data);
-
- /* Copy the offset. This is not changed by transposition; the top-left
- element is still at the same offset as before, except where the loop
- starts at zero. */
- if (!integer_zerop (loop->from[0]))
- dest_info->offset = gfc_conv_descriptor_offset_get (src);
- else
- dest_info->offset = gfc_index_zero_node;
-
- gfc_conv_descriptor_offset_set (&se->pre, dest,
- dest_info->offset);
-
- if (dest_info->dimen > loop->temp_dim)
- loop->temp_dim = dest_info->dimen;
-}
-
-
/* Return the number of iterations in a loop that starts at START,
ends at END, and has step STEP. */
p = gfc_constructor_next (p);
}
- bound = build_int_cst (NULL_TREE, n - 1);
+ bound = size_int (n - 1);
/* Create an array type to hold them. */
tmptype = build_range_type (gfc_array_index_type,
gfc_index_zero_node, bound);
init = gfc_build_addr_expr (NULL_TREE, init);
size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (type));
- bound = build_int_cst (NULL_TREE, n * size);
+ bound = build_int_cst (size_type_node, n * size);
tmp = build_call_expr_loc (input_location,
- built_in_decls[BUILT_IN_MEMCPY], 3,
- tmp, init, bound);
+ builtin_decl_explicit (BUILT_IN_MEMCPY),
+ 3, tmp, init, bound);
gfc_add_expr_to_block (&body, tmp);
*poffset = fold_build2_loc (input_location, PLUS_EXPR,
}
+/* A catch-all to obtain the string length for anything that is not a
+   substring of non-constant length, a constant, an array or a variable.
+   BLOCK receives any code needed to evaluate the length; *LEN is both
+   an input (skip if already a constant) and the output length.  */
+
+static void
+get_array_ctor_all_strlen (stmtblock_t *block, gfc_expr *e, tree *len)
+{
+  gfc_se se;
+  gfc_ss *ss;
+
+  /* Don't bother if we already know the length is a constant. */
+  if (*len && INTEGER_CST_P (*len))
+    return;
+
+  if (!e->ref && e->ts.u.cl && e->ts.u.cl->length
+      && e->ts.u.cl->length->expr_type == EXPR_CONSTANT)
+    {
+      /* This is easy. */
+      gfc_conv_const_charlen (e->ts.u.cl);
+      *len = e->ts.u.cl->backend_decl;
+    }
+  else
+    {
+      /* Otherwise, be brutal even if inefficient: translate the whole
+	 expression just to learn its string length.  */
+      ss = gfc_walk_expr (e);
+      gfc_init_se (&se, NULL);
+
+      /* No function call, in case of side effects. */
+      se.no_function_call = 1;
+      if (ss == gfc_ss_terminator)
+	gfc_conv_expr (&se, e);
+      else
+	gfc_conv_expr_descriptor (&se, e, ss);
+
+      /* Fix the value. */
+      *len = gfc_evaluate_now (se.string_length, &se.pre);
+
+      gfc_add_block_to_block (block, &se.pre);
+      gfc_add_block_to_block (block, &se.post);
+
+      /* Cache the result so later users see the evaluated length.  */
+      e->ts.u.cl->backend_decl = *len;
+    }
+}
+
+
/* Figure out the string length of a variable reference expression.
Used by get_array_ctor_strlen. */
static void
-get_array_ctor_var_strlen (gfc_expr * expr, tree * len)
+get_array_ctor_var_strlen (stmtblock_t *block, gfc_expr * expr, tree * len)
{
gfc_ref *ref;
gfc_typespec *ts;
case REF_SUBSTRING:
if (ref->u.ss.start->expr_type != EXPR_CONSTANT
|| ref->u.ss.end->expr_type != EXPR_CONSTANT)
- break;
+ {
+ /* Note that this might evaluate expr. */
+ get_array_ctor_all_strlen (block, expr, len);
+ return;
+ }
mpz_init_set_ui (char_len, 1);
mpz_add (char_len, char_len, ref->u.ss.end->value.integer);
mpz_sub (char_len, char_len, ref->u.ss.start->value.integer);
return;
default:
- /* TODO: Substrings are tricky because we can't evaluate the
- expression more than once. For now we just give up, and hope
- we can figure it out elsewhere. */
- return;
+ gcc_unreachable ();
}
}
}
-/* A catch-all to obtain the string length for anything that is not a
- constant, array or variable. */
-static void
-get_array_ctor_all_strlen (stmtblock_t *block, gfc_expr *e, tree *len)
-{
- gfc_se se;
- gfc_ss *ss;
-
- /* Don't bother if we already know the length is a constant. */
- if (*len && INTEGER_CST_P (*len))
- return;
-
- if (!e->ref && e->ts.u.cl && e->ts.u.cl->length
- && e->ts.u.cl->length->expr_type == EXPR_CONSTANT)
- {
- /* This is easy. */
- gfc_conv_const_charlen (e->ts.u.cl);
- *len = e->ts.u.cl->backend_decl;
- }
- else
- {
- /* Otherwise, be brutal even if inefficient. */
- ss = gfc_walk_expr (e);
- gfc_init_se (&se, NULL);
-
- /* No function call, in case of side effects. */
- se.no_function_call = 1;
- if (ss == gfc_ss_terminator)
- gfc_conv_expr (&se, e);
- else
- gfc_conv_expr_descriptor (&se, e, ss);
-
- /* Fix the value. */
- *len = gfc_evaluate_now (se.string_length, &se.pre);
-
- gfc_add_block_to_block (block, &se.pre);
- gfc_add_block_to_block (block, &se.post);
-
- e->ts.u.cl->backend_decl = *len;
- }
-}
-
-
/* Figure out the string length of a character array constructor.
If len is NULL, don't calculate the length; this happens for recursive calls
when a sub-array-constructor is an element but not at the first position,
case EXPR_VARIABLE:
is_const = false;
if (len)
- get_array_ctor_var_strlen (c->expr, len);
+ get_array_ctor_var_strlen (block, c->expr, len);
break;
default:
tmptype = gfc_get_nodesc_array_type (type, &as, PACKED_STATIC, true);
+ /* as is not needed anymore. */
+ for (i = 0; i < as.rank + as.corank; i++)
+ {
+ gfc_free_expr (as.lower[i]);
+ gfc_free_expr (as.upper[i]);
+ }
+
init = build_constructor (tmptype, v);
TREE_CONSTANT (init) = 1;
info->start[i] = gfc_index_zero_node;
info->end[i] = gfc_index_zero_node;
info->stride[i] = gfc_index_one_node;
- info->dim[i] = i;
}
if (info->dimen > loop->temp_dim)
tree offsetvar;
tree desc;
tree type;
+ tree tmp;
bool dynamic;
bool old_first_len, old_typespec_chararray_ctor;
tree old_first_len_val;
first_len = true;
}
- ss->data.info.dimen = loop->dimen;
+ gcc_assert (ss->data.info.dimen == loop->dimen);
c = ss->expr->value.constructor;
if (ss->expr->ts.type == BT_CHARACTER)
}
}
+ if (TREE_CODE (loop->to[0]) == VAR_DECL)
+ dynamic = true;
+
gfc_trans_create_temp_array (&loop->pre, &loop->post, loop, &ss->data.info,
type, NULL_TREE, dynamic, true, false, where);
/* If the array grows dynamically, the upper bound of the loop variable
is determined by the array's final upper bound. */
if (dynamic)
- loop->to[0] = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[0]);
+ {
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ offsetvar, gfc_index_one_node);
+ tmp = gfc_evaluate_now (tmp, &loop->pre);
+ gfc_conv_descriptor_ubound_set (&loop->pre, desc, gfc_rank_cst[0], tmp);
+ if (loop->to[0] && TREE_CODE (loop->to[0]) == VAR_DECL)
+ gfc_add_modify (&loop->pre, loop->to[0], tmp);
+ else
+ loop->to[0] = tmp;
+ }
if (TREE_USED (offsetvar))
pushdecl (offsetvar);
else
gcc_assert (INTEGER_CST_P (offset));
+
#if 0
/* Disable bound checking for now because it's probably broken. */
if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
tmp = gfc_conv_array_offset (se.expr);
ss->data.info.offset = gfc_evaluate_now (tmp, block);
+
+ /* Make absolutely sure that the saved_offset is indeed saved
+ so that the variable is still accessible after the loops
+ are translated. */
+ ss->data.info.saved_offset = ss->data.info.offset;
}
}
for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
{
loop->order[n] = n;
- loop->reverse[n] = GFC_CANNOT_REVERSE;
+ loop->reverse[n] = GFC_INHIBIT_REVERSE;
}
loop->ss = gfc_ss_terminator;
fold_convert (long_integer_type_node, index),
fold_convert (long_integer_type_node, tmp_lo),
fold_convert (long_integer_type_node, tmp_up));
- gfc_free (msg);
+ free (msg);
}
else
{
gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
fold_convert (long_integer_type_node, index),
fold_convert (long_integer_type_node, tmp_lo));
- gfc_free (msg);
+ free (msg);
}
return index;
gcc_assert (ar->type != AR_ELEMENT);
switch (ar->dimen_type[dim])
{
+ case DIMEN_THIS_IMAGE:
+ gcc_unreachable ();
+ break;
case DIMEN_ELEMENT:
/* Elemental dimension. */
gcc_assert (info->subscript[dim]
/* Temporary array or derived type component. */
gcc_assert (se->loop);
index = se->loop->loopvar[se->loop->order[i]];
+
+ /* Pointer functions can have stride[0] different from unity.
+ Use the stride returned by the function call and stored in
+ the descriptor for the temporary. */
+ if (se->ss && se->ss->type == GFC_SS_FUNCTION
+ && se->ss->expr
+ && se->ss->expr->symtree
+ && se->ss->expr->symtree->n.sym->result
+ && se->ss->expr->symtree->n.sym->result->attr.pointer)
+ stride = gfc_conv_descriptor_stride_get (info->descriptor,
+ gfc_rank_cst[dim]);
+
if (!integer_zerop (info->delta[dim]))
index = fold_build2_loc (input_location, PLUS_EXPR,
gfc_array_index_type, index, info->delta[dim]);
{
se->string_length = se->ss->string_length;
gfc_conv_scalarized_array_ref (se, NULL);
+ gfc_advance_se_ss_chain (se);
}
+/* Add T to the offset pair *OFFSET, *CST_OFFSET.  Integer-constant
+   summands are folded into *CST_OFFSET at compile time; all other
+   summands are chained onto the tree expression *OFFSET, keeping the
+   constant part separate so it can be added once at the end.  */
+
+static void
+add_to_offset (tree *cst_offset, tree *offset, tree t)
+{
+  if (TREE_CODE (t) == INTEGER_CST)
+    *cst_offset = int_const_binop (PLUS_EXPR, *cst_offset, t);
+  else
+    {
+      if (!integer_zerop (*offset))
+	*offset = fold_build2_loc (input_location, PLUS_EXPR,
+				   gfc_array_index_type, *offset, t);
+      else
+	*offset = t;
+    }
+}
/* Build an array reference. se->expr already holds the array descriptor.
This should be either a variable, indirect variable reference or component
locus * where)
{
int n;
- tree index;
+ tree offset, cst_offset;
tree tmp;
tree stride;
gfc_se indexse;
gfc_se tmpse;
if (ar->dimen == 0)
- return;
+ {
+ gcc_assert (ar->codimen);
+
+ if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (se->expr)))
+ se->expr = build_fold_indirect_ref (gfc_conv_array_data (se->expr));
+ else
+ {
+ if (GFC_ARRAY_TYPE_P (TREE_TYPE (se->expr))
+ && TREE_CODE (TREE_TYPE (se->expr)) == POINTER_TYPE)
+ se->expr = build_fold_indirect_ref_loc (input_location, se->expr);
+
+ /* Use the actual tree type and not the wrapped coarray. */
+ if (!se->want_pointer)
+ se->expr = fold_convert (TYPE_MAIN_VARIANT (TREE_TYPE (se->expr)),
+ se->expr);
+ }
+
+ return;
+ }
/* Handle scalarized references separately. */
if (ar->type != AR_ELEMENT)
return;
}
- index = gfc_index_zero_node;
+ cst_offset = offset = gfc_index_zero_node;
+ add_to_offset (&cst_offset, &offset, gfc_conv_array_offset (se->expr));
- /* Calculate the offsets from all the dimensions. */
- for (n = 0; n < ar->dimen; n++)
+ /* Calculate the offsets from all the dimensions. Make sure to associate
+ the final offset so that we form a chain of loop invariant summands. */
+ for (n = ar->dimen - 1; n >= 0; n--)
{
/* Calculate the index for this dimension. */
gfc_init_se (&indexse, se);
fold_convert (long_integer_type_node,
indexse.expr),
fold_convert (long_integer_type_node, tmp));
- gfc_free (msg);
+ free (msg);
/* Upper bound, but not for the last dimension of assumed-size
arrays. */
fold_convert (long_integer_type_node,
indexse.expr),
fold_convert (long_integer_type_node, tmp));
- gfc_free (msg);
+ free (msg);
}
}
indexse.expr, stride);
/* And add it to the total. */
- index = fold_build2_loc (input_location, PLUS_EXPR,
- gfc_array_index_type, index, tmp);
+ add_to_offset (&cst_offset, &offset, tmp);
}
- tmp = gfc_conv_array_offset (se->expr);
- if (!integer_zerop (tmp))
- index = fold_build2_loc (input_location, PLUS_EXPR,
- gfc_array_index_type, index, tmp);
+ if (!integer_zerop (cst_offset))
+ offset = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, offset, cst_offset);
/* Access the calculated element. */
tmp = gfc_conv_array_data (se->expr);
tmp = build_fold_indirect_ref (tmp);
- se->expr = gfc_build_array_ref (tmp, index, sym->backend_decl);
+ se->expr = gfc_build_array_ref (tmp, offset, sym->backend_decl);
+}
+
+
+/* Add the offset corresponding to array's ARRAY_DIM dimension and loop's
+   LOOP_DIM dimension (if any; callers pass -1 when no loop dimension
+   applies) to array's offset, evaluating the updated offset into a
+   temporary in PBLOCK.  AR is the array reference being scalarized, or
+   NULL.  */
+
+static void
+add_array_offset (stmtblock_t *pblock, gfc_loopinfo *loop, gfc_ss *ss,
+		  gfc_array_ref *ar, int array_dim, int loop_dim)
+{
+  gfc_se se;
+  gfc_ss_info *info;
+  tree stride, index;
+
+  info = &ss->data.info;
+
+  gfc_init_se (&se, NULL);
+  se.loop = loop;
+  se.expr = info->descriptor;
+  stride = gfc_conv_array_stride (info->descriptor, array_dim);
+  index = gfc_conv_array_index_offset (&se, info, array_dim, loop_dim, ar,
+				       stride);
+  gfc_add_block_to_block (pblock, &se.pre);
+
+  /* Fold the new summand into the running offset and re-evaluate it now
+     so the value is stable across the scalarization loops.  */
+  info->offset = fold_build2_loc (input_location, PLUS_EXPR,
+				  gfc_array_index_type,
+				  info->offset, index);
+  info->offset = gfc_evaluate_now (info->offset, pblock);
}
gfc_trans_preloop_setup (gfc_loopinfo * loop, int dim, int flag,
stmtblock_t * pblock)
{
- tree index;
tree stride;
gfc_ss_info *info;
gfc_ss *ss;
- gfc_se se;
+ gfc_array_ref *ar;
int i;
/* This code will be executed before entering the scalarization loop
if (dim >= info->dimen)
continue;
+ gcc_assert (info->dimen == loop->dimen);
- if (dim == info->dimen - 1)
- {
- /* For the outermost loop calculate the offset due to any
- elemental dimensions. It will have been initialized with the
- base offset of the array. */
- if (info->ref)
- {
- for (i = 0; i < info->ref->u.ar.dimen; i++)
- {
- if (info->ref->u.ar.dimen_type[i] != DIMEN_ELEMENT)
- continue;
+ if (info->ref)
+ ar = &info->ref->u.ar;
+ else
+ ar = NULL;
- gfc_init_se (&se, NULL);
- se.loop = loop;
- se.expr = info->descriptor;
- stride = gfc_conv_array_stride (info->descriptor, i);
- index = gfc_conv_array_index_offset (&se, info, i, -1,
- &info->ref->u.ar,
- stride);
- gfc_add_block_to_block (pblock, &se.pre);
-
- info->offset = fold_build2_loc (input_location, PLUS_EXPR,
- gfc_array_index_type,
- info->offset, index);
- info->offset = gfc_evaluate_now (info->offset, pblock);
- }
- }
+ if (dim == loop->dimen - 1)
+ i = 0;
+ else
+ i = dim + 1;
- i = loop->order[0];
- /* For the time being, the innermost loop is unconditionally on
- the first dimension of the scalarization loop. */
- gcc_assert (i == 0);
+ /* For the time being, there is no loop reordering. */
+ gcc_assert (i == loop->order[i]);
+ i = loop->order[i];
+
+ if (dim == loop->dimen - 1)
+ {
stride = gfc_conv_array_stride (info->descriptor, info->dim[i]);
/* Calculate the stride of the innermost loop. Hopefully this will
allow the backend optimizers to do their stuff more effectively.
*/
info->stride0 = gfc_evaluate_now (stride, pblock);
- }
- else
- {
- /* Add the offset for the previous loop dimension. */
- gfc_array_ref *ar;
+ /* For the outermost loop calculate the offset due to any
+ elemental dimensions. It will have been initialized with the
+ base offset of the array. */
if (info->ref)
{
- ar = &info->ref->u.ar;
- i = loop->order[dim + 1];
- }
- else
- {
- ar = NULL;
- i = dim + 1;
- }
+ for (i = 0; i < ar->dimen; i++)
+ {
+ if (ar->dimen_type[i] != DIMEN_ELEMENT)
+ continue;
- gfc_init_se (&se, NULL);
- se.loop = loop;
- se.expr = info->descriptor;
- stride = gfc_conv_array_stride (info->descriptor, info->dim[i]);
- index = gfc_conv_array_index_offset (&se, info, info->dim[i], i,
- ar, stride);
- gfc_add_block_to_block (pblock, &se.pre);
- info->offset = fold_build2_loc (input_location, PLUS_EXPR,
- gfc_array_index_type, info->offset,
- index);
- info->offset = gfc_evaluate_now (info->offset, pblock);
+ add_array_offset (pblock, loop, ss, ar, i, /* unused */ -1);
+ }
+ }
}
+ else
+ /* Add the offset for the previous loop dimension. */
+ add_array_offset (pblock, loop, ss, ar, info->dim[i], i);
/* Remember this offset for the second loop. */
if (dim == loop->temp_dim - 1)
}
-/* Calculate the lower bound of an array section. */
+/* Precalculate (either lower or upper) bound of an array section.
+ BLOCK: Block in which the (pre)calculation code will go.
+ BOUNDS[DIM]: Where the bound value will be stored once evaluated.
+ VALUES[DIM]: Specified bound (NULL <=> unspecified).
+ DESC: Array descriptor from which the bound will be picked if unspecified
+ (either lower or upper bound according to LBOUND). */
static void
-gfc_conv_section_startstride (gfc_loopinfo * loop, gfc_ss * ss, int dim)
+evaluate_bound (stmtblock_t *block, tree *bounds, gfc_expr ** values,
+ tree desc, int dim, bool lbound)
{
- gfc_expr *start;
- gfc_expr *end;
- gfc_expr *stride;
- tree desc;
gfc_se se;
- gfc_ss_info *info;
-
- gcc_assert (ss->type == GFC_SS_SECTION);
+ gfc_expr * input_val = values[dim];
+ tree *output = &bounds[dim];
- info = &ss->data.info;
- if (info->ref->u.ar.dimen_type[dim] == DIMEN_VECTOR)
+ if (input_val)
+ {
+ /* Specified section bound. */
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_type (&se, input_val, gfc_array_index_type);
+ gfc_add_block_to_block (block, &se.pre);
+ *output = se.expr;
+ }
+ else
+ {
+ /* No specific bound specified so use the bound of the array. */
+ *output = lbound ? gfc_conv_array_lbound (desc, dim) :
+ gfc_conv_array_ubound (desc, dim);
+ }
+ *output = gfc_evaluate_now (*output, block);
+}
+
+
+/* Calculate the lower bound of an array section. */
+
+static void
+gfc_conv_section_startstride (gfc_loopinfo * loop, gfc_ss * ss, int dim)
+{
+ gfc_expr *stride = NULL;
+ tree desc;
+ gfc_se se;
+ gfc_ss_info *info;
+ gfc_array_ref *ar;
+
+ gcc_assert (ss->type == GFC_SS_SECTION);
+
+ info = &ss->data.info;
+ ar = &info->ref->u.ar;
+
+ if (ar->dimen_type[dim] == DIMEN_VECTOR)
{
/* We use a zero-based index to access the vector. */
info->start[dim] = gfc_index_zero_node;
- info->stride[dim] = gfc_index_one_node;
info->end[dim] = NULL;
+ info->stride[dim] = gfc_index_one_node;
return;
}
- gcc_assert (info->ref->u.ar.dimen_type[dim] == DIMEN_RANGE);
+ gcc_assert (ar->dimen_type[dim] == DIMEN_RANGE
+ || ar->dimen_type[dim] == DIMEN_THIS_IMAGE);
desc = info->descriptor;
- start = info->ref->u.ar.start[dim];
- end = info->ref->u.ar.end[dim];
- stride = info->ref->u.ar.stride[dim];
+ stride = ar->stride[dim];
/* Calculate the start of the range. For vector subscripts this will
be the range of the vector. */
- if (start)
- {
- /* Specified section start. */
- gfc_init_se (&se, NULL);
- gfc_conv_expr_type (&se, start, gfc_array_index_type);
- gfc_add_block_to_block (&loop->pre, &se.pre);
- info->start[dim] = se.expr;
- }
- else
- {
- /* No lower bound specified so use the bound of the array. */
- info->start[dim] = gfc_conv_array_lbound (desc, dim);
- }
- info->start[dim] = gfc_evaluate_now (info->start[dim], &loop->pre);
+ evaluate_bound (&loop->pre, info->start, ar->start, desc, dim, true);
/* Similarly calculate the end. Although this is not used in the
scalarizer, it is needed when checking bounds and where the end
is an expression with side-effects. */
- if (end)
- {
- /* Specified section start. */
- gfc_init_se (&se, NULL);
- gfc_conv_expr_type (&se, end, gfc_array_index_type);
- gfc_add_block_to_block (&loop->pre, &se.pre);
- info->end[dim] = se.expr;
- }
- else
- {
- /* No upper bound specified so use the bound of the array. */
- info->end[dim] = gfc_conv_array_ubound (desc, dim);
- }
- info->end[dim] = gfc_evaluate_now (info->end[dim], &loop->pre);
+ evaluate_bound (&loop->pre, info->end, ar->end, desc, dim, false);
/* Calculate the stride. */
if (stride == NULL)
loop->dimen = 0;
/* Determine the rank of the loop. */
- for (ss = loop->ss;
- ss != gfc_ss_terminator && loop->dimen == 0; ss = ss->loop_chain)
+ for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
{
switch (ss->type)
{
case GFC_SS_FUNCTION:
case GFC_SS_COMPONENT:
loop->dimen = ss->data.info.dimen;
- break;
+ goto done;
/* As usual, lbound and ubound are exceptions!. */
case GFC_SS_INTRINSIC:
{
case GFC_ISYM_LBOUND:
case GFC_ISYM_UBOUND:
+ case GFC_ISYM_LCOBOUND:
+ case GFC_ISYM_UCOBOUND:
+ case GFC_ISYM_THIS_IMAGE:
loop->dimen = ss->data.info.dimen;
+ goto done;
default:
break;
/* We should have determined the rank of the expression by now. If
not, that's bad news. */
- gcc_assert (loop->dimen != 0);
+ gcc_unreachable ();
+done:
/* Loop over all the SS in the chain. */
for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
{
/* Fall through to supply start and stride. */
case GFC_ISYM_LBOUND:
case GFC_ISYM_UBOUND:
+ case GFC_ISYM_LCOBOUND:
+ case GFC_ISYM_UCOBOUND:
+ case GFC_ISYM_THIS_IMAGE:
break;
+
default:
continue;
}
if (ss->type != GFC_SS_SECTION)
continue;
+ /* Catch allocatable lhs in f2003. */
+ if (gfc_option.flag_realloc_lhs && ss->is_alloc_lhs)
+ continue;
+
gfc_start_block (&inner);
/* TODO: range checking for mapped dimensions. */
"of array '%s'", dim + 1, ss->expr->symtree->name);
gfc_trans_runtime_check (true, false, tmp, &inner,
&ss->expr->where, msg);
- gfc_free (msg);
+ free (msg);
desc = ss->data.info.descriptor;
fold_convert (long_integer_type_node, info->start[dim]),
fold_convert (long_integer_type_node, lbound),
fold_convert (long_integer_type_node, ubound));
- gfc_free (msg);
+ free (msg);
}
else
{
&ss->expr->where, msg,
fold_convert (long_integer_type_node, info->start[dim]),
fold_convert (long_integer_type_node, lbound));
- gfc_free (msg);
+ free (msg);
}
/* Compute the last element of the range, which is not
fold_convert (long_integer_type_node, tmp),
fold_convert (long_integer_type_node, ubound),
fold_convert (long_integer_type_node, lbound));
- gfc_free (msg);
+ free (msg);
}
else
{
&ss->expr->where, msg,
fold_convert (long_integer_type_node, tmp),
fold_convert (long_integer_type_node, lbound));
- gfc_free (msg);
+ free (msg);
}
/* Check the section sizes match. */
fold_convert (long_integer_type_node, tmp),
fold_convert (long_integer_type_node, size[n]));
- gfc_free (msg);
+ free (msg);
}
else
size[n] = gfc_evaluate_now (tmp, &inner);
}
}
+/* Return true (nonzero) if both symbols could refer to the same data
+   object.  Does not take account of aliasing due to equivalence
+   statements.  LSYM_POINTER/LSYM_TARGET and RSYM_POINTER/RSYM_TARGET
+   are the effective POINTER and TARGET attributes for LSYM and RSYM
+   respectively; callers may pass values that also fold in the
+   attributes of component references leading to the symbol.  */
+
+static int
+symbols_could_alias (gfc_symbol *lsym, gfc_symbol *rsym, bool lsym_pointer,
+		     bool lsym_target, bool rsym_pointer, bool rsym_target)
+{
+  /* Aliasing isn't possible if the symbols have different base types. */
+  if (gfc_compare_types (&lsym->ts, &rsym->ts) == 0)
+    return 0;
+
+  /* Pointers can point to other pointers and target objects. */
+
+  if ((lsym_pointer && (rsym_pointer || rsym_target))
+      || (rsym_pointer && (lsym_pointer || lsym_target)))
+    return 1;
+
+  /* Special case: Argument association, cf. F90 12.4.1.6, F2003 12.4.1.7
+     and F2008 12.5.2.13 items 3b and 4b. The pointer case (a) is already
+     checked above. */
+  if (lsym_target && rsym_target
+      && ((lsym->attr.dummy && !lsym->attr.contiguous
+	   && (!lsym->attr.dimension || lsym->as->type == AS_ASSUMED_SHAPE))
+	  || (rsym->attr.dummy && !rsym->attr.contiguous
+	      && (!rsym->attr.dimension
+		  || rsym->as->type == AS_ASSUMED_SHAPE))))
+    return 1;
+
+  return 0;
+}
+
/* Return true if the two SS could be aliased, i.e. both point to the same data
object. */
gfc_ref *rref;
gfc_symbol *lsym;
gfc_symbol *rsym;
+ bool lsym_pointer, lsym_target, rsym_pointer, rsym_target;
lsym = lss->expr->symtree->n.sym;
rsym = rss->expr->symtree->n.sym;
- if (gfc_symbols_could_alias (lsym, rsym))
+
+ lsym_pointer = lsym->attr.pointer;
+ lsym_target = lsym->attr.target;
+ rsym_pointer = rsym->attr.pointer;
+ rsym_target = rsym->attr.target;
+
+ if (symbols_could_alias (lsym, rsym, lsym_pointer, lsym_target,
+ rsym_pointer, rsym_target))
return 1;
- if (rsym->ts.type != BT_DERIVED
- && lsym->ts.type != BT_DERIVED)
+ if (rsym->ts.type != BT_DERIVED && rsym->ts.type != BT_CLASS
+ && lsym->ts.type != BT_DERIVED && lsym->ts.type != BT_CLASS)
return 0;
/* For derived types we must check all the component types. We can ignore
if (lref->type != REF_COMPONENT)
continue;
- if (gfc_symbols_could_alias (lref->u.c.sym, rsym))
+ lsym_pointer = lsym_pointer || lref->u.c.sym->attr.pointer;
+ lsym_target = lsym_target || lref->u.c.sym->attr.target;
+
+ if (symbols_could_alias (lref->u.c.sym, rsym, lsym_pointer, lsym_target,
+ rsym_pointer, rsym_target))
return 1;
+ if ((lsym_pointer && (rsym_pointer || rsym_target))
+ || (rsym_pointer && (lsym_pointer || lsym_target)))
+ {
+ if (gfc_compare_types (&lref->u.c.component->ts,
+ &rsym->ts))
+ return 1;
+ }
+
for (rref = rss->expr->ref; rref != rss->data.info.ref;
rref = rref->next)
	{
	  if (rref->type != REF_COMPONENT)
	    continue;
-	  if (gfc_symbols_could_alias (lref->u.c.sym, rref->u.c.sym))
+	  /* Accumulate the pointer/target attributes of the right-hand
+	     side's component chain seen so far.  */
+	  rsym_pointer = rsym_pointer || rref->u.c.sym->attr.pointer;
+	  rsym_target = rsym_target || rref->u.c.sym->attr.target;
+
+	  if (symbols_could_alias (lref->u.c.sym, rref->u.c.sym,
+				   lsym_pointer, lsym_target,
+				   rsym_pointer, rsym_target))
	    return 1;
+
+	  if ((lsym_pointer && (rsym_pointer || rsym_target))
+	      || (rsym_pointer && (lsym_pointer || lsym_target)))
+	    {
+	      if (gfc_compare_types (&lref->u.c.component->ts,
+				     &rref->u.c.sym->ts))
+		return 1;
+	      if (gfc_compare_types (&lref->u.c.sym->ts,
+				     &rref->u.c.component->ts))
+		return 1;
+	      if (gfc_compare_types (&lref->u.c.component->ts,
+				     &rref->u.c.component->ts))
+		return 1;
+	    }
	}
}
+  /* Reset the left-hand side's effective attributes to those of the
+     base symbol before scanning the right-hand side's components on
+     their own.  (The original patch duplicated this pair of
+     assignments; the duplicate was dead and has been dropped.)  */
+  lsym_pointer = lsym->attr.pointer;
+  lsym_target = lsym->attr.target;
+
for (rref = rss->expr->ref; rref != rss->data.info.ref; rref = rref->next)
{
if (rref->type != REF_COMPONENT)
break;
-	  if (gfc_symbols_could_alias (rref->u.c.sym, lsym))
+      /* Accumulate the component's attributes into the right-hand
+	 side's effective attributes (was a copy-paste of lsym_target).  */
+      rsym_pointer = rsym_pointer || rref->u.c.sym->attr.pointer;
+      rsym_target = rsym_target || rref->u.c.sym->attr.target;
+
+      if (symbols_could_alias (rref->u.c.sym, lsym,
+			       lsym_pointer, lsym_target,
+			       rsym_pointer, rsym_target))
	    return 1;
+
+      if ((lsym_pointer && (rsym_pointer || rsym_target))
+	  || (rsym_pointer && (lsym_pointer || lsym_target)))
+	{
+	  if (gfc_compare_types (&lsym->ts, &rref->u.c.component->ts))
+	    return 1;
+	}
}
return 0;
if (GFC_ARRAY_TYPE_P (base_type)
|| GFC_DESCRIPTOR_TYPE_P (base_type))
base_type = gfc_get_element_type (base_type);
- loop->temp_ss = gfc_get_ss ();
- loop->temp_ss->type = GFC_SS_TEMP;
- loop->temp_ss->data.temp.type = base_type;
- loop->temp_ss->string_length = dest->string_length;
- loop->temp_ss->data.temp.dimen = loop->dimen;
- loop->temp_ss->next = gfc_ss_terminator;
+ loop->temp_ss = gfc_get_temp_ss (base_type, dest->string_length,
+ loop->dimen);
gfc_add_ss_to_loop (loop, loop->temp_ss);
}
else
continue;
}
+ /* Avoid using an allocatable lhs in an assignment, since
+ there might be a reallocation coming. */
+ if (loopspec[n] && ss->is_alloc_lhs)
+ continue;
+
if (ss->type != GFC_SS_SECTION)
continue;
known lower bound
known upper bound
*/
- else if (loopspec[n]->type == GFC_SS_CONSTRUCTOR && dynamic[n])
+ else if ((loopspec[n]->type == GFC_SS_CONSTRUCTOR && dynamic[n])
+ || n >= loop->dimen)
loopspec[n] = ss;
else if (integer_onep (info->stride[dim])
&& !integer_onep (specinfo->stride[spec_dim]))
}
/* Transform everything so we have a simple incrementing variable. */
- if (integer_onep (info->stride[dim]))
+ if (n < loop->dimen && integer_onep (info->stride[dim]))
info->delta[dim] = gfc_index_zero_node;
- else
+ else if (n < loop->dimen)
{
/* Set the delta for this section. */
info->delta[dim] = gfc_evaluate_now (loop->from[n], &loop->pre);
/* For an array descriptor, get the total number of elements. This is just
- the product of the extents along all dimensions. */
+ the product of the extents along from_dim to to_dim. */
-tree
-gfc_conv_descriptor_size (tree desc, int rank)
+static tree
+gfc_conv_descriptor_size_1 (tree desc, int from_dim, int to_dim)
{
tree res;
int dim;
res = gfc_index_one_node;
- for (dim = 0; dim < rank; ++dim)
+ for (dim = from_dim; dim < to_dim; ++dim)
{
tree lbound;
tree ubound;
}
-/* Fills in an array descriptor, and returns the size of the array. The size
- will be a simple_val, ie a variable or a constant. Also calculates the
- offset of the base. Returns the size of the array.
+/* Full size of an array: the product of the extents of dimensions
+   0 .. RANK-1 of descriptor DESC.  */
+
+tree
+gfc_conv_descriptor_size (tree desc, int rank)
+{
+ return gfc_conv_descriptor_size_1 (desc, 0, rank);
+}
+
+
+/* Size of a coarray for all dimensions but the last: the product of
+   the extents of dimensions RANK .. RANK+CORANK-2 of DESC.  The last
+   codimension is excluded since it has no upper cobound.  */
+
+tree
+gfc_conv_descriptor_cosize (tree desc, int rank, int corank)
+{
+ return gfc_conv_descriptor_size_1 (desc, rank, rank + corank - 1);
+}
+
+
+/* Fills in an array descriptor, and returns the size of the array.
+ The size will be a simple_val, ie a variable or a constant. Also
+ calculates the offset of the base. The pointer argument overflow,
+ which should be of integer type, will increase in value if overflow
+ occurs during the size calculation. Returns the size of the array.
{
stride = 1;
offset = 0;
size = 1 - lbound;
a.ubound[n] = specified_upper_bound;
a.stride[n] = stride;
- size = siz >= 0 ? ubound + size : 0; //size = ubound + 1 - lbound
+ size = size >= 0 ? ubound + size : 0; //size = ubound + 1 - lbound
+ overflow += size == 0 ? 0: (MAX/size < stride ? 1: 0);
stride = stride * size;
}
+ for (n = rank; n < rank+corank; n++)
+ (Set lcobound/ucobound as above.)
+ element_size = sizeof (array element);
+ if (!rank)
+ return element_size
+ stride = (size_t) stride;
+ overflow += element_size == 0 ? 0: (MAX/element_size < stride ? 1: 0);
+ stride = stride * element_size;
return (stride);
} */
/*GCC ARRAYS*/
static tree
gfc_array_init_size (tree descriptor, int rank, int corank, tree * poffset,
- gfc_expr ** lower, gfc_expr ** upper,
- stmtblock_t * pblock)
+ gfc_expr ** lower, gfc_expr ** upper, stmtblock_t * pblock,
+ stmtblock_t * descriptor_block, tree * overflow)
{
tree type;
tree tmp;
tree size;
tree offset;
tree stride;
+ tree element_size;
tree or_expr;
tree thencase;
tree elsecase;
+ tree cond;
tree var;
stmtblock_t thenblock;
stmtblock_t elseblock;
/* Set the dtype. */
tmp = gfc_conv_descriptor_dtype (descriptor);
- gfc_add_modify (pblock, tmp, gfc_get_dtype (TREE_TYPE (descriptor)));
+ gfc_add_modify (descriptor_block, tmp, gfc_get_dtype (TREE_TYPE (descriptor)));
or_expr = boolean_false_node;
ubound = lower[n];
}
}
- gfc_conv_descriptor_lbound_set (pblock, descriptor, gfc_rank_cst[n],
- se.expr);
+ gfc_conv_descriptor_lbound_set (descriptor_block, descriptor,
+ gfc_rank_cst[n], se.expr);
conv_lbound = se.expr;
/* Work out the offset for this component. */
gfc_conv_expr_type (&se, ubound, gfc_array_index_type);
gfc_add_block_to_block (pblock, &se.pre);
- gfc_conv_descriptor_ubound_set (pblock, descriptor,
+ gfc_conv_descriptor_ubound_set (descriptor_block, descriptor,
gfc_rank_cst[n], se.expr);
conv_ubound = se.expr;
/* Store the stride. */
- gfc_conv_descriptor_stride_set (pblock, descriptor,
+ gfc_conv_descriptor_stride_set (descriptor_block, descriptor,
gfc_rank_cst[n], stride);
/* Calculate size and check whether extent is negative. */
size = gfc_conv_array_extent_dim (conv_lbound, conv_ubound, &or_expr);
-
+ size = gfc_evaluate_now (size, pblock);
+
+ /* Check whether multiplying the stride by the number of
+ elements in this dimension would overflow. We must also check
+ whether the current dimension has zero size in order to avoid
+ division by zero.
+ */
+ tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
+ gfc_array_index_type,
+ fold_convert (gfc_array_index_type,
+ TYPE_MAX_VALUE (gfc_array_index_type)),
+ size);
+ cond = gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node, tmp, stride));
+ tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
+ integer_one_node, integer_zero_node);
+ cond = gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
+ boolean_type_node, size,
+ gfc_index_zero_node));
+ tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
+ integer_zero_node, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
+ *overflow, tmp);
+ *overflow = gfc_evaluate_now (tmp, pblock);
+
/* Multiply the stride by the number of elements in this dimension. */
stride = fold_build2_loc (input_location, MULT_EXPR,
gfc_array_index_type, stride, size);
ubound = lower[n];
}
}
- gfc_conv_descriptor_lbound_set (pblock, descriptor, gfc_rank_cst[n],
- se.expr);
+ gfc_conv_descriptor_lbound_set (descriptor_block, descriptor,
+ gfc_rank_cst[n], se.expr);
if (n < rank + corank - 1)
{
gcc_assert (ubound);
gfc_conv_expr_type (&se, ubound, gfc_array_index_type);
gfc_add_block_to_block (pblock, &se.pre);
- gfc_conv_descriptor_ubound_set (pblock, descriptor,
+ gfc_conv_descriptor_ubound_set (descriptor_block, descriptor,
gfc_rank_cst[n], se.expr);
}
}
/* The stride is the number of elements in the array, so multiply by the
size of an element to get the total size. */
tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
- size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
- stride, fold_convert (gfc_array_index_type, tmp));
+ /* Convert to size_t. */
+ element_size = fold_convert (size_type_node, tmp);
+
+ if (rank == 0)
+ return element_size;
+
+ stride = fold_convert (size_type_node, stride);
+
+ /* First check for overflow. Since an array of type character can
+ have zero element_size, we must check for that before
+ dividing. */
+ tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
+ size_type_node,
+ TYPE_MAX_VALUE (size_type_node), element_size);
+ cond = gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node, tmp, stride));
+ tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
+ integer_one_node, integer_zero_node);
+ cond = gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
+ boolean_type_node, element_size,
+ build_int_cst (size_type_node, 0)));
+ tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
+ integer_zero_node, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
+ *overflow, tmp);
+ *overflow = gfc_evaluate_now (tmp, pblock);
+
+ size = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
+ stride, element_size);
if (poffset != NULL)
{
if (integer_zerop (or_expr))
return size;
if (integer_onep (or_expr))
- return gfc_index_zero_node;
+ return build_int_cst (size_type_node, 0);
var = gfc_create_var (TREE_TYPE (size), "size");
gfc_start_block (&thenblock);
- gfc_add_modify (&thenblock, var, gfc_index_zero_node);
+ gfc_add_modify (&thenblock, var, build_int_cst (size_type_node, 0));
thencase = gfc_finish_block (&thenblock);
gfc_start_block (&elseblock);
/*GCC ARRAYS*/
bool
-gfc_array_allocate (gfc_se * se, gfc_expr * expr, tree pstat)
+gfc_array_allocate (gfc_se * se, gfc_expr * expr, tree status, tree errmsg,
+ tree errlen)
{
tree tmp;
tree pointer;
- tree offset;
+ tree offset = NULL_TREE;
+ tree token = NULL_TREE;
tree size;
+ tree msg;
+ tree error = NULL_TREE;
+ tree overflow; /* Boolean storing whether size calculation overflows. */
+ tree var_overflow = NULL_TREE;
+ tree cond;
+ tree set_descriptor;
+ stmtblock_t set_descriptor_block;
+ stmtblock_t elseblock;
gfc_expr **lower;
gfc_expr **upper;
gfc_ref *ref, *prev_ref = NULL;
- bool allocatable_array, coarray;
+ bool allocatable, coarray, dimension;
ref = expr->ref;
if (!prev_ref)
{
- allocatable_array = expr->symtree->n.sym->attr.allocatable;
+ allocatable = expr->symtree->n.sym->attr.allocatable;
coarray = expr->symtree->n.sym->attr.codimension;
+ dimension = expr->symtree->n.sym->attr.dimension;
}
else
{
- allocatable_array = prev_ref->u.c.component->attr.allocatable;
+ allocatable = prev_ref->u.c.component->attr.allocatable;
coarray = prev_ref->u.c.component->attr.codimension;
+ dimension = prev_ref->u.c.component->attr.dimension;
}
- /* Return if this is a scalar coarray. */
- if ((!prev_ref && !expr->symtree->n.sym->attr.dimension)
- || (prev_ref && !prev_ref->u.c.component->attr.dimension))
- {
- gcc_assert (coarray);
- return false;
- }
+ if (!dimension)
+ gcc_assert (coarray);
/* Figure out the size of the array. */
switch (ref->u.ar.type)
break;
}
+ overflow = integer_zero_node;
+
+ gfc_init_block (&set_descriptor_block);
size = gfc_array_init_size (se->expr, ref->u.ar.as->rank,
ref->u.ar.as->corank, &offset, lower, upper,
- &se->pre);
+ &se->pre, &set_descriptor_block, &overflow);
+
+ if (dimension)
+ {
+
+ var_overflow = gfc_create_var (integer_type_node, "overflow");
+ gfc_add_modify (&se->pre, var_overflow, overflow);
+
+ /* Generate the block of code handling overflow. */
+ msg = gfc_build_addr_expr (pchar_type_node,
+ gfc_build_localized_cstring_const
+ ("Integer overflow when calculating the amount of "
+ "memory to allocate"));
+ error = build_call_expr_loc (input_location, gfor_fndecl_runtime_error,
+ 1, msg);
+ }
+
+ if (status != NULL_TREE)
+ {
+ tree status_type = TREE_TYPE (status);
+ stmtblock_t set_status_block;
+
+ gfc_start_block (&set_status_block);
+ gfc_add_modify (&set_status_block, status,
+ build_int_cst (status_type, LIBERROR_ALLOCATION));
+ error = gfc_finish_block (&set_status_block);
+ }
+
+ gfc_start_block (&elseblock);
/* Allocate memory to store the data. */
pointer = gfc_conv_descriptor_data_get (se->expr);
STRIP_NOPS (pointer);
- /* The allocate_array variants take the old pointer as first argument. */
- if (allocatable_array)
- tmp = gfc_allocate_array_with_status (&se->pre, pointer, size, pstat, expr);
+ if (coarray && gfc_option.coarray == GFC_FCOARRAY_LIB)
+ token = gfc_build_addr_expr (NULL_TREE,
+ gfc_conv_descriptor_token (se->expr));
+
+ /* The allocatable variant takes the old pointer as first argument. */
+ if (allocatable)
+ gfc_allocate_allocatable (&elseblock, pointer, size, token,
+ status, errmsg, errlen, expr);
else
- tmp = gfc_allocate_with_status (&se->pre, size, pstat);
- tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node, pointer,
- tmp);
+ gfc_allocate_using_malloc (&elseblock, pointer, size, status);
+
+ if (dimension)
+ {
+ cond = gfc_unlikely (fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node, var_overflow, integer_zero_node));
+ tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond,
+ error, gfc_finish_block (&elseblock));
+ }
+ else
+ tmp = gfc_finish_block (&elseblock);
+
gfc_add_expr_to_block (&se->pre, tmp);
- gfc_conv_descriptor_offset_set (&se->pre, se->expr, offset);
+ /* Update the array descriptors. */
+ if (dimension)
+ gfc_conv_descriptor_offset_set (&set_descriptor_block, se->expr, offset);
+
+ set_descriptor = gfc_finish_block (&set_descriptor_block);
+ if (status != NULL_TREE)
+ {
+ cond = fold_build2_loc (input_location, EQ_EXPR,
+ boolean_type_node, status,
+ build_int_cst (TREE_TYPE (status), 0));
+ gfc_add_expr_to_block (&se->pre,
+ fold_build3_loc (input_location, COND_EXPR, void_type_node,
+ gfc_likely (cond), set_descriptor,
+ build_empty_stmt (input_location)));
+ }
+ else
+ gfc_add_expr_to_block (&se->pre, set_descriptor);
- if (expr->ts.type == BT_DERIVED
+ if ((expr->ts.type == BT_DERIVED || expr->ts.type == BT_CLASS)
&& expr->ts.u.derived->attr.alloc_comp)
{
tmp = gfc_nullify_alloc_comp (expr->ts.u.derived, se->expr,
gfc_se se;
HOST_WIDE_INT hi;
unsigned HOST_WIDE_INT lo;
- tree index;
+ tree index, range;
VEC(constructor_elt,gc) *v = NULL;
switch (expr->expr_type)
else
index = NULL_TREE;
+ if (mpz_cmp_si (c->repeat, 1) > 0)
+ {
+ tree tmp1, tmp2;
+ mpz_t maxval;
+
+ mpz_init (maxval);
+ mpz_add (maxval, c->offset, c->repeat);
+ mpz_sub_ui (maxval, maxval, 1);
+ tmp2 = gfc_conv_mpz_to_tree (maxval, gfc_index_integer_kind);
+ if (mpz_cmp_si (c->offset, 0) != 0)
+ {
+ mpz_add_ui (maxval, c->offset, 1);
+ tmp1 = gfc_conv_mpz_to_tree (maxval, gfc_index_integer_kind);
+ }
+ else
+ tmp1 = gfc_conv_mpz_to_tree (c->offset, gfc_index_integer_kind);
+
+ range = fold_build2 (RANGE_EXPR, gfc_array_index_type, tmp1, tmp2);
+ mpz_clear (maxval);
+ }
+ else
+ range = NULL;
+
gfc_init_se (&se, NULL);
switch (c->expr->expr_type)
{
case EXPR_CONSTANT:
gfc_conv_constant (&se, c->expr);
- CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
break;
case EXPR_STRUCTURE:
gfc_conv_structure (&se, c->expr, 1);
- CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
break;
-
default:
/* Catch those occasional beasts that do not simplify
for one reason or another, assuming that if they are
standard defying the frontend will catch them. */
gfc_conv_expr (&se, c->expr);
- CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
break;
}
+
+ if (range == NULL_TREE)
+ CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
+ else
+ {
+ if (index != NULL_TREE)
+ CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
+ CONSTRUCTOR_APPEND_ELT (v, range, se.expr);
+ }
}
break;
}
+/* Generate code to evaluate non-constant coarray cobounds of SYM,
+   whose backend array type is TYPE, appending the evaluation
+   statements to PBLOCK.  Codimensions occupy the array-spec slots
+   as->rank .. as->rank + as->corank - 1, after the array dimensions
+   proper.  Cobounds that are already INTEGER_CSTs need no runtime
+   evaluation and are left untouched.  */
+
+void
+gfc_trans_array_cobounds (tree type, stmtblock_t * pblock,
+ const gfc_symbol *sym)
+{
+ int dim;
+ tree ubound;
+ tree lbound;
+ gfc_se se;
+ gfc_array_spec *as;
+
+ as = sym->as;
+
+ for (dim = as->rank; dim < as->rank + as->corank; dim++)
+ {
+ /* Evaluate non-constant array bound expressions. */
+ lbound = GFC_TYPE_ARRAY_LBOUND (type, dim);
+ if (as->lower[dim] && !INTEGER_CST_P (lbound))
+ {
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_type (&se, as->lower[dim], gfc_array_index_type);
+ gfc_add_block_to_block (pblock, &se.pre);
+ gfc_add_modify (pblock, lbound, se.expr);
+ }
+ /* Likewise for the upper cobound, where present.  */
+ ubound = GFC_TYPE_ARRAY_UBOUND (type, dim);
+ if (as->upper[dim] && !INTEGER_CST_P (ubound))
+ {
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_type (&se, as->upper[dim], gfc_array_index_type);
+ gfc_add_block_to_block (pblock, &se.pre);
+ gfc_add_modify (pblock, ubound, se.expr);
+ }
+ }
+}
+
+
/* Generate code to evaluate non-constant array bounds. Sets *poffset and
returns the size (in elements) of the array. */
size = stride;
}
+ gfc_trans_array_cobounds (type, pblock, sym);
gfc_trans_vla_type_sizes (sym, pblock);
*poffset = offset;
{
stmtblock_t init;
tree type;
- tree tmp;
+ tree tmp = NULL_TREE;
tree size;
tree offset;
+ tree space;
+ tree inittree;
bool onstack;
gcc_assert (!(sym->attr.pointer || sym->attr.allocatable));
gcc_assert (GFC_ARRAY_TYPE_P (type));
onstack = TREE_CODE (type) != POINTER_TYPE;
- gfc_start_block (&init);
+ gfc_init_block (&init);
/* Evaluate character string length. */
if (sym->ts.type == BT_CHARACTER
return;
}
- /* The size is the number of elements in the array, so multiply by the
- size of an element to get the total size. */
- tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
- size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
- size, fold_convert (gfc_array_index_type, tmp));
+ if (gfc_option.flag_stack_arrays)
+ {
+ gcc_assert (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE);
+ space = build_decl (sym->declared_at.lb->location,
+ VAR_DECL, create_tmp_var_name ("A"),
+ TREE_TYPE (TREE_TYPE (decl)));
+ gfc_trans_vla_type_sizes (sym, &init);
+ }
+ else
+ {
+ /* The size is the number of elements in the array, so multiply by the
+ size of an element to get the total size. */
+ tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
+ size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ size, fold_convert (gfc_array_index_type, tmp));
+
+ /* Allocate memory to hold the data. */
+ tmp = gfc_call_malloc (&init, TREE_TYPE (decl), size);
+ gfc_add_modify (&init, decl, tmp);
- /* Allocate memory to hold the data. */
- tmp = gfc_call_malloc (&init, TREE_TYPE (decl), size);
- gfc_add_modify (&init, decl, tmp);
+ /* Free the temporary. */
+ tmp = gfc_call_free (convert (pvoid_type_node, decl));
+ space = NULL_TREE;
+ }
/* Set offset of the array. */
if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
/* Automatic arrays should not have initializers. */
gcc_assert (!sym->value);
- /* Free the temporary. */
- tmp = gfc_call_free (convert (pvoid_type_node, decl));
+ inittree = gfc_finish_block (&init);
+
+ if (space)
+ {
+ tree addr;
+ pushdecl (space);
- gfc_add_init_cleanup (block, gfc_finish_block (&init), tmp);
+ /* Don't create new scope, emit the DECL_EXPR in exactly the scope
+ where also space is located. */
+ gfc_init_block (&init);
+ tmp = fold_build1_loc (input_location, DECL_EXPR,
+ TREE_TYPE (space), space);
+ gfc_add_expr_to_block (&init, tmp);
+ addr = fold_build1_loc (sym->declared_at.lb->location,
+ ADDR_EXPR, TREE_TYPE (decl), space);
+ gfc_add_modify (&init, decl, addr);
+ gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
+ tmp = NULL_TREE;
+ }
+ gfc_add_init_cleanup (block, inittree, tmp);
}
locus loc;
tree offset;
tree tmp;
- tree stmt;
+ tree stmt;
stmtblock_t init;
- gfc_get_backend_locus (&loc);
+ gfc_save_backend_locus (&loc);
gfc_set_backend_locus (&sym->declared_at);
/* Descriptor type. */
}
stmt = gfc_finish_block (&init);
- gfc_set_backend_locus (&loc);
+ gfc_restore_backend_locus (&loc);
/* Add the initialization code to the start of the function. */
return;
}
- gfc_get_backend_locus (&loc);
+ gfc_save_backend_locus (&loc);
gfc_set_backend_locus (&sym->declared_at);
/* Descriptor type. */
fold_convert (long_integer_type_node, temp),
fold_convert (long_integer_type_node, stride2));
- gfc_free (msg);
+ free (msg);
}
}
else
}
}
+ gfc_trans_array_cobounds (type, &init, sym);
+
/* Set the offset. */
if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
/* We don't need to free any memory allocated by internal_pack as it will
be freed at the end of the function by pop_context. */
gfc_add_init_cleanup (block, stmtInit, stmtCleanup);
+
+ gfc_restore_backend_locus (&loc);
}
}
}
-
+/* Return true iff the scalarizer dimensions of INFO are the identity
+   mapping (info->dim[n] == n for every n), i.e. the loop dimensions
+   follow the array dimensions in order with no permutation.  */
+static bool
+dim_ok (gfc_ss_info *info)
+{
+ int n;
+ for (n = 0; n < info->dimen; n++)
+ if (info->dim[n] != n)
+ return false;
+ return true;
+}
/* Convert an array for passing as an actual argument. Expressions and
vector subscripts are evaluated and stored in a temporary, which is then
EXPR is the right-hand side of a pointer assignment and
se->expr is the descriptor for the previously-evaluated
left-hand side. The function creates an assignment from
- EXPR to se->expr. */
+ EXPR to se->expr.
+
+
+ The se->force_tmp flag disables the non-copying descriptor optimization
+ that is used for transpose. It may be used in cases where there is an
+ alias between the transpose argument and another argument in the same
+ function call. */
void
gfc_conv_expr_descriptor (gfc_se * se, gfc_expr * expr, gfc_ss * ss)
{
gfc_loopinfo loop;
- gfc_ss *secss;
gfc_ss_info *info;
int need_tmp;
int n;
tree offset;
int full;
bool subref_array_target = false;
+ gfc_expr *arg;
+ gcc_assert (ss != NULL);
gcc_assert (ss != gfc_ss_terminator);
/* Special case things we know we can pass easily. */
/* If we have a linear array section, we can pass it directly.
Otherwise we need to copy it into a temporary. */
- /* Find the SS for the array section. */
- secss = ss;
- while (secss != gfc_ss_terminator && secss->type != GFC_SS_SECTION)
- secss = secss->next;
-
- gcc_assert (secss != gfc_ss_terminator);
- info = &secss->data.info;
+ gcc_assert (ss->type == GFC_SS_SECTION);
+ gcc_assert (ss->expr == expr);
+ info = &ss->data.info;
/* Get the descriptor for the array. */
- gfc_conv_ss_descriptor (&se->pre, secss, 0);
+ gfc_conv_ss_descriptor (&se->pre, ss, 0);
desc = info->descriptor;
subref_array_target = se->direct_byref && is_subref_array (expr);
need_tmp = gfc_ref_needs_temporary_p (expr->ref)
&& !subref_array_target;
+ if (se->force_tmp)
+ need_tmp = 1;
+
if (need_tmp)
full = 0;
else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
else
full = gfc_full_array_ref_p (info->ref, NULL);
- if (full)
+ if (full && dim_ok (info))
{
if (se->direct_byref && !se->byref_noassign)
{
break;
case EXPR_FUNCTION:
+
+ /* We don't need to copy data in some cases. */
+ arg = gfc_get_noncopying_intrinsic_argument (expr);
+ if (arg)
+ {
+ /* This is a call to transpose... */
+ gcc_assert (expr->value.function.isym->id == GFC_ISYM_TRANSPOSE);
+ /* ... which has already been handled by the scalarizer, so
+ that we just need to get its argument's descriptor. */
+ gfc_conv_expr_descriptor (se, expr->value.function.actual->expr, ss);
+ return;
+ }
+
/* A transformational function return value will be a temporary
array descriptor. We still need to go through the scalarizer
 to create the descriptor. Elemental functions are handled as
arbitrary expressions, i.e. copy to a temporary. */
- secss = ss;
- /* Look for the SS for this function. */
- while (secss != gfc_ss_terminator
- && (secss->type != GFC_SS_FUNCTION || secss->expr != expr))
- secss = secss->next;
if (se->direct_byref)
{
- gcc_assert (secss != gfc_ss_terminator);
+ gcc_assert (ss->type == GFC_SS_FUNCTION && ss->expr == expr);
/* For pointer assignments pass the descriptor directly. */
- se->ss = secss;
+ if (se->ss == NULL)
+ se->ss = ss;
+ else
+ gcc_assert (se->ss == ss);
se->expr = gfc_build_addr_expr (NULL_TREE, se->expr);
gfc_conv_expr (se, expr);
return;
}
- if (secss == gfc_ss_terminator)
+ if (ss->expr != expr || ss->type != GFC_SS_FUNCTION)
{
- /* Elemental function. */
+ if (ss->expr != expr)
+ /* Elemental function. */
+ gcc_assert ((expr->value.function.esym != NULL
+ && expr->value.function.esym->attr.elemental)
+ || (expr->value.function.isym != NULL
+ && expr->value.function.isym->elemental));
+ else
+ gcc_assert (ss->type == GFC_SS_INTRINSIC);
+
need_tmp = 1;
if (expr->ts.type == BT_CHARACTER
&& expr->ts.u.cl->length->expr_type != EXPR_CONSTANT)
else
{
/* Transformational function. */
- info = &secss->data.info;
+ info = &ss->data.info;
need_tmp = 0;
}
break;
{
need_tmp = 0;
info = &ss->data.info;
- secss = ss;
}
else
{
need_tmp = 1;
- secss = NULL;
info = NULL;
}
break;
default:
/* Something complicated. Copy it into a temporary. */
need_tmp = 1;
- secss = NULL;
info = NULL;
break;
}
+ /* If we are creating a temporary, we don't need to bother about aliases
+ anymore. */
+ if (need_tmp)
+ se->force_tmp = 0;
+
gfc_init_loopinfo (&loop);
/* Associate the SS with the loop. */
if (need_tmp)
{
- /* Tell the scalarizer to make a temporary. */
- loop.temp_ss = gfc_get_ss ();
- loop.temp_ss->type = GFC_SS_TEMP;
- loop.temp_ss->next = gfc_ss_terminator;
-
- if (expr->ts.type == BT_CHARACTER
- && !expr->ts.u.cl->backend_decl)
+ if (expr->ts.type == BT_CHARACTER && !expr->ts.u.cl->backend_decl)
get_array_charlen (expr, se);
- loop.temp_ss->data.temp.type = gfc_typenode_for_spec (&expr->ts);
-
- if (expr->ts.type == BT_CHARACTER)
- loop.temp_ss->string_length = expr->ts.u.cl->backend_decl;
- else
- loop.temp_ss->string_length = NULL;
+ /* Tell the scalarizer to make a temporary. */
+ loop.temp_ss = gfc_get_temp_ss (gfc_typenode_for_spec (&expr->ts),
+ ((expr->ts.type == BT_CHARACTER)
+ ? expr->ts.u.cl->backend_decl
+ : NULL),
+ loop.dimen);
se->string_length = loop.temp_ss->string_length;
- loop.temp_ss->data.temp.dimen = loop.dimen;
+ gcc_assert (loop.temp_ss->data.temp.dimen == loop.dimen);
gfc_add_ss_to_loop (&loop, loop.temp_ss);
}
lse.string_length = rse.string_length;
tmp = gfc_trans_scalar_assign (&lse, &rse, expr->ts, true,
- expr->expr_type == EXPR_VARIABLE, true);
+ expr->expr_type == EXPR_VARIABLE
+ || expr->expr_type == EXPR_ARRAY, true);
gfc_add_expr_to_block (&block, tmp);
/* Finish the copying loops. */
desc = loop.temp_ss->data.info.descriptor;
}
- else if (expr->expr_type == EXPR_FUNCTION)
+ else if (expr->expr_type == EXPR_FUNCTION && dim_ok (info))
{
desc = info->descriptor;
se->string_length = ss->string_length;
limits will be the limits of the section.
A function may decide to repack the array to speed up access, but
we're not bothered about that here. */
- int dim, ndim;
+ int dim, ndim, codim;
tree parm;
tree parmtype;
tree stride;
tree to;
tree base;
+ ndim = info->ref ? info->ref->u.ar.dimen : info->dimen;
+
+ if (se->want_coarray)
+ {
+ gfc_array_ref *ar = &info->ref->u.ar;
+
+ codim = gfc_get_corank (expr);
+ for (n = 0; n < codim - 1; n++)
+ {
+ /* Make sure we are not lost somehow. */
+ gcc_assert (ar->dimen_type[n + ndim] == DIMEN_THIS_IMAGE);
+
+ /* Make sure the call to gfc_conv_section_startstride won't
+ generate unnecessary code to calculate stride. */
+ gcc_assert (ar->stride[n + ndim] == NULL);
+
+ gfc_conv_section_startstride (&loop, ss, n + ndim);
+ loop.from[n + loop.dimen] = info->start[n + ndim];
+ loop.to[n + loop.dimen] = info->end[n + ndim];
+ }
+
+ gcc_assert (n == codim - 1);
+ evaluate_bound (&loop.pre, info->start, ar->start,
+ info->descriptor, n + ndim, true);
+ loop.from[n + loop.dimen] = info->start[n + ndim];
+ }
+ else
+ codim = 0;
+
/* Set the string_length for a character array. */
if (expr->ts.type == BT_CHARACTER)
se->string_length = gfc_get_expr_charlen (expr);
desc = info->descriptor;
- gcc_assert (secss && secss != gfc_ss_terminator);
if (se->direct_byref && !se->byref_noassign)
{
/* For pointer assignments we fill in the destination. */
{
/* Otherwise make a new one. */
parmtype = gfc_get_element_type (TREE_TYPE (desc));
- parmtype = gfc_get_array_type_bounds (parmtype, loop.dimen, 0,
+ parmtype = gfc_get_array_type_bounds (parmtype, loop.dimen, codim,
loop.from, loop.to, 0,
GFC_ARRAY_UNKNOWN, false);
parm = gfc_create_var (parmtype, "parm");
}
offset = gfc_index_zero_node;
- dim = 0;
/* The following can be somewhat confusing. We have two
descriptors, a new one and the original array.
{parm, parmtype, dim} refer to the new one.
- {desc, type, n, secss, loop} refer to the original, which maybe
+ {desc, type, n, loop} refer to the original, which maybe
a descriptorless array.
The bounds of the scalarization are the bounds of the section.
We don't have to worry about numeric overflows when calculating
else
base = NULL_TREE;
- ndim = info->ref ? info->ref->u.ar.dimen : info->dimen;
for (n = 0; n < ndim; n++)
{
stride = gfc_conv_array_stride (desc, n);
}
else
{
- /* Check we haven't somehow got out of sync. */
- gcc_assert (info->dim[dim] == n);
-
/* Evaluate and remember the start of the section. */
start = info->start[n];
stride = gfc_evaluate_now (stride, &loop.pre);
/* Vector subscripts need copying and are handled elsewhere. */
if (info->ref)
gcc_assert (info->ref->u.ar.dimen_type[n] == DIMEN_RANGE);
+
+ /* Look for the corresponding scalarizer dimension: dim. */
+ for (dim = 0; dim < ndim; dim++)
+ if (info->dim[dim] == n)
+ break;
+
+ /* Loop exited early: the DIM being looked for has been found. */
+ gcc_assert (dim < ndim);
/* Set the new lower bound. */
from = loop.from[dim];
/* Store the new stride. */
gfc_conv_descriptor_stride_set (&loop.pre, parm,
gfc_rank_cst[dim], stride);
+ }
- dim++;
+ for (n = loop.dimen; n < loop.dimen + codim; n++)
+ {
+ from = loop.from[n];
+ to = loop.to[n];
+ gfc_conv_descriptor_lbound_set (&loop.pre, parm,
+ gfc_rank_cst[n], from);
+ if (n < loop.dimen + codim - 1)
+ gfc_conv_descriptor_ubound_set (&loop.pre, parm,
+ gfc_rank_cst[n], to);
}
if (se->data_not_needed)
if (sym->ts.type == BT_CHARACTER)
se->string_length = sym->ts.u.cl->backend_decl;
- if (sym->ts.type == BT_DERIVED)
+ if (sym->ts.type == BT_DERIVED || sym->ts.type == BT_CLASS)
{
gfc_conv_expr_descriptor (se, expr, ss);
se->expr = gfc_conv_array_data (se->expr);
/* Deallocate the allocatable components of structures that are
not variable. */
- if (expr->ts.type == BT_DERIVED
+ if ((expr->ts.type == BT_DERIVED || expr->ts.type == BT_CLASS)
&& expr->ts.u.derived->attr.alloc_comp
&& expr->expr_type != EXPR_VARIABLE)
{
- tmp = build_fold_indirect_ref_loc (input_location,
- se->expr);
+ tmp = build_fold_indirect_ref_loc (input_location, se->expr);
tmp = gfc_deallocate_alloc_comp (expr->ts.u.derived, tmp, expr->rank);
- gfc_add_expr_to_block (&se->post, tmp);
+
+ /* The components shall be deallocated before their containing entity. */
+ gfc_prepend_expr_to_block (&se->post, tmp);
}
if (g77 || (fsym && fsym->attr.contiguous
gfc_trans_runtime_check (false, true, tmp, &se->pre,
&expr->where, msg);
- gfc_free (msg);
+ free (msg);
}
gfc_start_block (&block);
null_data = gfc_finish_block (&block);
gfc_init_block (&block);
- size = TYPE_SIZE_UNIT (type);
+ size = TYPE_SIZE_UNIT (TREE_TYPE (type));
if (!no_malloc)
{
tmp = gfc_call_malloc (&block, type, size);
gfc_add_expr_to_block (&block, tmp);
}
- tmp = built_in_decls[BUILT_IN_MEMCPY];
+ tmp = builtin_decl_explicit (BUILT_IN_MEMCPY);
tmp = build_call_expr_loc (input_location, tmp, 3,
dest, src, size);
}
/* We know the temporary and the value will be the same length,
so can use memcpy. */
- tmp = built_in_decls[BUILT_IN_MEMCPY];
+ tmp = builtin_decl_explicit (BUILT_IN_MEMCPY);
tmp = build_call_expr_loc (input_location,
tmp, 3, gfc_conv_descriptor_data_get (dest),
gfc_conv_descriptor_data_get (src), size);
act on a chain of components. */
for (c = der_type->components; c; c = c->next)
{
- bool cmp_has_alloc_comps = (c->ts.type == BT_DERIVED)
+ bool cmp_has_alloc_comps = (c->ts.type == BT_DERIVED
+ || c->ts.type == BT_CLASS)
&& c->ts.u.derived->attr.alloc_comp;
cdecl = c->backend_decl;
ctype = TREE_TYPE (cdecl);
switch (purpose)
{
case DEALLOCATE_ALLOC_COMP:
- /* Do not deallocate the components of ultimate pointer
- components. */
if (cmp_has_alloc_comps && !c->attr.pointer)
{
+ /* Do not deallocate the components of ultimate pointer
+ components. */
comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
decl, cdecl, NULL_TREE);
rank = c->as ? c->as->rank : 0;
gfc_add_expr_to_block (&fnblock, tmp);
}
- if (c->attr.allocatable && c->attr.dimension)
+ if (c->attr.allocatable
+ && (c->attr.dimension || c->attr.codimension))
{
comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
decl, cdecl, NULL_TREE);
comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
decl, cdecl, NULL_TREE);
- tmp = gfc_deallocate_with_status (comp, NULL_TREE, true, NULL);
+ tmp = gfc_deallocate_scalar_with_status (comp, NULL, true, NULL,
+ c->ts);
gfc_add_expr_to_block (&fnblock, tmp);
tmp = fold_build2_loc (input_location, MODIFY_EXPR,
comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
decl, cdecl, NULL_TREE);
- /* Add reference to '$data' component. */
+ /* Add reference to '_data' component. */
tmp = CLASS_DATA (c)->backend_decl;
comp = fold_build3_loc (input_location, COMPONENT_REF,
TREE_TYPE (tmp), comp, tmp, NULL_TREE);
- tmp = gfc_deallocate_with_status (comp, NULL_TREE, true, NULL);
+ tmp = gfc_deallocate_scalar_with_status (comp, NULL, true, NULL,
+ CLASS_DATA (c)->ts);
gfc_add_expr_to_block (&fnblock, tmp);
tmp = fold_build2_loc (input_location, MODIFY_EXPR,
case NULLIFY_ALLOC_COMP:
if (c->attr.pointer)
continue;
- else if (c->attr.allocatable && c->attr.dimension)
+ else if (c->attr.allocatable
+ && (c->attr.dimension|| c->attr.codimension))
{
comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
decl, cdecl, NULL_TREE);
/* Allocatable scalar CLASS components. */
comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
decl, cdecl, NULL_TREE);
- /* Add reference to '$data' component. */
+ /* Add reference to '_data' component. */
tmp = CLASS_DATA (c)->backend_decl;
comp = fold_build3_loc (input_location, COMPONENT_REF,
TREE_TYPE (tmp), comp, tmp, NULL_TREE);
}
+/* Returns the value of LBOUND for an expression. This could be broken out
+ from gfc_conv_intrinsic_bound but this seemed to be simpler. This is
+ called by gfc_alloc_allocatable_for_assignment. DESC is either the
+ array descriptor of EXPR or, when recursing for a conversion function,
+ the backend type of the argument's declaration. DIM is the zero-based
+ dimension; ASSUMED_SIZE flags an assumed-size array spec. */
+static tree
+get_std_lbound (gfc_expr *expr, tree desc, int dim, bool assumed_size)
+{
+ tree lbound;
+ tree ubound;
+ tree stride;
+ tree cond, cond1, cond3, cond4;
+ tree tmp;
+ gfc_ref *ref;
+
+ if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)))
+ {
+ /* Descriptor case: build a runtime expression that yields the
+ descriptor's lower bound when the section is non-empty with a
+ non-negative stride, when the stride is negative, or for the
+ last dimension of an assumed-size array; otherwise one. */
+ tmp = gfc_rank_cst[dim];
+ lbound = gfc_conv_descriptor_lbound_get (desc, tmp);
+ ubound = gfc_conv_descriptor_ubound_get (desc, tmp);
+ stride = gfc_conv_descriptor_stride_get (desc, tmp);
+ /* cond1: extent is non-empty (ubound >= lbound). */
+ cond1 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ ubound, lbound);
+ /* cond3: non-negative stride AND non-empty extent. */
+ cond3 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ stride, gfc_index_zero_node);
+ cond3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node, cond3, cond1);
+ /* cond4: negative stride. */
+ cond4 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ stride, gfc_index_zero_node);
+ /* For assumed-size arrays, also use the stored lbound when DIM is
+ the last dimension (dim == rank - 1). */
+ if (assumed_size)
+ cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ tmp, build_int_cst (gfc_array_index_type,
+ expr->rank - 1));
+ else
+ cond = boolean_false_node;
+
+ cond1 = fold_build2_loc (input_location, TRUTH_OR_EXPR,
+ boolean_type_node, cond3, cond4);
+ cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
+ boolean_type_node, cond, cond1);
+
+ return fold_build3_loc (input_location, COND_EXPR,
+ gfc_array_index_type, cond,
+ lbound, gfc_index_one_node);
+ }
+ else if (expr->expr_type == EXPR_VARIABLE)
+ {
+ /* Non-descriptor variable: take the compile-time lower bound from
+ the variable's array type, or from the last full-array component
+ reference if there is one. */
+ tmp = TREE_TYPE (expr->symtree->n.sym->backend_decl);
+ for (ref = expr->ref; ref; ref = ref->next)
+ {
+ if (ref->type == REF_COMPONENT
+ && ref->u.c.component->as
+ && ref->next
+ && ref->next->u.ar.type == AR_FULL)
+ tmp = TREE_TYPE (ref->u.c.component->backend_decl);
+ }
+ return GFC_TYPE_ARRAY_LBOUND(tmp, dim);
+ }
+ else if (expr->expr_type == EXPR_FUNCTION)
+ {
+ /* A conversion function, so use the argument. */
+ expr = expr->value.function.actual->expr;
+ if (expr->expr_type != EXPR_VARIABLE)
+ return gfc_index_one_node;
+ desc = TREE_TYPE (expr->symtree->n.sym->backend_decl);
+ return get_std_lbound (expr, desc, dim, assumed_size);
+ }
+
+ /* Default: a lower bound of one. */
+ return gfc_index_one_node;
+}
+
+
+/* Returns true if an expression represents an lhs that can be reallocated
+ on assignment, i.e. a whole allocatable array variable or a whole
+ allocatable array component of a derived-type or class variable. */
+
+bool
+gfc_is_reallocatable_lhs (gfc_expr *expr)
+{
+ gfc_ref * ref;
+
+ if (!expr->ref)
+ return false;
+
+ /* An allocatable variable. */
+ if (expr->symtree->n.sym->attr.allocatable
+ && expr->ref
+ && expr->ref->type == REF_ARRAY
+ && expr->ref->u.ar.type == AR_FULL)
+ return true;
+
+ /* All that can be left are allocatable components. */
+ if ((expr->symtree->n.sym->ts.type != BT_DERIVED
+ && expr->symtree->n.sym->ts.type != BT_CLASS)
+ || !expr->symtree->n.sym->ts.u.derived->attr.alloc_comp)
+ return false;
+
+ /* Find a component ref followed by an array reference, which must be
+ the last reference in the chain. */
+ for (ref = expr->ref; ref; ref = ref->next)
+ if (ref->next
+ && ref->type == REF_COMPONENT
+ && ref->next->type == REF_ARRAY
+ && !ref->next->next)
+ break;
+
+ if (!ref)
+ return false;
+
+ /* Return true if valid reallocatable lhs: an allocatable component
+ referenced as a full array. */
+ if (ref->u.c.component->attr.allocatable
+ && ref->next->u.ar.type == AR_FULL)
+ return true;
+
+ return false;
+}
+
+
+/* Allocate the lhs of an assignment to an allocatable array, otherwise
+ reallocate it. LOOP is the scalarizer loop for the assignment; EXPR1
+ is the lhs and EXPR2 the rhs. EXPR2 may be NULL for "x = f (...)"
+ with allocatable x, in which case EXPR1 is the rhs on entry and the
+ real lhs is recovered from the loop's SS chain. Returns the generated
+ code, or NULL_TREE when no (re)allocation is applicable. */
+
+tree
+gfc_alloc_allocatable_for_assignment (gfc_loopinfo *loop,
+ gfc_expr *expr1,
+ gfc_expr *expr2)
+{
+ stmtblock_t realloc_block;
+ stmtblock_t alloc_block;
+ stmtblock_t fblock;
+ gfc_ss *rss;
+ gfc_ss *lss;
+ tree realloc_expr;
+ tree alloc_expr;
+ tree size1;
+ tree size2;
+ tree array1;
+ tree cond;
+ tree tmp;
+ tree tmp2;
+ tree lbound;
+ tree ubound;
+ tree desc;
+ tree desc2;
+ tree offset;
+ tree jump_label1;
+ tree jump_label2;
+ tree neq_size;
+ tree lbd;
+ int n;
+ int dim;
+ gfc_array_spec * as;
+
+ /* x = f(...) with x allocatable. In this case, expr1 is the rhs.
+ Find the lhs expression in the loop chain and set expr1 and
+ expr2 accordingly. */
+ if (expr1->expr_type == EXPR_FUNCTION && expr2 == NULL)
+ {
+ expr2 = expr1;
+ /* Find the ss for the lhs. */
+ lss = loop->ss;
+ for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
+ if (lss->expr && lss->expr->expr_type == EXPR_VARIABLE)
+ break;
+ if (lss == gfc_ss_terminator)
+ return NULL_TREE;
+ expr1 = lss->expr;
+ }
+
+ /* Bail out if this is not a valid allocate on assignment. */
+ if (!gfc_is_reallocatable_lhs (expr1)
+ || (expr2 && !expr2->rank))
+ return NULL_TREE;
+
+ /* Find the ss for the lhs. */
+ lss = loop->ss;
+ for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
+ if (lss->expr == expr1)
+ break;
+
+ if (lss == gfc_ss_terminator)
+ return NULL_TREE;
+
+ /* Find an ss for the rhs. For operator expressions, we see the
+ ss's for the operands. Any one of these will do. */
+ rss = loop->ss;
+ for (; rss && rss != gfc_ss_terminator; rss = rss->loop_chain)
+ if (rss->expr != expr1 && rss != loop->temp_ss)
+ break;
+
+ if (expr2 && rss == gfc_ss_terminator)
+ return NULL_TREE;
+
+ gfc_start_block (&fblock);
+
+ /* Since the lhs is allocatable, this must be a descriptor type.
+ Get the data and array size. */
+ desc = lss->data.info.descriptor;
+ gcc_assert (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)));
+ array1 = gfc_conv_descriptor_data_get (desc);
+
+ /* 7.4.1.3 "If variable is an allocated allocatable variable, it is
+ deallocated if expr is an array of different shape or any of the
+ corresponding length type parameter values of variable and expr
+ differ." This assures F95 compatibility. */
+ jump_label1 = gfc_build_label_decl (NULL_TREE);
+ jump_label2 = gfc_build_label_decl (NULL_TREE);
+
+ /* Allocate if data is NULL. */
+ cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ array1, build_int_cst (TREE_TYPE (array1), 0));
+ tmp = build3_v (COND_EXPR, cond,
+ build1_v (GOTO_EXPR, jump_label1),
+ build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ /* Get arrayspec if expr is a full array. */
+ if (expr2 && expr2->expr_type == EXPR_FUNCTION
+ && expr2->value.function.isym
+ && expr2->value.function.isym->conversion)
+ {
+ /* For conversion functions, take the arg. */
+ gfc_expr *arg = expr2->value.function.actual->expr;
+ as = gfc_get_full_arrayspec_from_expr (arg);
+ }
+ else if (expr2)
+ as = gfc_get_full_arrayspec_from_expr (expr2);
+ else
+ as = NULL;
+
+ /* If the lhs shape is not the same as the rhs jump to setting the
+ bounds and doing the reallocation....... */
+ for (n = 0; n < expr1->rank; n++)
+ {
+ /* Check the shape. */
+ lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
+ ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[n]);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[n], loop->from[n]);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ tmp, lbound);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ tmp, ubound);
+ cond = fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node,
+ tmp, gfc_index_zero_node);
+ tmp = build3_v (COND_EXPR, cond,
+ build1_v (GOTO_EXPR, jump_label1),
+ build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&fblock, tmp);
+ }
+
+ /* ....else jump past the (re)alloc code. */
+ tmp = build1_v (GOTO_EXPR, jump_label2);
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ /* Add the label to start automatic (re)allocation. */
+ tmp = build1_v (LABEL_EXPR, jump_label1);
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ /* Current element count of the lhs array. */
+ size1 = gfc_conv_descriptor_size (desc, expr1->rank);
+
+ /* Get the rhs size. Fix both sizes. */
+ if (expr2)
+ desc2 = rss->data.info.descriptor;
+ else
+ desc2 = NULL_TREE;
+ size2 = gfc_index_one_node;
+ /* NOTE(review): expr2 is dereferenced unconditionally here and below,
+ although the guards above treat it as possibly NULL -- confirm that
+ all callers supply a rhs by this point. */
+ for (n = 0; n < expr2->rank; n++)
+ {
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[n], loop->from[n]);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ tmp, gfc_index_one_node);
+ size2 = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ tmp, size2);
+ }
+
+ size1 = gfc_evaluate_now (size1, &fblock);
+ size2 = gfc_evaluate_now (size2, &fblock);
+
+ /* Remember whether the element counts differ; used below to decide
+ whether realloc is actually needed. */
+ cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ size1, size2);
+ neq_size = gfc_evaluate_now (cond, &fblock);
+
+
+ /* Now modify the lhs descriptor and the associated scalarizer
+ variables. F2003 7.4.1.3: "If variable is or becomes an
+ unallocated allocatable variable, then it is allocated with each
+ deferred type parameter equal to the corresponding type parameters
+ of expr , with the shape of expr , and with each lower bound equal
+ to the corresponding element of LBOUND(expr)."
+ Reuse size1 to keep a dimension-by-dimension track of the
+ stride of the new array. */
+ size1 = gfc_index_one_node;
+ offset = gfc_index_zero_node;
+
+ for (n = 0; n < expr2->rank; n++)
+ {
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[n], loop->from[n]);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ tmp, gfc_index_one_node);
+
+ lbound = gfc_index_one_node;
+ ubound = tmp;
+
+ if (as)
+ {
+ /* Shift the bounds so the lower bound matches LBOUND(expr). */
+ lbd = get_std_lbound (expr2, desc2, n,
+ as->type == AS_ASSUMED_SIZE);
+ ubound = fold_build2_loc (input_location,
+ MINUS_EXPR,
+ gfc_array_index_type,
+ ubound, lbound);
+ ubound = fold_build2_loc (input_location,
+ PLUS_EXPR,
+ gfc_array_index_type,
+ ubound, lbd);
+ lbound = lbd;
+ }
+
+ gfc_conv_descriptor_lbound_set (&fblock, desc,
+ gfc_rank_cst[n],
+ lbound);
+ gfc_conv_descriptor_ubound_set (&fblock, desc,
+ gfc_rank_cst[n],
+ ubound);
+ gfc_conv_descriptor_stride_set (&fblock, desc,
+ gfc_rank_cst[n],
+ size1);
+ lbound = gfc_conv_descriptor_lbound_get (desc,
+ gfc_rank_cst[n]);
+ /* Accumulate the descriptor offset: offset -= lbound * stride. */
+ tmp2 = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ lbound, size1);
+ offset = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ offset, tmp2);
+ size1 = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ tmp, size1);
+ }
+
+ /* Set the lhs descriptor and scalarizer offsets. For rank > 1,
+ the array offset is saved and the info.offset is used for a
+ running offset. Use the saved_offset instead. */
+ tmp = gfc_conv_descriptor_offset (desc);
+ gfc_add_modify (&fblock, tmp, offset);
+ if (lss->data.info.saved_offset
+ && TREE_CODE (lss->data.info.saved_offset) == VAR_DECL)
+ gfc_add_modify (&fblock, lss->data.info.saved_offset, tmp);
+
+ /* Now set the deltas for the lhs. */
+ for (n = 0; n < expr1->rank; n++)
+ {
+ tmp = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
+ dim = lss->data.info.dim[n];
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, tmp,
+ loop->from[dim]);
+ if (lss->data.info.delta[dim]
+ && TREE_CODE (lss->data.info.delta[dim]) == VAR_DECL)
+ gfc_add_modify (&fblock, lss->data.info.delta[dim], tmp);
+ }
+
+ /* Get the new lhs size in bytes. */
+ if (expr1->ts.type == BT_CHARACTER && expr1->ts.deferred)
+ {
+ /* Deferred character length: copy the rhs length to the lhs. */
+ tmp = expr2->ts.u.cl->backend_decl;
+ gcc_assert (expr1->ts.u.cl->backend_decl);
+ tmp = fold_convert (TREE_TYPE (expr1->ts.u.cl->backend_decl), tmp);
+ gfc_add_modify (&fblock, expr1->ts.u.cl->backend_decl, tmp);
+ }
+ else if (expr1->ts.type == BT_CHARACTER && expr1->ts.u.cl->backend_decl)
+ {
+ tmp = TYPE_SIZE_UNIT (TREE_TYPE (gfc_typenode_for_spec (&expr1->ts)));
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, tmp,
+ expr1->ts.u.cl->backend_decl);
+ }
+ else
+ tmp = TYPE_SIZE_UNIT (gfc_typenode_for_spec (&expr1->ts));
+ tmp = fold_convert (gfc_array_index_type, tmp);
+ size2 = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ tmp, size2);
+ size2 = fold_convert (size_type_node, size2);
+ size2 = gfc_evaluate_now (size2, &fblock);
+
+ /* Realloc expression. Note that the scalarizer uses desc.data
+ in the array reference - (*desc.data)[<element>]. */
+ gfc_init_block (&realloc_block);
+ tmp = build_call_expr_loc (input_location,
+ builtin_decl_explicit (BUILT_IN_REALLOC), 2,
+ fold_convert (pvoid_type_node, array1),
+ size2);
+ gfc_conv_descriptor_data_set (&realloc_block,
+ desc, tmp);
+ realloc_expr = gfc_finish_block (&realloc_block);
+
+ /* Only reallocate if sizes are different. */
+ tmp = build3_v (COND_EXPR, neq_size, realloc_expr,
+ build_empty_stmt (input_location));
+ realloc_expr = tmp;
+
+
+ /* Malloc expression. */
+ gfc_init_block (&alloc_block);
+ tmp = build_call_expr_loc (input_location,
+ builtin_decl_explicit (BUILT_IN_MALLOC),
+ 1, size2);
+ gfc_conv_descriptor_data_set (&alloc_block,
+ desc, tmp);
+ tmp = gfc_conv_descriptor_dtype (desc);
+ gfc_add_modify (&alloc_block, tmp, gfc_get_dtype (TREE_TYPE (desc)));
+ alloc_expr = gfc_finish_block (&alloc_block);
+
+ /* Malloc if not allocated; realloc otherwise. */
+ tmp = build_int_cst (TREE_TYPE (array1), 0);
+ cond = fold_build2_loc (input_location, EQ_EXPR,
+ boolean_type_node,
+ array1, tmp);
+ tmp = build3_v (COND_EXPR, cond, alloc_expr, realloc_expr);
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ /* Make sure that the scalarizer data pointer is updated. */
+ if (lss->data.info.data
+ && TREE_CODE (lss->data.info.data) == VAR_DECL)
+ {
+ tmp = gfc_conv_descriptor_data_get (desc);
+ gfc_add_modify (&fblock, lss->data.info.data, tmp);
+ }
+
+ /* Add the exit label. */
+ tmp = build1_v (LABEL_EXPR, jump_label2);
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ return gfc_finish_block (&fblock);
+}
+
+
/* NULLIFY an allocatable/pointer array on function entry, free it on exit.
Do likewise, recursively if necessary, with the allocatable components of
derived types. */
int rank;
bool sym_has_alloc_comp;
- sym_has_alloc_comp = (sym->ts.type == BT_DERIVED)
+ sym_has_alloc_comp = (sym->ts.type == BT_DERIVED
+ || sym->ts.type == BT_CLASS)
&& sym->ts.u.derived->attr.alloc_comp;
/* Make sure the frontend gets these right. */
if (!(sym->attr.pointer || sym->attr.allocatable || sym_has_alloc_comp))
- fatal_error ("Possible frontend bug: Deferred array size without pointer, "
+ fatal_error ("Possible front-end bug: Deferred array size without pointer, "
"allocatable attribute or derived type without allocatable "
"components.");
+ gfc_save_backend_locus (&loc);
+ gfc_set_backend_locus (&sym->declared_at);
gfc_init_block (&init);
gcc_assert (TREE_CODE (sym->backend_decl) == VAR_DECL
if (sym->attr.dummy || sym->attr.use_assoc || sym->attr.result)
{
gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
+ gfc_restore_backend_locus (&loc);
return;
}
- gfc_get_backend_locus (&loc);
- gfc_set_backend_locus (&sym->declared_at);
descriptor = sym->backend_decl;
/* Although static, derived types with default initializers and
gfc_trans_static_array_pointer (sym);
gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
+ gfc_restore_backend_locus (&loc);
return;
}
if (GFC_DESCRIPTOR_TYPE_P (type) && !sym->attr.save)
gfc_conv_descriptor_data_set (&init, descriptor, null_pointer_node);
+ gfc_restore_backend_locus (&loc);
gfc_init_block (&cleanup);
- gfc_set_backend_locus (&loc);
/* Allocatable arrays need to be freed when they go out of scope.
The allocatable components of pointers must not be touched. */
gfc_add_expr_to_block (&cleanup, tmp);
}
- if (sym->attr.allocatable && sym->attr.dimension
+ if (sym->attr.allocatable && (sym->attr.dimension || sym->attr.codimension)
&& !sym->attr.save && !sym->attr.result)
{
tmp = gfc_trans_dealloc_allocated (sym->backend_decl);
gfc_walk_variable_expr (gfc_ss * ss, gfc_expr * expr)
{
gfc_ref *ref;
- gfc_array_ref *ar;
- gfc_ss *newss;
- int n;
for (ref = expr->ref; ref; ref = ref->next)
if (ref->type == REF_ARRAY && ref->u.ar.type != AR_ELEMENT)
break;
+ return gfc_walk_array_ref (ss, expr, ref);
+}
+
+
+gfc_ss *
+gfc_walk_array_ref (gfc_ss * ss, gfc_expr * expr, gfc_ref * ref)
+{
+ gfc_array_ref *ar;
+ gfc_ss *newss;
+ int n;
+
for (; ref; ref = ref->next)
{
if (ref->type == REF_SUBSTRING)
{
- newss = gfc_get_ss ();
- newss->type = GFC_SS_SCALAR;
- newss->expr = ref->u.ss.start;
- newss->next = ss;
- ss = newss;
-
- newss = gfc_get_ss ();
- newss->type = GFC_SS_SCALAR;
- newss->expr = ref->u.ss.end;
- newss->next = ss;
- ss = newss;
+ ss = gfc_get_scalar_ss (ss, ref->u.ss.start);
+ ss = gfc_get_scalar_ss (ss, ref->u.ss.end);
}
/* We're only interested in array sections from now on. */
ar = &ref->u.ar;
- if (ar->as->rank == 0)
- {
- /* Scalar coarray. */
- continue;
- }
-
switch (ar->type)
{
case AR_ELEMENT:
- for (n = 0; n < ar->dimen; n++)
- {
- newss = gfc_get_ss ();
- newss->type = GFC_SS_SCALAR;
- newss->expr = ar->start[n];
- newss->next = ss;
- ss = newss;
- }
+ for (n = ar->dimen + ar->codimen - 1; n >= 0; n--)
+ ss = gfc_get_scalar_ss (ss, ar->start[n]);
break;
case AR_FULL:
- newss = gfc_get_ss ();
- newss->type = GFC_SS_SECTION;
- newss->expr = expr;
- newss->next = ss;
- newss->data.info.dimen = ar->as->rank;
+ newss = gfc_get_array_ss (ss, expr, ar->as->rank, GFC_SS_SECTION);
newss->data.info.ref = ref;
/* Make sure array is the same as array(:,:), this way
ar->dimen = ar->as->rank;
for (n = 0; n < ar->dimen; n++)
{
- newss->data.info.dim[n] = n;
ar->dimen_type[n] = DIMEN_RANGE;
gcc_assert (ar->start[n] == NULL);
break;
case AR_SECTION:
- newss = gfc_get_ss ();
- newss->type = GFC_SS_SECTION;
- newss->expr = expr;
- newss->next = ss;
- newss->data.info.dimen = 0;
+ newss = gfc_get_array_ss (ss, expr, 0, GFC_SS_SECTION);
newss->data.info.ref = ref;
- /* We add SS chains for all the subscripts in the section. */
+ /* We add SS chains for all the subscripts in the section. */
for (n = 0; n < ar->dimen; n++)
{
gfc_ss *indexss;
case DIMEN_ELEMENT:
/* Add SS for elemental (scalar) subscripts. */
gcc_assert (ar->start[n]);
- indexss = gfc_get_ss ();
- indexss->type = GFC_SS_SCALAR;
- indexss->expr = ar->start[n];
- indexss->next = gfc_ss_terminator;
+ indexss = gfc_get_scalar_ss (gfc_ss_terminator, ar->start[n]);
indexss->loop_chain = gfc_ss_terminator;
newss->data.info.subscript[n] = indexss;
break;
case DIMEN_VECTOR:
/* Create a GFC_SS_VECTOR index in which we can store
the vector's descriptor. */
- indexss = gfc_get_ss ();
- indexss->type = GFC_SS_VECTOR;
- indexss->expr = ar->start[n];
- indexss->next = gfc_ss_terminator;
+ indexss = gfc_get_array_ss (gfc_ss_terminator, ar->start[n],
+ 1, GFC_SS_VECTOR);
indexss->loop_chain = gfc_ss_terminator;
newss->data.info.subscript[n] = indexss;
newss->data.info.dim[newss->data.info.dimen] = n;
gcc_unreachable ();
}
}
- /* We should have at least one non-elemental dimension. */
- gcc_assert (newss->data.info.dimen > 0);
+ /* We should have at least one non-elemental dimension,
+ unless we are creating a descriptor for a (scalar) coarray. */
+ gcc_assert (newss->data.info.dimen > 0
+ || newss->data.info.ref->u.ar.as->corank > 0);
ss = newss;
break;
{
gfc_ss *head;
gfc_ss *head2;
- gfc_ss *newss;
head = gfc_walk_subexpr (ss, expr->value.op.op1);
if (expr->value.op.op2 == NULL)
/* One of the operands needs scalarization, the other is scalar.
Create a gfc_ss for the scalar expression. */
- newss = gfc_get_ss ();
- newss->type = GFC_SS_SCALAR;
if (head == ss)
{
/* First operand is scalar. We build the chain in reverse order, so
head = head->next;
/* Check we haven't somehow broken the chain. */
gcc_assert (head);
- newss->next = ss;
- head->next = newss;
- newss->expr = expr->value.op.op1;
+ head->next = gfc_get_scalar_ss (ss, expr->value.op.op1);
}
else /* head2 == head */
{
gcc_assert (head2 == head);
/* Second operand is scalar. */
- newss->next = head2;
- head2 = newss;
- newss->expr = expr->value.op.op2;
+ head2 = gfc_get_scalar_ss (head2, expr->value.op.op2);
}
return head2;
if (newss == head)
{
/* Scalar argument. */
- newss = gfc_get_ss ();
+ gcc_assert (type == GFC_SS_SCALAR || type == GFC_SS_REFERENCE);
+ newss = gfc_get_scalar_ss (head, arg->expr);
newss->type = type;
- newss->expr = arg->expr;
- newss->next = head;
}
else
scalar = 0;
static gfc_ss *
gfc_walk_function_expr (gfc_ss * ss, gfc_expr * expr)
{
- gfc_ss *newss;
gfc_intrinsic_sym *isym;
gfc_symbol *sym;
gfc_component *comp = NULL;
- int n;
isym = expr->value.function.isym;
gfc_is_proc_ptr_comp (expr, &comp);
if ((!comp && gfc_return_by_reference (sym) && sym->result->attr.dimension)
|| (comp && comp->attr.dimension))
- {
- newss = gfc_get_ss ();
- newss->type = GFC_SS_FUNCTION;
- newss->expr = expr;
- newss->next = ss;
- newss->data.info.dimen = expr->rank;
- for (n = 0; n < newss->data.info.dimen; n++)
- newss->data.info.dim[n] = n;
- return newss;
- }
+ return gfc_get_array_ss (ss, expr, expr->rank, GFC_SS_FUNCTION);
/* Walk the parameters of an elemental function. For now we always pass
by reference. */
static gfc_ss *
gfc_walk_array_constructor (gfc_ss * ss, gfc_expr * expr)
{
- gfc_ss *newss;
- int n;
-
- newss = gfc_get_ss ();
- newss->type = GFC_SS_CONSTRUCTOR;
- newss->expr = expr;
- newss->next = ss;
- newss->data.info.dimen = expr->rank;
- for (n = 0; n < expr->rank; n++)
- newss->data.info.dim[n] = n;
-
- return newss;
+ /* The constructor scalarizes over all EXPR->rank dimensions. */
+ return gfc_get_array_ss (ss, expr, expr->rank, GFC_SS_CONSTRUCTOR);
}
/* Walk an expression. Add walked expressions to the head of the SS chain.
A wholly scalar expression will not be added. */
-static gfc_ss *
+gfc_ss *
gfc_walk_subexpr (gfc_ss * ss, gfc_expr * expr)
{
gfc_ss *head;