/* Array translation routines
- Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
Contributed by Paul Brook <paul@nowt.org>
and Steven Bosscher <s.bosscher@student.tudelft.nl>
return t;
}
-/* This provides WRITE access to the data field. */
+/* This provides WRITE access to the data field.
+
+ TUPLES_P is true if we are generating tuples.
+
+ This function gets called through the following macros:
+ gfc_conv_descriptor_data_set
+ gfc_conv_descriptor_data_set_tuples. */
void
-gfc_conv_descriptor_data_set (stmtblock_t *block, tree desc, tree value)
+gfc_conv_descriptor_data_set_internal (stmtblock_t *block,
+ tree desc, tree value,
+ bool tuples_p)
{
tree field, type, t;
gcc_assert (DATA_FIELD == 0);
t = build3 (COMPONENT_REF, TREE_TYPE (field), desc, field, NULL_TREE);
- gfc_add_modify_expr (block, t, fold_convert (TREE_TYPE (field), value));
+ gfc_add_modify (block, t, fold_convert (TREE_TYPE (field), value), tuples_p);
}
gcc_assert (DATA_FIELD == 0);
t = build3 (COMPONENT_REF, TREE_TYPE (field), desc, field, NULL_TREE);
- return gfc_build_addr_expr (NULL, t);
+ return build_fold_addr_expr (t);
}
tree
/* Generate code to allocate an array temporary, or create a variable to
- hold the data. If size is NULL zero the descriptor so that so that the
- callee will allocate the array. Also generates code to free the array
- afterwards.
+ hold the data. If size is NULL, zero the descriptor so that the
+ callee will allocate the array. If DEALLOC is true, also generate code to
+ free the array afterwards.
Initialization code is added to PRE and finalization code to POST.
DYNAMIC is true if the caller may want to extend the array later
static void
gfc_trans_allocate_array_storage (stmtblock_t * pre, stmtblock_t * post,
- gfc_ss_info * info, tree size, tree nelem,
- bool dynamic)
+ gfc_ss_info * info, tree size, tree nelem,
+ bool dynamic, bool dealloc)
{
tree tmp;
tree args;
{
/* Make a temporary variable to hold the data. */
tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (nelem), nelem,
- integer_one_node);
+ gfc_index_one_node);
tmp = build_range_type (gfc_array_index_type, gfc_index_zero_node,
tmp);
tmp = build_array_type (gfc_get_element_type (TREE_TYPE (desc)),
tmp);
tmp = gfc_create_var (tmp, "A");
- tmp = gfc_build_addr_expr (NULL, tmp);
+ tmp = build_fold_addr_expr (tmp);
gfc_conv_descriptor_data_set (pre, desc, tmp);
}
else
tmp = gfor_fndecl_internal_malloc64;
else
gcc_unreachable ();
- tmp = gfc_build_function_call (tmp, args);
+ tmp = build_function_call_expr (tmp, args);
tmp = gfc_evaluate_now (tmp, pre);
gfc_conv_descriptor_data_set (pre, desc, tmp);
}
tmp = gfc_conv_descriptor_offset (desc);
gfc_add_modify_expr (pre, tmp, gfc_index_zero_node);
- if (!onstack)
+ if (dealloc && !onstack)
{
/* Free the temporary. */
tmp = gfc_conv_descriptor_data_get (desc);
tmp = fold_convert (pvoid_type_node, tmp);
tmp = gfc_chainon_list (NULL_TREE, tmp);
- tmp = gfc_build_function_call (gfor_fndecl_internal_free, tmp);
+ tmp = build_function_call_expr (gfor_fndecl_internal_free, tmp);
gfc_add_expr_to_block (post, tmp);
}
}
-/* Generate code to allocate and initialize the descriptor for a temporary
+/* Generate code to create and initialize the descriptor for a temporary
array. This is used for both temporaries needed by the scalarizer, and
- functions returning arrays. Adjusts the loop variables to be zero-based,
- and calculates the loop bounds for callee allocated arrays.
- Also fills in the descriptor, data and offset fields of info if known.
- Returns the size of the array, or NULL for a callee allocated array.
+ functions returning arrays. Adjusts the loop variables to be
+ zero-based, and calculates the loop bounds for callee allocated arrays.
+ Allocate the array unless it's callee allocated (we have a callee
+ allocated array if 'callee_alloc' is true, or if loop->to[n] is
+ NULL_TREE for any n). Also fills in the descriptor, data and offset
+ fields of info if known. Returns the size of the array, or NULL for a
+ callee allocated array.
- PRE, POST and DYNAMIC are as for gfc_trans_allocate_array_storage. */
+ PRE, POST, DYNAMIC and DEALLOC are as for gfc_trans_allocate_array_storage.
+ */
tree
-gfc_trans_allocate_temp_array (stmtblock_t * pre, stmtblock_t * post,
- gfc_loopinfo * loop, gfc_ss_info * info,
- tree eltype, bool dynamic)
+gfc_trans_create_temp_array (stmtblock_t * pre, stmtblock_t * post,
+ gfc_loopinfo * loop, gfc_ss_info * info,
+ tree eltype, bool dynamic, bool dealloc,
+ bool callee_alloc, bool function)
{
tree type;
tree desc;
tree tmp;
tree size;
tree nelem;
+ tree cond;
+ tree or_expr;
+ tree thencase;
+ tree elsecase;
+ tree var;
+ stmtblock_t thenblock;
+ stmtblock_t elseblock;
int n;
int dim;
info->delta[dim] = gfc_index_zero_node;
info->start[dim] = gfc_index_zero_node;
+ info->end[dim] = gfc_index_zero_node;
info->stride[dim] = gfc_index_one_node;
info->dim[dim] = dim;
}
size = size * sizeof(element);
*/
+ or_expr = NULL_TREE;
+
for (n = 0; n < info->dimen; n++)
{
if (loop->to[n] == NULL_TREE)
tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
loop->to[n], gfc_index_one_node);
+ if (function)
+ {
+ /* Check whether the size for this dimension is negative. */
+ cond = fold_build2 (LE_EXPR, boolean_type_node, tmp,
+ gfc_index_zero_node);
+
+ cond = gfc_evaluate_now (cond, pre);
+
+ if (n == 0)
+ or_expr = cond;
+ else
+ or_expr = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, or_expr, cond);
+ }
size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
size = gfc_evaluate_now (size, pre);
}
/* Get the size of the array. */
- nelem = size;
- if (size)
- size = fold_build2 (MULT_EXPR, gfc_array_index_type, size,
- TYPE_SIZE_UNIT (gfc_get_element_type (type)));
- gfc_trans_allocate_array_storage (pre, post, info, size, nelem, dynamic);
+ if (size && !callee_alloc)
+ {
+ if (function)
+ {
+ /* If we know at compile-time whether any dimension size is
+ negative, we can avoid a conditional and pass the true size
+ to gfc_trans_allocate_array_storage, which can then decide
+ whether to allocate this on the heap or on the stack. */
+ if (integer_zerop (or_expr))
+ ;
+ else if (integer_onep (or_expr))
+ size = gfc_index_zero_node;
+ else
+ {
+ var = gfc_create_var (TREE_TYPE (size), "size");
+ gfc_start_block (&thenblock);
+ gfc_add_modify_expr (&thenblock, var, gfc_index_zero_node);
+ thencase = gfc_finish_block (&thenblock);
+
+ gfc_start_block (&elseblock);
+ gfc_add_modify_expr (&elseblock, var, size);
+ elsecase = gfc_finish_block (&elseblock);
+
+ tmp = gfc_evaluate_now (or_expr, pre);
+ tmp = build3_v (COND_EXPR, tmp, thencase, elsecase);
+ gfc_add_expr_to_block (pre, tmp);
+ size = var;
+ }
+ }
+
+ nelem = size;
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, size,
+ TYPE_SIZE_UNIT (gfc_get_element_type (type)));
+ }
+ else
+ {
+ nelem = size;
+ size = NULL_TREE;
+ }
+
+ gfc_trans_allocate_array_storage (pre, post, info, size, nelem, dynamic,
+ dealloc);
if (info->dimen > loop->temp_dim)
loop->temp_dim = info->dimen;
}
+/* Generate code to transpose array EXPR by creating a new descriptor
+ in which the dimension specifications have been reversed. Only
+ rank-2 arguments are handled (asserted below). The result aliases
+ the source data: only the descriptor is new, no elements are
+ copied. */
+
+void
+gfc_conv_array_transpose (gfc_se * se, gfc_expr * expr)
+{
+ tree dest, src, dest_index, src_index;
+ gfc_loopinfo *loop;
+ gfc_ss_info *dest_info, *src_info;
+ gfc_ss *dest_ss, *src_ss;
+ gfc_se src_se;
+ int n;
+
+ loop = se->loop;
+
+ src_ss = gfc_walk_expr (expr);
+ dest_ss = se->ss;
+
+ src_info = &src_ss->data.info;
+ dest_info = &dest_ss->data.info;
+ gcc_assert (dest_info->dimen == 2);
+ gcc_assert (src_info->dimen == 2);
+
+ /* Get a descriptor for EXPR. */
+ gfc_init_se (&src_se, NULL);
+ gfc_conv_expr_descriptor (&src_se, expr, src_ss);
+ gfc_add_block_to_block (&se->pre, &src_se.pre);
+ gfc_add_block_to_block (&se->post, &src_se.post);
+ src = src_se.expr;
+
+ /* Allocate a new descriptor for the return value. */
+ dest = gfc_create_var (TREE_TYPE (src), "atmp");
+ dest_info->descriptor = dest;
+ se->expr = dest;
+
+ /* Copy across the dtype field. */
+ gfc_add_modify_expr (&se->pre,
+ gfc_conv_descriptor_dtype (dest),
+ gfc_conv_descriptor_dtype (src));
+
+ /* Copy the dimension information, renumbering dimension 1 to 0 and
+ 0 to 1. Swapping the per-dimension strides/bounds is what makes
+ the new descriptor describe the transpose of the old one. */
+ for (n = 0; n < 2; n++)
+ {
+ dest_info->delta[n] = gfc_index_zero_node;
+ dest_info->start[n] = gfc_index_zero_node;
+ dest_info->end[n] = gfc_index_zero_node;
+ dest_info->stride[n] = gfc_index_one_node;
+ dest_info->dim[n] = n;
+
+ dest_index = gfc_rank_cst[n];
+ src_index = gfc_rank_cst[1 - n];
+
+ gfc_add_modify_expr (&se->pre,
+ gfc_conv_descriptor_stride (dest, dest_index),
+ gfc_conv_descriptor_stride (src, src_index));
+
+ gfc_add_modify_expr (&se->pre,
+ gfc_conv_descriptor_lbound (dest, dest_index),
+ gfc_conv_descriptor_lbound (src, src_index));
+
+ gfc_add_modify_expr (&se->pre,
+ gfc_conv_descriptor_ubound (dest, dest_index),
+ gfc_conv_descriptor_ubound (src, src_index));
+
+ if (!loop->to[n])
+ {
+ /* The scalarizer loop is zero-based, so the loop upper bound
+ is simply the extent minus one. */
+ gcc_assert (integer_zerop (loop->from[n]));
+ loop->to[n] = build2 (MINUS_EXPR, gfc_array_index_type,
+ gfc_conv_descriptor_ubound (dest, dest_index),
+ gfc_conv_descriptor_lbound (dest, dest_index));
+ }
+ }
+
+ /* Copy the data pointer. */
+ dest_info->data = gfc_conv_descriptor_data_get (src);
+ gfc_conv_descriptor_data_set (&se->pre, dest, dest_info->data);
+
+ /* Copy the offset. This is not changed by transposition: the top-left
+ element is still at the same offset as before. */
+ dest_info->offset = gfc_conv_descriptor_offset (src);
+ gfc_add_modify_expr (&se->pre,
+ gfc_conv_descriptor_offset (dest),
+ dest_info->offset);
+
+ if (dest_info->dimen > loop->temp_dim)
+ loop->temp_dim = dest_info->dimen;
+}
+
+
/* Return the number of iterations in a loop that starts at START,
ends at END, and has step STEP. */
gcc_unreachable ();
/* Set the new data pointer. */
- tmp = gfc_build_function_call (tmp, args);
+ tmp = build_function_call_expr (tmp, args);
gfc_conv_descriptor_data_set (pblock, desc, tmp);
}
gfc_conv_expr (se, expr);
/* Store the value. */
- tmp = gfc_build_indirect_ref (gfc_conv_descriptor_data_get (desc));
+ tmp = build_fold_indirect_ref (gfc_conv_descriptor_data_get (desc));
tmp = gfc_build_array_ref (tmp, offset);
if (expr->ts.type == BT_CHARACTER)
{
args = gfc_chainon_list (args, se->expr);
args = gfc_chainon_list (args, se->string_length);
tmp = built_in_decls[BUILT_IN_MEMCPY];
- tmp = gfc_build_function_call (tmp, args);
+ tmp = build_function_call_expr (tmp, args);
gfc_add_expr_to_block (&se->pre, tmp);
}
}
gfc_copy_loopinfo_to_se (&se, &loop);
se.ss = ss;
- if (expr->ts.type == BT_CHARACTER)
- gfc_todo_error ("character arrays in constructors");
-
gfc_trans_array_ctor_element (&body, desc, *poffset, &se, expr);
gcc_assert (se.ss == gfc_ss_terminator);
TREE_STATIC (tmp) = 1;
TREE_CONSTANT (tmp) = 1;
TREE_INVARIANT (tmp) = 1;
+ TREE_READONLY (tmp) = 1;
DECL_INITIAL (tmp) = init;
init = tmp;
/* Use BUILTIN_MEMCPY to assign the values. */
tmp = gfc_conv_descriptor_data_get (desc);
- tmp = gfc_build_indirect_ref (tmp);
+ tmp = build_fold_indirect_ref (tmp);
tmp = gfc_build_array_ref (tmp, *poffset);
- tmp = gfc_build_addr_expr (NULL, tmp);
- init = gfc_build_addr_expr (NULL, init);
+ tmp = build_fold_addr_expr (tmp);
+ init = build_fold_addr_expr (init);
size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (type));
bound = build_int_cst (NULL_TREE, n * size);
tmp = gfc_chainon_list (NULL_TREE, tmp);
tmp = gfc_chainon_list (tmp, init);
tmp = gfc_chainon_list (tmp, bound);
- tmp = gfc_build_function_call (built_in_decls[BUILT_IN_MEMCPY],
+ tmp = build_function_call_expr (built_in_decls[BUILT_IN_MEMCPY],
tmp);
gfc_add_expr_to_block (&body, tmp);
tree exit_label;
tree loopbody;
tree tmp2;
+ tree tmp_loopvar;
loopbody = gfc_finish_block (&body);
gfc_add_block_to_block (pblock, &se.pre);
loopvar = se.expr;
+ /* Make a temporary, store the current value in that
+ and return it, once the loop is done. */
+ tmp_loopvar = gfc_create_var (TREE_TYPE (loopvar), "loopvar");
+ gfc_add_modify_expr (pblock, tmp_loopvar, loopvar);
+
/* Initialize the loop. */
gfc_init_se (&se, NULL);
gfc_conv_expr_val (&se, c->iterator->start);
/* Add the exit label. */
tmp = build1_v (LABEL_EXPR, exit_label);
gfc_add_expr_to_block (pblock, tmp);
+
+ /* Restore the original value of the loop counter. */
+ gfc_add_modify_expr (pblock, loopvar, tmp_loopvar);
}
}
mpz_clear (size);
{
gfc_ref *ref;
gfc_typespec *ts;
+ mpz_t char_len;
/* Don't bother if we already know the length is a constant. */
if (*len && INTEGER_CST_P (*len))
/* Array references don't change the string length. */
break;
- case COMPONENT_REF:
+ case REF_COMPONENT:
/* Use the length of the component. */
ts = &ref->u.c.component->ts;
break;
+ case REF_SUBSTRING:
+ if (ref->u.ss.start->expr_type != EXPR_CONSTANT
+ || ref->u.ss.end->expr_type != EXPR_CONSTANT)
+ break;
+ mpz_init_set_ui (char_len, 1);
+ mpz_add (char_len, char_len, ref->u.ss.end->value.integer);
+ mpz_sub (char_len, char_len, ref->u.ss.start->value.integer);
+ *len = gfc_conv_mpz_to_tree (char_len,
+ gfc_default_character_kind);
+ *len = convert (gfc_charlen_type_node, *len);
+ mpz_clear (char_len);
+ return;
+
default:
/* TODO: Substrings are tricky because we can't evaluate the
expression more than once. For now we just give up, and hope
/* Figure out the string length of a character array constructor.
Returns TRUE if all elements are character constants. */
-static bool
+bool
get_array_ctor_strlen (gfc_constructor * c, tree * len)
{
bool is_const;
case EXPR_ARRAY:
if (!get_array_ctor_strlen (c->expr->value.constructor, len))
- is_const = FALSE;
+ is_const = false;
break;
case EXPR_VARIABLE:
break;
default:
- is_const = FALSE;
+ is_const = false;
+
+ /* Hope that whatever we have possesses a constant character
+ length! */
+ if (!(*len && INTEGER_CST_P (*len)) && c->expr->ts.cl)
+ {
+ gfc_conv_const_charlen (c->expr->ts.cl);
+ *len = c->expr->ts.cl->backend_decl;
+ }
/* TODO: For now we just ignore anything we don't know how to
handle, and hope we can figure it out a different way. */
break;
return is_const;
}
+/* Check whether the array constructor C consists entirely of constant
+ elements, and if so returns the number of those elements, otherwise
+ return zero. Note, an empty or NULL array constructor returns zero. */
+
+unsigned HOST_WIDE_INT
+gfc_constant_array_constructor_p (gfc_constructor * c)
+{
+ unsigned HOST_WIDE_INT nelem = 0;
+
+ while (c)
+ {
+ /* Implied-DO iterators, array-valued elements and anything that
+ is not an EXPR_CONSTANT disqualify the constructor. */
+ if (c->iterator
+ || c->expr->rank > 0
+ || c->expr->expr_type != EXPR_CONSTANT)
+ return 0;
+ c = c->next;
+ nelem++;
+ }
+ return nelem;
+}
+
+
+/* Given EXPR, the constant array constructor specified by an EXPR_ARRAY,
+ and the tree type of its elements, TYPE, return a static constant
+ variable that is compile-time initialized. */
+
+tree
+gfc_build_constant_array_constructor (gfc_expr * expr, tree type)
+{
+ tree tmptype, list, init, tmp;
+ HOST_WIDE_INT nelem;
+ gfc_constructor *c;
+ gfc_array_spec as;
+ gfc_se se;
+
+
+ /* First traverse the constructor list, converting the constants
+ to tree to build an initializer. tree_cons prepends, so the
+ list is built in reverse and fixed up with nreverse below. */
+ nelem = 0;
+ list = NULL_TREE;
+ c = expr->value.constructor;
+ while (c)
+ {
+ gfc_init_se (&se, NULL);
+ gfc_conv_constant (&se, c->expr);
+ if (c->expr->ts.type == BT_CHARACTER
+ && POINTER_TYPE_P (type))
+ se.expr = gfc_build_addr_expr (pchar_type_node, se.expr);
+ list = tree_cons (NULL_TREE, se.expr, list);
+ c = c->next;
+ nelem++;
+ }
+
+ /* Next determine the tree type for the array. We use the gfortran
+ front-end's gfc_get_nodesc_array_type in order to create a suitable
+ GFC_ARRAY_TYPE_P that may be used by the scalarizer. */
+
+ memset (&as, 0, sizeof (gfc_array_spec));
+
+ as.rank = 1;
+ as.type = AS_EXPLICIT;
+ as.lower[0] = gfc_int_expr (0);
+ as.upper[0] = gfc_int_expr (nelem - 1);
+ /* NOTE(review): the literal 3 is the 'packed' argument — presumably
+ a fully packed static layout; confirm against
+ gfc_get_nodesc_array_type's declaration. */
+ tmptype = gfc_get_nodesc_array_type (type, &as, 3);
+
+ init = build_constructor_from_list (tmptype, nreverse (list));
+
+ TREE_CONSTANT (init) = 1;
+ TREE_INVARIANT (init) = 1;
+ TREE_STATIC (init) = 1;
+
+ /* Create the static, read-only variable holding the initializer. */
+ tmp = gfc_create_var (tmptype, "A");
+ TREE_STATIC (tmp) = 1;
+ TREE_CONSTANT (tmp) = 1;
+ TREE_INVARIANT (tmp) = 1;
+ TREE_READONLY (tmp) = 1;
+ DECL_INITIAL (tmp) = init;
+
+ return tmp;
+}
+
+
+/* Translate a constant EXPR_ARRAY array constructor for the scalarizer.
+ This mostly initializes the scalarizer state info structure with the
+ appropriate values to directly use the array created by the function
+ gfc_build_constant_array_constructor. */
+
+static void
+gfc_trans_constant_array_constructor (gfc_loopinfo * loop,
+ gfc_ss * ss, tree type)
+{
+ gfc_ss_info *info;
+ tree tmp;
+
+ tmp = gfc_build_constant_array_constructor (ss->expr, type);
+
+ info = &ss->data.info;
+
+ /* The static variable itself serves as the "descriptor"; its address
+ is the data pointer. */
+ info->descriptor = tmp;
+ info->data = build_fold_addr_expr (tmp);
+ /* Negate the loop start so that loop index loop->from[0] maps to
+ element zero of the static array. */
+ info->offset = fold_build1 (NEGATE_EXPR, gfc_array_index_type,
+ loop->from[0]);
+
+ info->delta[0] = gfc_index_zero_node;
+ info->start[0] = gfc_index_zero_node;
+ info->end[0] = gfc_index_zero_node;
+ info->stride[0] = gfc_index_one_node;
+ info->dim[0] = 0;
+
+ if (info->dimen > loop->temp_dim)
+ loop->temp_dim = info->dimen;
+}
+
/* Array constructors are handled by constructing a temporary, then using that
within the scalarization loop. This is not optimal, but seems by far the
tree offsetvar;
tree desc;
tree type;
- bool const_string;
bool dynamic;
ss->data.info.dimen = loop->dimen;
c = ss->expr->value.constructor;
if (ss->expr->ts.type == BT_CHARACTER)
{
- const_string = get_array_ctor_strlen (c, &ss->string_length);
+ bool const_string = get_array_ctor_strlen (c, &ss->string_length);
if (!ss->string_length)
gfc_todo_error ("complex character array constructors");
type = build_pointer_type (type);
}
else
- {
- const_string = TRUE;
- type = gfc_typenode_for_spec (&ss->expr->ts);
- }
+ type = gfc_typenode_for_spec (&ss->expr->ts);
/* See if the constructor determines the loop bounds. */
dynamic = false;
mpz_clear (size);
}
- gfc_trans_allocate_temp_array (&loop->pre, &loop->post, loop,
- &ss->data.info, type, dynamic);
+ /* Special case constant array constructors. */
+ if (!dynamic
+ && loop->dimen == 1
+ && INTEGER_CST_P (loop->from[0])
+ && INTEGER_CST_P (loop->to[0]))
+ {
+ unsigned HOST_WIDE_INT nelem = gfc_constant_array_constructor_p (c);
+ if (nelem > 0)
+ {
+ tree diff = fold_build2 (MINUS_EXPR, gfc_array_index_type,
+ loop->to[0], loop->from[0]);
+ if (compare_tree_int (diff, nelem - 1) == 0)
+ {
+ gfc_trans_constant_array_constructor (loop, ss, type);
+ return;
+ }
+ }
+ }
+
+ gfc_trans_create_temp_array (&loop->pre, &loop->post, loop, &ss->data.info,
+ type, dynamic, true, false, false);
desc = ss->data.info.descriptor;
offset = gfc_index_zero_node;
else
{
/* Descriptorless arrays. */
- return gfc_build_addr_expr (NULL, descriptor);
+ return build_fold_addr_expr (descriptor);
}
}
else
/* Generate code to perform an array index bound check. */
static tree
-gfc_trans_array_bound_check (gfc_se * se, tree descriptor, tree index, int n)
+gfc_trans_array_bound_check (gfc_se * se, tree descriptor, tree index, int n,
+ locus * where)
{
- tree cond;
tree fault;
tree tmp;
+ char *msg;
+ const char * name = NULL;
if (!flag_bounds_check)
return index;
index = gfc_evaluate_now (index, &se->pre);
+
+ /* We find a name for the error message. */
+ if (se->ss)
+ name = se->ss->expr->symtree->name;
+
+ if (!name && se->loop && se->loop->ss && se->loop->ss->expr
+ && se->loop->ss->expr->symtree)
+ name = se->loop->ss->expr->symtree->name;
+
+ if (!name && se->loop && se->loop->ss && se->loop->ss->loop_chain
+ && se->loop->ss->loop_chain->expr
+ && se->loop->ss->loop_chain->expr->symtree)
+ name = se->loop->ss->loop_chain->expr->symtree->name;
+
+ if (!name && se->loop && se->loop->ss && se->loop->ss->loop_chain
+ && se->loop->ss->loop_chain->expr
+ && se->loop->ss->loop_chain->expr->symtree)
+ name = se->loop->ss->loop_chain->expr->symtree->name;
+
+ if (!name && se->loop && se->loop->ss && se->loop->ss->expr)
+ {
+ if (se->loop->ss->expr->expr_type == EXPR_FUNCTION
+ && se->loop->ss->expr->value.function.name)
+ name = se->loop->ss->expr->value.function.name;
+ else
+ if (se->loop->ss->type == GFC_SS_CONSTRUCTOR
+ || se->loop->ss->type == GFC_SS_SCALAR)
+ name = "unnamed constant";
+ }
+
/* Check lower bound. */
tmp = gfc_conv_array_lbound (descriptor, n);
fault = fold_build2 (LT_EXPR, boolean_type_node, index, tmp);
+ if (name)
+ asprintf (&msg, "%s for array '%s', lower bound of dimension %d exceeded",
+ gfc_msg_fault, name, n+1);
+ else
+ asprintf (&msg, "%s, lower bound of dimension %d exceeded",
+ gfc_msg_fault, n+1);
+ gfc_trans_runtime_check (fault, msg, &se->pre, where);
+ gfc_free (msg);
+
/* Check upper bound. */
tmp = gfc_conv_array_ubound (descriptor, n);
- cond = fold_build2 (GT_EXPR, boolean_type_node, index, tmp);
- fault = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, fault, cond);
-
- gfc_trans_runtime_check (fault, gfc_strconst_fault, &se->pre);
+ fault = fold_build2 (GT_EXPR, boolean_type_node, index, tmp);
+ if (name)
+ asprintf (&msg, "%s for array '%s', upper bound of dimension %d exceeded",
+ gfc_msg_fault, name, n+1);
+ else
+ asprintf (&msg, "%s, upper bound of dimension %d exceeded",
+ gfc_msg_fault, n+1);
+ gfc_trans_runtime_check (fault, msg, &se->pre, where);
+ gfc_free (msg);
return index;
}
/* We've already translated this value outside the loop. */
index = info->subscript[dim]->data.scalar.expr;
- index =
- gfc_trans_array_bound_check (se, info->descriptor, index, dim);
+ if ((ar->as->type != AS_ASSUMED_SIZE && !ar->as->cp_was_assumed)
+ || dim < ar->dimen - 1)
+ index = gfc_trans_array_bound_check (se, info->descriptor,
+ index, dim, &ar->where);
break;
case DIMEN_VECTOR:
index, gfc_conv_array_stride (desc, 0));
/* Read the vector to get an index into info->descriptor. */
- data = gfc_build_indirect_ref (gfc_conv_array_data (desc));
+ data = build_fold_indirect_ref (gfc_conv_array_data (desc));
index = gfc_build_array_ref (data, index);
index = gfc_evaluate_now (index, &se->pre);
/* Do any bounds checking on the final info->descriptor index. */
- index = gfc_trans_array_bound_check (se, info->descriptor,
- index, dim);
+ if ((ar->as->type != AS_ASSUMED_SIZE && !ar->as->cp_was_assumed)
+ || dim < ar->dimen - 1)
+ index = gfc_trans_array_bound_check (se, info->descriptor,
+ index, dim, &ar->where);
break;
case DIMEN_RANGE:
/* Multiply the loop variable by the stride and delta. */
index = se->loop->loopvar[i];
- index = fold_build2 (MULT_EXPR, gfc_array_index_type, index,
- info->stride[i]);
- index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index,
- info->delta[i]);
+ if (!integer_onep (info->stride[i]))
+ index = fold_build2 (MULT_EXPR, gfc_array_index_type, index,
+ info->stride[i]);
+ if (!integer_zerop (info->delta[i]))
+ index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index,
+ info->delta[i]);
break;
default:
}
/* Multiply by the stride. */
- index = fold_build2 (MULT_EXPR, gfc_array_index_type, index, stride);
+ if (!integer_onep (stride))
+ index = fold_build2 (MULT_EXPR, gfc_array_index_type, index, stride);
return index;
}
info->stride0);
/* Add the offset for this dimension to the stored offset for all other
dimensions. */
- index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, info->offset);
+ if (!integer_zerop (info->offset))
+ index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, info->offset);
- tmp = gfc_build_indirect_ref (info->data);
+ tmp = build_fold_indirect_ref (info->data);
se->expr = gfc_build_array_ref (tmp, index);
}
a(i, j, k) = base[offset + i * stride[0] + j * stride[1] + k * stride[2]]*/
void
-gfc_conv_array_ref (gfc_se * se, gfc_array_ref * ar)
+gfc_conv_array_ref (gfc_se * se, gfc_array_ref * ar, gfc_symbol * sym,
+ locus * where)
{
int n;
tree index;
tree tmp;
tree stride;
- tree fault;
gfc_se indexse;
/* Handle scalarized references separately. */
index = gfc_index_zero_node;
- fault = gfc_index_zero_node;
-
/* Calculate the offsets from all the dimensions. */
for (n = 0; n < ar->dimen; n++)
{
gfc_conv_expr_type (&indexse, ar->start[n], gfc_array_index_type);
gfc_add_block_to_block (&se->pre, &indexse.pre);
- if (flag_bounds_check)
+ if (flag_bounds_check &&
+ ((ar->as->type != AS_ASSUMED_SIZE && !ar->as->cp_was_assumed)
+ || n < ar->dimen - 1))
{
/* Check array bounds. */
tree cond;
-
- indexse.expr = gfc_evaluate_now (indexse.expr, &se->pre);
+ char *msg;
tmp = gfc_conv_array_lbound (se->expr, n);
cond = fold_build2 (LT_EXPR, boolean_type_node,
indexse.expr, tmp);
- fault =
- fold_build2 (TRUTH_OR_EXPR, boolean_type_node, fault, cond);
+ asprintf (&msg, "%s for array '%s', "
+ "lower bound of dimension %d exceeded", gfc_msg_fault,
+ sym->name, n+1);
+ gfc_trans_runtime_check (cond, msg, &se->pre, where);
+ gfc_free (msg);
tmp = gfc_conv_array_ubound (se->expr, n);
cond = fold_build2 (GT_EXPR, boolean_type_node,
indexse.expr, tmp);
- fault =
- fold_build2 (TRUTH_OR_EXPR, boolean_type_node, fault, cond);
+ asprintf (&msg, "%s for array '%s', "
+ "upper bound of dimension %d exceeded", gfc_msg_fault,
+ sym->name, n+1);
+ gfc_trans_runtime_check (cond, msg, &se->pre, where);
+ gfc_free (msg);
}
/* Multiply the index by the stride. */
index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, tmp);
}
- if (flag_bounds_check)
- gfc_trans_runtime_check (fault, gfc_strconst_fault, &se->pre);
-
tmp = gfc_conv_array_offset (se->expr);
if (!integer_zerop (tmp))
index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, tmp);
/* Access the calculated element. */
tmp = gfc_conv_array_data (se->expr);
- tmp = gfc_build_indirect_ref (tmp);
+ tmp = build_fold_indirect_ref (tmp);
se->expr = gfc_build_array_ref (tmp, index);
}
gfc_conv_section_startstride (gfc_loopinfo * loop, gfc_ss * ss, int n)
{
gfc_expr *start;
+ gfc_expr *end;
gfc_expr *stride;
tree desc;
gfc_se se;
{
/* We use a zero-based index to access the vector. */
info->start[n] = gfc_index_zero_node;
+ info->end[n] = gfc_index_zero_node;
info->stride[n] = gfc_index_one_node;
return;
}
gcc_assert (info->ref->u.ar.dimen_type[dim] == DIMEN_RANGE);
desc = info->descriptor;
start = info->ref->u.ar.start[dim];
+ end = info->ref->u.ar.end[dim];
stride = info->ref->u.ar.stride[dim];
/* Calculate the start of the range. For vector subscripts this will
}
info->start[n] = gfc_evaluate_now (info->start[n], &loop->pre);
+ /* Similarly calculate the end. Although this is not used in the
+ scalarizer, it is needed when checking bounds and where the end
+ is an expression with side-effects. */
+ if (end)
+ {
+ /* Specified section start. */
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_type (&se, end, gfc_array_index_type);
+ gfc_add_block_to_block (&loop->pre, &se.pre);
+ info->end[n] = se.expr;
+ }
+ else
+ {
+ /* No upper bound specified so use the bound of the array. */
+ info->end[n] = gfc_conv_array_ubound (desc, dim);
+ }
+ info->end[n] = gfc_evaluate_now (info->end[n], &loop->pre);
+
/* Calculate the stride. */
if (stride == NULL)
info->stride[n] = gfc_index_one_node;
loop->dimen = ss->data.info.dimen;
break;
+ /* As usual, lbound and ubound are exceptions! */
+ case GFC_SS_INTRINSIC:
+ switch (ss->expr->value.function.isym->generic_id)
+ {
+ case GFC_ISYM_LBOUND:
+ case GFC_ISYM_UBOUND:
+ loop->dimen = ss->data.info.dimen;
+
+ default:
+ break;
+ }
+
default:
break;
}
gfc_conv_section_startstride (loop, ss, n);
break;
+ case GFC_SS_INTRINSIC:
+ switch (ss->expr->value.function.isym->generic_id)
+ {
+ /* Fall through to supply start and stride. */
+ case GFC_ISYM_LBOUND:
+ case GFC_ISYM_UBOUND:
+ break;
+ default:
+ continue;
+ }
+
case GFC_SS_CONSTRUCTOR:
case GFC_SS_FUNCTION:
for (n = 0; n < ss->data.info.dimen; n++)
{
ss->data.info.start[n] = gfc_index_zero_node;
+ ss->data.info.end[n] = gfc_index_zero_node;
ss->data.info.stride[n] = gfc_index_one_node;
}
break;
if (flag_bounds_check)
{
stmtblock_t block;
- tree fault;
- tree bound;
+ tree lbound, ubound;
tree end;
tree size[GFC_MAX_DIMENSIONS];
+ tree stride_pos, stride_neg, non_zerosized, tmp2;
gfc_ss_info *info;
+ char *msg;
int dim;
gfc_start_block (&block);
- fault = integer_zero_node;
for (n = 0; n < loop->dimen; n++)
size[n] = NULL_TREE;
dim = info->dim[n];
if (info->ref->u.ar.dimen_type[dim] != DIMEN_RANGE)
continue;
+ if (n == info->ref->u.ar.dimen - 1
+ && (info->ref->u.ar.as->type == AS_ASSUMED_SIZE
+ || info->ref->u.ar.as->cp_was_assumed))
+ continue;
desc = ss->data.info.descriptor;
- /* Check lower bound. */
- bound = gfc_conv_array_lbound (desc, dim);
- tmp = info->start[n];
- tmp = fold_build2 (LT_EXPR, boolean_type_node, tmp, bound);
- fault = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, fault,
- tmp);
-
- /* Check the upper bound. */
- bound = gfc_conv_array_ubound (desc, dim);
- end = gfc_conv_section_upper_bound (ss, n, &block);
- tmp = fold_build2 (GT_EXPR, boolean_type_node, end, bound);
- fault = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, fault,
- tmp);
+ /* This is the run-time equivalent of resolve.c's
+ check_dimension(). The logical is more readable there
+ than it is here, with all the trees. */
+ lbound = gfc_conv_array_lbound (desc, dim);
+ ubound = gfc_conv_array_ubound (desc, dim);
+ end = info->end[n];
+
+ /* Zero stride is not allowed. */
+ tmp = fold_build2 (EQ_EXPR, boolean_type_node, info->stride[n],
+ gfc_index_zero_node);
+ asprintf (&msg, "Zero stride is not allowed, for dimension %d "
+ "of array '%s'", info->dim[n]+1,
+ ss->expr->symtree->name);
+ gfc_trans_runtime_check (tmp, msg, &block, &ss->expr->where);
+ gfc_free (msg);
+
+ /* non_zerosized is true when the selected range is not
+ empty. */
+ stride_pos = fold_build2 (GT_EXPR, boolean_type_node,
+ info->stride[n], gfc_index_zero_node);
+ tmp = fold_build2 (LE_EXPR, boolean_type_node, info->start[n],
+ end);
+ stride_pos = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
+ stride_pos, tmp);
+
+ stride_neg = fold_build2 (LT_EXPR, boolean_type_node,
+ info->stride[n], gfc_index_zero_node);
+ tmp = fold_build2 (GE_EXPR, boolean_type_node, info->start[n],
+ end);
+ stride_neg = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
+ stride_neg, tmp);
+ non_zerosized = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
+ stride_pos, stride_neg);
+
+ /* Check the start of the range against the lower and upper
+ bounds of the array, if the range is not empty. */
+ tmp = fold_build2 (LT_EXPR, boolean_type_node, info->start[n],
+ lbound);
+ tmp = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
+ non_zerosized, tmp);
+ asprintf (&msg, "%s, lower bound of dimension %d of array '%s'"
+ " exceeded", gfc_msg_fault, info->dim[n]+1,
+ ss->expr->symtree->name);
+ gfc_trans_runtime_check (tmp, msg, &block, &ss->expr->where);
+ gfc_free (msg);
+
+ tmp = fold_build2 (GT_EXPR, boolean_type_node, info->start[n],
+ ubound);
+ tmp = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
+ non_zerosized, tmp);
+ asprintf (&msg, "%s, upper bound of dimension %d of array '%s'"
+ " exceeded", gfc_msg_fault, info->dim[n]+1,
+ ss->expr->symtree->name);
+ gfc_trans_runtime_check (tmp, msg, &block, &ss->expr->where);
+ gfc_free (msg);
+
+ /* Compute the last element of the range, which is not
+ necessarily "end" (think 0:5:3, which doesn't contain 5)
+ and check it against both lower and upper bounds. */
+ tmp2 = fold_build2 (MINUS_EXPR, gfc_array_index_type, end,
+ info->start[n]);
+ tmp2 = fold_build2 (TRUNC_MOD_EXPR, gfc_array_index_type, tmp2,
+ info->stride[n]);
+ tmp2 = fold_build2 (MINUS_EXPR, gfc_array_index_type, end,
+ tmp2);
+
+ tmp = fold_build2 (LT_EXPR, boolean_type_node, tmp2, lbound);
+ tmp = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
+ non_zerosized, tmp);
+ asprintf (&msg, "%s, lower bound of dimension %d of array '%s'"
+ " exceeded", gfc_msg_fault, info->dim[n]+1,
+ ss->expr->symtree->name);
+ gfc_trans_runtime_check (tmp, msg, &block, &ss->expr->where);
+ gfc_free (msg);
+
+ tmp = fold_build2 (GT_EXPR, boolean_type_node, tmp2, ubound);
+ tmp = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
+ non_zerosized, tmp);
+ asprintf (&msg, "%s, upper bound of dimension %d of array '%s'"
+ " exceeded", gfc_msg_fault, info->dim[n]+1,
+ ss->expr->symtree->name);
+ gfc_trans_runtime_check (tmp, msg, &block, &ss->expr->where);
+ gfc_free (msg);
/* Check the section sizes match. */
tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, end,
{
tmp =
fold_build2 (NE_EXPR, boolean_type_node, tmp, size[n]);
- fault =
- build2 (TRUTH_OR_EXPR, boolean_type_node, fault, tmp);
+ asprintf (&msg, "%s, size mismatch for dimension %d "
+ "of array '%s'", gfc_msg_bounds, info->dim[n]+1,
+ ss->expr->symtree->name);
+ gfc_trans_runtime_check (tmp, msg, &block, &ss->expr->where);
+ gfc_free (msg);
}
else
size[n] = gfc_evaluate_now (tmp, &block);
}
}
- gfc_trans_runtime_check (fault, gfc_strconst_bounds, &block);
tmp = gfc_finish_block (&block);
gfc_add_expr_to_block (&loop->pre, tmp);
if (ss->type != GFC_SS_SECTION)
continue;
- if (gfc_could_be_alias (dest, ss))
+ if (gfc_could_be_alias (dest, ss)
+ || gfc_are_equivalenced_arrays (dest->expr, ss->expr))
{
nDepend = 1;
break;
memset (&loop->temp_ss->data.info, 0, sizeof (gfc_ss_info));
loop->temp_ss->type = GFC_SS_SECTION;
loop->temp_ss->data.info.dimen = n;
- gfc_trans_allocate_temp_array (&loop->pre, &loop->post, loop,
- &loop->temp_ss->data.info, tmp, false);
+ gfc_trans_create_temp_array (&loop->pre, &loop->post, loop,
+ &loop->temp_ss->data.info, tmp, false, true,
+ false, false);
}
for (n = 0; n < loop->temp_dim; n++)
{
/* Calculate the offset relative to the loop variable.
First multiply by the stride. */
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type,
- loop->from[n], info->stride[n]);
+ tmp = loop->from[n];
+ if (!integer_onep (info->stride[n]))
+ tmp = fold_build2 (MULT_EXPR, gfc_array_index_type,
+ tmp, info->stride[n]);
/* Then subtract this from our starting value. */
tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
tree size;
tree offset;
tree stride;
+ tree cond;
+ tree or_expr;
+ tree thencase;
+ tree elsecase;
+ tree var;
+ stmtblock_t thenblock;
+ stmtblock_t elseblock;
gfc_expr *ubound;
gfc_se se;
int n;
tmp = gfc_conv_descriptor_dtype (descriptor);
gfc_add_modify_expr (pblock, tmp, gfc_get_dtype (TREE_TYPE (descriptor)));
+ or_expr = NULL_TREE;
+
for (n = 0; n < rank; n++)
{
/* We have 3 possibilities for determining the size of the array:
/* Calculate the size of this dimension. */
size = fold_build2 (PLUS_EXPR, gfc_array_index_type, se.expr, size);
+ /* Check whether the size for this dimension is negative. */
+ cond = fold_build2 (LE_EXPR, boolean_type_node, size,
+ gfc_index_zero_node);
+ if (n == 0)
+ or_expr = cond;
+ else
+ or_expr = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, or_expr, cond);
+
/* Multiply the stride by the number of elements in this dimension. */
stride = fold_build2 (MULT_EXPR, gfc_array_index_type, stride, size);
stride = gfc_evaluate_now (stride, pblock);
*poffset = offset;
}
- size = gfc_evaluate_now (size, pblock);
- return size;
+ if (integer_zerop (or_expr))
+ return size;
+ if (integer_onep (or_expr))
+ return gfc_index_zero_node;
+
+ var = gfc_create_var (TREE_TYPE (size), "size");
+ gfc_start_block (&thenblock);
+ gfc_add_modify_expr (&thenblock, var, gfc_index_zero_node);
+ thencase = gfc_finish_block (&thenblock);
+
+ gfc_start_block (&elseblock);
+ gfc_add_modify_expr (&elseblock, var, size);
+ elsecase = gfc_finish_block (&elseblock);
+
+ tmp = gfc_evaluate_now (or_expr, pblock);
+ tmp = build3_v (COND_EXPR, tmp, thencase, elsecase);
+ gfc_add_expr_to_block (pblock, tmp);
+
+ return var;
}
the work for an ALLOCATE statement. */
/*GCC ARRAYS*/
-void
-gfc_array_allocate (gfc_se * se, gfc_ref * ref, tree pstat)
+bool
+gfc_array_allocate (gfc_se * se, gfc_expr * expr, tree pstat)
{
tree tmp;
tree pointer;
tree size;
gfc_expr **lower;
gfc_expr **upper;
+ gfc_ref *ref, *prev_ref = NULL;
+ bool allocatable_array;
+
+ ref = expr->ref;
+
+ /* Find the last reference in the chain. */
+ while (ref && ref->next != NULL)
+ {
+ gcc_assert (ref->type != REF_ARRAY || ref->u.ar.type == AR_ELEMENT);
+ prev_ref = ref;
+ ref = ref->next;
+ }
+
+ if (ref == NULL || ref->type != REF_ARRAY)
+ return false;
+
+ if (!prev_ref)
+ allocatable_array = expr->symtree->n.sym->attr.allocatable;
+ else
+ allocatable_array = prev_ref->u.c.component->allocatable;
/* Figure out the size of the array. */
switch (ref->u.ar.type)
lower, upper, &se->pre);
/* Allocate memory to store the data. */
- tmp = gfc_conv_descriptor_data_addr (se->expr);
- pointer = gfc_evaluate_now (tmp, &se->pre);
+ pointer = gfc_conv_descriptor_data_get (se->expr);
+ STRIP_NOPS (pointer);
if (TYPE_PRECISION (gfc_array_index_type) == 32)
- allocate = gfor_fndecl_allocate;
+ {
+ if (allocatable_array)
+ allocate = gfor_fndecl_allocate_array;
+ else
+ allocate = gfor_fndecl_allocate;
+ }
else if (TYPE_PRECISION (gfc_array_index_type) == 64)
- allocate = gfor_fndecl_allocate64;
+ {
+ if (allocatable_array)
+ allocate = gfor_fndecl_allocate64_array;
+ else
+ allocate = gfor_fndecl_allocate64;
+ }
else
gcc_unreachable ();
- tmp = gfc_chainon_list (NULL_TREE, pointer);
+ tmp = NULL_TREE;
+ /* The allocate_array variants take the old pointer as first argument. */
+ if (allocatable_array)
+ tmp = gfc_chainon_list (tmp, pointer);
tmp = gfc_chainon_list (tmp, size);
tmp = gfc_chainon_list (tmp, pstat);
- tmp = gfc_build_function_call (allocate, tmp);
+ tmp = build_function_call_expr (allocate, tmp);
+ tmp = build2 (MODIFY_EXPR, void_type_node, pointer, tmp);
gfc_add_expr_to_block (&se->pre, tmp);
tmp = gfc_conv_descriptor_offset (se->expr);
gfc_add_modify_expr (&se->pre, tmp, offset);
+
+ if (expr->ts.type == BT_DERIVED
+ && expr->ts.derived->attr.alloc_comp)
+ {
+ tmp = gfc_nullify_alloc_comp (expr->ts.derived, se->expr,
+ ref->u.ar.as->rank);
+ gfc_add_expr_to_block (&se->pre, tmp);
+ }
+
+ return true;
}
gfc_start_block (&block);
/* Get a pointer to the data. */
- tmp = gfc_conv_descriptor_data_addr (descriptor);
- var = gfc_evaluate_now (tmp, &block);
+ var = gfc_conv_descriptor_data_get (descriptor);
+ STRIP_NOPS (var);
/* Parameter is the address of the data component. */
tmp = gfc_chainon_list (NULL_TREE, var);
tmp = gfc_chainon_list (tmp, pstat);
- tmp = gfc_build_function_call (gfor_fndecl_deallocate, tmp);
+ tmp = build_function_call_expr (gfor_fndecl_deallocate, tmp);
+ gfc_add_expr_to_block (&block, tmp);
+
+ /* Zero the data pointer. */
+ tmp = build2 (MODIFY_EXPR, void_type_node,
+ var, build_int_cst (TREE_TYPE (var), 0));
gfc_add_expr_to_block (&block, tmp);
return gfc_finish_block (&block);
}
break;
+ case EXPR_NULL:
+ return gfc_build_null_descriptor (type);
+
default:
gcc_unreachable ();
}
if (dim + 1 < as->rank)
stride = GFC_TYPE_ARRAY_STRIDE (type, dim + 1);
else
- stride = NULL_TREE;
+ stride = GFC_TYPE_ARRAY_SIZE (type);
if (ubound != NULL_TREE && !(stride && INTEGER_CST_P (stride)))
{
gfc_add_modify_expr (pblock, stride, tmp);
else
stride = gfc_evaluate_now (tmp, pblock);
+
+ /* Make sure that negative size arrays are translated
+ to being zero size. */
+ tmp = build2 (GE_EXPR, boolean_type_node,
+ stride, gfc_index_zero_node);
+ tmp = build3 (COND_EXPR, gfc_array_index_type, tmp,
+ stride, gfc_index_zero_node);
+ gfc_add_modify_expr (pblock, stride, tmp);
}
size = stride;
}
+ gfc_trans_vla_type_sizes (sym, pblock);
+
*poffset = offset;
return size;
}
{
gfc_trans_init_string_length (sym->ts.cl, &block);
+ gfc_trans_vla_type_sizes (sym, &block);
+
/* Emit a DECL_EXPR for this variable, which will cause the
gimplifier to allocate storage, and all that good stuff. */
tmp = build1 (DECL_EXPR, TREE_TYPE (decl), decl);
fndecl = gfor_fndecl_internal_malloc64;
else
gcc_unreachable ();
- tmp = gfc_build_function_call (fndecl, tmp);
+ tmp = build_function_call_expr (fndecl, tmp);
tmp = fold (convert (TREE_TYPE (decl), tmp));
gfc_add_modify_expr (&block, decl, tmp);
/* Free the temporary. */
tmp = convert (pvoid_type_node, decl);
tmp = gfc_chainon_list (NULL_TREE, tmp);
- tmp = gfc_build_function_call (gfor_fndecl_internal_free, tmp);
+ tmp = build_function_call_expr (gfor_fndecl_internal_free, tmp);
gfc_add_expr_to_block (&block, tmp);
return gfc_finish_block (&block);
locus loc;
tree offset;
tree tmp;
+ tree stmt;
stmtblock_t block;
gfc_get_backend_locus (&loc);
tmp = convert (TREE_TYPE (parm), GFC_DECL_SAVED_DESCRIPTOR (parm));
gfc_add_modify_expr (&block, parm, tmp);
}
- tmp = gfc_finish_block (&block);
+ stmt = gfc_finish_block (&block);
gfc_set_backend_locus (&loc);
gfc_start_block (&block);
+
/* Add the initialization code to the start of the function. */
- gfc_add_expr_to_block (&block, tmp);
+
+ if (sym->attr.optional || sym->attr.not_always_present)
+ {
+ tmp = gfc_conv_expr_present (sym);
+ stmt = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt ());
+ }
+
+ gfc_add_expr_to_block (&block, stmt);
gfc_add_expr_to_block (&block, body);
return gfc_finish_block (&block);
tree dumdesc;
tree tmp;
tree stmt;
- tree stride;
+ tree stride, stride2;
tree stmt_packed;
tree stmt_unpacked;
tree partial;
type = TREE_TYPE (tmpdesc);
gcc_assert (GFC_ARRAY_TYPE_P (type));
dumdesc = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
- dumdesc = gfc_build_indirect_ref (dumdesc);
+ dumdesc = build_fold_indirect_ref (dumdesc);
gfc_start_block (&block);
if (sym->ts.type == BT_CHARACTER
partial = gfc_create_var (boolean_type_node, "partial");
TREE_USED (partial) = 1;
tmp = gfc_conv_descriptor_stride (dumdesc, gfc_rank_cst[0]);
- tmp = fold_build2 (EQ_EXPR, boolean_type_node, tmp, integer_one_node);
+ tmp = fold_build2 (EQ_EXPR, boolean_type_node, tmp, gfc_index_one_node);
gfc_add_modify_expr (&block, partial, tmp);
}
else
stride = gfc_conv_descriptor_stride (dumdesc, gfc_rank_cst[0]);
stride = gfc_evaluate_now (stride, &block);
- tmp = build2 (EQ_EXPR, boolean_type_node, stride, integer_zero_node);
+ tmp = build2 (EQ_EXPR, boolean_type_node, stride, gfc_index_zero_node);
tmp = build3 (COND_EXPR, gfc_array_index_type, tmp,
gfc_index_one_node, stride);
stride = GFC_TYPE_ARRAY_STRIDE (type, 0);
/* A library call to repack the array if necessary. */
tmp = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
tmp = gfc_chainon_list (NULL_TREE, tmp);
- stmt_unpacked = gfc_build_function_call (gfor_fndecl_in_pack, tmp);
+ stmt_unpacked = build_function_call_expr (gfor_fndecl_in_pack, tmp);
stride = gfc_index_one_node;
}
if (checkparm)
{
/* Check (ubound(a) - lbound(a) == ubound(b) - lbound(b)). */
+ char * msg;
tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
ubound, lbound);
- stride = build2 (MINUS_EXPR, gfc_array_index_type,
+ stride2 = build2 (MINUS_EXPR, gfc_array_index_type,
dubound, dlbound);
- tmp = fold_build2 (NE_EXPR, gfc_array_index_type, tmp, stride);
- gfc_trans_runtime_check (tmp, gfc_strconst_bounds, &block);
+ tmp = fold_build2 (NE_EXPR, gfc_array_index_type, tmp, stride2);
+ asprintf (&msg, "%s for dimension %d of array '%s'",
+ gfc_msg_bounds, n+1, sym->name);
+ gfc_trans_runtime_check (tmp, msg, &block, &loc);
+ gfc_free (msg);
}
}
else
gfc_add_modify_expr (&block, stride, tmp);
}
}
+ else
+ {
+ stride = GFC_TYPE_ARRAY_SIZE (type);
+
+ if (stride && !INTEGER_CST_P (stride))
+ {
+ /* Calculate size = stride * (ubound + 1 - lbound). */
+ tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
+ gfc_index_one_node, lbound);
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ ubound, tmp);
+ tmp = fold_build2 (MULT_EXPR, gfc_array_index_type,
+ GFC_TYPE_ARRAY_STRIDE (type, n), tmp);
+ gfc_add_modify_expr (&block, stride, tmp);
+ }
+ }
}
/* Set the offset. */
if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
gfc_add_modify_expr (&block, GFC_TYPE_ARRAY_OFFSET (type), offset);
+ gfc_trans_vla_type_sizes (sym, &block);
+
stmt = gfc_finish_block (&block);
gfc_start_block (&block);
/* Copy the data back. */
tmp = gfc_chainon_list (NULL_TREE, dumdesc);
tmp = gfc_chainon_list (tmp, tmpdesc);
- tmp = gfc_build_function_call (gfor_fndecl_in_unpack, tmp);
+ tmp = build_function_call_expr (gfor_fndecl_in_unpack, tmp);
gfc_add_expr_to_block (&cleanup, tmp);
}
/* Free the temporary. */
tmp = gfc_chainon_list (NULL_TREE, tmpdesc);
- tmp = gfc_build_function_call (gfor_fndecl_internal_free, tmp);
+ tmp = build_function_call_expr (gfor_fndecl_internal_free, tmp);
gfc_add_expr_to_block (&cleanup, tmp);
stmt = gfc_finish_block (&cleanup);
/* Only do the cleanup if the array was repacked. */
- tmp = gfc_build_indirect_ref (dumdesc);
+ tmp = build_fold_indirect_ref (dumdesc);
tmp = gfc_conv_descriptor_data_get (tmp);
tmp = build2 (NE_EXPR, boolean_type_node, tmp, tmpdesc);
stmt = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt ());
tree start;
tree offset;
int full;
- gfc_ref *ref;
gcc_assert (ss != gfc_ss_terminator);
- /* TODO: Pass constant array constructors without a temporary. */
/* Special case things we know we can pass easily. */
switch (expr->expr_type)
{
else if (se->direct_byref)
full = 0;
else
- {
- ref = info->ref;
- gcc_assert (ref->u.ar.type == AR_SECTION);
-
- full = 1;
- for (n = 0; n < ref->u.ar.dimen; n++)
- {
- /* Detect passing the full array as a section. This could do
- even more checking, but it doesn't seem worth it. */
- if (ref->u.ar.start[n]
- || ref->u.ar.end[n]
- || (ref->u.ar.stride[n]
- && !gfc_expr_is_one (ref->u.ar.stride[n], 0)))
- {
- full = 0;
- break;
- }
- }
- }
+ full = gfc_full_array_ref_p (info->ref);
if (full)
{
{
/* We pass full arrays directly. This means that pointers and
allocatable arrays should also work. */
- se->expr = gfc_build_addr_expr (NULL_TREE, desc);
+ se->expr = build_fold_addr_expr (desc);
}
else
{
/* For pointer assignments pass the descriptor directly. */
se->ss = secss;
- se->expr = gfc_build_addr_expr (NULL, se->expr);
+ se->expr = build_fold_addr_expr (se->expr);
gfc_conv_expr (se, expr);
return;
}
}
break;
+ case EXPR_ARRAY:
+ /* Constant array constructors don't need a temporary. */
+ if (ss->type == GFC_SS_CONSTRUCTOR
+ && expr->ts.type != BT_CHARACTER
+ && gfc_constant_array_constructor_p (expr->value.constructor))
+ {
+ need_tmp = 0;
+ info = &ss->data.info;
+ secss = ss;
+ }
+ else
+ {
+ need_tmp = 1;
+ secss = NULL;
+ info = NULL;
+ }
+ break;
+
default:
/* Something complicated. Copy it into a temporary. */
need_tmp = 1;
loop.temp_ss->next = gfc_ss_terminator;
if (expr->ts.type == BT_CHARACTER)
{
- gcc_assert (expr->ts.cl && expr->ts.cl->length
- && expr->ts.cl->length->expr_type == EXPR_CONSTANT);
- loop.temp_ss->string_length = gfc_conv_mpz_to_tree
- (expr->ts.cl->length->value.integer,
- expr->ts.cl->length->ts.kind);
- expr->ts.cl->backend_decl = loop.temp_ss->string_length;
- }
- loop.temp_ss->data.temp.type = gfc_typenode_for_spec (&expr->ts);
-
- /* ... which can hold our string, if present. */
- if (expr->ts.type == BT_CHARACTER)
- {
- loop.temp_ss->string_length = TYPE_SIZE_UNIT (loop.temp_ss->data.temp.type);
+ if (expr->ts.cl == NULL)
+ {
+ /* This had better be a substring reference! */
+ gfc_ref *char_ref = expr->ref;
+ for (; char_ref; char_ref = char_ref->next)
+ if (char_ref->type == REF_SUBSTRING)
+ {
+ mpz_t char_len;
+ expr->ts.cl = gfc_get_charlen ();
+ expr->ts.cl->next = char_ref->u.ss.length->next;
+ char_ref->u.ss.length->next = expr->ts.cl;
+
+ mpz_init_set_ui (char_len, 1);
+ mpz_add (char_len, char_len,
+ char_ref->u.ss.end->value.integer);
+ mpz_sub (char_len, char_len,
+ char_ref->u.ss.start->value.integer);
+ expr->ts.cl->backend_decl
+ = gfc_conv_mpz_to_tree (char_len,
+ gfc_default_character_kind);
+ /* Cast is necessary for *-charlen refs. */
+ expr->ts.cl->backend_decl
+ = convert (gfc_charlen_type_node,
+ expr->ts.cl->backend_decl);
+ mpz_clear (char_len);
+ break;
+ }
+ gcc_assert (char_ref != NULL);
+ loop.temp_ss->data.temp.type
+ = gfc_typenode_for_spec (&expr->ts);
+ loop.temp_ss->string_length = expr->ts.cl->backend_decl;
+ }
+ else if (expr->ts.cl->length
+ && expr->ts.cl->length->expr_type == EXPR_CONSTANT)
+ {
+ expr->ts.cl->backend_decl
+ = gfc_conv_mpz_to_tree (expr->ts.cl->length->value.integer,
+ expr->ts.cl->length->ts.kind);
+ loop.temp_ss->data.temp.type
+ = gfc_typenode_for_spec (&expr->ts);
+ loop.temp_ss->string_length
+ = TYPE_SIZE_UNIT (loop.temp_ss->data.temp.type);
+ }
+ else
+ {
+ loop.temp_ss->data.temp.type
+ = gfc_typenode_for_spec (&expr->ts);
+ loop.temp_ss->string_length = expr->ts.cl->backend_decl;
+ }
se->string_length = loop.temp_ss->string_length;
}
else
- loop.temp_ss->string_length = NULL;
+ {
+ loop.temp_ss->data.temp.type
+ = gfc_typenode_for_spec (&expr->ts);
+ loop.temp_ss->string_length = NULL;
+ }
loop.temp_ss->data.temp.dimen = loop.dimen;
gfc_add_ss_to_loop (&loop, loop.temp_ss);
}
if (expr->ts.type == BT_CHARACTER)
{
gfc_conv_expr (&rse, expr);
- rse.expr = gfc_build_indirect_ref (rse.expr);
+ if (POINTER_TYPE_P (TREE_TYPE (rse.expr)))
+ rse.expr = build_fold_indirect_ref (rse.expr);
}
else
gfc_conv_expr_val (&rse, expr);
/* Finish the copying loops. */
gfc_trans_scalarizing_loops (&loop, &block);
- /* Set the first stride component to zero to indicate a temporary. */
desc = loop.temp_ss->data.info.descriptor;
- tmp = gfc_conv_descriptor_stride (desc, gfc_rank_cst[0]);
- gfc_add_modify_expr (&loop.pre, tmp, gfc_index_zero_node);
gcc_assert (is_gimple_lvalue (desc));
}
limits will be the limits of the section.
A function may decide to repack the array to speed up access, but
we're not bothered about that here. */
- int dim;
+ int dim, ndim;
tree parm;
tree parmtype;
tree stride;
else
base = NULL_TREE;
- for (n = 0; n < info->ref->u.ar.dimen; n++)
+ ndim = info->ref ? info->ref->u.ar.dimen : info->dimen;
+ for (n = 0; n < ndim; n++)
{
stride = gfc_conv_array_stride (desc, n);
/* Work out the offset. */
- if (info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
+ if (info->ref
+ && info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
{
gcc_assert (info->subscript[n]
&& info->subscript[n]->type == GFC_SS_SCALAR);
tmp = fold_build2 (MULT_EXPR, TREE_TYPE (tmp), tmp, stride);
offset = fold_build2 (PLUS_EXPR, TREE_TYPE (tmp), offset, tmp);
- if (info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
+ if (info->ref
+ && info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
{
/* For elemental dimensions, we only need the offset. */
continue;
}
/* Vector subscripts need copying and are handled elsewhere. */
- gcc_assert (info->ref->u.ar.dimen_type[n] == DIMEN_RANGE);
+ if (info->ref)
+ gcc_assert (info->ref->u.ar.dimen_type[n] == DIMEN_RANGE);
/* Set the new lower bound. */
from = loop.from[dim];
/* If we have an array section or are assigning to a pointer,
make sure that the lower bound is 1. References to the full
array should otherwise keep the original bounds. */
- if ((info->ref->u.ar.type != AR_FULL || se->direct_byref)
+ if ((!info->ref
+ || info->ref->u.ar.type != AR_FULL
+ || se->direct_byref)
&& !integer_onep (from))
{
tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
dim++;
}
- /* Point the data pointer at the first element in the section. */
- tmp = gfc_conv_array_data (desc);
- tmp = gfc_build_indirect_ref (tmp);
- tmp = gfc_build_array_ref (tmp, offset);
- offset = gfc_build_addr_expr (gfc_array_dataptr_type (desc), tmp);
- gfc_conv_descriptor_data_set (&loop.pre, parm, offset);
+ if (se->data_not_needed)
+ gfc_conv_descriptor_data_set (&loop.pre, parm, gfc_index_zero_node);
+ else
+ {
+ /* Point the data pointer at the first element in the section. */
+ tmp = gfc_conv_array_data (desc);
+ tmp = build_fold_indirect_ref (tmp);
+ tmp = gfc_build_array_ref (tmp, offset);
+ offset = gfc_build_addr_expr (gfc_array_dataptr_type (desc), tmp);
+ gfc_conv_descriptor_data_set (&loop.pre, parm, offset);
+ }
- if (se->direct_byref)
+ if (se->direct_byref && !se->data_not_needed)
{
/* Set the offset. */
tmp = gfc_conv_descriptor_offset (parm);
{
/* Get a pointer to the new descriptor. */
if (se->want_pointer)
- se->expr = gfc_build_addr_expr (NULL, desc);
+ se->expr = build_fold_addr_expr (desc);
else
se->expr = desc;
}
if (sym->attr.dummy || POINTER_TYPE_P (TREE_TYPE (tmp)))
se->expr = tmp;
else
- se->expr = gfc_build_addr_expr (NULL, tmp);
+ se->expr = build_fold_addr_expr (tmp);
return;
}
if (sym->attr.allocatable)
{
- se->expr = gfc_conv_array_data (tmp);
+ if (sym->attr.dummy)
+ {
+ gfc_conv_expr_descriptor (se, expr, ss);
+ se->expr = gfc_conv_array_data (se->expr);
+ }
+ else
+ se->expr = gfc_conv_array_data (tmp);
return;
}
}
se->want_pointer = 1;
gfc_conv_expr_descriptor (se, expr, ss);
+ /* Deallocate the allocatable components of structures that are
+ not variable. */
+ if (expr->ts.type == BT_DERIVED
+ && expr->ts.derived->attr.alloc_comp
+ && expr->expr_type != EXPR_VARIABLE)
+ {
+ tmp = build_fold_indirect_ref (se->expr);
+ tmp = gfc_deallocate_alloc_comp (expr->ts.derived, tmp, expr->rank);
+ gfc_add_expr_to_block (&se->post, tmp);
+ }
+
if (g77)
{
desc = se->expr;
/* Repack the array. */
tmp = gfc_chainon_list (NULL_TREE, desc);
- ptr = gfc_build_function_call (gfor_fndecl_in_pack, tmp);
+ ptr = build_function_call_expr (gfor_fndecl_in_pack, tmp);
ptr = gfc_evaluate_now (ptr, &se->pre);
se->expr = ptr;
/* Copy the data back. */
tmp = gfc_chainon_list (NULL_TREE, desc);
tmp = gfc_chainon_list (tmp, ptr);
- tmp = gfc_build_function_call (gfor_fndecl_in_unpack, tmp);
+ tmp = build_function_call_expr (gfor_fndecl_in_unpack, tmp);
gfc_add_expr_to_block (&block, tmp);
/* Free the temporary. */
tmp = convert (pvoid_type_node, ptr);
tmp = gfc_chainon_list (NULL_TREE, tmp);
- tmp = gfc_build_function_call (gfor_fndecl_internal_free, tmp);
+ tmp = build_function_call_expr (gfor_fndecl_internal_free, tmp);
gfc_add_expr_to_block (&block, tmp);
stmt = gfc_finish_block (&block);
gfc_init_block (&block);
/* Only if it was repacked. This code needs to be executed before the
loop cleanup code. */
- tmp = gfc_build_indirect_ref (desc);
+ tmp = build_fold_indirect_ref (desc);
tmp = gfc_conv_array_data (tmp);
tmp = build2 (NE_EXPR, boolean_type_node, ptr, tmp);
tmp = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt ());
}
-/* NULLIFY an allocatable/pointer array on function entry, free it on exit. */
+/* Generate code to deallocate an array, if it is allocated. */
+
+tree
+gfc_trans_dealloc_allocated (tree descriptor)
+{
+ tree tmp;
+ tree ptr;
+ tree var;
+ stmtblock_t block;
+
+ gfc_start_block (&block);
+
+ var = gfc_conv_descriptor_data_get (descriptor);
+ STRIP_NOPS (var);
+ tmp = gfc_create_var (gfc_array_index_type, NULL);
+ ptr = build_fold_addr_expr (tmp);
+
+ /* Call array_deallocate with an int* present in the second argument.
+ Although it is ignored here, its presence ensures that arrays that
+ are already deallocated are ignored. */
+ tmp = gfc_chainon_list (NULL_TREE, var);
+ tmp = gfc_chainon_list (tmp, ptr);
+ tmp = build_function_call_expr (gfor_fndecl_deallocate, tmp);
+ gfc_add_expr_to_block (&block, tmp);
+
+ /* Zero the data pointer. */
+ tmp = build2 (MODIFY_EXPR, void_type_node,
+ var, build_int_cst (TREE_TYPE (var), 0));
+ gfc_add_expr_to_block (&block, tmp);
+
+ return gfc_finish_block (&block);
+}
+
+
+/* This helper function calculates the size in words of a full array. */
+
+static tree
+get_full_array_size (stmtblock_t *block, tree decl, int rank)
+{
+ tree idx;
+ tree nelems;
+ tree tmp;
+ idx = gfc_rank_cst[rank - 1];
+ nelems = gfc_conv_descriptor_ubound (decl, idx);
+ tmp = gfc_conv_descriptor_lbound (decl, idx);
+ tmp = build2 (MINUS_EXPR, gfc_array_index_type, nelems, tmp);
+ tmp = build2 (PLUS_EXPR, gfc_array_index_type,
+ tmp, gfc_index_one_node);
+ tmp = gfc_evaluate_now (tmp, block);
+
+ nelems = gfc_conv_descriptor_stride (decl, idx);
+ tmp = build2 (MULT_EXPR, gfc_array_index_type, nelems, tmp);
+ return gfc_evaluate_now (tmp, block);
+}
+
+
+/* Allocate dest to the same size as src, and copy src -> dest. */
+
+tree
+gfc_duplicate_allocatable(tree dest, tree src, tree type, int rank)
+{
+ tree tmp;
+ tree size;
+ tree nelems;
+ tree args;
+ tree null_cond;
+ tree null_data;
+ stmtblock_t block;
+
+ /* If the source is null, set the destination to null. */
+ gfc_init_block (&block);
+ gfc_conv_descriptor_data_set (&block, dest, null_pointer_node);
+ null_data = gfc_finish_block (&block);
+
+ gfc_init_block (&block);
+
+ nelems = get_full_array_size (&block, src, rank);
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, nelems,
+ TYPE_SIZE_UNIT (gfc_get_element_type (type)));
+
+ /* Allocate memory to the destination. */
+ tmp = gfc_chainon_list (NULL_TREE, size);
+ if (gfc_index_integer_kind == 4)
+ tmp = build_function_call_expr (gfor_fndecl_internal_malloc, tmp);
+ else if (gfc_index_integer_kind == 8)
+ tmp = build_function_call_expr (gfor_fndecl_internal_malloc64, tmp);
+ else
+ gcc_unreachable ();
+ tmp = fold (convert (TREE_TYPE (gfc_conv_descriptor_data_get (src)),
+ tmp));
+ gfc_conv_descriptor_data_set (&block, dest, tmp);
+
+ /* We know the temporary and the value will be the same length,
+ so can use memcpy. */
+ tmp = gfc_conv_descriptor_data_get (dest);
+ args = gfc_chainon_list (NULL_TREE, tmp);
+ tmp = gfc_conv_descriptor_data_get (src);
+ args = gfc_chainon_list (args, tmp);
+ args = gfc_chainon_list (args, size);
+ tmp = built_in_decls[BUILT_IN_MEMCPY];
+ tmp = build_function_call_expr (tmp, args);
+ gfc_add_expr_to_block (&block, tmp);
+ tmp = gfc_finish_block (&block);
+
+ /* Null the destination if the source is null; otherwise do
+ the allocate and copy. */
+ null_cond = gfc_conv_descriptor_data_get (src);
+ null_cond = convert (pvoid_type_node, null_cond);
+ null_cond = build2 (NE_EXPR, boolean_type_node, null_cond,
+ null_pointer_node);
+ return build3_v (COND_EXPR, null_cond, tmp, null_data);
+}
+
+
+/* Recursively traverse an object of derived type, generating code to
+ deallocate, nullify or copy allocatable components. This is the workhorse
+ function for the functions named in this enum. */
+
+enum {DEALLOCATE_ALLOC_COMP = 1, NULLIFY_ALLOC_COMP, COPY_ALLOC_COMP};
+
+static tree
+structure_alloc_comps (gfc_symbol * der_type, tree decl,
+ tree dest, int rank, int purpose)
+{
+ gfc_component *c;
+ gfc_loopinfo loop;
+ stmtblock_t fnblock;
+ stmtblock_t loopbody;
+ tree tmp;
+ tree comp;
+ tree dcmp;
+ tree nelems;
+ tree index;
+ tree var;
+ tree cdecl;
+ tree ctype;
+ tree vref, dref;
+ tree null_cond = NULL_TREE;
+
+ gfc_init_block (&fnblock);
+
+ if (POINTER_TYPE_P (TREE_TYPE (decl)))
+ decl = build_fold_indirect_ref (decl);
+
+ /* If this is an array of derived types with allocatable components
+ build a loop and recursively call this function. */
+ if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
+ || GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (decl)))
+ {
+ tmp = gfc_conv_array_data (decl);
+ var = build_fold_indirect_ref (tmp);
+
+ /* Get the number of elements - 1 and set the counter. */
+ if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (decl)))
+ {
+ /* Use the descriptor for an allocatable array. Since this
+ is a full array reference, we only need the descriptor
+ information from dimension = rank. */
+ tmp = get_full_array_size (&fnblock, decl, rank);
+ tmp = build2 (MINUS_EXPR, gfc_array_index_type,
+ tmp, gfc_index_one_node);
+
+ null_cond = gfc_conv_descriptor_data_get (decl);
+ null_cond = build2 (NE_EXPR, boolean_type_node, null_cond,
+ build_int_cst (TREE_TYPE (tmp), 0));
+ }
+ else
+ {
+ /* Otherwise use the TYPE_DOMAIN information. */
+ tmp = array_type_nelts (TREE_TYPE (decl));
+ tmp = fold_convert (gfc_array_index_type, tmp);
+ }
+
+ /* Remember that this is, in fact, the no. of elements - 1. */
+ nelems = gfc_evaluate_now (tmp, &fnblock);
+ index = gfc_create_var (gfc_array_index_type, "S");
+
+ /* Build the body of the loop. */
+ gfc_init_block (&loopbody);
+
+ vref = gfc_build_array_ref (var, index);
+
+ if (purpose == COPY_ALLOC_COMP)
+ {
+ tmp = gfc_duplicate_allocatable (dest, decl, TREE_TYPE(decl), rank);
+ gfc_add_expr_to_block (&fnblock, tmp);
+
+ tmp = build_fold_indirect_ref (gfc_conv_descriptor_data_get (dest));
+ dref = gfc_build_array_ref (tmp, index);
+ tmp = structure_alloc_comps (der_type, vref, dref, rank, purpose);
+ }
+ else
+ tmp = structure_alloc_comps (der_type, vref, NULL_TREE, rank, purpose);
+
+ gfc_add_expr_to_block (&loopbody, tmp);
+
+ /* Build the loop and return. */
+ gfc_init_loopinfo (&loop);
+ loop.dimen = 1;
+ loop.from[0] = gfc_index_zero_node;
+ loop.loopvar[0] = index;
+ loop.to[0] = nelems;
+ gfc_trans_scalarizing_loops (&loop, &loopbody);
+ gfc_add_block_to_block (&fnblock, &loop.pre);
+
+ tmp = gfc_finish_block (&fnblock);
+ if (null_cond != NULL_TREE)
+ tmp = build3_v (COND_EXPR, null_cond, tmp, build_empty_stmt ());
+
+ return tmp;
+ }
+
+ /* Otherwise, act on the components or recursively call self to
+ act on a chain of components. */
+ for (c = der_type->components; c; c = c->next)
+ {
+ bool cmp_has_alloc_comps = (c->ts.type == BT_DERIVED)
+ && c->ts.derived->attr.alloc_comp;
+ cdecl = c->backend_decl;
+ ctype = TREE_TYPE (cdecl);
+
+ switch (purpose)
+ {
+ case DEALLOCATE_ALLOC_COMP:
+ /* Do not deallocate the components of ultimate pointer
+ components. */
+ if (cmp_has_alloc_comps && !c->pointer)
+ {
+ comp = build3 (COMPONENT_REF, ctype, decl, cdecl, NULL_TREE);
+ rank = c->as ? c->as->rank : 0;
+ tmp = structure_alloc_comps (c->ts.derived, comp, NULL_TREE,
+ rank, purpose);
+ gfc_add_expr_to_block (&fnblock, tmp);
+ }
+
+ if (c->allocatable)
+ {
+ comp = build3 (COMPONENT_REF, ctype, decl, cdecl, NULL_TREE);
+ tmp = gfc_trans_dealloc_allocated (comp);
+ gfc_add_expr_to_block (&fnblock, tmp);
+ }
+ break;
+
+ case NULLIFY_ALLOC_COMP:
+ if (c->pointer)
+ continue;
+ else if (c->allocatable)
+ {
+ comp = build3 (COMPONENT_REF, ctype, decl, cdecl, NULL_TREE);
+ gfc_conv_descriptor_data_set (&fnblock, comp, null_pointer_node);
+ }
+ else if (cmp_has_alloc_comps)
+ {
+ comp = build3 (COMPONENT_REF, ctype, decl, cdecl, NULL_TREE);
+ rank = c->as ? c->as->rank : 0;
+ tmp = structure_alloc_comps (c->ts.derived, comp, NULL_TREE,
+ rank, purpose);
+ gfc_add_expr_to_block (&fnblock, tmp);
+ }
+ break;
+
+ case COPY_ALLOC_COMP:
+ if (c->pointer)
+ continue;
+
+ /* We need source and destination components. */
+ comp = build3 (COMPONENT_REF, ctype, decl, cdecl, NULL_TREE);
+ dcmp = build3 (COMPONENT_REF, ctype, dest, cdecl, NULL_TREE);
+ dcmp = fold_convert (TREE_TYPE (comp), dcmp);
+
+ if (c->allocatable && !cmp_has_alloc_comps)
+ {
+ tmp = gfc_duplicate_allocatable(dcmp, comp, ctype, c->as->rank);
+ gfc_add_expr_to_block (&fnblock, tmp);
+ }
+
+ if (cmp_has_alloc_comps)
+ {
+ rank = c->as ? c->as->rank : 0;
+ tmp = fold_convert (TREE_TYPE (dcmp), comp);
+ gfc_add_modify_expr (&fnblock, dcmp, tmp);
+ tmp = structure_alloc_comps (c->ts.derived, comp, dcmp,
+ rank, purpose);
+ gfc_add_expr_to_block (&fnblock, tmp);
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ break;
+ }
+ }
+
+ return gfc_finish_block (&fnblock);
+}
+
+/* Recursively traverse an object of derived type, generating code to
+ nullify allocatable components. */
+
+tree
+gfc_nullify_alloc_comp (gfc_symbol * der_type, tree decl, int rank)
+{
+ return structure_alloc_comps (der_type, decl, NULL_TREE, rank,
+ NULLIFY_ALLOC_COMP);
+}
+
+
+/* Recursively traverse an object of derived type, generating code to
+ deallocate allocatable components. */
+
+tree
+gfc_deallocate_alloc_comp (gfc_symbol * der_type, tree decl, int rank)
+{
+ return structure_alloc_comps (der_type, decl, NULL_TREE, rank,
+ DEALLOCATE_ALLOC_COMP);
+}
+
+
+/* Recursively traverse an object of derived type, generating code to
+ copy its allocatable components. */
+
+tree
+gfc_copy_alloc_comp (gfc_symbol * der_type, tree decl, tree dest, int rank)
+{
+ return structure_alloc_comps (der_type, decl, dest, rank, COPY_ALLOC_COMP);
+}
+
+
+/* NULLIFY an allocatable/pointer array on function entry, free it on exit.
+ Do likewise, recursively if necessary, with the allocatable components of
+ derived types. */
tree
gfc_trans_deferred_array (gfc_symbol * sym, tree body)
tree type;
tree tmp;
tree descriptor;
- tree deallocate;
- stmtblock_t block;
stmtblock_t fnblock;
locus loc;
+ int rank;
+ bool sym_has_alloc_comp;
+
+ sym_has_alloc_comp = (sym->ts.type == BT_DERIVED)
+ && sym->ts.derived->attr.alloc_comp;
/* Make sure the frontend gets these right. */
- if (!(sym->attr.pointer || sym->attr.allocatable))
- fatal_error
- ("Possible frontend bug: Deferred array size without pointer or allocatable attribute.");
+ if (!(sym->attr.pointer || sym->attr.allocatable || sym_has_alloc_comp))
+ fatal_error ("Possible frontend bug: Deferred array size without pointer, "
+ "allocatable attribute or derived type without allocatable "
+ "components.");
gfc_init_block (&fnblock);
- gcc_assert (TREE_CODE (sym->backend_decl) == VAR_DECL);
+ gcc_assert (TREE_CODE (sym->backend_decl) == VAR_DECL
+ || TREE_CODE (sym->backend_decl) == PARM_DECL);
+
if (sym->ts.type == BT_CHARACTER
&& !INTEGER_CST_P (sym->ts.cl->backend_decl))
- gfc_trans_init_string_length (sym->ts.cl, &fnblock);
+ {
+ gfc_trans_init_string_length (sym->ts.cl, &fnblock);
+ gfc_trans_vla_type_sizes (sym, &fnblock);
+ }
/* Dummy and use associated variables don't need anything special. */
if (sym->attr.dummy || sym->attr.use_assoc)
gfc_set_backend_locus (&sym->declared_at);
descriptor = sym->backend_decl;
- if (TREE_STATIC (descriptor))
+ /* Although static, derived types with default initializers and
+ allocatable components must not be nulled wholesale; instead they
+ are treated component by component. */
+ if (TREE_STATIC (descriptor) && !sym_has_alloc_comp)
{
/* SAVEd variables are not freed on exit. */
gfc_trans_static_array_pointer (sym);
/* Get the descriptor type. */
type = TREE_TYPE (sym->backend_decl);
- gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
-
+
+ if (sym_has_alloc_comp && !(sym->attr.pointer || sym->attr.allocatable))
+ {
+ rank = sym->as ? sym->as->rank : 0;
+ tmp = gfc_nullify_alloc_comp (sym->ts.derived, descriptor, rank);
+ gfc_add_expr_to_block (&fnblock, tmp);
+ }
+ else if (!GFC_DESCRIPTOR_TYPE_P (type))
+ {
+ /* If the backend_decl is not a descriptor, we must have a pointer
+ to one. */
+ descriptor = build_fold_indirect_ref (sym->backend_decl);
+ type = TREE_TYPE (descriptor);
+ }
+
/* NULLIFY the data pointer. */
- gfc_conv_descriptor_data_set (&fnblock, descriptor, null_pointer_node);
+ if (GFC_DESCRIPTOR_TYPE_P (type))
+ gfc_conv_descriptor_data_set (&fnblock, descriptor, null_pointer_node);
gfc_add_expr_to_block (&fnblock, body);
gfc_set_backend_locus (&loc);
- /* Allocatable arrays need to be freed when they go out of scope. */
- if (sym->attr.allocatable)
- {
- gfc_start_block (&block);
-
- /* Deallocate if still allocated at the end of the procedure. */
- deallocate = gfc_array_deallocate (descriptor, null_pointer_node);
- tmp = gfc_conv_descriptor_data_get (descriptor);
- tmp = build2 (NE_EXPR, boolean_type_node, tmp,
- build_int_cst (TREE_TYPE (tmp), 0));
- tmp = build3_v (COND_EXPR, tmp, deallocate, build_empty_stmt ());
- gfc_add_expr_to_block (&block, tmp);
+ /* Allocatable arrays need to be freed when they go out of scope.
+ The allocatable components of pointers must not be touched. */
+ if (sym_has_alloc_comp && !(sym->attr.function || sym->attr.result)
+ && !sym->attr.pointer)
+ {
+ int rank;
+ rank = sym->as ? sym->as->rank : 0;
+ tmp = gfc_deallocate_alloc_comp (sym->ts.derived, descriptor, rank);
+ gfc_add_expr_to_block (&fnblock, tmp);
+ }
- tmp = gfc_finish_block (&block);
+ if (sym->attr.allocatable)
+ {
+ tmp = gfc_trans_dealloc_allocated (sym->backend_decl);
gfc_add_expr_to_block (&fnblock, tmp);
}
/* Reverse a SS chain. */
-static gfc_ss *
+gfc_ss *
gfc_reverse_ss (gfc_ss * ss)
{
gfc_ss *next;
/* Walk the arguments of an elemental function. */
gfc_ss *
-gfc_walk_elemental_function_args (gfc_ss * ss, gfc_expr * expr,
+gfc_walk_elemental_function_args (gfc_ss * ss, gfc_actual_arglist *arg,
gfc_ss_type type)
{
- gfc_actual_arglist *arg;
int scalar;
gfc_ss *head;
gfc_ss *tail;
head = gfc_ss_terminator;
tail = NULL;
scalar = 1;
- for (arg = expr->value.function.actual; arg; arg = arg->next)
+ for (; arg; arg = arg->next)
{
if (!arg->expr)
continue;
/* Walk the parameters of an elemental function. For now we always pass
by reference. */
if (sym->attr.elemental)
- return gfc_walk_elemental_function_args (ss, expr, GFC_SS_REFERENCE);
+ return gfc_walk_elemental_function_args (ss, expr->value.function.actual,
+ GFC_SS_REFERENCE);
/* Scalar functions are OK as these are evaluated outside the scalarization
loop. Pass back and let the caller deal with it. */