/* Free a gfc_ss chain. */
-static void
+void
gfc_free_ss_chain (gfc_ss * ss)
{
gfc_ss *next;
tmptype = gfc_get_nodesc_array_type (type, &as, PACKED_STATIC, true);
+ /* as is not needed anymore. */
+ for (i = 0; i < as.rank + as.corank; i++)
+ {
+ gfc_free_expr (as.lower[i]);
+ gfc_free_expr (as.upper[i]);
+ }
+
init = build_constructor (tmptype, v);
TREE_CONSTANT (init) = 1;
tree offsetvar;
tree desc;
tree type;
+ tree tmp;
bool dynamic;
bool old_first_len, old_typespec_chararray_ctor;
tree old_first_len_val;
}
}
+ if (TREE_CODE (loop->to[0]) == VAR_DECL)
+ dynamic = true;
+
gfc_trans_create_temp_array (&loop->pre, &loop->post, loop, &ss->data.info,
type, NULL_TREE, dynamic, true, false, where);
/* If the array grows dynamically, the upper bound of the loop variable
is determined by the array's final upper bound. */
if (dynamic)
- loop->to[0] = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[0]);
+ {
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ offsetvar, gfc_index_one_node);
+ tmp = gfc_evaluate_now (tmp, &loop->pre);
+ gfc_conv_descriptor_ubound_set (&loop->pre, desc, gfc_rank_cst[0], tmp);
+ if (loop->to[0] && TREE_CODE (loop->to[0]) == VAR_DECL)
+ gfc_add_modify (&loop->pre, loop->to[0], tmp);
+ else
+ loop->to[0] = tmp;
+ }
if (TREE_USED (offsetvar))
pushdecl (offsetvar);
else
gcc_assert (INTEGER_CST_P (offset));
+
#if 0
/* Disable bound checking for now because it's probably broken. */
if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
tmp = gfc_conv_array_offset (se.expr);
ss->data.info.offset = gfc_evaluate_now (tmp, block);
+
+ /* Make absolutely sure that the saved_offset is indeed saved
+ so that the variable is still accessible after the loops
+ are translated. */
+ ss->data.info.saved_offset = ss->data.info.offset;
}
}
{
se->string_length = se->ss->string_length;
gfc_conv_scalarized_array_ref (se, NULL);
+ gfc_advance_se_ss_chain (se);
}
if (ss->type != GFC_SS_SECTION)
continue;
+ /* Catch allocatable lhs in f2003. */
+ if (gfc_option.flag_realloc_lhs && ss->is_alloc_lhs)
+ continue;
+
gfc_start_block (&inner);
/* TODO: range checking for mapped dimensions. */
if (gfc_symbols_could_alias (lsym, rsym))
return 1;
- if (rsym->ts.type != BT_DERIVED
- && lsym->ts.type != BT_DERIVED)
+ if (rsym->ts.type != BT_DERIVED && rsym->ts.type != BT_CLASS
+ && lsym->ts.type != BT_DERIVED && lsym->ts.type != BT_CLASS)
return 0;
/* For derived types we must check all the component types. We can ignore
continue;
}
+ /* Avoid using an allocatable lhs in an assignment, since
+ there might be a reallocation coming. */
+ if (loopspec[n] && ss->is_alloc_lhs)
+ continue;
+
if (ss->type != GFC_SS_SECTION)
continue;
}
-/* Fills in an array descriptor, and returns the size of the array. The size
- will be a simple_val, ie a variable or a constant. Also calculates the
- offset of the base. Returns the size of the array.
+/* Helper function for marking a boolean expression tree as unlikely. */
+
+static tree
+gfc_unlikely (tree cond)
+{
+ tree tmp;
+
+ /* __builtin_expect takes long arguments, so widen the condition
+ before annotating it. */
+ cond = fold_convert (long_integer_type_node, cond);
+ tmp = build_zero_cst (long_integer_type_node);
+ cond = build_call_expr_loc (input_location,
+ built_in_decls[BUILT_IN_EXPECT], 2, cond, tmp);
+ /* Narrow the annotated value back down to a boolean. */
+ cond = fold_convert (boolean_type_node, cond);
+ return cond;
+}
+
+/* Fills in an array descriptor, and returns the size of the array.
+ The size will be a simple_val, ie a variable or a constant. Also
+ calculates the offset of the base. The pointer argument overflow,
+ which should be of integer type, will increase in value if overflow
+ occurs during the size calculation. Returns the size of the array.
{
stride = 1;
offset = 0;
a.ubound[n] = specified_upper_bound;
a.stride[n] = stride;
size = size >= 0 ? ubound + size : 0; //size = ubound + 1 - lbound
+ overflow += size == 0 ? 0: (MAX/size < stride ? 1: 0);
stride = stride * size;
}
+ element_size = sizeof (array element);
+ stride = (size_t) stride;
+ overflow += element_size == 0 ? 0: (MAX/element_size < stride ? 1: 0);
+ stride = stride * element_size;
return (stride);
} */
/*GCC ARRAYS*/
static tree
gfc_array_init_size (tree descriptor, int rank, int corank, tree * poffset,
gfc_expr ** lower, gfc_expr ** upper,
- stmtblock_t * pblock)
+ stmtblock_t * pblock, tree * overflow)
{
tree type;
tree tmp;
tree size;
tree offset;
tree stride;
+ tree element_size;
tree or_expr;
tree thencase;
tree elsecase;
/* Calculate size and check whether extent is negative. */
size = gfc_conv_array_extent_dim (conv_lbound, conv_ubound, &or_expr);
-
+ size = gfc_evaluate_now (size, pblock);
+
+ /* Check whether multiplying the stride by the number of
+ elements in this dimension would overflow. We must also check
+ whether the current dimension has zero size in order to avoid
+ division by zero.
+ */
+ tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
+ gfc_array_index_type,
+ fold_convert (gfc_array_index_type,
+ TYPE_MAX_VALUE (gfc_array_index_type)),
+ size);
+ tmp = fold_build3_loc
+ (input_location, COND_EXPR, integer_type_node,
+ gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node, tmp, stride)),
+ integer_one_node, integer_zero_node);
+ tmp = fold_build3_loc
+ (input_location, COND_EXPR, integer_type_node,
+ gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
+ boolean_type_node, size,
+ build_zero_cst (gfc_array_index_type))),
+ integer_zero_node, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
+ *overflow, tmp);
+ *overflow = gfc_evaluate_now (tmp, pblock);
+
/* Multiply the stride by the number of elements in this dimension. */
stride = fold_build2_loc (input_location, MULT_EXPR,
gfc_array_index_type, stride, size);
/* The stride is the number of elements in the array, so multiply by the
size of an element to get the total size. */
tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
- size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
- stride, fold_convert (gfc_array_index_type, tmp));
+ /* Convert to size_t. */
+ element_size = fold_convert (sizetype, tmp);
+ stride = fold_convert (sizetype, stride);
+
+ /* First check for overflow. Since an array of type character can
+ have zero element_size, we must check for that before
+ dividing. */
+ tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
+ sizetype,
+ TYPE_MAX_VALUE (sizetype), element_size);
+ tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node,
+ gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
+ boolean_type_node, tmp,
+ stride)),
+ integer_one_node, integer_zero_node);
+ tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node,
+ gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
+ boolean_type_node,
+ element_size,
+ size_zero_node)),
+ integer_zero_node, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
+ *overflow, tmp);
+ *overflow = gfc_evaluate_now (tmp, pblock);
+
+ size = fold_build2_loc (input_location, MULT_EXPR, sizetype,
+ stride, element_size);
if (poffset != NULL)
{
var = gfc_create_var (TREE_TYPE (size), "size");
gfc_start_block (&thenblock);
- gfc_add_modify (&thenblock, var, gfc_index_zero_node);
+ gfc_add_modify (&thenblock, var, size_zero_node);
thencase = gfc_finish_block (&thenblock);
gfc_start_block (&elseblock);
tree pointer;
tree offset;
tree size;
+ tree msg;
+ tree error;
+ tree overflow; /* Boolean storing whether size calculation overflows. */
+ tree var_overflow;
+ tree cond;
+ stmtblock_t elseblock;
gfc_expr **lower;
gfc_expr **upper;
gfc_ref *ref, *prev_ref = NULL;
break;
}
+ overflow = integer_zero_node;
size = gfc_array_init_size (se->expr, ref->u.ar.as->rank,
ref->u.ar.as->corank, &offset, lower, upper,
- &se->pre);
+ &se->pre, &overflow);
+
+ var_overflow = gfc_create_var (integer_type_node, "overflow");
+ gfc_add_modify (&se->pre, var_overflow, overflow);
+
+ /* Generate the block of code handling overflow. */
+ msg = gfc_build_addr_expr (pchar_type_node, gfc_build_localized_cstring_const
+ ("Integer overflow when calculating the amount of "
+ "memory to allocate"));
+ error = build_call_expr_loc (input_location,
+ gfor_fndecl_runtime_error, 1, msg);
+
+ if (pstat != NULL_TREE && !integer_zerop (pstat))
+ {
+ /* Set the status variable if it's present. */
+ stmtblock_t set_status_block;
+ tree status_type = pstat ? TREE_TYPE (TREE_TYPE (pstat)) : NULL_TREE;
+
+ gfc_start_block (&set_status_block);
+ gfc_add_modify (&set_status_block,
+ fold_build1_loc (input_location, INDIRECT_REF,
+ status_type, pstat),
+ build_int_cst (status_type, LIBERROR_ALLOCATION));
+
+ tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ pstat, build_int_cst (TREE_TYPE (pstat), 0));
+ error = fold_build3_loc (input_location, COND_EXPR, void_type_node, tmp,
+ error, gfc_finish_block (&set_status_block));
+ }
+ gfc_start_block (&elseblock);
+
/* Allocate memory to store the data. */
pointer = gfc_conv_descriptor_data_get (se->expr);
STRIP_NOPS (pointer);
/* The allocate_array variants take the old pointer as first argument. */
if (allocatable_array)
- tmp = gfc_allocate_array_with_status (&se->pre, pointer, size, pstat, expr);
+ tmp = gfc_allocate_array_with_status (&elseblock, pointer, size, pstat, expr);
else
- tmp = gfc_allocate_with_status (&se->pre, size, pstat);
+ tmp = gfc_allocate_with_status (&elseblock, size, pstat);
tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node, pointer,
tmp);
+
+ gfc_add_expr_to_block (&elseblock, tmp);
+
+ cond = gfc_unlikely (fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ var_overflow, integer_zero_node));
+ tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond,
+ error, gfc_finish_block (&elseblock));
+
gfc_add_expr_to_block (&se->pre, tmp);
gfc_conv_descriptor_offset_set (&se->pre, se->expr, offset);
- if (expr->ts.type == BT_DERIVED
+ if ((expr->ts.type == BT_DERIVED || expr->ts.type == BT_CLASS)
&& expr->ts.u.derived->attr.alloc_comp)
{
tmp = gfc_nullify_alloc_comp (expr->ts.u.derived, se->expr,
locus loc;
tree offset;
tree tmp;
- tree stmt;
+ tree stmt;
stmtblock_t init;
- gfc_get_backend_locus (&loc);
+ gfc_save_backend_locus (&loc);
gfc_set_backend_locus (&sym->declared_at);
/* Descriptor type. */
}
stmt = gfc_finish_block (&init);
- gfc_set_backend_locus (&loc);
+ gfc_restore_backend_locus (&loc);
/* Add the initialization code to the start of the function. */
return;
}
- gfc_get_backend_locus (&loc);
+ gfc_save_backend_locus (&loc);
gfc_set_backend_locus (&sym->declared_at);
/* Descriptor type. */
/* We don't need to free any memory allocated by internal_pack as it will
be freed at the end of the function by pop_context. */
gfc_add_init_cleanup (block, stmtInit, stmtCleanup);
+
+ gfc_restore_backend_locus (&loc);
}
}
-
/* Convert an array for passing as an actual argument. Expressions and
vector subscripts are evaluated and stored in a temporary, which is then
passed. For whole arrays the descriptor is passed. For array sections
EXPR is the right-hand side of a pointer assignment and
se->expr is the descriptor for the previously-evaluated
left-hand side. The function creates an assignment from
- EXPR to se->expr. */
+ EXPR to se->expr.
+
+
+ The se->force_tmp flag disables the non-copying descriptor optimization
+ that is used for transpose. It may be used in cases where there is an
+ alias between the transpose argument and another argument in the same
+ function call. */
void
gfc_conv_expr_descriptor (gfc_se * se, gfc_expr * expr, gfc_ss * ss)
{
gfc_loopinfo loop;
- gfc_ss *secss;
gfc_ss_info *info;
int need_tmp;
int n;
tree offset;
int full;
bool subref_array_target = false;
+ gfc_expr *arg;
+ gcc_assert (ss != NULL);
gcc_assert (ss != gfc_ss_terminator);
/* Special case things we know we can pass easily. */
/* If we have a linear array section, we can pass it directly.
Otherwise we need to copy it into a temporary. */
- /* Find the SS for the array section. */
- secss = ss;
- while (secss != gfc_ss_terminator && secss->type != GFC_SS_SECTION)
- secss = secss->next;
-
- gcc_assert (secss != gfc_ss_terminator);
- info = &secss->data.info;
+ gcc_assert (ss->type == GFC_SS_SECTION);
+ gcc_assert (ss->expr == expr);
+ info = &ss->data.info;
/* Get the descriptor for the array. */
- gfc_conv_ss_descriptor (&se->pre, secss, 0);
+ gfc_conv_ss_descriptor (&se->pre, ss, 0);
desc = info->descriptor;
subref_array_target = se->direct_byref && is_subref_array (expr);
need_tmp = gfc_ref_needs_temporary_p (expr->ref)
&& !subref_array_target;
+ if (se->force_tmp)
+ need_tmp = 1;
+
if (need_tmp)
full = 0;
else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
full = gfc_full_array_ref_p (info->ref, NULL);
if (full)
+ for (n = 0; n < info->dimen; n++)
+ if (info->dim[n] != n)
+ {
+ full = 0;
+ break;
+ }
+
+ if (full)
{
if (se->direct_byref && !se->byref_noassign)
{
break;
case EXPR_FUNCTION:
+
+ /* We don't need to copy data in some cases. */
+ arg = gfc_get_noncopying_intrinsic_argument (expr);
+ if (arg)
+ {
+ /* This is a call to transpose... */
+ gcc_assert (expr->value.function.isym->id == GFC_ISYM_TRANSPOSE);
+ /* ... which has already been handled by the scalarizer, so
+ that we just need to get its argument's descriptor. */
+ gfc_conv_expr_descriptor (se, expr->value.function.actual->expr, ss);
+ return;
+ }
+
/* A transformational function return value will be a temporary
array descriptor. We still need to go through the scalarizer
to create the descriptor. Elemental functions are handled as
arbitrary expressions, i.e. copy to a temporary. */
- secss = ss;
- /* Look for the SS for this function. */
- while (secss != gfc_ss_terminator
- && (secss->type != GFC_SS_FUNCTION || secss->expr != expr))
- secss = secss->next;
if (se->direct_byref)
{
- gcc_assert (secss != gfc_ss_terminator);
+ gcc_assert (ss->type == GFC_SS_FUNCTION && ss->expr == expr);
/* For pointer assignments pass the descriptor directly. */
- se->ss = secss;
+ if (se->ss == NULL)
+ se->ss = ss;
+ else
+ gcc_assert (se->ss == ss);
se->expr = gfc_build_addr_expr (NULL_TREE, se->expr);
gfc_conv_expr (se, expr);
return;
}
- if (secss == gfc_ss_terminator)
+ if (ss->expr != expr || ss->type != GFC_SS_FUNCTION)
{
- /* Elemental function. */
+ if (ss->expr != expr)
+ /* Elemental function. */
+ gcc_assert ((expr->value.function.esym != NULL
+ && expr->value.function.esym->attr.elemental)
+ || (expr->value.function.isym != NULL
+ && expr->value.function.isym->elemental));
+ else
+ gcc_assert (ss->type == GFC_SS_INTRINSIC);
+
need_tmp = 1;
if (expr->ts.type == BT_CHARACTER
&& expr->ts.u.cl->length->expr_type != EXPR_CONSTANT)
else
{
/* Transformational function. */
- info = &secss->data.info;
+ info = &ss->data.info;
need_tmp = 0;
}
break;
{
need_tmp = 0;
info = &ss->data.info;
- secss = ss;
}
else
{
need_tmp = 1;
- secss = NULL;
info = NULL;
}
break;
default:
/* Something complicated. Copy it into a temporary. */
need_tmp = 1;
- secss = NULL;
info = NULL;
break;
}
+ /* If we are creating a temporary, we don't need to bother about aliases
+ anymore. */
+ if (need_tmp)
+ se->force_tmp = 0;
+
gfc_init_loopinfo (&loop);
/* Associate the SS with the loop. */
se->string_length = gfc_get_expr_charlen (expr);
desc = info->descriptor;
- gcc_assert (secss && secss != gfc_ss_terminator);
if (se->direct_byref && !se->byref_noassign)
{
/* For pointer assignments we fill in the destination. */
}
offset = gfc_index_zero_node;
- dim = 0;
/* The following can be somewhat confusing. We have two
descriptors, a new one and the original array.
{parm, parmtype, dim} refer to the new one.
- {desc, type, n, secss, loop} refer to the original, which maybe
+ {desc, type, n, loop} refer to the original, which maybe
a descriptorless array.
The bounds of the scalarization are the bounds of the section.
We don't have to worry about numeric overflows when calculating
}
else
{
- /* Check we haven't somehow got out of sync. */
- gcc_assert (info->dim[dim] == n);
-
/* Evaluate and remember the start of the section. */
start = info->start[n];
stride = gfc_evaluate_now (stride, &loop.pre);
/* Vector subscripts need copying and are handled elsewhere. */
if (info->ref)
gcc_assert (info->ref->u.ar.dimen_type[n] == DIMEN_RANGE);
+
+ /* look for the corresponding scalarizer dimension: dim. */
+ for (dim = 0; dim < ndim; dim++)
+ if (info->dim[dim] == n)
+ break;
+
+ /* loop exited early: the DIM being looked for has been found. */
+ gcc_assert (dim < ndim);
/* Set the new lower bound. */
from = loop.from[dim];
/* Store the new stride. */
gfc_conv_descriptor_stride_set (&loop.pre, parm,
gfc_rank_cst[dim], stride);
-
- dim++;
}
if (se->data_not_needed)
if (sym->ts.type == BT_CHARACTER)
se->string_length = sym->ts.u.cl->backend_decl;
- if (sym->ts.type == BT_DERIVED)
+ if (sym->ts.type == BT_DERIVED || sym->ts.type == BT_CLASS)
{
gfc_conv_expr_descriptor (se, expr, ss);
se->expr = gfc_conv_array_data (se->expr);
/* Deallocate the allocatable components of structures that are
not variable. */
- if (expr->ts.type == BT_DERIVED
+ if ((expr->ts.type == BT_DERIVED || expr->ts.type == BT_CLASS)
&& expr->ts.u.derived->attr.alloc_comp
&& expr->expr_type != EXPR_VARIABLE)
{
null_data = gfc_finish_block (&block);
gfc_init_block (&block);
- size = TYPE_SIZE_UNIT (type);
+ size = TYPE_SIZE_UNIT (TREE_TYPE (type));
if (!no_malloc)
{
tmp = gfc_call_malloc (&block, type, size);
act on a chain of components. */
for (c = der_type->components; c; c = c->next)
{
- bool cmp_has_alloc_comps = (c->ts.type == BT_DERIVED)
+ bool cmp_has_alloc_comps = (c->ts.type == BT_DERIVED
+ || c->ts.type == BT_CLASS)
&& c->ts.u.derived->attr.alloc_comp;
cdecl = c->backend_decl;
ctype = TREE_TYPE (cdecl);
switch (purpose)
{
case DEALLOCATE_ALLOC_COMP:
- /* Do not deallocate the components of ultimate pointer
- components. */
- if (cmp_has_alloc_comps && !c->attr.pointer)
- {
- comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
- decl, cdecl, NULL_TREE);
- rank = c->as ? c->as->rank : 0;
- tmp = structure_alloc_comps (c->ts.u.derived, comp, NULL_TREE,
- rank, purpose);
- gfc_add_expr_to_block (&fnblock, tmp);
- }
-
if (c->attr.allocatable && c->attr.dimension)
{
comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
decl, cdecl, NULL_TREE);
+ if (cmp_has_alloc_comps && !c->attr.pointer)
+ {
+ /* Do not deallocate the components of ultimate pointer
+ components. */
+ tmp = structure_alloc_comps (c->ts.u.derived, comp, NULL_TREE,
+ c->as->rank, purpose);
+ gfc_add_expr_to_block (&fnblock, tmp);
+ }
tmp = gfc_trans_dealloc_allocated (comp);
gfc_add_expr_to_block (&fnblock, tmp);
}
comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
decl, cdecl, NULL_TREE);
- tmp = gfc_deallocate_with_status (comp, NULL_TREE, true, NULL);
+ tmp = gfc_deallocate_scalar_with_status (comp, NULL, true, NULL,
+ c->ts);
gfc_add_expr_to_block (&fnblock, tmp);
tmp = fold_build2_loc (input_location, MODIFY_EXPR,
comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
decl, cdecl, NULL_TREE);
- /* Add reference to '$data' component. */
+ /* Add reference to '_data' component. */
tmp = CLASS_DATA (c)->backend_decl;
comp = fold_build3_loc (input_location, COMPONENT_REF,
TREE_TYPE (tmp), comp, tmp, NULL_TREE);
- tmp = gfc_deallocate_with_status (comp, NULL_TREE, true, NULL);
+ tmp = gfc_deallocate_scalar_with_status (comp, NULL, true, NULL,
+ CLASS_DATA (c)->ts);
gfc_add_expr_to_block (&fnblock, tmp);
tmp = fold_build2_loc (input_location, MODIFY_EXPR,
/* Allocatable scalar CLASS components. */
comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
decl, cdecl, NULL_TREE);
- /* Add reference to '$data' component. */
+ /* Add reference to '_data' component. */
tmp = CLASS_DATA (c)->backend_decl;
comp = fold_build3_loc (input_location, COMPONENT_REF,
TREE_TYPE (tmp), comp, tmp, NULL_TREE);
}
+/* Returns the value of LBOUND for an expression. This could be broken out
+ from gfc_conv_intrinsic_bound but this seemed to be simpler. This is
+ called by gfc_alloc_allocatable_for_assignment. */
+static tree
+get_std_lbound (gfc_expr *expr, tree desc, int dim, bool assumed_size)
+{
+ tree lbound;
+ tree ubound;
+ tree stride;
+ tree cond, cond1, cond3, cond4;
+ tree tmp;
+ if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)))
+ {
+ /* Full array descriptor: the standard LBOUND is the stored lower
+ bound for a non-empty dimension and 1 otherwise, so build a
+ runtime test from the stored bounds and stride. */
+ tmp = gfc_rank_cst[dim];
+ lbound = gfc_conv_descriptor_lbound_get (desc, tmp);
+ ubound = gfc_conv_descriptor_ubound_get (desc, tmp);
+ stride = gfc_conv_descriptor_stride_get (desc, tmp);
+ /* cond1: dimension has at least one element (ubound >= lbound). */
+ cond1 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ ubound, lbound);
+ cond3 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ stride, gfc_index_zero_node);
+ cond3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ boolean_type_node, cond3, cond1);
+ /* cond4: a negative stride means the bounds are reversed. */
+ cond4 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ stride, gfc_index_zero_node);
+ if (assumed_size)
+ /* The last dimension of an assumed-size array keeps its declared
+ lower bound unconditionally. */
+ cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ tmp, build_int_cst (gfc_array_index_type,
+ expr->rank - 1));
+ else
+ cond = boolean_false_node;
+
+ cond1 = fold_build2_loc (input_location, TRUTH_OR_EXPR,
+ boolean_type_node, cond3, cond4);
+ cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
+ boolean_type_node, cond, cond1);
+
+ return fold_build3_loc (input_location, COND_EXPR,
+ gfc_array_index_type, cond,
+ lbound, gfc_index_one_node);
+ }
+ else if (expr->expr_type == EXPR_VARIABLE)
+ {
+ /* Descriptorless variable: the lower bound is recorded statically
+ in the backend array type. */
+ tmp = TREE_TYPE (expr->symtree->n.sym->backend_decl);
+ return GFC_TYPE_ARRAY_LBOUND(tmp, dim);
+ }
+ else if (expr->expr_type == EXPR_FUNCTION)
+ {
+ /* A conversion function, so use the argument. */
+ expr = expr->value.function.actual->expr;
+ if (expr->expr_type != EXPR_VARIABLE)
+ return gfc_index_one_node;
+ desc = TREE_TYPE (expr->symtree->n.sym->backend_decl);
+ return get_std_lbound (expr, desc, dim, assumed_size);
+ }
+
+ /* Anything else defaults to a lower bound of 1. */
+ return gfc_index_one_node;
+}
+
+
+/* Returns true if an expression represents an lhs that can be reallocated
+ on assignment. */
+
+bool
+gfc_is_reallocatable_lhs (gfc_expr *expr)
+{
+ gfc_ref * ref;
+
+ /* With no reference at all there is nothing array-valued to
+ reallocate. */
+ if (!expr->ref)
+ return false;
+
+ /* An allocatable variable. */
+ if (expr->symtree->n.sym->attr.allocatable
+ && expr->ref
+ && expr->ref->type == REF_ARRAY
+ && expr->ref->u.ar.type == AR_FULL)
+ return true;
+
+ /* All that can be left are allocatable components. */
+ if ((expr->symtree->n.sym->ts.type != BT_DERIVED
+ && expr->symtree->n.sym->ts.type != BT_CLASS)
+ || !expr->symtree->n.sym->ts.u.derived->attr.alloc_comp)
+ return false;
+
+ /* Find a component ref followed by an array reference. */
+ for (ref = expr->ref; ref; ref = ref->next)
+ if (ref->next
+ && ref->type == REF_COMPONENT
+ && ref->next->type == REF_ARRAY
+ && !ref->next->next)
+ break;
+
+ if (!ref)
+ return false;
+
+ /* Return true if valid reallocatable lhs. */
+ if (ref->u.c.component->attr.allocatable
+ && ref->next->u.ar.type == AR_FULL)
+ return true;
+
+ return false;
+}
+
+
+/* Allocate the lhs of an assignment to an allocatable array, otherwise
+ reallocate it. LOOP is the scalarizer loop for the assignment, EXPR1
+ the lhs and EXPR2 the rhs (EXPR2 may be NULL on entry for the
+ x = f(...) case, in which case the lhs is located via the loop chain).
+ Returns the generated code, or NULL_TREE when no (re)allocation code
+ is needed. */
+
+tree
+gfc_alloc_allocatable_for_assignment (gfc_loopinfo *loop,
+ gfc_expr *expr1,
+ gfc_expr *expr2)
+{
+ stmtblock_t realloc_block;
+ stmtblock_t alloc_block;
+ stmtblock_t fblock;
+ gfc_ss *rss;
+ gfc_ss *lss;
+ tree realloc_expr;
+ tree alloc_expr;
+ tree size1;
+ tree size2;
+ tree array1;
+ tree cond;
+ tree tmp;
+ tree tmp2;
+ tree lbound;
+ tree ubound;
+ tree desc;
+ tree desc2;
+ tree offset;
+ tree jump_label1;
+ tree jump_label2;
+ tree neq_size;
+ tree lbd;
+ int n;
+ int dim;
+ gfc_array_spec * as;
+
+ /* x = f(...) with x allocatable. In this case, expr1 is the rhs.
+ Find the lhs expression in the loop chain and set expr1 and
+ expr2 accordingly. */
+ if (expr1->expr_type == EXPR_FUNCTION && expr2 == NULL)
+ {
+ expr2 = expr1;
+ /* Find the ss for the lhs. */
+ lss = loop->ss;
+ for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
+ if (lss->expr && lss->expr->expr_type == EXPR_VARIABLE)
+ break;
+ if (lss == gfc_ss_terminator)
+ return NULL_TREE;
+ expr1 = lss->expr;
+ }
+
+ /* Bail out if this is not a valid allocate on assignment. */
+ if (!gfc_is_reallocatable_lhs (expr1)
+ || (expr2 && !expr2->rank))
+ return NULL_TREE;
+
+ /* Find the ss for the lhs. */
+ lss = loop->ss;
+ for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
+ if (lss->expr == expr1)
+ break;
+
+ if (lss == gfc_ss_terminator)
+ return NULL_TREE;
+
+ /* Find an ss for the rhs. For operator expressions, we see the
+ ss's for the operands. Any one of these will do. */
+ rss = loop->ss;
+ for (; rss && rss != gfc_ss_terminator; rss = rss->loop_chain)
+ if (rss->expr != expr1 && rss != loop->temp_ss)
+ break;
+
+ if (expr2 && rss == gfc_ss_terminator)
+ return NULL_TREE;
+
+ gfc_start_block (&fblock);
+
+ /* Since the lhs is allocatable, this must be a descriptor type.
+ Get the data and array size. */
+ desc = lss->data.info.descriptor;
+ gcc_assert (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)));
+ array1 = gfc_conv_descriptor_data_get (desc);
+ size1 = gfc_conv_descriptor_size (desc, expr1->rank);
+
+ /* Get the rhs size. Fix both sizes. */
+ if (expr2)
+ desc2 = rss->data.info.descriptor;
+ else
+ desc2 = NULL_TREE;
+ size2 = gfc_index_one_node;
+ /* NOTE(review): expr2->rank is read unconditionally below, so expr2
+ appears to be assumed non-NULL from this point on -- confirm that
+ all callers (or the EXPR_FUNCTION path above) guarantee this. */
+ for (n = 0; n < expr2->rank; n++)
+ {
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[n], loop->from[n]);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ tmp, gfc_index_one_node);
+ size2 = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ tmp, size2);
+ }
+ size1 = gfc_evaluate_now (size1, &fblock);
+ size2 = gfc_evaluate_now (size2, &fblock);
+ cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ size1, size2);
+ neq_size = gfc_evaluate_now (cond, &fblock);
+
+ /* If the lhs is allocated and the lhs and rhs are equal length, jump
+ past the realloc/malloc. This allows F95 compliant expressions
+ to escape allocation on assignment. */
+ jump_label1 = gfc_build_label_decl (NULL_TREE);
+ jump_label2 = gfc_build_label_decl (NULL_TREE);
+
+ /* Allocate if data is NULL. */
+ cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ array1, build_int_cst (TREE_TYPE (array1), 0));
+ tmp = build3_v (COND_EXPR, cond,
+ build1_v (GOTO_EXPR, jump_label1),
+ build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ /* Reallocate if sizes are different. */
+ tmp = build3_v (COND_EXPR, neq_size,
+ build1_v (GOTO_EXPR, jump_label1),
+ build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ if (expr2 && expr2->expr_type == EXPR_FUNCTION
+ && expr2->value.function.isym
+ && expr2->value.function.isym->conversion)
+ {
+ /* For conversion functions, take the arg. */
+ gfc_expr *arg = expr2->value.function.actual->expr;
+ as = gfc_get_full_arrayspec_from_expr (arg);
+ }
+ else if (expr2)
+ as = gfc_get_full_arrayspec_from_expr (expr2);
+ else
+ as = NULL;
+
+ /* Reset the lhs bounds if any are different from the rhs. */
+ if (as && expr2->expr_type == EXPR_VARIABLE)
+ {
+ for (n = 0; n < expr1->rank; n++)
+ {
+ /* First check the lbounds. */
+ dim = rss->data.info.dim[n];
+ lbd = get_std_lbound (expr2, desc2, dim,
+ as->type == AS_ASSUMED_SIZE);
+ lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
+ cond = fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node, lbd, lbound);
+ tmp = build3_v (COND_EXPR, cond,
+ build1_v (GOTO_EXPR, jump_label1),
+ build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ /* Now check the shape. */
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[n], loop->from[n]);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ tmp, lbound);
+ ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[n]);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ tmp, ubound);
+ cond = fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node,
+ tmp, gfc_index_zero_node);
+ tmp = build3_v (COND_EXPR, cond,
+ build1_v (GOTO_EXPR, jump_label1),
+ build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&fblock, tmp);
+ }
+ }
+
+ /* Otherwise jump past the (re)alloc code. */
+ tmp = build1_v (GOTO_EXPR, jump_label2);
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ /* Add the label to start automatic (re)allocation. */
+ tmp = build1_v (LABEL_EXPR, jump_label1);
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ /* Now modify the lhs descriptor and the associated scalarizer
+ variables.
+ 7.4.1.3: If variable is or becomes an unallocated allocatable
+ variable, then it is allocated with each deferred type parameter
+ equal to the corresponding type parameters of expr , with the
+ shape of expr , and with each lower bound equal to the
+ corresponding element of LBOUND(expr). */
+ size1 = gfc_index_one_node;
+ offset = gfc_index_zero_node;
+
+ for (n = 0; n < expr2->rank; n++)
+ {
+ /* Extent of this dimension from the loop bounds. */
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ loop->to[n], loop->from[n]);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ tmp, gfc_index_one_node);
+
+ lbound = gfc_index_one_node;
+ ubound = tmp;
+
+ if (as)
+ {
+ /* Shift the bounds so the lower bound matches LBOUND of the
+ rhs, keeping the same extent. */
+ lbd = get_std_lbound (expr2, desc2, n,
+ as->type == AS_ASSUMED_SIZE);
+ ubound = fold_build2_loc (input_location,
+ MINUS_EXPR,
+ gfc_array_index_type,
+ ubound, lbound);
+ ubound = fold_build2_loc (input_location,
+ PLUS_EXPR,
+ gfc_array_index_type,
+ ubound, lbd);
+ lbound = lbd;
+ }
+
+ gfc_conv_descriptor_lbound_set (&fblock, desc,
+ gfc_rank_cst[n],
+ lbound);
+ gfc_conv_descriptor_ubound_set (&fblock, desc,
+ gfc_rank_cst[n],
+ ubound);
+ gfc_conv_descriptor_stride_set (&fblock, desc,
+ gfc_rank_cst[n],
+ size1);
+ lbound = gfc_conv_descriptor_lbound_get (desc,
+ gfc_rank_cst[n]);
+ /* Accumulate the descriptor offset: offset -= lbound * stride. */
+ tmp2 = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ lbound, size1);
+ offset = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ offset, tmp2);
+ size1 = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ tmp, size1);
+ }
+
+ /* Set the lhs descriptor and scalarizer offsets. For rank > 1,
+ the array offset is saved and the info.offset is used for a
+ running offset. Use the saved_offset instead. */
+ tmp = gfc_conv_descriptor_offset (desc);
+ gfc_add_modify (&fblock, tmp, offset);
+ if (lss->data.info.saved_offset
+ && TREE_CODE (lss->data.info.saved_offset) == VAR_DECL)
+ gfc_add_modify (&fblock, lss->data.info.saved_offset, tmp);
+
+ /* Now set the deltas for the lhs. */
+ for (n = 0; n < expr1->rank; n++)
+ {
+ tmp = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
+ dim = lss->data.info.dim[n];
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, tmp,
+ loop->from[dim]);
+ if (lss->data.info.delta[dim]
+ && TREE_CODE (lss->data.info.delta[dim]) == VAR_DECL)
+ gfc_add_modify (&fblock, lss->data.info.delta[dim], tmp);
+ }
+
+ /* Get the new lhs size in bytes. */
+ if (expr1->ts.type == BT_CHARACTER && expr1->ts.deferred)
+ {
+ /* Deferred-length character: adopt the rhs length. */
+ tmp = expr2->ts.u.cl->backend_decl;
+ gcc_assert (expr1->ts.u.cl->backend_decl);
+ tmp = fold_convert (TREE_TYPE (expr1->ts.u.cl->backend_decl), tmp);
+ gfc_add_modify (&fblock, expr1->ts.u.cl->backend_decl, tmp);
+ }
+ else if (expr1->ts.type == BT_CHARACTER && expr1->ts.u.cl->backend_decl)
+ {
+ tmp = TYPE_SIZE_UNIT (TREE_TYPE (gfc_typenode_for_spec (&expr1->ts)));
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, tmp,
+ expr1->ts.u.cl->backend_decl);
+ }
+ else
+ tmp = TYPE_SIZE_UNIT (gfc_typenode_for_spec (&expr1->ts));
+ tmp = fold_convert (gfc_array_index_type, tmp);
+ size2 = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ tmp, size2);
+ size2 = fold_convert (size_type_node, size2);
+ size2 = gfc_evaluate_now (size2, &fblock);
+
+ /* Realloc expression. Note that the scalarizer uses desc.data
+ in the array reference - (*desc.data)[<element>]. */
+ gfc_init_block (&realloc_block);
+ tmp = build_call_expr_loc (input_location,
+ built_in_decls[BUILT_IN_REALLOC], 2,
+ fold_convert (pvoid_type_node, array1),
+ size2);
+ gfc_conv_descriptor_data_set (&realloc_block,
+ desc, tmp);
+ realloc_expr = gfc_finish_block (&realloc_block);
+
+ /* Only reallocate if sizes are different. */
+ tmp = build3_v (COND_EXPR, neq_size, realloc_expr,
+ build_empty_stmt (input_location));
+ realloc_expr = tmp;
+
+
+ /* Malloc expression. */
+ gfc_init_block (&alloc_block);
+ tmp = build_call_expr_loc (input_location,
+ built_in_decls[BUILT_IN_MALLOC], 1,
+ size2);
+ gfc_conv_descriptor_data_set (&alloc_block,
+ desc, tmp);
+ tmp = gfc_conv_descriptor_dtype (desc);
+ gfc_add_modify (&alloc_block, tmp, gfc_get_dtype (TREE_TYPE (desc)));
+ alloc_expr = gfc_finish_block (&alloc_block);
+
+ /* Malloc if not allocated; realloc otherwise. */
+ tmp = build_int_cst (TREE_TYPE (array1), 0);
+ cond = fold_build2_loc (input_location, EQ_EXPR,
+ boolean_type_node,
+ array1, tmp);
+ tmp = build3_v (COND_EXPR, cond, alloc_expr, realloc_expr);
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ /* Make sure that the scalarizer data pointer is updated. */
+ if (lss->data.info.data
+ && TREE_CODE (lss->data.info.data) == VAR_DECL)
+ {
+ tmp = gfc_conv_descriptor_data_get (desc);
+ gfc_add_modify (&fblock, lss->data.info.data, tmp);
+ }
+
+ /* Add the exit label. */
+ tmp = build1_v (LABEL_EXPR, jump_label2);
+ gfc_add_expr_to_block (&fblock, tmp);
+
+ return gfc_finish_block (&fblock);
+}
+
+
/* NULLIFY an allocatable/pointer array on function entry, free it on exit.
Do likewise, recursively if necessary, with the allocatable components of
derived types. */
int rank;
bool sym_has_alloc_comp;
- sym_has_alloc_comp = (sym->ts.type == BT_DERIVED)
+ sym_has_alloc_comp = (sym->ts.type == BT_DERIVED
+ || sym->ts.type == BT_CLASS)
&& sym->ts.u.derived->attr.alloc_comp;
/* Make sure the frontend gets these right. */
if (!(sym->attr.pointer || sym->attr.allocatable || sym_has_alloc_comp))
- fatal_error ("Possible frontend bug: Deferred array size without pointer, "
+ fatal_error ("Possible front-end bug: Deferred array size without pointer, "
"allocatable attribute or derived type without allocatable "
"components.");
return;
}
- gfc_get_backend_locus (&loc);
+ gfc_save_backend_locus (&loc);
gfc_set_backend_locus (&sym->declared_at);
descriptor = sym->backend_decl;
gfc_trans_static_array_pointer (sym);
gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
+ gfc_restore_backend_locus (&loc);
return;
}
gfc_conv_descriptor_data_set (&init, descriptor, null_pointer_node);
gfc_init_block (&cleanup);
- gfc_set_backend_locus (&loc);
+ gfc_restore_backend_locus (&loc);
/* Allocatable arrays need to be freed when they go out of scope.
The allocatable components of pointers must not be touched. */