/* Statement translation -- generate GCC trees from gfc_code.
- Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
Contributed by Paul Brook <paul@nowt.org>
and Steven Bosscher <s.bosscher@student.tudelft.nl>
#include "system.h"
#include "coretypes.h"
#include "tree.h"
-#include "tree-gimple.h"
-#include "ggc.h"
-#include "toplev.h"
-#include "real.h"
#include "gfortran.h"
#include "flags.h"
#include "trans.h"
#include "trans-const.h"
#include "arith.h"
#include "dependency.h"
+#include "ggc.h"
typedef struct iter_info
{
tree len;
tree addr;
tree len_tree;
- char *label_str;
int label_len;
/* Start a new block. */
gfc_init_se (&se, NULL);
gfc_start_block (&se.pre);
- gfc_conv_label_variable (&se, code->expr);
+ gfc_conv_label_variable (&se, code->expr1);
len = GFC_DECL_STRING_LEN (se.expr);
addr = GFC_DECL_ASSIGN_ADDR (se.expr);
- label_tree = gfc_get_label_decl (code->label);
+ label_tree = gfc_get_label_decl (code->label1);
- if (code->label->defined == ST_LABEL_TARGET)
+ if (code->label1->defined == ST_LABEL_TARGET)
{
label_tree = gfc_build_addr_expr (pvoid_type_node, label_tree);
len_tree = integer_minus_one_node;
}
else
{
- label_str = code->label->format->value.character.string;
- label_len = code->label->format->value.character.length;
+ gfc_expr *format = code->label1->format;
+
+ label_len = format->value.character.length;
len_tree = build_int_cst (NULL_TREE, label_len);
- label_tree = gfc_build_string_const (label_len + 1, label_str);
+ label_tree = gfc_build_wide_string_const (format->ts.kind, label_len + 1,
+ format->value.character.string);
label_tree = gfc_build_addr_expr (pvoid_type_node, label_tree);
}
- gfc_add_modify_expr (&se.pre, len, len_tree);
- gfc_add_modify_expr (&se.pre, addr, label_tree);
+ gfc_add_modify (&se.pre, len, len_tree);
+ gfc_add_modify (&se.pre, addr, label_tree);
return gfc_finish_block (&se.pre);
}
tree tmp;
gfc_se se;
- if (code->label != NULL)
- return build1_v (GOTO_EXPR, gfc_get_label_decl (code->label));
+ if (code->label1 != NULL)
+ return build1_v (GOTO_EXPR, gfc_get_label_decl (code->label1));
/* ASSIGNED GOTO. */
gfc_init_se (&se, NULL);
gfc_start_block (&se.pre);
- gfc_conv_label_variable (&se, code->expr);
+ gfc_conv_label_variable (&se, code->expr1);
tmp = GFC_DECL_STRING_LEN (se.expr);
- tmp = fold_build2 (NE_EXPR, boolean_type_node, tmp,
- build_int_cst (TREE_TYPE (tmp), -1));
- gfc_trans_runtime_check (tmp, &se.pre, &loc,
+ tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, tmp,
+ build_int_cst (TREE_TYPE (tmp), -1));
+ gfc_trans_runtime_check (true, false, tmp, &se.pre, &loc,
"Assigned label is not a target label");
assigned_goto = GFC_DECL_ASSIGN_ADDR (se.expr);
- code = code->block;
- if (code == NULL)
- {
- target = build1 (GOTO_EXPR, void_type_node, assigned_goto);
- gfc_add_expr_to_block (&se.pre, target);
- return gfc_finish_block (&se.pre);
- }
+ /* We're going to ignore a label list. It does not really change the
+ statement's semantics (because it is just a further restriction on
+ what's legal code); before, we were comparing label addresses here, but
+ that's a very fragile business and may break with optimization. So
+ just ignore it. */
- /* Check the label list. */
- do
- {
- target = gfc_get_label_decl (code->label);
- tmp = gfc_build_addr_expr (pvoid_type_node, target);
- tmp = build2 (EQ_EXPR, boolean_type_node, tmp, assigned_goto);
- tmp = build3_v (COND_EXPR, tmp,
- build1 (GOTO_EXPR, void_type_node, target),
- build_empty_stmt ());
- gfc_add_expr_to_block (&se.pre, tmp);
- code = code->block;
- }
- while (code != NULL);
- gfc_trans_runtime_check (boolean_true_node, &se.pre, &loc,
- "Assigned label is not in the list");
-
- return gfc_finish_block (&se.pre);
+ target = fold_build1_loc (input_location, GOTO_EXPR, void_type_node,
+ assigned_goto);
+ gfc_add_expr_to_block (&se.pre, target);
+ return gfc_finish_block (&se.pre);
}
can be used, as is, to copy the result back to the variable. */
static void
gfc_conv_elemental_dependencies (gfc_se * se, gfc_se * loopse,
- gfc_symbol * sym, gfc_actual_arglist * arg)
+ gfc_symbol * sym, gfc_actual_arglist * arg,
+ gfc_dep_check check_variable)
{
gfc_actual_arglist *arg0;
gfc_expr *e;
gfc_ss *ss;
gfc_ss_info *info;
gfc_symbol *fsym;
+ gfc_ref *ref;
int n;
- stmtblock_t block;
tree data;
tree offset;
tree size;
&& e->rank && fsym
&& fsym->attr.intent != INTENT_IN
&& gfc_check_fncall_dependency (e, fsym->attr.intent,
- sym, arg0))
+ sym, arg0, check_variable))
{
+ tree initial, temptype;
+ stmtblock_t temp_post;
+
/* Make a local loopinfo for the temporary creation, so that
none of the other ss->info's have to be renormalized. */
gfc_init_loopinfo (&tmp_loop);
+ tmp_loop.dimen = info->dimen;
for (n = 0; n < info->dimen; n++)
{
tmp_loop.to[n] = loopse->loop->to[n];
tmp_loop.order[n] = loopse->loop->order[n];
}
- /* Generate the temporary. Merge the block so that the
- declarations are put at the right binding level. */
- size = gfc_create_var (gfc_array_index_type, NULL);
- data = gfc_create_var (pvoid_type_node, NULL);
- gfc_start_block (&block);
- tmp = gfc_typenode_for_spec (&e->ts);
- tmp = gfc_trans_create_temp_array (&se->pre, &se->post,
- &tmp_loop, info, tmp,
- false, true, false);
- gfc_add_modify_expr (&se->pre, size, tmp);
- tmp = fold_convert (pvoid_type_node, info->data);
- gfc_add_modify_expr (&se->pre, data, tmp);
- gfc_merge_block_scope (&block);
-
/* Obtain the argument descriptor for unpacking. */
gfc_init_se (&parmse, NULL);
parmse.want_pointer = 1;
+
+ /* The scalarizer introduces some specific peculiarities when
+ handling elemental subroutines; the stride can be needed up to
+ the dim_array - 1, rather than dim_loop - 1 to calculate
+ offsets outside the loop. For this reason, we make sure that
+ the descriptor has the dimensionality of the array by converting
+ trailing elements into ranges with end = start. */
+ for (ref = e->ref; ref; ref = ref->next)
+ if (ref->type == REF_ARRAY && ref->u.ar.type == AR_SECTION)
+ break;
+
+ if (ref)
+ {
+ bool seen_range = false;
+ for (n = 0; n < ref->u.ar.dimen; n++)
+ {
+ if (ref->u.ar.dimen_type[n] == DIMEN_RANGE)
+ seen_range = true;
+
+ if (!seen_range
+ || ref->u.ar.dimen_type[n] != DIMEN_ELEMENT)
+ continue;
+
+ ref->u.ar.end[n] = gfc_copy_expr (ref->u.ar.start[n]);
+ ref->u.ar.dimen_type[n] = DIMEN_RANGE;
+ }
+ }
+
gfc_conv_expr_descriptor (&parmse, e, gfc_walk_expr (e));
gfc_add_block_to_block (&se->pre, &parmse.pre);
+ /* If we've got INTENT(INOUT) or a derived type with INTENT(OUT),
+ initialize the array temporary with a copy of the values. */
+ if (fsym->attr.intent == INTENT_INOUT
+ || (fsym->ts.type ==BT_DERIVED
+ && fsym->attr.intent == INTENT_OUT))
+ initial = parmse.expr;
+ else
+ initial = NULL_TREE;
+
+ /* Find the type of the temporary to create; we don't use the type
+ of e itself as this breaks for subcomponent-references in e (where
+ the type of e is that of the final reference, but parmse.expr's
+ type corresponds to the full derived-type). */
+ /* TODO: Fix this somehow so we don't need a temporary of the whole
+ array but instead only the components referenced. */
+ temptype = TREE_TYPE (parmse.expr); /* Pointer to descriptor. */
+ gcc_assert (TREE_CODE (temptype) == POINTER_TYPE);
+ temptype = TREE_TYPE (temptype);
+ temptype = gfc_get_element_type (temptype);
+
+ /* Generate the temporary. Cleaning up the temporary should be the
+ very last thing done, so we add the code to a new block and add it
+ to se->post as last instructions. */
+ size = gfc_create_var (gfc_array_index_type, NULL);
+ data = gfc_create_var (pvoid_type_node, NULL);
+ gfc_init_block (&temp_post);
+ tmp = gfc_trans_create_temp_array (&se->pre, &temp_post,
+ &tmp_loop, info, temptype,
+ initial,
+ false, true, false,
+ &arg->expr->where);
+ gfc_add_modify (&se->pre, size, tmp);
+ tmp = fold_convert (pvoid_type_node, info->data);
+ gfc_add_modify (&se->pre, data, tmp);
+
/* Calculate the offset for the temporary. */
offset = gfc_index_zero_node;
for (n = 0; n < info->dimen; n++)
{
- tmp = gfc_conv_descriptor_stride (info->descriptor,
- gfc_rank_cst[n]);
- tmp = fold_build2 (MULT_EXPR, gfc_array_index_type,
- loopse->loop->from[n], tmp);
- offset = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- offset, tmp);
+ tmp = gfc_conv_descriptor_stride_get (info->descriptor,
+ gfc_rank_cst[n]);
+ tmp = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ loopse->loop->from[n], tmp);
+ offset = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, offset, tmp);
}
info->offset = gfc_create_var (gfc_array_index_type, NULL);
- gfc_add_modify_expr (&se->pre, info->offset, offset);
+ gfc_add_modify (&se->pre, info->offset, offset);
/* Copy the result back using unpack. */
- tmp = build_call_expr (gfor_fndecl_in_unpack, 2, parmse.expr, data);
+ tmp = build_call_expr_loc (input_location,
+ gfor_fndecl_in_unpack, 2, parmse.expr, data);
gfc_add_expr_to_block (&se->post, tmp);
+ /* parmse.pre is already added above. */
gfc_add_block_to_block (&se->post, &parmse.post);
+ gfc_add_block_to_block (&se->post, &temp_post);
}
}
}
/* Translate the CALL statement. Builds a call to an F95 subroutine. */
tree
-gfc_trans_call (gfc_code * code, bool dependency_check)
+gfc_trans_call (gfc_code * code, bool dependency_check,
+ tree mask, tree count1, bool invert)
{
gfc_se se;
gfc_ss * ss;
int has_alternate_specifier;
+ gfc_dep_check check_variable;
+ tree index = NULL_TREE;
+ tree maskexpr = NULL_TREE;
+ tree tmp;
/* A CALL starts a new block because the actual arguments may have to
be evaluated first. */
/* Translate the call. */
has_alternate_specifier
- = gfc_conv_function_call (&se, code->resolved_sym, code->ext.actual,
- NULL_TREE);
+ = gfc_conv_procedure_call (&se, code->resolved_sym, code->ext.actual,
+ code->expr1, NULL);
/* A subroutine without side-effect, by definition, does nothing! */
TREE_SIDE_EFFECTS (se.expr) = 1;
gfc_symbol *sym;
select_code = code->next;
gcc_assert(select_code->op == EXEC_SELECT);
- sym = select_code->expr->symtree->n.sym;
+ sym = select_code->expr1->symtree->n.sym;
se.expr = convert (gfc_typenode_for_spec (&sym->ts), se.expr);
if (sym->backend_decl == NULL)
sym->backend_decl = gfc_get_symbol_decl (sym);
- gfc_add_modify_expr (&se.pre, sym->backend_decl, se.expr);
+ gfc_add_modify (&se.pre, sym->backend_decl, se.expr);
}
else
gfc_add_expr_to_block (&se.pre, se.expr);
stmtblock_t body;
stmtblock_t block;
gfc_se loopse;
+ gfc_se depse;
/* gfc_walk_elemental_function_args renders the ss chain in the
- reverse order to the actual argument order. */
+ reverse order to the actual argument order. */
ss = gfc_reverse_ss (ss);
/* Initialize the loop. */
gfc_add_ss_to_loop (&loop, ss);
gfc_conv_ss_startstride (&loop);
- gfc_conv_loop_setup (&loop);
+ /* TODO: gfc_conv_loop_setup generates a temporary for vector
+ subscripts. This could be prevented in the elemental case
+ as temporaries are handled separatedly
+ (below in gfc_conv_elemental_dependencies). */
+ gfc_conv_loop_setup (&loop, &code->expr1->where);
gfc_mark_ss_chain_used (ss, 1);
/* Convert the arguments, checking for dependencies. */
/* For operator assignment, do dependency checking. */
if (dependency_check)
- {
- gfc_symbol *sym;
- sym = code->resolved_sym;
- gfc_conv_elemental_dependencies (&se, &loopse, sym,
- code->ext.actual);
- }
+ check_variable = ELEM_CHECK_VARIABLE;
+ else
+ check_variable = ELEM_DONT_CHECK_VARIABLE;
+
+ gfc_init_se (&depse, NULL);
+ gfc_conv_elemental_dependencies (&depse, &loopse, code->resolved_sym,
+ code->ext.actual, check_variable);
+
+ gfc_add_block_to_block (&loop.pre, &depse.pre);
+ gfc_add_block_to_block (&loop.post, &depse.post);
/* Generate the loop body. */
gfc_start_scalarized_body (&loop, &body);
gfc_init_block (&block);
+ if (mask && count1)
+ {
+ /* Form the mask expression according to the mask. */
+ index = count1;
+ maskexpr = gfc_build_array_ref (mask, index, NULL);
+ if (invert)
+ maskexpr = fold_build1_loc (input_location, TRUTH_NOT_EXPR,
+ TREE_TYPE (maskexpr), maskexpr);
+ }
+
/* Add the subroutine call to the block. */
- gfc_conv_function_call (&loopse, code->resolved_sym, code->ext.actual,
- NULL_TREE);
- gfc_add_expr_to_block (&loopse.pre, loopse.expr);
+ gfc_conv_procedure_call (&loopse, code->resolved_sym,
+ code->ext.actual, code->expr1, NULL);
+
+ if (mask && count1)
+ {
+ tmp = build3_v (COND_EXPR, maskexpr, loopse.expr,
+ build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&loopse.pre, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ count1, gfc_index_one_node);
+ gfc_add_modify (&loopse.pre, count1, tmp);
+ }
+ else
+ gfc_add_expr_to_block (&loopse.pre, loopse.expr);
gfc_add_block_to_block (&block, &loopse.pre);
gfc_add_block_to_block (&block, &loopse.post);
/* Translate the RETURN statement.  When an alternate-return expression
   (code->expr1) is present, its value is stored into the procedure's
   fake result decl before returning; otherwise a plain return is
   generated.  */
tree
-gfc_trans_return (gfc_code * code ATTRIBUTE_UNUSED)
+gfc_trans_return (gfc_code * code)
{
-  if (code->expr)
+  if (code->expr1)
    {
      gfc_se se;
      tree tmp;
      tree result;
      /* If code->expr is not NULL, this return statement must appear
-         in a subroutine and current_fake_result_decl has already
+	 in a subroutine and current_fake_result_decl has already
	 been generated. */
      result = gfc_get_fake_result_decl (NULL, 0);
      if (!result)
-        {
-          gfc_warning ("An alternate return at %L without a * dummy argument",
-                       &code->expr->where);
-          return build1_v (GOTO_EXPR, gfc_get_return_label ());
-        }
+	{
+	  /* No * dummy argument to receive the alternate return: warn and
+	     fall back to an ordinary return.  */
+	  gfc_warning ("An alternate return at %L without a * dummy argument",
+		       &code->expr1->where);
+	  return gfc_generate_return ();
+	}
      /* Start a new block for this statement. */
      gfc_init_se (&se, NULL);
      gfc_start_block (&se.pre);
-      gfc_conv_expr (&se, code->expr);
+      gfc_conv_expr (&se, code->expr1);
-      tmp = build2 (MODIFY_EXPR, TREE_TYPE (result), result,
-                    fold_convert (TREE_TYPE (result), se.expr));
+      /* Note that the actually returned expression is a simple value and
+	 does not depend on any pointers or such; thus we can clean-up with
+	 se.post before returning. */
+      tmp = fold_build2_loc (input_location, MODIFY_EXPR, TREE_TYPE (result),
+			     result, fold_convert (TREE_TYPE (result),
+			     se.expr));
      gfc_add_expr_to_block (&se.pre, tmp);
+      gfc_add_block_to_block (&se.pre, &se.post);
-      tmp = build1_v (GOTO_EXPR, gfc_get_return_label ());
+      tmp = gfc_generate_return ();
      gfc_add_expr_to_block (&se.pre, tmp);
-      gfc_add_block_to_block (&se.pre, &se.post);
      return gfc_finish_block (&se.pre);
    }
-  else
-    return build1_v (GOTO_EXPR, gfc_get_return_label ());
+
+  return gfc_generate_return ();
}
gfc_start_block (&se.pre);
- if (code->expr == NULL)
+ if (code->expr1 == NULL)
+ {
+ tmp = build_int_cst (gfc_int4_type_node, 0);
+ tmp = build_call_expr_loc (input_location,
+ gfor_fndecl_pause_string, 2,
+ build_int_cst (pchar_type_node, 0), tmp);
+ }
+ else if (code->expr1->ts.type == BT_INTEGER)
{
- tmp = build_int_cst (gfc_int4_type_node, code->ext.stop_code);
- tmp = build_call_expr (gfor_fndecl_pause_numeric, 1, tmp);
+ gfc_conv_expr (&se, code->expr1);
+ tmp = build_call_expr_loc (input_location,
+ gfor_fndecl_pause_numeric, 1,
+ fold_convert (gfc_int4_type_node, se.expr));
}
else
{
- gfc_conv_expr_reference (&se, code->expr);
- tmp = build_call_expr (gfor_fndecl_pause_string, 2,
+ gfc_conv_expr_reference (&se, code->expr1);
+ tmp = build_call_expr_loc (input_location,
+ gfor_fndecl_pause_string, 2,
se.expr, se.string_length);
}
to a runtime library call. */
tree
-gfc_trans_stop (gfc_code * code)
+gfc_trans_stop (gfc_code *code, bool error_stop)
{
tree gfc_int4_type_node = gfc_get_int_type (4);
gfc_se se;
gfc_init_se (&se, NULL);
gfc_start_block (&se.pre);
-
- if (code->expr == NULL)
+ if (code->expr1 == NULL)
+ {
+ tmp = build_int_cst (gfc_int4_type_node, 0);
+ tmp = build_call_expr_loc (input_location,
+ error_stop ? gfor_fndecl_error_stop_string
+ : gfor_fndecl_stop_string,
+ 2, build_int_cst (pchar_type_node, 0), tmp);
+ }
+ else if (code->expr1->ts.type == BT_INTEGER)
{
- tmp = build_int_cst (gfc_int4_type_node, code->ext.stop_code);
- tmp = build_call_expr (gfor_fndecl_stop_numeric, 1, tmp);
+ gfc_conv_expr (&se, code->expr1);
+ tmp = build_call_expr_loc (input_location,
+ error_stop ? gfor_fndecl_error_stop_numeric
+ : gfor_fndecl_stop_numeric_f08, 1,
+ fold_convert (gfc_int4_type_node, se.expr));
}
else
{
- gfc_conv_expr_reference (&se, code->expr);
- tmp = build_call_expr (gfor_fndecl_stop_string, 2,
- se.expr, se.string_length);
+ gfc_conv_expr_reference (&se, code->expr1);
+ tmp = build_call_expr_loc (input_location,
+ error_stop ? gfor_fndecl_error_stop_string
+ : gfor_fndecl_stop_string,
+ 2, se.expr, se.string_length);
}
gfc_add_expr_to_block (&se.pre, tmp);
}
+/* Translate a coarray SYNC statement.  Only two pieces of code are
+   generated here: an optional run-time check that a scalar SYNC IMAGES
+   image index equals 1 (when -fcheck=bounds is active), and zeroing of
+   the STAT= variable (code->expr2) when present.  Returns NULL_TREE
+   when neither applies.  */
+tree
+gfc_trans_sync (gfc_code *code, gfc_exec_op type __attribute__ ((unused)))
+{
+  gfc_se se;
+
+  /* Only open a block if we will actually emit something into it.  */
+  if ((code->expr1 && (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)) || code->expr2)
+    {
+      gfc_init_se (&se, NULL);
+      gfc_start_block (&se.pre);
+    }
+
+  /* Check SYNC IMAGES(imageset) for valid image index.
+     FIXME: Add a check for image-set arrays. */
+  if (code->expr1 && (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
+      && code->expr1->rank == 0)
+    {
+      tree cond;
+      gfc_conv_expr (&se, code->expr1);
+      /* Raise a run-time error unless the image number is exactly 1.  */
+      cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+			      se.expr, build_int_cst (TREE_TYPE (se.expr), 1));
+      gfc_trans_runtime_check (true, false, cond, &se.pre,
+			       &code->expr1->where, "Invalid image number "
+			       "%d in SYNC IMAGES",
+			       fold_convert (integer_type_node, se.expr));
+    }
+
+  /* If STAT is present, set it to zero. */
+  if (code->expr2)
+    {
+      gcc_assert (code->expr2->expr_type == EXPR_VARIABLE);
+      gfc_conv_expr (&se, code->expr2);
+      gfc_add_modify (&se.pre, se.expr, build_int_cst (TREE_TYPE (se.expr), 0));
+    }
+
+  if ((code->expr1 && (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)) || code->expr2)
+    return gfc_finish_block (&se.pre);
+
+  return NULL_TREE;
+}
+
+
/* Generate GENERIC for the IF construct. This function also deals with
the simple IF statement, because the front end translates the IF
statement into an IF construct.
{
gfc_se if_se;
tree stmt, elsestmt;
+ location_t loc;
/* Check for an unconditional ELSE clause. */
- if (!code->expr)
+ if (!code->expr1)
return gfc_trans_code (code->next);
/* Initialize a statement builder for each block. Puts in NULL_TREEs. */
gfc_start_block (&if_se.pre);
/* Calculate the IF condition expression. */
- gfc_conv_expr_val (&if_se, code->expr);
+ gfc_conv_expr_val (&if_se, code->expr1);
/* Translate the THEN clause. */
stmt = gfc_trans_code (code->next);
if (code->block)
elsestmt = gfc_trans_if_1 (code->block);
else
- elsestmt = build_empty_stmt ();
+ elsestmt = build_empty_stmt (input_location);
/* Build the condition expression and add it to the condition block. */
- stmt = fold_build3 (COND_EXPR, void_type_node, if_se.expr, stmt, elsestmt);
+ loc = code->expr1->where.lb ? code->expr1->where.lb->location : input_location;
+ stmt = fold_build3_loc (loc, COND_EXPR, void_type_node, if_se.expr, stmt,
+ elsestmt);
gfc_add_expr_to_block (&if_se.pre, stmt);
+/* Top-level entry for an IF construct.  The real translation is done by
+   gfc_trans_if_1 on code->block; in addition an exit label is created
+   and stored in code->exit_label so inner statements can branch to the
+   end of the construct.  */
tree
gfc_trans_if (gfc_code * code)
{
-  /* Ignore the top EXEC_IF, it only announces an IF construct. The
-     actual code we must translate is in code->block. */
+  stmtblock_t body;
+  tree exit_label;
+
+  /* Create exit label so it is available for trans'ing the body code. */
+  exit_label = gfc_build_label_decl (NULL_TREE);
+  code->exit_label = exit_label;
-  return gfc_trans_if_1 (code->block);
+  /* Translate the actual code in code->block. */
+  gfc_init_block (&body);
+  gfc_add_expr_to_block (&body, gfc_trans_if_1 (code->block));
+
+  /* Add exit label. */
+  gfc_add_expr_to_block (&body, build1_v (LABEL_EXPR, exit_label));
+
+  return gfc_finish_block (&body);
}
gfc_start_block (&se.pre);
/* Pre-evaluate COND. */
- gfc_conv_expr_val (&se, code->expr);
+ gfc_conv_expr_val (&se, code->expr1);
se.expr = gfc_evaluate_now (se.expr, &se.pre);
/* Build something to compare with. */
zero = gfc_build_const (TREE_TYPE (se.expr), integer_zero_node);
- if (code->label->value != code->label2->value)
+ if (code->label1->value != code->label2->value)
{
/* If (cond < 0) take branch1 else take branch2.
First build jumps to the COND .LT. 0 and the COND .EQ. 0 cases. */
- branch1 = build1_v (GOTO_EXPR, gfc_get_label_decl (code->label));
+ branch1 = build1_v (GOTO_EXPR, gfc_get_label_decl (code->label1));
branch2 = build1_v (GOTO_EXPR, gfc_get_label_decl (code->label2));
- if (code->label->value != code->label3->value)
- tmp = fold_build2 (LT_EXPR, boolean_type_node, se.expr, zero);
+ if (code->label1->value != code->label3->value)
+ tmp = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ se.expr, zero);
else
- tmp = fold_build2 (NE_EXPR, boolean_type_node, se.expr, zero);
+ tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ se.expr, zero);
- branch1 = fold_build3 (COND_EXPR, void_type_node, tmp, branch1, branch2);
+ branch1 = fold_build3_loc (input_location, COND_EXPR, void_type_node,
+ tmp, branch1, branch2);
}
else
- branch1 = build1_v (GOTO_EXPR, gfc_get_label_decl (code->label));
+ branch1 = build1_v (GOTO_EXPR, gfc_get_label_decl (code->label1));
- if (code->label->value != code->label3->value
+ if (code->label1->value != code->label3->value
&& code->label2->value != code->label3->value)
{
/* if (cond <= 0) take branch1 else take branch2. */
branch2 = build1_v (GOTO_EXPR, gfc_get_label_decl (code->label3));
- tmp = fold_build2 (LE_EXPR, boolean_type_node, se.expr, zero);
- branch1 = fold_build3 (COND_EXPR, void_type_node, tmp, branch1, branch2);
+ tmp = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
+ se.expr, zero);
+ branch1 = fold_build3_loc (input_location, COND_EXPR, void_type_node,
+ tmp, branch1, branch2);
}
/* Append the COND_EXPR to the evaluation of COND, and return. */
}
+/* Translate a CRITICAL block.  No locking or synchronization code is
+   emitted here: the construct's body (code->block->next) is simply
+   translated in place.  */
+tree
+gfc_trans_critical (gfc_code *code)
+{
+  stmtblock_t block;
+  tree tmp;
+
+  gfc_start_block (&block);
+  tmp = gfc_trans_code (code->block->next);
+  gfc_add_expr_to_block (&block, tmp);
+
+  return gfc_finish_block (&block);
+}
+
+
+/* Do proper initialization for ASSOCIATE names: bind the associate-name
+   SYM to its selector expression (sym->assoc->target), registering the
+   generated code as init/cleanup on BLOCK.  Three cases are handled:
+   array descriptor setup, scalar pointer assignment, and plain value
+   assignment.  */
+
+static void
+trans_associate_var (gfc_symbol *sym, gfc_wrapped_block *block)
+{
+  gfc_expr *e;
+  tree tmp;
+
+  gcc_assert (sym->assoc);
+  e = sym->assoc->target;
+
+  /* Do a `pointer assignment' with updated descriptor (or assign descriptor
+     to array temporary) for arrays with either unknown shape or if associating
+     to a variable. */
+  if (sym->attr.dimension
+      && (sym->as->type == AS_DEFERRED || sym->assoc->variable))
+    {
+      gfc_se se;
+      gfc_ss *ss;
+      tree desc;
+
+      desc = sym->backend_decl;
+
+      /* If association is to an expression, evaluate it and create temporary.
+	 Otherwise, get descriptor of target for pointer assignment. */
+      gfc_init_se (&se, NULL);
+      ss = gfc_walk_expr (e);
+      if (sym->assoc->variable)
+	{
+	  /* Associating to a variable: write the target's descriptor
+	     directly into the associate-name's descriptor.  */
+	  se.direct_byref = 1;
+	  se.expr = desc;
+	}
+      gfc_conv_expr_descriptor (&se, e, ss);
+
+      /* If we didn't already do the pointer assignment, set associate-name
+	 descriptor to the one generated for the temporary. */
+      if (!sym->assoc->variable)
+	{
+	  int dim;
+
+	  gfc_add_modify (&se.pre, desc, se.expr);
+
+	  /* The generated descriptor has lower bound zero (as array
+	     temporary), shift bounds so we get lower bounds of 1. */
+	  for (dim = 0; dim < e->rank; ++dim)
+	    gfc_conv_shift_descriptor_lbound (&se.pre, desc,
+					      dim, gfc_index_one_node);
+	}
+
+      /* Done, register stuff as init / cleanup code. */
+      gfc_add_init_cleanup (block, gfc_finish_block (&se.pre),
+			    gfc_finish_block (&se.post));
+    }
+
+  /* Do a scalar pointer assignment; this is for scalar variable targets. */
+  else if (gfc_is_associate_pointer (sym))
+    {
+      gfc_se se;
+
+      gcc_assert (!sym->attr.dimension);
+
+      gfc_init_se (&se, NULL);
+      gfc_conv_expr (&se, e);
+
+      /* Store the target's address in the associate-name's pointer decl.  */
+      tmp = TREE_TYPE (sym->backend_decl);
+      tmp = gfc_build_addr_expr (tmp, se.expr);
+      gfc_add_modify (&se.pre, sym->backend_decl, tmp);
+
+      gfc_add_init_cleanup (block, gfc_finish_block( &se.pre),
+			    gfc_finish_block (&se.post));
+    }
+
+  /* Do a simple assignment. This is for scalar expressions, where we
+     can simply use expression assignment. */
+  else
+    {
+      gfc_expr *lhs;
+
+      lhs = gfc_lval_expr_from_sym (sym);
+      tmp = gfc_trans_assignment (lhs, e, false, true);
+      gfc_add_init_cleanup (block, tmp, NULL_TREE);
+    }
+}
+
+
+/* Translate a BLOCK construct. This is basically what we would do for a
+   procedure body: the construct carries its own namespace
+   (code->ext.block.ns) whose proc_name symbol owns the local
+   variables.  Generates the body with an exit label (stored in
+   code->exit_label), then wraps it with deferred-variable handling and
+   ASSOCIATE-name initialization.  */
+
+tree
+gfc_trans_block_construct (gfc_code* code)
+{
+  gfc_namespace* ns;
+  gfc_symbol* sym;
+  gfc_wrapped_block block;
+  tree exit_label;
+  stmtblock_t body;
+  gfc_association_list *ass;
+
+  ns = code->ext.block.ns;
+  gcc_assert (ns);
+  sym = ns->proc_name;
+  gcc_assert (sym);
+
+  /* Process local variables.  NOTE(review): tlink appears to act as a
+     'already being processed' marker on the proc symbol -- confirm.  */
+  gcc_assert (!sym->tlink);
+  sym->tlink = sym;
+  gfc_process_block_locals (ns);
+
+  /* Generate code including exit-label. */
+  gfc_init_block (&body);
+  exit_label = gfc_build_label_decl (NULL_TREE);
+  code->exit_label = exit_label;
+  gfc_add_expr_to_block (&body, gfc_trans_code (ns->code));
+  gfc_add_expr_to_block (&body, build1_v (LABEL_EXPR, exit_label));
+
+  /* Finish everything. */
+  gfc_start_wrapped_block (&block, gfc_finish_block (&body));
+  gfc_trans_deferred_vars (sym, &block);
+  for (ass = code->ext.block.assoc; ass; ass = ass->next)
+    trans_associate_var (ass->st->n.sym, &block);
+
+  return gfc_finish_wrapped_block (&block);
+}
+
+
/* Translate the simple DO construct. This is where the loop variable has
integer type and step +-1. We can't use this in the general case
because integer overflow and floating point errors could give incorrect
static tree
gfc_trans_simple_do (gfc_code * code, stmtblock_t *pblock, tree dovar,
- tree from, tree to, tree step)
+ tree from, tree to, tree step, tree exit_cond)
{
stmtblock_t body;
tree type;
tree cond;
tree tmp;
+ tree saved_dovar = NULL;
tree cycle_label;
tree exit_label;
+ location_t loc;
type = TREE_TYPE (dovar);
+ loc = code->ext.iterator->start->where.lb->location;
+
/* Initialize the DO variable: dovar = from. */
- gfc_add_modify_expr (pblock, dovar, from);
+ gfc_add_modify_loc (loc, pblock, dovar, from);
+
+ /* Save value for do-tinkering checking. */
+ if (gfc_option.rtcheck & GFC_RTCHECK_DO)
+ {
+ saved_dovar = gfc_create_var (type, ".saved_dovar");
+ gfc_add_modify_loc (loc, pblock, saved_dovar, dovar);
+ }
/* Cycle and exit statements are implemented with gotos. */
cycle_label = gfc_build_label_decl (NULL_TREE);
exit_label = gfc_build_label_decl (NULL_TREE);
/* Put the labels where they can be found later. See gfc_trans_do(). */
- code->block->backend_decl = tree_cons (cycle_label, exit_label, NULL);
+ code->cycle_label = cycle_label;
+ code->exit_label = exit_label;
/* Loop body. */
gfc_start_block (&body);
/* Main loop body. */
- tmp = gfc_trans_code (code->block->next);
+ tmp = gfc_trans_code_cond (code->block->next, exit_cond);
gfc_add_expr_to_block (&body, tmp);
/* Label for cycle statements (if needed). */
gfc_add_expr_to_block (&body, tmp);
}
+ /* Check whether someone has modified the loop variable. */
+ if (gfc_option.rtcheck & GFC_RTCHECK_DO)
+ {
+ tmp = fold_build2_loc (loc, NE_EXPR, boolean_type_node,
+ dovar, saved_dovar);
+ gfc_trans_runtime_check (true, false, tmp, &body, &code->loc,
+ "Loop variable has been modified");
+ }
+
+ /* Exit the loop if there is an I/O result condition or error. */
+ if (exit_cond)
+ {
+ tmp = build1_v (GOTO_EXPR, exit_label);
+ tmp = fold_build3_loc (loc, COND_EXPR, void_type_node,
+ exit_cond, tmp,
+ build_empty_stmt (loc));
+ gfc_add_expr_to_block (&body, tmp);
+ }
+
/* Evaluate the loop condition. */
- cond = fold_build2 (EQ_EXPR, boolean_type_node, dovar, to);
- cond = gfc_evaluate_now (cond, &body);
+ cond = fold_build2_loc (loc, EQ_EXPR, boolean_type_node, dovar,
+ to);
+ cond = gfc_evaluate_now_loc (loc, cond, &body);
/* Increment the loop variable. */
- tmp = fold_build2 (PLUS_EXPR, type, dovar, step);
- gfc_add_modify_expr (&body, dovar, tmp);
+ tmp = fold_build2_loc (loc, PLUS_EXPR, type, dovar, step);
+ gfc_add_modify_loc (loc, &body, dovar, tmp);
+
+ if (gfc_option.rtcheck & GFC_RTCHECK_DO)
+ gfc_add_modify_loc (loc, &body, saved_dovar, dovar);
/* The loop exit. */
- tmp = build1_v (GOTO_EXPR, exit_label);
+ tmp = fold_build1_loc (loc, GOTO_EXPR, void_type_node, exit_label);
TREE_USED (exit_label) = 1;
- tmp = fold_build3 (COND_EXPR, void_type_node,
- cond, tmp, build_empty_stmt ());
+ tmp = fold_build3_loc (loc, COND_EXPR, void_type_node,
+ cond, tmp, build_empty_stmt (loc));
gfc_add_expr_to_block (&body, tmp);
/* Finish the loop body. */
tmp = gfc_finish_block (&body);
- tmp = build1_v (LOOP_EXPR, tmp);
+ tmp = fold_build1_loc (loc, LOOP_EXPR, void_type_node, tmp);
/* Only execute the loop if the number of iterations is positive. */
if (tree_int_cst_sgn (step) > 0)
- cond = fold_build2 (LE_EXPR, boolean_type_node, dovar, to);
+ cond = fold_build2_loc (loc, LE_EXPR, boolean_type_node, dovar,
+ to);
else
- cond = fold_build2 (GE_EXPR, boolean_type_node, dovar, to);
- tmp = fold_build3 (COND_EXPR, void_type_node,
- cond, tmp, build_empty_stmt ());
+ cond = fold_build2_loc (loc, GE_EXPR, boolean_type_node, dovar,
+ to);
+ tmp = fold_build3_loc (loc, COND_EXPR, void_type_node, cond, tmp,
+ build_empty_stmt (loc));
gfc_add_expr_to_block (pblock, tmp);
/* Add the exit label. */
because the loop count itself can overflow. */
tree
-gfc_trans_do (gfc_code * code)
+gfc_trans_do (gfc_code * code, tree exit_cond)
{
gfc_se se;
tree dovar;
+ tree saved_dovar = NULL;
tree from;
tree to;
tree step;
- tree empty;
tree countm1;
tree type;
tree utype;
tree pos_step;
stmtblock_t block;
stmtblock_t body;
+ location_t loc;
gfc_start_block (&block);
+ loc = code->ext.iterator->start->where.lb->location;
+
/* Evaluate all the expressions in the iterator. */
gfc_init_se (&se, NULL);
gfc_conv_expr_lhs (&se, code->ext.iterator->var);
gfc_add_block_to_block (&block, &se.pre);
step = gfc_evaluate_now (se.expr, &block);
+ if (gfc_option.rtcheck & GFC_RTCHECK_DO)
+ {
+ tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, step,
+ build_zero_cst (type));
+ gfc_trans_runtime_check (true, false, tmp, &block, &code->loc,
+ "DO step value is zero");
+ }
+
/* Special case simple loops. */
if (TREE_CODE (type) == INTEGER_TYPE
&& (integer_onep (step)
|| tree_int_cst_equal (step, integer_minus_one_node)))
- return gfc_trans_simple_do (code, &block, dovar, from, to, step);
-
- /* We need a special check for empty loops:
- empty = (step > 0 ? to < from : to > from); */
- pos_step = fold_build2 (GT_EXPR, boolean_type_node, step,
- fold_convert (type, integer_zero_node));
- empty = fold_build3 (COND_EXPR, boolean_type_node, pos_step,
- fold_build2 (LT_EXPR, boolean_type_node, to, from),
- fold_build2 (GT_EXPR, boolean_type_node, to, from));
-
- /* Initialize loop count. This code is executed before we enter the
- loop body. We generate: countm1 = abs(to - from) / abs(step). */
- if (TREE_CODE (type) == INTEGER_TYPE)
- {
- tree ustep;
+ return gfc_trans_simple_do (code, &block, dovar, from, to, step, exit_cond);
- utype = unsigned_type_for (type);
+ pos_step = fold_build2_loc (loc, GT_EXPR, boolean_type_node, step,
+ build_zero_cst (type));
- /* tmp = abs(to - from) / abs(step) */
- ustep = fold_convert (utype, fold_build1 (ABS_EXPR, type, step));
- tmp = fold_build3 (COND_EXPR, type, pos_step,
- fold_build2 (MINUS_EXPR, type, to, from),
- fold_build2 (MINUS_EXPR, type, from, to));
- tmp = fold_build2 (TRUNC_DIV_EXPR, utype, fold_convert (utype, tmp),
- ustep);
- }
+ if (TREE_CODE (type) == INTEGER_TYPE)
+ utype = unsigned_type_for (type);
else
- {
- /* TODO: We could use the same width as the real type.
- This would probably cause more problems that it solves
- when we implement "long double" types. */
- utype = unsigned_type_for (gfc_array_index_type);
- tmp = fold_build2 (MINUS_EXPR, type, to, from);
- tmp = fold_build2 (RDIV_EXPR, type, tmp, step);
- tmp = fold_build1 (FIX_TRUNC_EXPR, utype, tmp);
- }
+ utype = unsigned_type_for (gfc_array_index_type);
countm1 = gfc_create_var (utype, "countm1");
- gfc_add_modify_expr (&block, countm1, tmp);
/* Cycle and exit statements are implemented with gotos. */
cycle_label = gfc_build_label_decl (NULL_TREE);
exit_label = gfc_build_label_decl (NULL_TREE);
TREE_USED (exit_label) = 1;
+ /* Put these labels where they can be found later. */
+ code->cycle_label = cycle_label;
+ code->exit_label = exit_label;
+
/* Initialize the DO variable: dovar = from. */
- gfc_add_modify_expr (&block, dovar, from);
+ gfc_add_modify (&block, dovar, from);
- /* If the loop is empty, go directly to the exit label. */
- tmp = fold_build3 (COND_EXPR, void_type_node, empty,
- build1_v (GOTO_EXPR, exit_label), build_empty_stmt ());
- gfc_add_expr_to_block (&block, tmp);
+ /* Save value for do-tinkering checking. */
+ if (gfc_option.rtcheck & GFC_RTCHECK_DO)
+ {
+ saved_dovar = gfc_create_var (type, ".saved_dovar");
+ gfc_add_modify_loc (loc, &block, saved_dovar, dovar);
+ }
- /* Loop body. */
- gfc_start_block (&body);
+ /* Initialize loop count and jump to exit label if the loop is empty.
+ This code is executed before we enter the loop body. We generate:
+ step_sign = sign(1,step);
+ if (step > 0)
+ {
+ if (to < from)
+ goto exit_label;
+ }
+ else
+ {
+ if (to > from)
+ goto exit_label;
+ }
+ countm1 = (to*step_sign - from*step_sign) / (step*step_sign);
+
+ */
+
+ if (TREE_CODE (type) == INTEGER_TYPE)
+ {
+ tree pos, neg, step_sign, to2, from2, step2;
+
+ /* Calculate SIGN (1,step), as (step < 0 ? -1 : 1) */
+
+ tmp = fold_build2_loc (loc, LT_EXPR, boolean_type_node, step,
+ build_int_cst (TREE_TYPE (step), 0));
+ step_sign = fold_build3_loc (loc, COND_EXPR, type, tmp,
+ build_int_cst (type, -1),
+ build_int_cst (type, 1));
+
+ tmp = fold_build2_loc (loc, LT_EXPR, boolean_type_node, to, from);
+ pos = fold_build3_loc (loc, COND_EXPR, void_type_node, tmp,
+ fold_build1_loc (loc, GOTO_EXPR, void_type_node,
+ exit_label),
+ build_empty_stmt (loc));
+
+ tmp = fold_build2_loc (loc, GT_EXPR, boolean_type_node, to,
+ from);
+ neg = fold_build3_loc (loc, COND_EXPR, void_type_node, tmp,
+ fold_build1_loc (loc, GOTO_EXPR, void_type_node,
+ exit_label),
+ build_empty_stmt (loc));
+ tmp = fold_build3_loc (loc, COND_EXPR, void_type_node,
+ pos_step, pos, neg);
+
+ gfc_add_expr_to_block (&block, tmp);
+
+ /* Calculate the loop count. to-from can overflow, so
+ we cast to unsigned. */
+
+ to2 = fold_build2_loc (loc, MULT_EXPR, type, step_sign, to);
+ from2 = fold_build2_loc (loc, MULT_EXPR, type, step_sign, from);
+ step2 = fold_build2_loc (loc, MULT_EXPR, type, step_sign, step);
+ step2 = fold_convert (utype, step2);
+ tmp = fold_build2_loc (loc, MINUS_EXPR, type, to2, from2);
+ tmp = fold_convert (utype, tmp);
+ tmp = fold_build2_loc (loc, TRUNC_DIV_EXPR, utype, tmp, step2);
+ tmp = fold_build2_loc (loc, MODIFY_EXPR, void_type_node, countm1, tmp);
+ gfc_add_expr_to_block (&block, tmp);
+ }
+ else
+ {
+ /* TODO: We could use the same width as the real type.
+ This would probably cause more problems than it solves
+ when we implement "long double" types. */
- /* Put these labels where they can be found later. We put the
- labels in a TREE_LIST node (because TREE_CHAIN is already
- used). cycle_label goes in TREE_PURPOSE (backend_decl), exit
- label in TREE_VALUE (backend_decl). */
+ tmp = fold_build2_loc (loc, MINUS_EXPR, type, to, from);
+ tmp = fold_build2_loc (loc, RDIV_EXPR, type, tmp, step);
+ tmp = fold_build1_loc (loc, FIX_TRUNC_EXPR, utype, tmp);
+ gfc_add_modify (&block, countm1, tmp);
+
+ /* We need a special check for empty loops:
+ empty = (step > 0 ? to < from : to > from); */
+ tmp = fold_build3_loc (loc, COND_EXPR, boolean_type_node, pos_step,
+ fold_build2_loc (loc, LT_EXPR,
+ boolean_type_node, to, from),
+ fold_build2_loc (loc, GT_EXPR,
+ boolean_type_node, to, from));
+ /* If the loop is empty, go directly to the exit label. */
+ tmp = fold_build3_loc (loc, COND_EXPR, void_type_node, tmp,
+ build1_v (GOTO_EXPR, exit_label),
+ build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&block, tmp);
+ }
- code->block->backend_decl = tree_cons (cycle_label, exit_label, NULL);
+ /* Loop body. */
+ gfc_start_block (&body);
/* Main loop body. */
- tmp = gfc_trans_code (code->block->next);
+ tmp = gfc_trans_code_cond (code->block->next, exit_cond);
gfc_add_expr_to_block (&body, tmp);
/* Label for cycle statements (if needed). */
gfc_add_expr_to_block (&body, tmp);
}
+ /* Check whether someone has modified the loop variable. */
+ if (gfc_option.rtcheck & GFC_RTCHECK_DO)
+ {
+ tmp = fold_build2_loc (loc, NE_EXPR, boolean_type_node, dovar,
+ saved_dovar);
+ gfc_trans_runtime_check (true, false, tmp, &body, &code->loc,
+ "Loop variable has been modified");
+ }
+
+ /* Exit the loop if there is an I/O result condition or error. */
+ if (exit_cond)
+ {
+ tmp = build1_v (GOTO_EXPR, exit_label);
+ tmp = fold_build3_loc (loc, COND_EXPR, void_type_node,
+ exit_cond, tmp,
+ build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&body, tmp);
+ }
+
/* Increment the loop variable. */
- tmp = build2 (PLUS_EXPR, type, dovar, step);
- gfc_add_modify_expr (&body, dovar, tmp);
+ tmp = fold_build2_loc (loc, PLUS_EXPR, type, dovar, step);
+ gfc_add_modify_loc (loc, &body, dovar, tmp);
+
+ if (gfc_option.rtcheck & GFC_RTCHECK_DO)
+ gfc_add_modify_loc (loc, &body, saved_dovar, dovar);
/* End with the loop condition. Loop until countm1 == 0. */
- cond = fold_build2 (EQ_EXPR, boolean_type_node, countm1,
- build_int_cst (utype, 0));
- tmp = build1_v (GOTO_EXPR, exit_label);
- tmp = fold_build3 (COND_EXPR, void_type_node,
- cond, tmp, build_empty_stmt ());
+ cond = fold_build2_loc (loc, EQ_EXPR, boolean_type_node, countm1,
+ build_int_cst (utype, 0));
+ tmp = fold_build1_loc (loc, GOTO_EXPR, void_type_node, exit_label);
+ tmp = fold_build3_loc (loc, COND_EXPR, void_type_node,
+ cond, tmp, build_empty_stmt (loc));
gfc_add_expr_to_block (&body, tmp);
/* Decrement the loop count. */
- tmp = build2 (MINUS_EXPR, utype, countm1, build_int_cst (utype, 1));
- gfc_add_modify_expr (&body, countm1, tmp);
+ tmp = fold_build2_loc (loc, MINUS_EXPR, utype, countm1,
+ build_int_cst (utype, 1));
+ gfc_add_modify_loc (loc, &body, countm1, tmp);
/* End of loop body. */
tmp = gfc_finish_block (&body);
/* The for loop itself. */
- tmp = build1_v (LOOP_EXPR, tmp);
+ tmp = fold_build1_loc (loc, LOOP_EXPR, void_type_node, tmp);
gfc_add_expr_to_block (&block, tmp);
/* Add the exit label. */
exit_label = gfc_build_label_decl (NULL_TREE);
/* Put the labels where they can be found later. See gfc_trans_do(). */
- code->block->backend_decl = tree_cons (cycle_label, exit_label, NULL);
+ code->cycle_label = cycle_label;
+ code->exit_label = exit_label;
/* Create a GIMPLE version of the exit condition. */
gfc_init_se (&cond, NULL);
- gfc_conv_expr_val (&cond, code->expr);
+ gfc_conv_expr_val (&cond, code->expr1);
gfc_add_block_to_block (&block, &cond.pre);
- cond.expr = fold_build1 (TRUTH_NOT_EXPR, boolean_type_node, cond.expr);
+ cond.expr = fold_build1_loc (code->expr1->where.lb->location,
+ TRUTH_NOT_EXPR, boolean_type_node, cond.expr);
/* Build "IF (! cond) GOTO exit_label". */
tmp = build1_v (GOTO_EXPR, exit_label);
TREE_USED (exit_label) = 1;
- tmp = fold_build3 (COND_EXPR, void_type_node,
- cond.expr, tmp, build_empty_stmt ());
+ tmp = fold_build3_loc (code->expr1->where.lb->location, COND_EXPR,
+ void_type_node, cond.expr, tmp,
+ build_empty_stmt (code->expr1->where.lb->location));
gfc_add_expr_to_block (&block, tmp);
/* The main body of the loop. */
gfc_init_block (&block);
/* Build the loop. */
- tmp = build1_v (LOOP_EXPR, tmp);
+ tmp = fold_build1_loc (code->expr1->where.lb->location, LOOP_EXPR,
+ void_type_node, tmp);
gfc_add_expr_to_block (&block, tmp);
/* Add the exit label. */
/* Calculate the switch expression. */
gfc_init_se (&se, NULL);
- gfc_conv_expr_val (&se, code->expr);
+ gfc_conv_expr_val (&se, code->expr1);
gfc_add_block_to_block (&block, &se.pre);
end_label = gfc_build_label_decl (NULL_TREE);
for (c = code->block; c; c = c->block)
{
- for (cp = c->ext.case_list; cp; cp = cp->next)
+ for (cp = c->ext.block.case_list; cp; cp = cp->next)
{
tree low, high;
tree label;
/* Add this case label.
Add parameter 'label', make it match GCC backend. */
- tmp = build3 (CASE_LABEL_EXPR, void_type_node, low, high, label);
+ tmp = fold_build3_loc (input_location, CASE_LABEL_EXPR,
+ void_type_node, low, high, label);
gfc_add_expr_to_block (&body, tmp);
}
always executed, and we don't generate code a COND_EXPR. */
for (c = code->block; c; c = c->block)
{
- for (cp = c->ext.case_list; cp; cp = cp->next)
+ for (cp = c->ext.block.case_list; cp; cp = cp->next)
{
if (cp->low)
{
/* Calculate the switch expression. We always need to do this
because it may have side effects. */
gfc_init_se (&se, NULL);
- gfc_conv_expr_val (&se, code->expr);
+ gfc_conv_expr_val (&se, code->expr1);
gfc_add_block_to_block (&block, &se.pre);
if (t == f && t != NULL)
{
tree true_tree, false_tree, stmt;
- true_tree = build_empty_stmt ();
- false_tree = build_empty_stmt ();
+ true_tree = build_empty_stmt (input_location);
+ false_tree = build_empty_stmt (input_location);
/* If we have a case for .TRUE. and for .FALSE., discard the default case.
Otherwise, if .TRUE. or .FALSE. is missing and there is a default case,
if (f != NULL)
false_tree = gfc_trans_code (f->next);
- stmt = fold_build3 (COND_EXPR, void_type_node, se.expr,
- true_tree, false_tree);
+ stmt = fold_build3_loc (input_location, COND_EXPR, void_type_node,
+ se.expr, true_tree, false_tree);
gfc_add_expr_to_block (&block, stmt);
}
}
+/* The jump table types are stored in static variables to avoid
+ constructing them from scratch every single time. */
+static GTY(()) tree select_struct[2];
+
/* Translate the SELECT CASE construct for CHARACTER case expressions.
Instead of generating compares and jumps, it is far simpler to
generate a data structure describing the cases in order and call a
static tree
gfc_trans_character_select (gfc_code *code)
{
- tree init, node, end_label, tmp, type, case_num, label;
+ tree init, end_label, tmp, type, case_num, label, fndecl;
stmtblock_t block, body;
gfc_case *cp, *d;
gfc_code *c;
- gfc_se se;
- int n;
+ gfc_se se, expr1se;
+ int n, k;
+ VEC(constructor_elt,gc) *inits = NULL;
+
+ tree pchartype = gfc_get_pchar_type (code->expr1->ts.kind);
+
+ /* The jump table types are stored in static variables to avoid
+ constructing them from scratch every single time. */
+ static tree ss_string1[2], ss_string1_len[2];
+ static tree ss_string2[2], ss_string2_len[2];
+ static tree ss_target[2];
+
+ cp = code->block->ext.block.case_list;
+ while (cp->left != NULL)
+ cp = cp->left;
+
+ /* Generate the body */
+ gfc_start_block (&block);
+ gfc_init_se (&expr1se, NULL);
+ gfc_conv_expr_reference (&expr1se, code->expr1);
+
+ gfc_add_block_to_block (&block, &expr1se.pre);
- static tree select_struct;
- static tree ss_string1, ss_string1_len;
- static tree ss_string2, ss_string2_len;
- static tree ss_target;
+ end_label = gfc_build_label_decl (NULL_TREE);
- if (select_struct == NULL)
+ gfc_init_block (&body);
+
+ /* Attempt to optimize length 1 selects. */
+ if (integer_onep (expr1se.string_length))
{
- tree gfc_int4_type_node = gfc_get_int_type (4);
+ for (d = cp; d; d = d->right)
+ {
+ int i;
+ if (d->low)
+ {
+ gcc_assert (d->low->expr_type == EXPR_CONSTANT
+ && d->low->ts.type == BT_CHARACTER);
+ if (d->low->value.character.length > 1)
+ {
+ for (i = 1; i < d->low->value.character.length; i++)
+ if (d->low->value.character.string[i] != ' ')
+ break;
+ if (i != d->low->value.character.length)
+ {
+ if (optimize && d->high && i == 1)
+ {
+ gcc_assert (d->high->expr_type == EXPR_CONSTANT
+ && d->high->ts.type == BT_CHARACTER);
+ if (d->high->value.character.length > 1
+ && (d->low->value.character.string[0]
+ == d->high->value.character.string[0])
+ && d->high->value.character.string[1] != ' '
+ && ((d->low->value.character.string[1] < ' ')
+ == (d->high->value.character.string[1]
+ < ' ')))
+ continue;
+ }
+ break;
+ }
+ }
+ }
+ if (d->high)
+ {
+ gcc_assert (d->high->expr_type == EXPR_CONSTANT
+ && d->high->ts.type == BT_CHARACTER);
+ if (d->high->value.character.length > 1)
+ {
+ for (i = 1; i < d->high->value.character.length; i++)
+ if (d->high->value.character.string[i] != ' ')
+ break;
+ if (i != d->high->value.character.length)
+ break;
+ }
+ }
+ }
+ if (d == NULL)
+ {
+ tree ctype = gfc_get_char_type (code->expr1->ts.kind);
+
+ for (c = code->block; c; c = c->block)
+ {
+ for (cp = c->ext.block.case_list; cp; cp = cp->next)
+ {
+ tree low, high;
+ tree label;
+ gfc_char_t r;
+
+ /* Assume it's the default case. */
+ low = high = NULL_TREE;
+
+ if (cp->low)
+ {
+ /* CASE ('ab') or CASE ('ab':'az') will never match
+ any length 1 character. */
+ if (cp->low->value.character.length > 1
+ && cp->low->value.character.string[1] != ' ')
+ continue;
+
+ if (cp->low->value.character.length > 0)
+ r = cp->low->value.character.string[0];
+ else
+ r = ' ';
+ low = build_int_cst (ctype, r);
+
+ /* If there's only a lower bound, set the high bound
+ to the maximum value of the case expression. */
+ if (!cp->high)
+ high = TYPE_MAX_VALUE (ctype);
+ }
+
+ if (cp->high)
+ {
+ if (!cp->low
+ || (cp->low->value.character.string[0]
+ != cp->high->value.character.string[0]))
+ {
+ if (cp->high->value.character.length > 0)
+ r = cp->high->value.character.string[0];
+ else
+ r = ' ';
+ high = build_int_cst (ctype, r);
+ }
+
+ /* Unbounded case. */
+ if (!cp->low)
+ low = TYPE_MIN_VALUE (ctype);
+ }
+
+ /* Build a label. */
+ label = gfc_build_label_decl (NULL_TREE);
+
+ /* Add this case label.
+ Add parameter 'label', make it match GCC backend. */
+ tmp = fold_build3_loc (input_location, CASE_LABEL_EXPR,
+ void_type_node, low, high, label);
+ gfc_add_expr_to_block (&body, tmp);
+ }
+
+ /* Add the statements for this case. */
+ tmp = gfc_trans_code (c->next);
+ gfc_add_expr_to_block (&body, tmp);
+
+ /* Break to the end of the construct. */
+ tmp = build1_v (GOTO_EXPR, end_label);
+ gfc_add_expr_to_block (&body, tmp);
+ }
+
+ tmp = gfc_string_to_single_character (expr1se.string_length,
+ expr1se.expr,
+ code->expr1->ts.kind);
+ case_num = gfc_create_var (ctype, "case_num");
+ gfc_add_modify (&block, case_num, tmp);
+
+ gfc_add_block_to_block (&block, &expr1se.post);
- select_struct = make_node (RECORD_TYPE);
- TYPE_NAME (select_struct) = get_identifier ("_jump_struct");
+ tmp = gfc_finish_block (&body);
+ tmp = build3_v (SWITCH_EXPR, case_num, tmp, NULL_TREE);
+ gfc_add_expr_to_block (&block, tmp);
+
+ tmp = build1_v (LABEL_EXPR, end_label);
+ gfc_add_expr_to_block (&block, tmp);
+
+ return gfc_finish_block (&block);
+ }
+ }
+
+ if (code->expr1->ts.kind == 1)
+ k = 0;
+ else if (code->expr1->ts.kind == 4)
+ k = 1;
+ else
+ gcc_unreachable ();
+
+ if (select_struct[k] == NULL)
+ {
+ tree *chain = NULL;
+ select_struct[k] = make_node (RECORD_TYPE);
+
+ if (code->expr1->ts.kind == 1)
+ TYPE_NAME (select_struct[k]) = get_identifier ("_jump_struct_char1");
+ else if (code->expr1->ts.kind == 4)
+ TYPE_NAME (select_struct[k]) = get_identifier ("_jump_struct_char4");
+ else
+ gcc_unreachable ();
#undef ADD_FIELD
-#define ADD_FIELD(NAME, TYPE) \
- ss_##NAME = gfc_add_field_to_struct \
- (&(TYPE_FIELDS (select_struct)), select_struct, \
- get_identifier (stringize(NAME)), TYPE)
+#define ADD_FIELD(NAME, TYPE) \
+ ss_##NAME[k] = gfc_add_field_to_struct (select_struct[k], \
+ get_identifier (stringize(NAME)), \
+ TYPE, \
+ &chain)
- ADD_FIELD (string1, pchar_type_node);
- ADD_FIELD (string1_len, gfc_int4_type_node);
+ ADD_FIELD (string1, pchartype);
+ ADD_FIELD (string1_len, gfc_charlen_type_node);
- ADD_FIELD (string2, pchar_type_node);
- ADD_FIELD (string2_len, gfc_int4_type_node);
+ ADD_FIELD (string2, pchartype);
+ ADD_FIELD (string2_len, gfc_charlen_type_node);
ADD_FIELD (target, integer_type_node);
#undef ADD_FIELD
- gfc_finish_type (select_struct);
+ gfc_finish_type (select_struct[k]);
}
- cp = code->block->ext.case_list;
- while (cp->left != NULL)
- cp = cp->left;
-
n = 0;
for (d = cp; d; d = d->right)
d->n = n++;
- end_label = gfc_build_label_decl (NULL_TREE);
-
- /* Generate the body */
- gfc_start_block (&block);
- gfc_init_block (&body);
-
for (c = code->block; c; c = c->block)
{
- for (d = c->ext.case_list; d; d = d->next)
+ for (d = c->ext.block.case_list; d; d = d->next)
{
label = gfc_build_label_decl (NULL_TREE);
- tmp = build3 (CASE_LABEL_EXPR, void_type_node,
- build_int_cst (NULL_TREE, d->n),
- build_int_cst (NULL_TREE, d->n), label);
+ tmp = fold_build3_loc (input_location, CASE_LABEL_EXPR,
+ void_type_node,
+ (d->low == NULL && d->high == NULL)
+ ? NULL : build_int_cst (NULL_TREE, d->n),
+ NULL, label);
gfc_add_expr_to_block (&body, tmp);
}
}
/* Generate the structure describing the branches */
- init = NULL_TREE;
-
- for(d = cp; d; d = d->right)
+ for (d = cp; d; d = d->right)
{
- node = NULL_TREE;
+ VEC(constructor_elt,gc) *node = NULL;
gfc_init_se (&se, NULL);
if (d->low == NULL)
{
- node = tree_cons (ss_string1, null_pointer_node, node);
- node = tree_cons (ss_string1_len, integer_zero_node, node);
+ CONSTRUCTOR_APPEND_ELT (node, ss_string1[k], null_pointer_node);
+ CONSTRUCTOR_APPEND_ELT (node, ss_string1_len[k], integer_zero_node);
}
else
{
gfc_conv_expr_reference (&se, d->low);
- node = tree_cons (ss_string1, se.expr, node);
- node = tree_cons (ss_string1_len, se.string_length, node);
+ CONSTRUCTOR_APPEND_ELT (node, ss_string1[k], se.expr);
+ CONSTRUCTOR_APPEND_ELT (node, ss_string1_len[k], se.string_length);
}
if (d->high == NULL)
{
- node = tree_cons (ss_string2, null_pointer_node, node);
- node = tree_cons (ss_string2_len, integer_zero_node, node);
+ CONSTRUCTOR_APPEND_ELT (node, ss_string2[k], null_pointer_node);
+ CONSTRUCTOR_APPEND_ELT (node, ss_string2_len[k], integer_zero_node);
}
else
{
gfc_init_se (&se, NULL);
gfc_conv_expr_reference (&se, d->high);
- node = tree_cons (ss_string2, se.expr, node);
- node = tree_cons (ss_string2_len, se.string_length, node);
+ CONSTRUCTOR_APPEND_ELT (node, ss_string2[k], se.expr);
+ CONSTRUCTOR_APPEND_ELT (node, ss_string2_len[k], se.string_length);
}
- node = tree_cons (ss_target, build_int_cst (integer_type_node, d->n),
- node);
+ CONSTRUCTOR_APPEND_ELT (node, ss_target[k],
+ build_int_cst (integer_type_node, d->n));
- tmp = build_constructor_from_list (select_struct, nreverse (node));
- init = tree_cons (NULL_TREE, tmp, init);
+ tmp = build_constructor (select_struct[k], node);
+ CONSTRUCTOR_APPEND_ELT (inits, NULL_TREE, tmp);
}
- type = build_array_type (select_struct, build_index_type
- (build_int_cst (NULL_TREE, n - 1)));
+ type = build_array_type (select_struct[k],
+ build_index_type (build_int_cst (NULL_TREE, n-1)));
- init = build_constructor_from_list (type, nreverse(init));
+ init = build_constructor (type, inits);
TREE_CONSTANT (init) = 1;
- TREE_INVARIANT (init) = 1;
TREE_STATIC (init) = 1;
/* Create a static variable to hold the jump table. */
tmp = gfc_create_var (type, "jumptable");
TREE_CONSTANT (tmp) = 1;
- TREE_INVARIANT (tmp) = 1;
TREE_STATIC (tmp) = 1;
TREE_READONLY (tmp) = 1;
DECL_INITIAL (tmp) = init;
/* Build the library call */
init = gfc_build_addr_expr (pvoid_type_node, init);
- gfc_init_se (&se, NULL);
- gfc_conv_expr_reference (&se, code->expr);
-
- gfc_add_block_to_block (&block, &se.pre);
+ if (code->expr1->ts.kind == 1)
+ fndecl = gfor_fndecl_select_string;
+ else if (code->expr1->ts.kind == 4)
+ fndecl = gfor_fndecl_select_string_char4;
+ else
+ gcc_unreachable ();
- tmp = build_call_expr (gfor_fndecl_select_string, 4, init,
- build_int_cst (NULL_TREE, n), se.expr,
- se.string_length);
+ tmp = build_call_expr_loc (input_location,
+ fndecl, 4, init, build_int_cst (NULL_TREE, n),
+ expr1se.expr, expr1se.string_length);
case_num = gfc_create_var (integer_type_node, "case_num");
- gfc_add_modify_expr (&block, case_num, tmp);
+ gfc_add_modify (&block, case_num, tmp);
- gfc_add_block_to_block (&block, &se.post);
+ gfc_add_block_to_block (&block, &expr1se.post);
tmp = gfc_finish_block (&body);
tmp = build3_v (SWITCH_EXPR, case_num, tmp, NULL_TREE);
tree
gfc_trans_select (gfc_code * code)
{
- gcc_assert (code && code->expr);
+ stmtblock_t block;
+ tree body;
+ tree exit_label;
+
+ gcc_assert (code && code->expr1);
+ gfc_init_block (&block);
+
+ /* Build the exit label and hang it in. */
+ exit_label = gfc_build_label_decl (NULL_TREE);
+ code->exit_label = exit_label;
/* Empty SELECT constructs are legal. */
if (code->block == NULL)
- return build_empty_stmt ();
+ body = build_empty_stmt (input_location);
/* Select the correct translation function. */
- switch (code->expr->ts.type)
- {
- case BT_LOGICAL: return gfc_trans_logical_select (code);
- case BT_INTEGER: return gfc_trans_integer_select (code);
- case BT_CHARACTER: return gfc_trans_character_select (code);
- default:
- gfc_internal_error ("gfc_trans_select(): Bad type for case expr.");
- /* Not reached */
- }
+ else
+ switch (code->expr1->ts.type)
+ {
+ case BT_LOGICAL:
+ body = gfc_trans_logical_select (code);
+ break;
+
+ case BT_INTEGER:
+ body = gfc_trans_integer_select (code);
+ break;
+
+ case BT_CHARACTER:
+ body = gfc_trans_character_select (code);
+ break;
+
+ default:
+ gfc_internal_error ("gfc_trans_select(): Bad type for case expr.");
+ /* Not reached */
+ }
+
+ /* Build everything together. */
+ gfc_add_expr_to_block (&block, body);
+ gfc_add_expr_to_block (&block, build1_v (LABEL_EXPR, exit_label));
+
+ return gfc_finish_block (&block);
}
static bool
forall_replace (gfc_expr *expr, gfc_symbol *sym, int *f)
{
- gcc_assert (expr->expr_type == EXPR_VARIABLE);
+ if (expr->expr_type != EXPR_VARIABLE)
+ return false;
if (*f == 2)
*f = 1;
gfc_symbol *sym ATTRIBUTE_UNUSED,
int *f ATTRIBUTE_UNUSED)
{
- gcc_assert (expr->expr_type == EXPR_VARIABLE);
+ if (expr->expr_type != EXPR_VARIABLE)
+ return false;
if (expr->symtree == new_symtree)
expr->symtree = old_symtree;
tree tmp;
/* Build a copy of the lvalue. */
- old_symtree = c->expr->symtree;
+ old_symtree = c->expr1->symtree;
old_sym = old_symtree->n.sym;
e = gfc_lval_expr_from_sym (old_sym);
if (old_sym->attr.dimension)
{
gfc_init_se (&tse, NULL);
- gfc_conv_subref_array_arg (&tse, e, 0, INTENT_IN);
+ gfc_conv_subref_array_arg (&tse, e, 0, INTENT_IN, false);
gfc_add_block_to_block (pre, &tse.pre);
gfc_add_block_to_block (post, &tse.post);
- tse.expr = build_fold_indirect_ref (tse.expr);
+ tse.expr = build_fold_indirect_ref_loc (input_location, tse.expr);
if (e->ts.type != BT_CHARACTER)
{
/* Use the variable offset for the temporary. */
- tmp = gfc_conv_descriptor_offset (tse.expr);
- gfc_add_modify_expr (pre, tmp,
- gfc_conv_array_offset (old_sym->backend_decl));
+ tmp = gfc_conv_array_offset (old_sym->backend_decl);
+ gfc_conv_descriptor_offset_set (pre, tse.expr, tmp);
}
}
else
}
tmp = gfc_trans_scalar_assign (&tse, &rse, e->ts, true,
- e->expr_type == EXPR_VARIABLE);
+ e->expr_type == EXPR_VARIABLE, true);
gfc_add_expr_to_block (pre, tmp);
}
gfc_free_expr (e);
new_sym = gfc_new_symbol (old_sym->name, NULL);
new_sym->ts = old_sym->ts;
new_sym->attr.referenced = 1;
+ new_sym->attr.temporary = 1;
new_sym->attr.dimension = old_sym->attr.dimension;
new_sym->attr.flavor = old_sym->attr.flavor;
/* Go through the expression reference replacing the old_symtree
with the new. */
- forall_replace_symtree (c->expr, old_sym, 2);
+ forall_replace_symtree (c->expr1, old_sym, 2);
/* Now we have made this temporary, we might as well use it for
the right hand side. */
int need_temp;
gfc_symbol *lsym;
- lsym = c->expr->symtree->n.sym;
- need_temp = gfc_check_dependency (c->expr, c->expr2, 0);
+ lsym = c->expr1->symtree->n.sym;
+ need_temp = gfc_check_dependency (c->expr1, c->expr2, 0);
/* Now check for dependencies within the 'variable'
expression itself. These are treated by making a complete
pointer components. We therefore leave these to their
own devices. */
if (lsym->ts.type == BT_DERIVED
- && lsym->ts.derived->attr.pointer_comp)
+ && lsym->ts.u.derived->attr.pointer_comp)
return need_temp;
new_symtree = NULL;
- if (find_forall_index (c->expr, lsym, 2) == SUCCESS)
+ if (find_forall_index (c->expr1, lsym, 2) == SUCCESS)
{
forall_make_variable_temp (c, pre, post);
need_temp = 0;
/* Substrings with dependencies are treated in the same
way. */
- if (c->expr->ts.type == BT_CHARACTER
- && c->expr->ref
+ if (c->expr1->ts.type == BT_CHARACTER
+ && c->expr1->ref
&& c->expr2->expr_type == EXPR_VARIABLE
&& lsym == c->expr2->symtree->n.sym)
{
- for (lref = c->expr->ref; lref; lref = lref->next)
+ for (lref = c->expr1->ref; lref; lref = lref->next)
if (lref->type == REF_SUBSTRING)
break;
for (rref = c->expr2->ref; rref; rref = rref->next)
static void
cleanup_forall_symtrees (gfc_code *c)
{
- forall_restore_symtree (c->expr);
+ forall_restore_symtree (c->expr1);
forall_restore_symtree (c->expr2);
gfc_free (new_symtree->n.sym);
gfc_free (new_symtree);
/* Initialize the mask index outside the FORALL nest. */
if (mask_flag && forall_tmp->mask)
- gfc_add_modify_expr (outer, forall_tmp->maskindex, gfc_index_zero_node);
+ gfc_add_modify (outer, forall_tmp->maskindex, gfc_index_zero_node);
iter = forall_tmp->this_loop;
nvar = forall_tmp->nvar;
gfc_init_block (&block);
/* The exit condition. */
- cond = fold_build2 (LE_EXPR, boolean_type_node,
- count, build_int_cst (TREE_TYPE (count), 0));
+ cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
+ count, build_int_cst (TREE_TYPE (count), 0));
tmp = build1_v (GOTO_EXPR, exit_label);
- tmp = fold_build3 (COND_EXPR, void_type_node,
- cond, tmp, build_empty_stmt ());
+ tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
+ cond, tmp, build_empty_stmt (input_location));
gfc_add_expr_to_block (&block, tmp);
/* The main loop body. */
gfc_add_expr_to_block (&block, body);
/* Increment the loop variable. */
- tmp = build2 (PLUS_EXPR, TREE_TYPE (var), var, step);
- gfc_add_modify_expr (&block, var, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, TREE_TYPE (var), var,
+ step);
+ gfc_add_modify (&block, var, tmp);
/* Advance to the next mask element. Only do this for the
innermost loop. */
if (n == 0 && mask_flag && forall_tmp->mask)
{
tree maskindex = forall_tmp->maskindex;
- tmp = build2 (PLUS_EXPR, gfc_array_index_type,
- maskindex, gfc_index_one_node);
- gfc_add_modify_expr (&block, maskindex, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ maskindex, gfc_index_one_node);
+ gfc_add_modify (&block, maskindex, tmp);
}
/* Decrement the loop counter. */
- tmp = build2 (MINUS_EXPR, TREE_TYPE (var), count,
- build_int_cst (TREE_TYPE (var), 1));
- gfc_add_modify_expr (&block, count, tmp);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (var), count,
+ build_int_cst (TREE_TYPE (var), 1));
+ gfc_add_modify (&block, count, tmp);
body = gfc_finish_block (&block);
/* Loop var initialization. */
gfc_init_block (&block);
- gfc_add_modify_expr (&block, var, start);
+ gfc_add_modify (&block, var, start);
/* Initialize the loop counter. */
- tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (var), step, start);
- tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (var), end, tmp);
- tmp = fold_build2 (TRUNC_DIV_EXPR, TREE_TYPE (var), tmp, step);
- gfc_add_modify_expr (&block, count, tmp);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (var), step,
+ start);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, TREE_TYPE (var), end,
+ tmp);
+ tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR, TREE_TYPE (var),
+ tmp, step);
+ gfc_add_modify (&block, count, tmp);
/* The loop expression. */
tmp = build1_v (LOOP_EXPR, body);
if (mask)
{
tmp = gfc_build_array_ref (mask, maskindex, NULL);
- body = build3_v (COND_EXPR, tmp, body, build_empty_stmt ());
+ body = build3_v (COND_EXPR, tmp, body,
+ build_empty_stmt (input_location));
}
}
body = gfc_trans_forall_loop (forall_tmp, body, mask_flag, &header);
tree tmp;
if (INTEGER_CST_P (size))
- {
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, size,
- gfc_index_one_node);
- }
+ tmp = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
+ size, gfc_index_one_node);
else
tmp = NULL_TREE;
*pdata = convert (pvoid_type_node, tmpvar);
tmp = gfc_call_malloc (pblock, TREE_TYPE (tmpvar), bytesize);
- gfc_add_modify_expr (pblock, tmpvar, tmp);
+ gfc_add_modify (pblock, tmpvar, tmp);
}
return tmpvar;
}
/* Use the scalar assignment as is. */
gfc_add_block_to_block (&block, &lse.pre);
- gfc_add_modify_expr (&block, lse.expr, tmp);
+ gfc_add_modify (&block, lse.expr, tmp);
gfc_add_block_to_block (&block, &lse.post);
/* Increment the count1. */
- tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (count1), count1,
- gfc_index_one_node);
- gfc_add_modify_expr (&block, count1, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, TREE_TYPE (count1),
+ count1, gfc_index_one_node);
+ gfc_add_modify (&block, count1, tmp);
tmp = gfc_finish_block (&block);
}
/* Calculate the bounds of the scalarization. */
gfc_conv_ss_startstride (&loop1);
/* Setup the scalarizing loops. */
- gfc_conv_loop_setup (&loop1);
+ gfc_conv_loop_setup (&loop1, &expr->where);
gfc_mark_ss_chain_used (lss, 1);
/* Use the scalar assignment. */
rse.string_length = lse.string_length;
- tmp = gfc_trans_scalar_assign (&lse, &rse, expr->ts, false, false);
+ tmp = gfc_trans_scalar_assign (&lse, &rse, expr->ts, false, true, true);
/* Form the mask expression according to the mask tree list. */
if (wheremask)
{
wheremaskexpr = gfc_build_array_ref (wheremask, count3, NULL);
if (invert)
- wheremaskexpr = fold_build1 (TRUTH_NOT_EXPR,
- TREE_TYPE (wheremaskexpr),
- wheremaskexpr);
- tmp = fold_build3 (COND_EXPR, void_type_node,
- wheremaskexpr, tmp, build_empty_stmt ());
+ wheremaskexpr = fold_build1_loc (input_location, TRUTH_NOT_EXPR,
+ TREE_TYPE (wheremaskexpr),
+ wheremaskexpr);
+ tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
+ wheremaskexpr, tmp,
+ build_empty_stmt (input_location));
}
gfc_add_expr_to_block (&body, tmp);
/* Increment count1. */
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- count1, gfc_index_one_node);
- gfc_add_modify_expr (&body, count1, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ count1, gfc_index_one_node);
+ gfc_add_modify (&body, count1, tmp);
/* Increment count3. */
if (count3)
{
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- count3, gfc_index_one_node);
- gfc_add_modify_expr (&body, count3, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, count3,
+ gfc_index_one_node);
+ gfc_add_modify (&body, count3, tmp);
}
/* Generate the copying loops. */
gfc_add_ss_to_loop (&loop, rss);
gfc_conv_ss_startstride (&loop);
- gfc_conv_loop_setup (&loop);
+ gfc_conv_loop_setup (&loop, &expr2->where);
gfc_mark_ss_chain_used (rss, 1);
/* Start the loop body. */
/* Use the scalar assignment. */
lse.string_length = rse.string_length;
tmp = gfc_trans_scalar_assign (&lse, &rse, expr2->ts, true,
- expr2->expr_type == EXPR_VARIABLE);
+ expr2->expr_type == EXPR_VARIABLE, true);
/* Form the mask expression according to the mask tree list. */
if (wheremask)
{
wheremaskexpr = gfc_build_array_ref (wheremask, count3, NULL);
if (invert)
- wheremaskexpr = fold_build1 (TRUTH_NOT_EXPR,
- TREE_TYPE (wheremaskexpr),
- wheremaskexpr);
- tmp = fold_build3 (COND_EXPR, void_type_node,
- wheremaskexpr, tmp, build_empty_stmt ());
+ wheremaskexpr = fold_build1_loc (input_location, TRUTH_NOT_EXPR,
+ TREE_TYPE (wheremaskexpr),
+ wheremaskexpr);
+ tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
+ wheremaskexpr, tmp,
+ build_empty_stmt (input_location));
}
gfc_add_expr_to_block (&body1, tmp);
gfc_add_block_to_block (&block, &body1);
/* Increment count1. */
- tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (count1), count1,
- gfc_index_one_node);
- gfc_add_modify_expr (&block, count1, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, TREE_TYPE (count1),
+ count1, gfc_index_one_node);
+ gfc_add_modify (&block, count1, tmp);
}
else
{
/* Increment count1. */
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- count1, gfc_index_one_node);
- gfc_add_modify_expr (&body1, count1, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ count1, gfc_index_one_node);
+ gfc_add_modify (&body1, count1, tmp);
/* Increment count3. */
if (count3)
{
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- count3, gfc_index_one_node);
- gfc_add_modify_expr (&body1, count3, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type,
+ count3, gfc_index_one_node);
+ gfc_add_modify (&body1, count3, tmp);
}
/* Generate the copying loops. */
loop.array_parameter = 1;
/* Calculate the bounds of the scalarization. */
- save_flag = flag_bounds_check;
- flag_bounds_check = 0;
+ save_flag = gfc_option.rtcheck;
+ gfc_option.rtcheck &= !GFC_RTCHECK_BOUNDS;
gfc_conv_ss_startstride (&loop);
- flag_bounds_check = save_flag;
- gfc_conv_loop_setup (&loop);
+ gfc_option.rtcheck = save_flag;
+ gfc_conv_loop_setup (&loop, &expr2->where);
/* Figure out how many elements we need. */
for (i = 0; i < loop.dimen; i++)
{
- tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
- gfc_index_one_node, loop.from[i]);
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- tmp, loop.to[i]);
- size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type,
+ gfc_index_one_node, loop.from[i]);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, tmp, loop.to[i]);
+ size = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, size, tmp);
}
gfc_add_block_to_block (pblock, &loop.pre);
size = gfc_evaluate_now (size, pblock);
&& !forall_tmp->mask
&& INTEGER_CST_P (forall_tmp->size))
{
- inner_size = fold_build2 (MULT_EXPR, gfc_array_index_type,
- inner_size, forall_tmp->size);
+ inner_size = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type,
+ inner_size, forall_tmp->size);
forall_tmp = forall_tmp->prev_nest;
}
/* Otherwise, create a temporary variable to compute the result. */
number = gfc_create_var (gfc_array_index_type, "num");
- gfc_add_modify_expr (block, number, gfc_index_zero_node);
+ gfc_add_modify (block, number, gfc_index_zero_node);
gfc_start_block (&body);
if (inner_size_body)
gfc_add_block_to_block (&body, inner_size_body);
if (forall_tmp)
- tmp = build2 (PLUS_EXPR, gfc_array_index_type, number,
- inner_size);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, number, inner_size);
else
tmp = inner_size;
- gfc_add_modify_expr (&body, number, tmp);
+ gfc_add_modify (&body, number, tmp);
tmp = gfc_finish_block (&body);
/* Generate loops. */
unit = fold_convert (gfc_array_index_type, TYPE_SIZE_UNIT (type));
if (!integer_onep (unit))
- bytesize = fold_build2 (MULT_EXPR, gfc_array_index_type, size, unit);
+ bytesize = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, size, unit);
else
bytesize = size;
tmp = gfc_do_allocate (bytesize, size, ptemp1, block, type);
if (*ptemp1)
- tmp = build_fold_indirect_ref (tmp);
+ tmp = build_fold_indirect_ref_loc (input_location, tmp);
return tmp;
}
if (wheremask)
{
count = gfc_create_var (gfc_array_index_type, "count");
- gfc_add_modify_expr (block, count, gfc_index_zero_node);
+ gfc_add_modify (block, count, gfc_index_zero_node);
}
else
count = NULL;
/* Initialize count1. */
- gfc_add_modify_expr (block, count1, gfc_index_zero_node);
+ gfc_add_modify (block, count1, gfc_index_zero_node);
/* Calculate the size of temporary needed in the assignment. Return loop, lss
and rss which are used in function generate_loop_for_rhs_to_temp(). */
&lss, &rss);
/* The type of LHS. Used in function allocate_temp_for_forall_nest */
- if (expr1->ts.type == BT_CHARACTER && expr1->ts.cl->length)
+ if (expr1->ts.type == BT_CHARACTER && expr1->ts.u.cl->length)
{
- if (!expr1->ts.cl->backend_decl)
+ if (!expr1->ts.u.cl->backend_decl)
{
gfc_se tse;
gfc_init_se (&tse, NULL);
- gfc_conv_expr (&tse, expr1->ts.cl->length);
- expr1->ts.cl->backend_decl = tse.expr;
+ gfc_conv_expr (&tse, expr1->ts.u.cl->length);
+ expr1->ts.u.cl->backend_decl = tse.expr;
}
type = gfc_get_character_type_len (gfc_default_character_kind,
- expr1->ts.cl->backend_decl);
+ expr1->ts.u.cl->backend_decl);
}
else
type = gfc_typenode_for_spec (&expr1->ts);
gfc_add_expr_to_block (block, tmp);
/* Reset count1. */
- gfc_add_modify_expr (block, count1, gfc_index_zero_node);
+ gfc_add_modify (block, count1, gfc_index_zero_node);
/* Reset count. */
if (wheremask)
- gfc_add_modify_expr (block, count, gfc_index_zero_node);
+ gfc_add_modify (block, count, gfc_index_zero_node);
/* Generate codes to copy the temporary to lhs. */
tmp = generate_loop_for_temp_to_lhs (expr1, tmp1, count, count1,
tree tmp, tmp1, ptemp1;
count = gfc_create_var (gfc_array_index_type, "count");
- gfc_add_modify_expr (block, count, gfc_index_zero_node);
+ gfc_add_modify (block, count, gfc_index_zero_node);
inner_size = integer_one_node;
lss = gfc_walk_expr (expr1);
rse.want_pointer = 1;
gfc_conv_expr (&rse, expr2);
gfc_add_block_to_block (&body, &rse.pre);
- gfc_add_modify_expr (&body, lse.expr,
+ gfc_add_modify (&body, lse.expr,
fold_convert (TREE_TYPE (lse.expr), rse.expr));
gfc_add_block_to_block (&body, &rse.post);
/* Increment count. */
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- count, gfc_index_one_node);
- gfc_add_modify_expr (&body, count, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ count, gfc_index_one_node);
+ gfc_add_modify (&body, count, tmp);
tmp = gfc_finish_block (&body);
gfc_add_expr_to_block (block, tmp);
/* Reset count. */
- gfc_add_modify_expr (block, count, gfc_index_zero_node);
+ gfc_add_modify (block, count, gfc_index_zero_node);
gfc_start_block (&body);
gfc_init_se (&lse, NULL);
lse.want_pointer = 1;
gfc_conv_expr (&lse, expr1);
gfc_add_block_to_block (&body, &lse.pre);
- gfc_add_modify_expr (&body, lse.expr, rse.expr);
+ gfc_add_modify (&body, lse.expr, rse.expr);
gfc_add_block_to_block (&body, &lse.post);
/* Increment count. */
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- count, gfc_index_one_node);
- gfc_add_modify_expr (&body, count, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ count, gfc_index_one_node);
+ gfc_add_modify (&body, count, tmp);
tmp = gfc_finish_block (&body);
/* Generate body and loops according to the information in
/* Setup the scalarizing loops and bounds. */
gfc_conv_ss_startstride (&loop);
- gfc_conv_loop_setup (&loop);
+ gfc_conv_loop_setup (&loop, &expr2->where);
info = &rss->data.info;
desc = info->descriptor;
/* Make a new descriptor. */
parmtype = gfc_get_element_type (TREE_TYPE (desc));
- parmtype = gfc_get_array_type_bounds (parmtype, loop.dimen,
- loop.from, loop.to, 1);
+ parmtype = gfc_get_array_type_bounds (parmtype, loop.dimen, 0,
+ loop.from, loop.to, 1,
+ GFC_ARRAY_UNKNOWN, true);
/* Allocate temporary for nested forall construct. */
tmp1 = allocate_temp_for_forall_nest (nested_forall_info, parmtype,
gfc_add_block_to_block (&body, &lse.post);
/* Increment count. */
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- count, gfc_index_one_node);
- gfc_add_modify_expr (&body, count, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ count, gfc_index_one_node);
+ gfc_add_modify (&body, count, tmp);
tmp = gfc_finish_block (&body);
gfc_add_expr_to_block (block, tmp);
/* Reset count. */
- gfc_add_modify_expr (block, count, gfc_index_zero_node);
+ gfc_add_modify (block, count, gfc_index_zero_node);
parm = gfc_build_array_ref (tmp1, count, NULL);
lss = gfc_walk_expr (expr1);
gfc_init_se (&lse, NULL);
gfc_conv_expr_descriptor (&lse, expr1, lss);
- gfc_add_modify_expr (&lse.pre, lse.expr, parm);
+ gfc_add_modify (&lse.pre, lse.expr, parm);
gfc_start_block (&body);
gfc_add_block_to_block (&body, &lse.pre);
gfc_add_block_to_block (&body, &lse.post);
/* Increment count. */
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- count, gfc_index_one_node);
- gfc_add_modify_expr (&body, count, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ count, gfc_index_one_node);
+ gfc_add_modify (&body, count, tmp);
tmp = gfc_finish_block (&body);
bool need_mask;
/* Do nothing if the mask is false. */
- if (code->expr
- && code->expr->expr_type == EXPR_CONSTANT
- && !code->expr->value.logical)
- return build_empty_stmt ();
+ if (code->expr1
+ && code->expr1->expr_type == EXPR_CONSTANT
+ && !code->expr1->value.logical)
+ return build_empty_stmt (input_location);
n = 0;
/* Count the FORALL index number. */
for (n = 0; n < nvar; n++)
{
/* size = (end + step - start) / step. */
- tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (start[n]),
- step[n], start[n]);
- tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (end[n]), end[n], tmp);
-
- tmp = fold_build2 (FLOOR_DIV_EXPR, TREE_TYPE (tmp), tmp, step[n]);
+ tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (start[n]),
+ step[n], start[n]);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, TREE_TYPE (end[n]),
+ end[n], tmp);
+ tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR, TREE_TYPE (tmp),
+ tmp, step[n]);
tmp = convert (gfc_array_index_type, tmp);
- size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
+ size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ size, tmp);
}
/* Record the nvar and size of current forall level. */
info->nvar = nvar;
info->size = size;
- if (code->expr)
+ if (code->expr1)
{
/* If the mask is .true., consider the FORALL unconditional. */
- if (code->expr->expr_type == EXPR_CONSTANT
- && code->expr->value.logical)
+ if (code->expr1->expr_type == EXPR_CONSTANT
+ && code->expr1->value.logical)
need_mask = false;
else
need_mask = true;
/* As the mask array can be very big, prefer compact boolean types. */
tree mask_type = gfc_get_logical_type (gfc_logical_kinds[0].kind);
- gfc_add_modify_expr (&block, maskindex, gfc_index_zero_node);
+ gfc_add_modify (&block, maskindex, gfc_index_zero_node);
/* Start of mask assignment loop body. */
gfc_start_block (&body);
/* Evaluate the mask expression. */
gfc_init_se (&se, NULL);
- gfc_conv_expr_val (&se, code->expr);
+ gfc_conv_expr_val (&se, code->expr1);
gfc_add_block_to_block (&body, &se.pre);
/* Store the mask. */
se.expr = convert (mask_type, se.expr);
tmp = gfc_build_array_ref (mask, maskindex, NULL);
- gfc_add_modify_expr (&body, tmp, se.expr);
+ gfc_add_modify (&body, tmp, se.expr);
/* Advance to the next mask element. */
- tmp = build2 (PLUS_EXPR, gfc_array_index_type,
- maskindex, gfc_index_one_node);
- gfc_add_modify_expr (&body, maskindex, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ maskindex, gfc_index_one_node);
+ gfc_add_modify (&body, maskindex, tmp);
/* Generate the loops. */
tmp = gfc_finish_block (&body);
/* Temporaries due to array assignment data dependencies introduce
no end of problems. */
if (need_temp)
- gfc_trans_assign_need_temp (c->expr, c->expr2, NULL, false,
+ gfc_trans_assign_need_temp (c->expr1, c->expr2, NULL, false,
nested_forall_info, &block);
else
{
/* Use the normal assignment copying routines. */
- assign = gfc_trans_assignment (c->expr, c->expr2, false);
+ assign = gfc_trans_assignment (c->expr1, c->expr2, false, true);
/* Generate body and loops. */
tmp = gfc_trans_nested_forall_loop (nested_forall_info,
/* Pointer assignment inside FORALL. */
case EXEC_POINTER_ASSIGN:
- need_temp = gfc_check_dependency (c->expr, c->expr2, 0);
+ need_temp = gfc_check_dependency (c->expr1, c->expr2, 0);
if (need_temp)
- gfc_trans_pointer_assign_need_temp (c->expr, c->expr2,
+ gfc_trans_pointer_assign_need_temp (c->expr1, c->expr2,
nested_forall_info, &block);
else
{
/* Use the normal assignment copying routines. */
- assign = gfc_trans_pointer_assignment (c->expr, c->expr2);
+ assign = gfc_trans_pointer_assignment (c->expr1, c->expr2);
/* Generate body and loops. */
tmp = gfc_trans_nested_forall_loop (nested_forall_info,
/* Explicit subroutine calls are prevented by the frontend but interface
assignments can legitimately produce them. */
case EXEC_ASSIGN_CALL:
- assign = gfc_trans_call (c, true);
+ assign = gfc_trans_call (c, true, NULL_TREE, NULL_TREE, false);
tmp = gfc_trans_nested_forall_loop (nested_forall_info, assign, 1);
gfc_add_expr_to_block (&block, tmp);
break;
gfc_free (varexpr);
gfc_free (saved_vars);
+ for (this_forall = info->this_loop; this_forall;)
+ {
+ iter_info *next = this_forall->next;
+ gfc_free (this_forall);
+ this_forall = next;
+ }
+
/* Free the space for this forall_info. */
gfc_free (info);
/* Variable to index the temporary. */
count = gfc_create_var (gfc_array_index_type, "count");
/* Initialize count. */
- gfc_add_modify_expr (block, count, gfc_index_zero_node);
+ gfc_add_modify (block, count, gfc_index_zero_node);
gfc_start_block (&body);
gfc_add_ss_to_loop (&loop, rss);
gfc_conv_ss_startstride (&loop);
- gfc_conv_loop_setup (&loop);
+ gfc_conv_loop_setup (&loop, &me->where);
gfc_mark_ss_chain_used (rss, 1);
/* Start the loop body. */
gfc_add_block_to_block (&body1, &lse.pre);
gfc_add_block_to_block (&body1, &rse.pre);
- gfc_add_modify_expr (&body1, cond, fold_convert (mask_type, rse.expr));
+ gfc_add_modify (&body1, cond, fold_convert (mask_type, rse.expr));
if (mask && (cmask || pmask))
{
tmp = gfc_build_array_ref (mask, count, NULL);
if (invert)
- tmp = fold_build1 (TRUTH_NOT_EXPR, mask_type, tmp);
- gfc_add_modify_expr (&body1, mtmp, tmp);
+ tmp = fold_build1_loc (input_location, TRUTH_NOT_EXPR, mask_type, tmp);
+ gfc_add_modify (&body1, mtmp, tmp);
}
if (cmask)
tmp1 = gfc_build_array_ref (cmask, count, NULL);
tmp = cond;
if (mask)
- tmp = build2 (TRUTH_AND_EXPR, mask_type, mtmp, tmp);
- gfc_add_modify_expr (&body1, tmp1, tmp);
+ tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR, mask_type,
+ mtmp, tmp);
+ gfc_add_modify (&body1, tmp1, tmp);
}
if (pmask)
{
tmp1 = gfc_build_array_ref (pmask, count, NULL);
- tmp = build1 (TRUTH_NOT_EXPR, mask_type, cond);
+ tmp = fold_build1_loc (input_location, TRUTH_NOT_EXPR, mask_type, cond);
if (mask)
- tmp = build2 (TRUTH_AND_EXPR, mask_type, mtmp, tmp);
- gfc_add_modify_expr (&body1, tmp1, tmp);
+ tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR, mask_type, mtmp,
+ tmp);
+ gfc_add_modify (&body1, tmp1, tmp);
}
gfc_add_block_to_block (&body1, &lse.post);
else
{
/* Increment count. */
- tmp1 = fold_build2 (PLUS_EXPR, gfc_array_index_type, count,
- gfc_index_one_node);
- gfc_add_modify_expr (&body1, count, tmp1);
+ tmp1 = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ count, gfc_index_one_node);
+ gfc_add_modify (&body1, count, tmp1);
/* Generate the copying loops. */
gfc_trans_scalarizing_loops (&loop, &body1);
gfc_trans_where_assign (gfc_expr *expr1, gfc_expr *expr2,
tree mask, bool invert,
tree count1, tree count2,
- gfc_symbol *sym)
+ gfc_code *cnext)
{
gfc_se lse;
gfc_se rse;
stmtblock_t body;
tree index, maskexpr;
+ /* A defined assignment. */
+ if (cnext && cnext->resolved_sym)
+ return gfc_trans_call (cnext, true, mask, count1, invert);
+
#if 0
/* TODO: handle this special case.
Special case a single function returning an array. */
{
/* The rhs is scalar. Add a ss for the expression. */
rss = gfc_get_ss ();
+ rss->where = 1;
rss->next = gfc_ss_terminator;
rss->type = GFC_SS_SCALAR;
rss->expr = expr2;
gfc_conv_resolve_dependencies (&loop, lss_section, rss);
/* Setup the scalarizing loops. */
- gfc_conv_loop_setup (&loop);
+ gfc_conv_loop_setup (&loop, &expr2->where);
/* Setup the gfc_se structures. */
gfc_copy_loopinfo_to_se (&lse, &loop);
/* Translate the expression. */
gfc_conv_expr (&rse, expr2);
if (lss != gfc_ss_terminator && loop.temp_ss != NULL)
- {
- gfc_conv_tmp_array_ref (&lse);
- gfc_advance_se_ss_chain (&lse);
- }
+ gfc_conv_tmp_array_ref (&lse);
else
gfc_conv_expr (&lse, expr1);
index = count1;
maskexpr = gfc_build_array_ref (mask, index, NULL);
if (invert)
- maskexpr = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (maskexpr), maskexpr);
+ maskexpr = fold_build1_loc (input_location, TRUTH_NOT_EXPR,
+ TREE_TYPE (maskexpr), maskexpr);
/* Use the scalar assignment as is. */
- if (sym == NULL)
- tmp = gfc_trans_scalar_assign (&lse, &rse, expr1->ts,
- loop.temp_ss != NULL, false);
- else
- tmp = gfc_conv_operator_assign (&lse, &rse, sym);
+ tmp = gfc_trans_scalar_assign (&lse, &rse, expr1->ts,
+ loop.temp_ss != NULL, false, true);
- tmp = build3_v (COND_EXPR, maskexpr, tmp, build_empty_stmt ());
+ tmp = build3_v (COND_EXPR, maskexpr, tmp, build_empty_stmt (input_location));
gfc_add_expr_to_block (&body, tmp);
if (lss == gfc_ss_terminator)
{
/* Increment count1. */
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- count1, gfc_index_one_node);
- gfc_add_modify_expr (&body, count1, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ count1, gfc_index_one_node);
+ gfc_add_modify (&body, count1, tmp);
/* Use the scalar assignment as is. */
gfc_add_block_to_block (&block, &body);
{
/* Increment count1 before finish the main body of a scalarized
expression. */
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- count1, gfc_index_one_node);
- gfc_add_modify_expr (&body, count1, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, count1, gfc_index_one_node);
+ gfc_add_modify (&body, count1, tmp);
gfc_trans_scalarized_loop_boundary (&loop, &body);
/* We need to copy the temporary to the actual lhs. */
lse.ss = lss;
gfc_conv_tmp_array_ref (&rse);
- gfc_advance_se_ss_chain (&rse);
gfc_conv_expr (&lse, expr1);
gcc_assert (lse.ss == gfc_ss_terminator
index = count2;
maskexpr = gfc_build_array_ref (mask, index, NULL);
if (invert)
- maskexpr = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (maskexpr),
- maskexpr);
+ maskexpr = fold_build1_loc (input_location, TRUTH_NOT_EXPR,
+ TREE_TYPE (maskexpr), maskexpr);
/* Use the scalar assignment as is. */
- tmp = gfc_trans_scalar_assign (&lse, &rse, expr1->ts, false, false);
- tmp = build3_v (COND_EXPR, maskexpr, tmp, build_empty_stmt ());
+ tmp = gfc_trans_scalar_assign (&lse, &rse, expr1->ts, false, false,
+ true);
+ tmp = build3_v (COND_EXPR, maskexpr, tmp,
+ build_empty_stmt (input_location));
gfc_add_expr_to_block (&body, tmp);
/* Increment count2. */
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- count2, gfc_index_one_node);
- gfc_add_modify_expr (&body, count2, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, count2,
+ gfc_index_one_node);
+ gfc_add_modify (&body, count2, tmp);
}
else
{
/* Increment count1. */
- tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
- count1, gfc_index_one_node);
- gfc_add_modify_expr (&body, count1, tmp);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, count1,
+ gfc_index_one_node);
+ gfc_add_modify (&body, count1, tmp);
}
/* Generate the copying loops. */
gfc_code *cblock;
gfc_code *cnext;
tree tmp;
+ tree cond;
tree count1, count2;
bool need_cmask;
bool need_pmask;
/* Two clauses, the first empty, the second non-empty. */
else if (mask)
{
- need_cmask = (cblock->block->expr != 0);
+ need_cmask = (cblock->block->expr1 != 0);
need_pmask = true;
}
else
{
/* Calculate the size of temporary needed by the mask-expr. */
gfc_init_block (&inner_size_body);
- inner_size = compute_inner_temp_size (cblock->expr, cblock->expr,
+ inner_size = compute_inner_temp_size (cblock->expr1, cblock->expr1,
&inner_size_body, &lss, &rss);
+ gfc_free_ss_chain (lss);
+ gfc_free_ss_chain (rss);
+
/* Calculate the total size of temporary needed. */
size = compute_overall_iter_number (nested_forall_info, inner_size,
&inner_size_body, block);
+ /* Check whether the size is negative. */
+ cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, size,
+ gfc_index_zero_node);
+ size = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type,
+ cond, gfc_index_zero_node, size);
+ size = gfc_evaluate_now (size, block);
+
/* Allocate temporary for WHERE mask if needed. */
if (need_cmask)
cmask = allocate_temp_for_forall_nest_1 (mask_type, size, block,
bottom of the loop. */
/* Has mask-expr. */
- if (cblock->expr)
+ if (cblock->expr1)
{
/* Ensure that the WHERE mask will be evaluated exactly once.
If there are no statements in this WHERE/ELSEWHERE clause,
If this is the last clause of the WHERE construct, then
we don't need to update the pending control mask (pmask). */
if (mask)
- gfc_evaluate_where_mask (cblock->expr, nested_forall_info,
+ gfc_evaluate_where_mask (cblock->expr1, nested_forall_info,
mask, invert,
cblock->next ? cmask : NULL_TREE,
cblock->block ? pmask : NULL_TREE,
mask_type, block);
else
- gfc_evaluate_where_mask (cblock->expr, nested_forall_info,
+ gfc_evaluate_where_mask (cblock->expr1, nested_forall_info,
NULL_TREE, false,
(cblock->next || cblock->block)
? cmask : NULL_TREE,
goto evaluate;
case EXEC_ASSIGN:
- expr1 = cnext->expr;
+ expr1 = cnext->expr1;
expr2 = cnext->expr2;
evaluate:
if (nested_forall_info != NULL)
/* Variables to control maskexpr. */
count1 = gfc_create_var (gfc_array_index_type, "count1");
count2 = gfc_create_var (gfc_array_index_type, "count2");
- gfc_add_modify_expr (block, count1, gfc_index_zero_node);
- gfc_add_modify_expr (block, count2, gfc_index_zero_node);
+ gfc_add_modify (block, count1, gfc_index_zero_node);
+ gfc_add_modify (block, count2, gfc_index_zero_node);
tmp = gfc_trans_where_assign (expr1, expr2,
cmask, invert,
count1, count2,
- cnext->resolved_sym);
+ cnext);
tmp = gfc_trans_nested_forall_loop (nested_forall_info,
tmp, 1);
/* Variables to control maskexpr. */
count1 = gfc_create_var (gfc_array_index_type, "count1");
count2 = gfc_create_var (gfc_array_index_type, "count2");
- gfc_add_modify_expr (block, count1, gfc_index_zero_node);
- gfc_add_modify_expr (block, count2, gfc_index_zero_node);
+ gfc_add_modify (block, count1, gfc_index_zero_node);
+ gfc_add_modify (block, count2, gfc_index_zero_node);
tmp = gfc_trans_where_assign (expr1, expr2,
cmask, invert,
count1, count2,
- cnext->resolved_sym);
+ cnext);
gfc_add_expr_to_block (block, tmp);
}
gfc_ss *edss = 0;
gfc_ss *esss = 0;
- cond = cblock->expr;
- tdst = cblock->next->expr;
+ /* Allow the scalarizer to workshare simple where loops. */
+ if (ompws_flags & OMPWS_WORKSHARE_FLAG)
+ ompws_flags |= OMPWS_SCALARIZER_WS;
+
+ cond = cblock->expr1;
+ tdst = cblock->next->expr1;
tsrc = cblock->next->expr2;
- edst = eblock ? eblock->next->expr : NULL;
+ edst = eblock ? eblock->next->expr1 : NULL;
esrc = eblock ? eblock->next->expr2 : NULL;
gfc_start_block (&block);
if (tsss == gfc_ss_terminator)
{
tsss = gfc_get_ss ();
+ tsss->where = 1;
tsss->next = gfc_ss_terminator;
tsss->type = GFC_SS_SCALAR;
tsss->expr = tsrc;
if (esss == gfc_ss_terminator)
{
esss = gfc_get_ss ();
+ esss->where = 1;
esss->next = gfc_ss_terminator;
esss->type = GFC_SS_SCALAR;
esss->expr = esrc;
}
gfc_conv_ss_startstride (&loop);
- gfc_conv_loop_setup (&loop);
+ gfc_conv_loop_setup (&loop, &tdst->where);
gfc_mark_ss_chain_used (css, 1);
gfc_mark_ss_chain_used (tdss, 1);
gfc_conv_expr (&tsse, tsrc);
if (tdss != gfc_ss_terminator && loop.temp_ss != NULL)
- {
- gfc_conv_tmp_array_ref (&tdse);
- gfc_advance_se_ss_chain (&tdse);
- }
+ gfc_conv_tmp_array_ref (&tdse);
else
gfc_conv_expr (&tdse, tdst);
{
gfc_conv_expr (&esse, esrc);
if (edss != gfc_ss_terminator && loop.temp_ss != NULL)
- {
- gfc_conv_tmp_array_ref (&edse);
- gfc_advance_se_ss_chain (&edse);
- }
+ gfc_conv_tmp_array_ref (&edse);
else
- gfc_conv_expr (&edse, edst);
+ gfc_conv_expr (&edse, edst);
}
- tstmt = gfc_trans_scalar_assign (&tdse, &tsse, tdst->ts, false, false);
- estmt = eblock ? gfc_trans_scalar_assign (&edse, &esse, edst->ts, false, false)
- : build_empty_stmt ();
+ tstmt = gfc_trans_scalar_assign (&tdse, &tsse, tdst->ts, false, false, true);
+ estmt = eblock ? gfc_trans_scalar_assign (&edse, &esse, edst->ts, false,
+ false, true)
+ : build_empty_stmt (input_location);
tmp = build3_v (COND_EXPR, cexpr, tstmt, estmt);
gfc_add_expr_to_block (&body, tmp);
gfc_add_block_to_block (&body, &cse.post);
/* A simple "WHERE (cond) x = y" statement or block is
dependence free if cond is not dependent upon writing x,
and the source y is unaffected by the destination x. */
- if (!gfc_check_dependency (cblock->next->expr,
- cblock->expr, 0)
- && !gfc_check_dependency (cblock->next->expr,
+ if (!gfc_check_dependency (cblock->next->expr1,
+ cblock->expr1, 0)
+ && !gfc_check_dependency (cblock->next->expr1,
cblock->next->expr2, 0))
return gfc_trans_where_3 (cblock, NULL);
}
- else if (!eblock->expr
+ else if (!eblock->expr1
&& !eblock->block
&& eblock->next
&& eblock->next->op == EXEC_ASSIGN
block is dependence free if cond is not dependent on writes
to x1 and x2, y1 is not dependent on writes to x2, and y2
is not dependent on writes to x1, and both y's are not
- dependent upon their own x's. */
- if (!gfc_check_dependency(cblock->next->expr,
- cblock->expr, 0)
- && !gfc_check_dependency(eblock->next->expr,
- cblock->expr, 0)
- && !gfc_check_dependency(cblock->next->expr,
- eblock->next->expr2, 0)
- && !gfc_check_dependency(eblock->next->expr,
- cblock->next->expr2, 0)
- && !gfc_check_dependency(cblock->next->expr,
- cblock->next->expr2, 0)
- && !gfc_check_dependency(eblock->next->expr,
- eblock->next->expr2, 0))
+ dependent upon their own x's. In addition to this, the
+ final two dependency checks below exclude all but the same
+ array reference if the where and elsewhere destinations
+ are the same. In short, this is VERY conservative and this
+ is needed because the two loops, required by the standard,
+ are coalesced in gfc_trans_where_3. */
+ if (!gfc_check_dependency(cblock->next->expr1,
+ cblock->expr1, 0)
+ && !gfc_check_dependency(eblock->next->expr1,
+ cblock->expr1, 0)
+ && !gfc_check_dependency(cblock->next->expr1,
+ eblock->next->expr2, 1)
+ && !gfc_check_dependency(eblock->next->expr1,
+ cblock->next->expr2, 1)
+ && !gfc_check_dependency(cblock->next->expr1,
+ cblock->next->expr2, 1)
+ && !gfc_check_dependency(eblock->next->expr1,
+ eblock->next->expr2, 1)
+ && !gfc_check_dependency(cblock->next->expr1,
+ eblock->next->expr1, 0)
+ && !gfc_check_dependency(eblock->next->expr1,
+ cblock->next->expr1, 0))
return gfc_trans_where_3 (cblock, eblock);
}
}
{
tree cycle_label;
- cycle_label = TREE_PURPOSE (code->ext.whichloop->backend_decl);
+ cycle_label = code->ext.which_construct->cycle_label;
+ gcc_assert (cycle_label);
+
TREE_USED (cycle_label) = 1;
return build1_v (GOTO_EXPR, cycle_label);
}
{
tree exit_label;
- exit_label = TREE_VALUE (code->ext.whichloop->backend_decl);
+ exit_label = code->ext.which_construct->exit_label;
+ gcc_assert (exit_label);
+
TREE_USED (exit_label) = 1;
return build1_v (GOTO_EXPR, exit_label);
}
tree stat;
tree pstat;
tree error_label;
+ tree memsz;
stmtblock_t block;
- if (!code->ext.alloc_list)
+ if (!code->ext.alloc.list)
return NULL_TREE;
+ pstat = stat = error_label = tmp = memsz = NULL_TREE;
+
gfc_start_block (&block);
- if (code->expr)
+ /* Either STAT= and/or ERRMSG is present. */
+ if (code->expr1 || code->expr2)
{
tree gfc_int4_type_node = gfc_get_int_type (4);
stat = gfc_create_var (gfc_int4_type_node, "stat");
- pstat = build_fold_addr_expr (stat);
+ pstat = gfc_build_addr_expr (NULL_TREE, stat);
error_label = gfc_build_label_decl (NULL_TREE);
TREE_USED (error_label) = 1;
}
- else
- pstat = stat = error_label = NULL_TREE;
- for (al = code->ext.alloc_list; al != NULL; al = al->next)
+ for (al = code->ext.alloc.list; al != NULL; al = al->next)
{
- expr = al->expr;
+ expr = gfc_copy_expr (al->expr);
+
+ if (expr->ts.type == BT_CLASS)
+ gfc_add_data_component (expr);
gfc_init_se (&se, NULL);
gfc_start_block (&se.pre);
if (!gfc_array_allocate (&se, expr, pstat))
{
/* A scalar or derived type. */
- tmp = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (se.expr)));
- if (expr->ts.type == BT_CHARACTER && tmp == NULL_TREE)
- tmp = se.string_length;
-
- tmp = gfc_allocate_with_status (&se.pre, tmp, pstat);
- tmp = build2 (MODIFY_EXPR, void_type_node, se.expr,
- fold_convert (TREE_TYPE (se.expr), tmp));
+ /* Determine allocate size. */
+ if (al->expr->ts.type == BT_CLASS && code->expr3)
+ {
+ if (code->expr3->ts.type == BT_CLASS)
+ {
+ gfc_expr *sz;
+ gfc_se se_sz;
+ sz = gfc_copy_expr (code->expr3);
+ gfc_add_vptr_component (sz);
+ gfc_add_size_component (sz);
+ gfc_init_se (&se_sz, NULL);
+ gfc_conv_expr (&se_sz, sz);
+ gfc_free_expr (sz);
+ memsz = se_sz.expr;
+ }
+ else
+ memsz = TYPE_SIZE_UNIT (gfc_typenode_for_spec (&code->expr3->ts));
+ }
+ else if (code->ext.alloc.ts.type != BT_UNKNOWN)
+ memsz = TYPE_SIZE_UNIT (gfc_typenode_for_spec (&code->ext.alloc.ts));
+ else
+ memsz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (se.expr)));
+
+ if (expr->ts.type == BT_CHARACTER && memsz == NULL_TREE)
+ memsz = se.string_length;
+
+ /* Allocate - for non-pointers with re-alloc checking. */
+ {
+ gfc_ref *ref;
+ bool allocatable;
+
+ ref = expr->ref;
+
+ /* Find the last reference in the chain. */
+ while (ref && ref->next != NULL)
+ {
+ gcc_assert (ref->type != REF_ARRAY || ref->u.ar.type == AR_ELEMENT);
+ ref = ref->next;
+ }
+
+ if (!ref)
+ allocatable = expr->symtree->n.sym->attr.allocatable;
+ else
+ allocatable = ref->u.c.component->attr.allocatable;
+
+ if (allocatable)
+ tmp = gfc_allocate_array_with_status (&se.pre, se.expr, memsz,
+ pstat, expr);
+ else
+ tmp = gfc_allocate_with_status (&se.pre, memsz, pstat);
+ }
+
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
+ se.expr,
+ fold_convert (TREE_TYPE (se.expr), tmp));
gfc_add_expr_to_block (&se.pre, tmp);
- if (code->expr)
+ if (code->expr1 || code->expr2)
{
tmp = build1_v (GOTO_EXPR, error_label);
- parm = fold_build2 (NE_EXPR, boolean_type_node,
- stat, build_int_cst (TREE_TYPE (stat), 0));
- tmp = fold_build3 (COND_EXPR, void_type_node,
- parm, tmp, build_empty_stmt ());
+ parm = fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node, stat,
+ build_int_cst (TREE_TYPE (stat), 0));
+ tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
+ parm, tmp,
+ build_empty_stmt (input_location));
gfc_add_expr_to_block (&se.pre, tmp);
}
- if (expr->ts.type == BT_DERIVED && expr->ts.derived->attr.alloc_comp)
+ if (expr->ts.type == BT_DERIVED && expr->ts.u.derived->attr.alloc_comp)
{
- tmp = build_fold_indirect_ref (se.expr);
- tmp = gfc_nullify_alloc_comp (expr->ts.derived, tmp, 0);
+ tmp = build_fold_indirect_ref_loc (input_location, se.expr);
+ tmp = gfc_nullify_alloc_comp (expr->ts.u.derived, tmp, 0);
gfc_add_expr_to_block (&se.pre, tmp);
}
tmp = gfc_finish_block (&se.pre);
gfc_add_expr_to_block (&block, tmp);
+
+ if (code->expr3 && !code->expr3->mold)
+ {
+ /* Initialization via SOURCE block
+ (or static default initializer). */
+ gfc_expr *rhs = gfc_copy_expr (code->expr3);
+ if (al->expr->ts.type == BT_CLASS)
+ {
+ gfc_se call;
+ gfc_actual_arglist *actual;
+ gfc_expr *ppc;
+ gfc_init_se (&call, NULL);
+ /* Do a polymorphic deep copy. */
+ actual = gfc_get_actual_arglist ();
+ actual->expr = gfc_copy_expr (rhs);
+ if (rhs->ts.type == BT_CLASS)
+ gfc_add_data_component (actual->expr);
+ actual->next = gfc_get_actual_arglist ();
+ actual->next->expr = gfc_copy_expr (al->expr);
+ gfc_add_data_component (actual->next->expr);
+ if (rhs->ts.type == BT_CLASS)
+ {
+ ppc = gfc_copy_expr (rhs);
+ gfc_add_vptr_component (ppc);
+ }
+ else
+ ppc = gfc_lval_expr_from_sym (gfc_find_derived_vtab (rhs->ts.u.derived));
+ gfc_add_component_ref (ppc, "_copy");
+ gfc_conv_procedure_call (&call, ppc->symtree->n.sym, actual,
+ ppc, NULL);
+ gfc_add_expr_to_block (&call.pre, call.expr);
+ gfc_add_block_to_block (&call.pre, &call.post);
+ tmp = gfc_finish_block (&call.pre);
+ }
+ else
+ tmp = gfc_trans_assignment (gfc_expr_to_initialize (expr),
+ rhs, false, false);
+ gfc_free_expr (rhs);
+ gfc_add_expr_to_block (&block, tmp);
+ }
+ else if (code->expr3 && code->expr3->mold
+ && code->expr3->ts.type == BT_CLASS)
+ {
+ /* Default-initialization via MOLD (polymorphic). */
+ gfc_expr *rhs = gfc_copy_expr (code->expr3);
+ gfc_se dst,src;
+ gfc_add_vptr_component (rhs);
+ gfc_add_def_init_component (rhs);
+ gfc_init_se (&dst, NULL);
+ gfc_init_se (&src, NULL);
+ gfc_conv_expr (&dst, expr);
+ gfc_conv_expr (&src, rhs);
+ gfc_add_block_to_block (&block, &src.pre);
+ tmp = gfc_build_memcpy_call (dst.expr, src.expr, memsz);
+ gfc_add_expr_to_block (&block, tmp);
+ gfc_free_expr (rhs);
+ }
+
+ /* Allocation of CLASS entities. */
+ gfc_free_expr (expr);
+ expr = al->expr;
+ if (expr->ts.type == BT_CLASS)
+ {
+ gfc_expr *lhs,*rhs;
+ gfc_se lse;
+
+ /* Initialize VPTR for CLASS objects. */
+ lhs = gfc_expr_to_initialize (expr);
+ gfc_add_vptr_component (lhs);
+ rhs = NULL;
+ if (code->expr3 && code->expr3->ts.type == BT_CLASS)
+ {
+ /* Polymorphic SOURCE: VPTR must be determined at run time. */
+ rhs = gfc_copy_expr (code->expr3);
+ gfc_add_vptr_component (rhs);
+ tmp = gfc_trans_pointer_assignment (lhs, rhs);
+ gfc_add_expr_to_block (&block, tmp);
+ gfc_free_expr (rhs);
+ }
+ else
+ {
+ /* VPTR is fixed at compile time. */
+ gfc_symbol *vtab;
+ gfc_typespec *ts;
+ if (code->expr3)
+ ts = &code->expr3->ts;
+ else if (expr->ts.type == BT_DERIVED)
+ ts = &expr->ts;
+ else if (code->ext.alloc.ts.type == BT_DERIVED)
+ ts = &code->ext.alloc.ts;
+ else if (expr->ts.type == BT_CLASS)
+ ts = &CLASS_DATA (expr)->ts;
+ else
+ ts = &expr->ts;
+
+ if (ts->type == BT_DERIVED)
+ {
+ vtab = gfc_find_derived_vtab (ts->u.derived);
+ gcc_assert (vtab);
+ gfc_init_se (&lse, NULL);
+ lse.want_pointer = 1;
+ gfc_conv_expr (&lse, lhs);
+ tmp = gfc_build_addr_expr (NULL_TREE,
+ gfc_get_symbol_decl (vtab));
+ gfc_add_modify (&block, lse.expr,
+ fold_convert (TREE_TYPE (lse.expr), tmp));
+ }
+ }
+ gfc_free_expr (lhs);
+ }
+
}
- /* Assign the value to the status variable. */
- if (code->expr)
+ /* STAT block. */
+ if (code->expr1)
{
tmp = build1_v (LABEL_EXPR, error_label);
gfc_add_expr_to_block (&block, tmp);
gfc_init_se (&se, NULL);
- gfc_conv_expr_lhs (&se, code->expr);
+ gfc_conv_expr_lhs (&se, code->expr1);
tmp = convert (TREE_TYPE (se.expr), stat);
- gfc_add_modify_expr (&block, se.expr, tmp);
+ gfc_add_modify (&block, se.expr, tmp);
+ }
+
+ /* ERRMSG block. */
+ if (code->expr2)
+ {
+ /* A better error message may be possible, but not required. */
+ const char *msg = "Attempt to allocate an allocated object";
+ tree errmsg, slen, dlen;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_lhs (&se, code->expr2);
+
+ errmsg = gfc_create_var (pchar_type_node, "ERRMSG");
+
+ gfc_add_modify (&block, errmsg,
+ gfc_build_addr_expr (pchar_type_node,
+ gfc_build_localized_cstring_const (msg)));
+
+ slen = build_int_cst (gfc_charlen_type_node, ((int) strlen (msg)));
+ dlen = gfc_get_expr_charlen (code->expr2);
+ slen = fold_build2_loc (input_location, MIN_EXPR, TREE_TYPE (slen), dlen,
+ slen);
+
+ dlen = build_call_expr_loc (input_location,
+ built_in_decls[BUILT_IN_MEMCPY], 3,
+ gfc_build_addr_expr (pvoid_type_node, se.expr), errmsg, slen);
+
+ tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, stat,
+ build_int_cst (TREE_TYPE (stat), 0));
+
+ tmp = build3_v (COND_EXPR, tmp, dlen, build_empty_stmt (input_location));
+
+ gfc_add_expr_to_block (&block, tmp);
}
return gfc_finish_block (&block);
}
-/* Translate a DEALLOCATE statement.
- There are two cases within the for loop:
- (1) deallocate(a1, a2, a3) is translated into the following sequence
- _gfortran_deallocate(a1, 0B)
- _gfortran_deallocate(a2, 0B)
- _gfortran_deallocate(a3, 0B)
- where the STAT= variable is passed a NULL pointer.
- (2) deallocate(a1, a2, a3, stat=i) is translated into the following
- astat = 0
- _gfortran_deallocate(a1, &stat)
- astat = astat + stat
- _gfortran_deallocate(a2, &stat)
- astat = astat + stat
- _gfortran_deallocate(a3, &stat)
- astat = astat + stat
- In case (1), we simply return at the end of the for loop. In case (2)
- we set STAT= astat. */
+/* Translate a DEALLOCATE statement. */
+
tree
-gfc_trans_deallocate (gfc_code * code)
+gfc_trans_deallocate (gfc_code *code)
{
gfc_se se;
gfc_alloc *al;
- gfc_expr *expr;
tree apstat, astat, pstat, stat, tmp;
stmtblock_t block;
+ pstat = apstat = stat = astat = tmp = NULL_TREE;
+
gfc_start_block (&block);
- /* Set up the optional STAT= */
- if (code->expr)
+ /* Count the number of failed deallocations. If deallocate() was
+ called with STAT=, then set STAT to the count. If deallocate
+ was called with ERRMSG, then set ERRMSG to a string. */
+ if (code->expr1 || code->expr2)
{
tree gfc_int4_type_node = gfc_get_int_type (4);
- /* Variable used with the library call. */
stat = gfc_create_var (gfc_int4_type_node, "stat");
- pstat = build_fold_addr_expr (stat);
+ pstat = gfc_build_addr_expr (NULL_TREE, stat);
/* Running total of possible deallocation failures. */
astat = gfc_create_var (gfc_int4_type_node, "astat");
- apstat = build_fold_addr_expr (astat);
+ apstat = gfc_build_addr_expr (NULL_TREE, astat);
/* Initialize astat to 0. */
- gfc_add_modify_expr (&block, astat, build_int_cst (TREE_TYPE (astat), 0));
+ gfc_add_modify (&block, astat, build_int_cst (TREE_TYPE (astat), 0));
}
- else
- pstat = apstat = stat = astat = NULL_TREE;
- for (al = code->ext.alloc_list; al != NULL; al = al->next)
+ for (al = code->ext.alloc.list; al != NULL; al = al->next)
{
- expr = al->expr;
+ gfc_expr *expr = gfc_copy_expr (al->expr);
gcc_assert (expr->expr_type == EXPR_VARIABLE);
+ if (expr->ts.type == BT_CLASS)
+ gfc_add_data_component (expr);
+
gfc_init_se (&se, NULL);
gfc_start_block (&se.pre);
se.descriptor_only = 1;
gfc_conv_expr (&se, expr);
- if (expr->ts.type == BT_DERIVED
- && expr->ts.derived->attr.alloc_comp)
- {
- gfc_ref *ref;
- gfc_ref *last = NULL;
- for (ref = expr->ref; ref; ref = ref->next)
- if (ref->type == REF_COMPONENT)
- last = ref;
-
- /* Do not deallocate the components of a derived type
- ultimate pointer component. */
- if (!(last && last->u.c.component->pointer)
- && !(!last && expr->symtree->n.sym->attr.pointer))
+ if (expr->rank)
+ {
+ if (expr->ts.type == BT_DERIVED && expr->ts.u.derived->attr.alloc_comp)
{
- tmp = gfc_deallocate_alloc_comp (expr->ts.derived, se.expr,
- expr->rank);
- gfc_add_expr_to_block (&se.pre, tmp);
+ gfc_ref *ref;
+ gfc_ref *last = NULL;
+ for (ref = expr->ref; ref; ref = ref->next)
+ if (ref->type == REF_COMPONENT)
+ last = ref;
+
+ /* Do not deallocate the components of a derived type
+ ultimate pointer component. */
+ if (!(last && last->u.c.component->attr.pointer)
+ && !(!last && expr->symtree->n.sym->attr.pointer))
+ {
+ tmp = gfc_deallocate_alloc_comp (expr->ts.u.derived, se.expr,
+ expr->rank);
+ gfc_add_expr_to_block (&se.pre, tmp);
+ }
}
+ tmp = gfc_array_deallocate (se.expr, pstat, expr);
+ gfc_add_expr_to_block (&se.pre, tmp);
}
-
- if (expr->rank)
- tmp = gfc_array_deallocate (se.expr, pstat);
else
{
- tmp = gfc_deallocate_with_status (se.expr, pstat, false);
+ tmp = gfc_deallocate_scalar_with_status (se.expr, pstat, false,
+ expr, expr->ts);
gfc_add_expr_to_block (&se.pre, tmp);
- tmp = build2 (MODIFY_EXPR, void_type_node,
- se.expr, build_int_cst (TREE_TYPE (se.expr), 0));
+ /* Set to zero after deallocation. */
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
+ se.expr,
+ build_int_cst (TREE_TYPE (se.expr), 0));
+ gfc_add_expr_to_block (&se.pre, tmp);
+
+ if (al->expr->ts.type == BT_CLASS)
+ {
+ /* Reset _vptr component to declared type. */
+ gfc_expr *rhs, *lhs = gfc_copy_expr (al->expr);
+ gfc_symbol *vtab = gfc_find_derived_vtab (al->expr->ts.u.derived);
+ gfc_add_vptr_component (lhs);
+ rhs = gfc_lval_expr_from_sym (vtab);
+ tmp = gfc_trans_pointer_assignment (lhs, rhs);
+ gfc_add_expr_to_block (&se.pre, tmp);
+ gfc_free_expr (lhs);
+ gfc_free_expr (rhs);
+ }
}
- gfc_add_expr_to_block (&se.pre, tmp);
-
/* Keep track of the number of failed deallocations by adding stat
of the last deallocation to the running total. */
- if (code->expr)
+ if (code->expr1 || code->expr2)
{
- apstat = build2 (PLUS_EXPR, TREE_TYPE (stat), astat, stat);
- gfc_add_modify_expr (&se.pre, astat, apstat);
+ apstat = fold_build2_loc (input_location, PLUS_EXPR,
+ TREE_TYPE (stat), astat, stat);
+ gfc_add_modify (&se.pre, astat, apstat);
}
tmp = gfc_finish_block (&se.pre);
gfc_add_expr_to_block (&block, tmp);
-
+ gfc_free_expr (expr);
}
- /* Assign the value to the status variable. */
- if (code->expr)
+ /* Set STAT. */
+ if (code->expr1)
{
gfc_init_se (&se, NULL);
- gfc_conv_expr_lhs (&se, code->expr);
+ gfc_conv_expr_lhs (&se, code->expr1);
tmp = convert (TREE_TYPE (se.expr), astat);
- gfc_add_modify_expr (&block, se.expr, tmp);
+ gfc_add_modify (&block, se.expr, tmp);
+ }
+
+ /* Set ERRMSG. */
+ if (code->expr2)
+ {
+ /* A better error message may be possible, but not required. */
+ const char *msg = "Attempt to deallocate an unallocated object";
+ tree errmsg, slen, dlen;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_lhs (&se, code->expr2);
+
+ errmsg = gfc_create_var (pchar_type_node, "ERRMSG");
+
+ gfc_add_modify (&block, errmsg,
+ gfc_build_addr_expr (pchar_type_node,
+ gfc_build_localized_cstring_const (msg)));
+
+ slen = build_int_cst (gfc_charlen_type_node, ((int) strlen (msg)));
+ dlen = gfc_get_expr_charlen (code->expr2);
+ slen = fold_build2_loc (input_location, MIN_EXPR, TREE_TYPE (slen), dlen,
+ slen);
+
+ dlen = build_call_expr_loc (input_location,
+ built_in_decls[BUILT_IN_MEMCPY], 3,
+ gfc_build_addr_expr (pvoid_type_node, se.expr), errmsg, slen);
+
+ tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, astat,
+ build_int_cst (TREE_TYPE (astat), 0));
+
+ tmp = build3_v (COND_EXPR, tmp, dlen, build_empty_stmt (input_location));
+
+ gfc_add_expr_to_block (&block, tmp);
}
return gfc_finish_block (&block);
}
+#include "gt-fortran-trans-stmt.h"