/* Statement translation -- generate GCC trees from gfc_code.
- Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
Contributed by Paul Brook <paul@nowt.org>
and Steven Bosscher <s.bosscher@student.tudelft.nl>
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
#include "config.h"
#include "coretypes.h"
#include "tree.h"
#include "tree-gimple.h"
-#include <stdio.h>
#include "ggc.h"
#include "toplev.h"
#include "real.h"
-#include <assert.h>
-#include <gmp.h>
#include "gfortran.h"
+#include "flags.h"
#include "trans.h"
#include "trans-stmt.h"
#include "trans-types.h"
#include "trans-array.h"
#include "trans-const.h"
#include "arith.h"
-
-int has_alternate_specifier;
+#include "dependency.h"
typedef struct iter_info
{
}
iter_info;
-typedef struct temporary_list
-{
- tree temporary;
- struct temporary_list *next;
-}
-temporary_list;
-
typedef struct forall_info
{
iter_info *this_loop;
tree mask;
- tree pmask;
tree maskindex;
int nvar;
tree size;
- struct forall_info *outer;
- struct forall_info *next_nest;
+ struct forall_info *prev_nest;
}
forall_info;
-static void gfc_trans_where_2 (gfc_code *, tree, tree, forall_info *,
- stmtblock_t *, temporary_list **temp);
+static void gfc_trans_where_2 (gfc_code *, tree, bool,
+ forall_info *, stmtblock_t *);
/* Translate a F95 label number to a LABEL_EXPR. */
return build1_v (LABEL_EXPR, gfc_get_label_decl (code->here));
}
+
+/* Given a variable expression which has been ASSIGNed to, find the decl
+ containing the auxiliary variables. For variables in common blocks this
+ is a field_decl. */
+
+void
+gfc_conv_label_variable (gfc_se * se, gfc_expr * expr)
+{
+ /* Only symbols that were the target of an ASSIGN statement carry the
+ auxiliary label variables; callers then read GFC_DECL_STRING_LEN and
+ GFC_DECL_ASSIGN_ADDR off the returned decl. */
+ gcc_assert (expr->symtree->n.sym->attr.assign == 1);
+ gfc_conv_expr (se, expr);
+ /* Deals with variable in common block. Get the field declaration. */
+ if (TREE_CODE (se->expr) == COMPONENT_REF)
+ se->expr = TREE_OPERAND (se->expr, 1);
+ /* Deals with dummy argument. Get the parameter declaration. */
+ else if (TREE_CODE (se->expr) == INDIRECT_REF)
+ se->expr = TREE_OPERAND (se->expr, 0);
+}
+
/* Translate a label assignment statement. */
+
tree
gfc_trans_label_assign (gfc_code * code)
{
/* Start a new block. */
gfc_init_se (&se, NULL);
gfc_start_block (&se.pre);
- gfc_conv_expr (&se, code->expr);
+ gfc_conv_label_variable (&se, code->expr);
+
len = GFC_DECL_STRING_LEN (se.expr);
addr = GFC_DECL_ASSIGN_ADDR (se.expr);
{
label_str = code->label->format->value.character.string;
label_len = code->label->format->value.character.length;
- len_tree = build_int_2 (label_len, 0);
+ len_tree = build_int_cst (NULL_TREE, label_len);
label_tree = gfc_build_string_const (label_len + 1, label_str);
- label_tree = gfc_build_addr_expr (pchar_type_node, label_tree);
+ label_tree = gfc_build_addr_expr (pvoid_type_node, label_tree);
}
gfc_add_modify_expr (&se.pre, len, len_tree);
tree
gfc_trans_goto (gfc_code * code)
{
+ locus loc = code->loc;
tree assigned_goto;
tree target;
tree tmp;
- tree assign_error;
- tree range_error;
gfc_se se;
-
+ /* A plain GOTO with a statement label needs no runtime machinery. */
if (code->label != NULL)
return build1_v (GOTO_EXPR, gfc_get_label_decl (code->label));
/* ASSIGNED GOTO. */
gfc_init_se (&se, NULL);
gfc_start_block (&se.pre);
- gfc_conv_expr (&se, code->expr);
- assign_error =
- gfc_build_string_const (37, "Assigned label is not a target label");
+ gfc_conv_label_variable (&se, code->expr);
+ /* NOTE(review): a stored string length of -1 presumably marks a variable
+ that was ASSIGNed a target label rather than a FORMAT label -- confirm
+ against gfc_trans_label_assign. */
tmp = GFC_DECL_STRING_LEN (se.expr);
- tmp = build (NE_EXPR, boolean_type_node, tmp, integer_minus_one_node);
- gfc_trans_runtime_check (tmp, assign_error, &se.pre);
+ tmp = fold_build2 (NE_EXPR, boolean_type_node, tmp,
+ build_int_cst (TREE_TYPE (tmp), -1));
+ gfc_trans_runtime_check (tmp, "Assigned label is not a target label",
+ &se.pre, &loc);
assigned_goto = GFC_DECL_ASSIGN_ADDR (se.expr);
- target = build1 (GOTO_EXPR, void_type_node, assigned_goto);
code = code->block;
if (code == NULL)
{
+ /* No label list: jump unconditionally to the stored address. */
+ target = build1 (GOTO_EXPR, void_type_node, assigned_goto);
gfc_add_expr_to_block (&se.pre, target);
return gfc_finish_block (&se.pre);
}
/* Check the label list. */
- range_error =
- gfc_build_string_const (34, "Assigned label is not in the list");
-
do
{
- tmp = gfc_get_label_decl (code->label);
- tmp = gfc_build_addr_expr (pvoid_type_node, tmp);
- tmp = build (EQ_EXPR, boolean_type_node, tmp, assigned_goto);
- tmp = build_v (COND_EXPR, tmp, target, build_empty_stmt ());
+ target = gfc_get_label_decl (code->label);
+ tmp = gfc_build_addr_expr (pvoid_type_node, target);
+ tmp = build2 (EQ_EXPR, boolean_type_node, tmp, assigned_goto);
+ tmp = build3_v (COND_EXPR, tmp,
+ build1 (GOTO_EXPR, void_type_node, target),
+ build_empty_stmt ());
gfc_add_expr_to_block (&se.pre, tmp);
code = code->block;
}
while (code != NULL);
+ /* Fell through every comparison: the assigned label was not listed. */
- gfc_trans_runtime_check (boolean_true_node, range_error, &se.pre);
+ gfc_trans_runtime_check (boolean_true_node,
+ "Assigned label is not in the list", &se.pre, &loc);
+
return gfc_finish_block (&se.pre);
}
+/* Translate an ENTRY statement. Just adds a label for this entry point. */
+tree
+gfc_trans_entry (gfc_code * code)
+{
+ /* Emit the label decl attached to this entry; control transfers here
+ when the procedure is invoked through this ENTRY name. */
+ return build1_v (LABEL_EXPR, code->ext.entry->label);
+}
+
+
+/* Check for dependencies between INTENT(IN) and INTENT(OUT) arguments of
+ elemental subroutines. Make temporaries for output arguments if any such
+ dependencies are found. Output arguments are chosen because internal_unpack
+ can be used, as is, to copy the result back to the variable. */
+static void
+gfc_conv_elemental_dependencies (gfc_se * se, gfc_se * loopse,
+ gfc_symbol * sym, gfc_actual_arglist * arg)
+{
+ gfc_actual_arglist *arg0;
+ gfc_expr *e;
+ gfc_formal_arglist *formal;
+ gfc_loopinfo tmp_loop;
+ gfc_se parmse;
+ gfc_ss *ss;
+ gfc_ss_info *info;
+ gfc_symbol *fsym;
+ int n;
+ stmtblock_t block;
+ tree data;
+ tree offset;
+ tree size;
+ tree tmp;
+
+ /* No scalarization chain means no array arguments, so nothing to check. */
+ if (loopse->ss == NULL)
+ return;
+
+ ss = loopse->ss;
+ arg0 = arg;
+ formal = sym->formal;
+
+ /* Loop over all the arguments testing for dependencies. */
+ for (; arg != NULL; arg = arg->next, formal = formal ? formal->next : NULL)
+ {
+ e = arg->expr;
+ if (e == NULL)
+ continue;
+
+ /* Obtain the info structure for the current argument. */
+ info = NULL;
+ for (ss = loopse->ss; ss && ss != gfc_ss_terminator; ss = ss->next)
+ {
+ if (ss->expr != e)
+ continue;
+ info = &ss->data.info;
+ break;
+ }
+
+ /* If there is a dependency, create a temporary and use it
+ instead of the variable. */
+ fsym = formal ? formal->sym : NULL;
+ if (e->expr_type == EXPR_VARIABLE
+ && e->rank && fsym
+ && fsym->attr.intent == INTENT_OUT
+ && gfc_check_fncall_dependency (e, INTENT_OUT, sym, arg0))
+ {
+ /* Make a local loopinfo for the temporary creation, so that
+ none of the other ss->info's have to be renormalized. */
+ gfc_init_loopinfo (&tmp_loop);
+ for (n = 0; n < info->dimen; n++)
+ {
+ tmp_loop.to[n] = loopse->loop->to[n];
+ tmp_loop.from[n] = loopse->loop->from[n];
+ tmp_loop.order[n] = loopse->loop->order[n];
+ }
+
+ /* Generate the temporary. Merge the block so that the
+ declarations are put at the right binding level. */
+ size = gfc_create_var (gfc_array_index_type, NULL);
+ data = gfc_create_var (pvoid_type_node, NULL);
+ gfc_start_block (&block);
+ tmp = gfc_typenode_for_spec (&e->ts);
+ tmp = gfc_trans_create_temp_array (&se->pre, &se->post,
+ &tmp_loop, info, tmp,
+ false, true, false, false);
+ gfc_add_modify_expr (&se->pre, size, tmp);
+ /* Save the temporary's data pointer; it is the source operand of
+ the internal_unpack call emitted below. */
+ tmp = fold_convert (pvoid_type_node, info->data);
+ gfc_add_modify_expr (&se->pre, data, tmp);
+ gfc_merge_block_scope (&block);
+
+ /* Obtain the argument descriptor for unpacking. */
+ gfc_init_se (&parmse, NULL);
+ parmse.want_pointer = 1;
+ gfc_conv_expr_descriptor (&parmse, e, gfc_walk_expr (e));
+ gfc_add_block_to_block (&se->pre, &parmse.pre);
+
+ /* Calculate the offset for the temporary. */
+ offset = gfc_index_zero_node;
+ for (n = 0; n < info->dimen; n++)
+ {
+ tmp = gfc_conv_descriptor_stride (info->descriptor,
+ gfc_rank_cst[n]);
+ tmp = fold_build2 (MULT_EXPR, gfc_array_index_type,
+ loopse->loop->from[n], tmp);
+ offset = fold_build2 (MINUS_EXPR, gfc_array_index_type,
+ offset, tmp);
+ }
+ info->offset = gfc_create_var (gfc_array_index_type, NULL);
+ gfc_add_modify_expr (&se->pre, info->offset, offset);
+
+ /* Copy the result back using unpack. */
+ tmp = gfc_chainon_list (NULL_TREE, parmse.expr);
+ tmp = gfc_chainon_list (tmp, data);
+ tmp = build_function_call_expr (gfor_fndecl_in_unpack, tmp);
+ gfc_add_expr_to_block (&se->post, tmp);
+
+ gfc_add_block_to_block (&se->post, &parmse.post);
+ }
+ }
+}
+
+
/* Translate the CALL statement. Builds a call to an F95 subroutine. */
tree
-gfc_trans_call (gfc_code * code)
+gfc_trans_call (gfc_code * code, bool dependency_check)
{
gfc_se se;
+ gfc_ss * ss;
+ int has_alternate_specifier;
/* A CALL starts a new block because the actual arguments may have to
be evaluated first. */
gfc_init_se (&se, NULL);
gfc_start_block (&se.pre);
- assert (code->resolved_sym);
- has_alternate_specifier = 0;
+ gcc_assert (code->resolved_sym);
- /* Translate the call. */
- gfc_conv_function_call (&se, code->resolved_sym, code->ext.actual);
+ ss = gfc_ss_terminator;
+ if (code->resolved_sym->attr.elemental)
+ ss = gfc_walk_elemental_function_args (ss, code->ext.actual, GFC_SS_REFERENCE);
- /* A subroutine without side-effect, by definition, does nothing! */
- TREE_SIDE_EFFECTS (se.expr) = 1;
-
- /* Chain the pieces together and return the block. */
- if (has_alternate_specifier)
+ /* Is not an elemental subroutine call with array valued arguments. */
+ if (ss == gfc_ss_terminator)
{
- gfc_code *select_code;
- gfc_symbol *sym;
- select_code = code->next;
- assert(select_code->op == EXEC_SELECT);
- sym = select_code->expr->symtree->n.sym;
- se.expr = convert (gfc_typenode_for_spec (&sym->ts), se.expr);
- gfc_add_modify_expr (&se.pre, sym->backend_decl, se.expr);
+
+ /* Translate the call. */
+ has_alternate_specifier
+ = gfc_conv_function_call (&se, code->resolved_sym, code->ext.actual,
+ NULL_TREE);
+
+ /* A subroutine without side-effect, by definition, does nothing! */
+ TREE_SIDE_EFFECTS (se.expr) = 1;
+
+ /* Chain the pieces together and return the block. */
+ if (has_alternate_specifier)
+ {
+ gfc_code *select_code;
+ gfc_symbol *sym;
+ select_code = code->next;
+ gcc_assert(select_code->op == EXEC_SELECT);
+ sym = select_code->expr->symtree->n.sym;
+ se.expr = convert (gfc_typenode_for_spec (&sym->ts), se.expr);
+ if (sym->backend_decl == NULL)
+ sym->backend_decl = gfc_get_symbol_decl (sym);
+ gfc_add_modify_expr (&se.pre, sym->backend_decl, se.expr);
+ }
+ else
+ gfc_add_expr_to_block (&se.pre, se.expr);
+
+ gfc_add_block_to_block (&se.pre, &se.post);
}
+
else
- gfc_add_expr_to_block (&se.pre, se.expr);
+ {
+ /* An elemental subroutine call with array valued arguments has
+ to be scalarized. */
+ gfc_loopinfo loop;
+ stmtblock_t body;
+ stmtblock_t block;
+ gfc_se loopse;
+
+ /* gfc_walk_elemental_function_args renders the ss chain in the
+ reverse order to the actual argument order. */
+ ss = gfc_reverse_ss (ss);
+
+ /* Initialize the loop. */
+ gfc_init_se (&loopse, NULL);
+ gfc_init_loopinfo (&loop);
+ gfc_add_ss_to_loop (&loop, ss);
+
+ gfc_conv_ss_startstride (&loop);
+ gfc_conv_loop_setup (&loop);
+ gfc_mark_ss_chain_used (ss, 1);
+
+ /* Convert the arguments, checking for dependencies. */
+ gfc_copy_loopinfo_to_se (&loopse, &loop);
+ loopse.ss = ss;
+
+ /* For operator assignment, we need to do dependency checking.
+ We also check the intent of the parameters. */
+ if (dependency_check)
+ {
+ gfc_symbol *sym;
+ sym = code->resolved_sym;
+ /* These must be comparisons, not assignments: "=" here would
+ silently overwrite attr.intent and make the asserts vacuous. */
+ gcc_assert (sym->formal->sym->attr.intent == INTENT_OUT);
+ gcc_assert (sym->formal->next->sym->attr.intent == INTENT_IN);
+ gfc_conv_elemental_dependencies (&se, &loopse, sym,
+ code->ext.actual);
+ }
+
+ /* Generate the loop body. */
+ gfc_start_scalarized_body (&loop, &body);
+ gfc_init_block (&block);
+
+ /* Add the subroutine call to the block. */
+ gfc_conv_function_call (&loopse, code->resolved_sym, code->ext.actual,
+ NULL_TREE);
+ gfc_add_expr_to_block (&loopse.pre, loopse.expr);
+
+ gfc_add_block_to_block (&block, &loopse.pre);
+ gfc_add_block_to_block (&block, &loopse.post);
+
+ /* Finish up the loop block and the loop. */
+ gfc_add_expr_to_block (&body, gfc_finish_block (&block));
+ gfc_trans_scalarizing_loops (&loop, &body);
+ gfc_add_block_to_block (&se.pre, &loop.pre);
+ gfc_add_block_to_block (&se.pre, &loop.post);
+ gfc_add_block_to_block (&se.pre, &se.post);
+ gfc_cleanup_loop (&loop);
+ }
- gfc_add_block_to_block (&se.pre, &se.post);
return gfc_finish_block (&se.pre);
}
tree tmp;
tree result;
- /* if code->expr is not NULL, this return statement must appear
+ /* If code->expr is not NULL, this return statement must appear
in a subroutine and current_fake_result_decl has already
been generated. */
- result = gfc_get_fake_result_decl (NULL);
+ result = gfc_get_fake_result_decl (NULL, 0);
if (!result)
{
gfc_warning ("An alternate return at %L without a * dummy argument",
gfc_conv_expr (&se, code->expr);
- tmp = build (MODIFY_EXPR, TREE_TYPE (result), result, se.expr);
+ tmp = build2 (MODIFY_EXPR, TREE_TYPE (result), result, se.expr);
gfc_add_expr_to_block (&se.pre, tmp);
tmp = build1_v (GOTO_EXPR, gfc_get_return_label ());
tree
gfc_trans_pause (gfc_code * code)
{
+ tree gfc_int4_type_node = gfc_get_int_type (4);
gfc_se se;
tree args;
tree tmp;
if (code->expr == NULL)
{
- tmp = build_int_2 (code->ext.stop_code, 0);
- TREE_TYPE (tmp) = gfc_int4_type_node;
+ tmp = build_int_cst (gfc_int4_type_node, code->ext.stop_code);
args = gfc_chainon_list (NULL_TREE, tmp);
fndecl = gfor_fndecl_pause_numeric;
}
fndecl = gfor_fndecl_pause_string;
}
- tmp = gfc_build_function_call (fndecl, args);
+ tmp = build_function_call_expr (fndecl, args);
gfc_add_expr_to_block (&se.pre, tmp);
gfc_add_block_to_block (&se.pre, &se.post);
tree
gfc_trans_stop (gfc_code * code)
{
+ tree gfc_int4_type_node = gfc_get_int_type (4);
gfc_se se;
tree args;
tree tmp;
if (code->expr == NULL)
{
- tmp = build_int_2 (code->ext.stop_code, 0);
- TREE_TYPE (tmp) = gfc_int4_type_node;
+ tmp = build_int_cst (gfc_int4_type_node, code->ext.stop_code);
args = gfc_chainon_list (NULL_TREE, tmp);
fndecl = gfor_fndecl_stop_numeric;
}
fndecl = gfor_fndecl_stop_string;
}
- tmp = gfc_build_function_call (fndecl, args);
+ tmp = build_function_call_expr (fndecl, args);
gfc_add_expr_to_block (&se.pre, tmp);
gfc_add_block_to_block (&se.pre, &se.post);
elsestmt = build_empty_stmt ();
/* Build the condition expression and add it to the condition block. */
- stmt = build_v (COND_EXPR, if_se.expr, stmt, elsestmt);
+ stmt = fold_build3 (COND_EXPR, void_type_node, if_se.expr, stmt, elsestmt);
gfc_add_expr_to_block (&if_se.pre, stmt);
}
-/* Translage an arithmetic IF expression.
+/* Translate an arithmetic IF expression.
IF (cond) label1, label2, label3 translates to
}
else // cond > 0
goto label3;
+
+ An optimized version can be generated in case of equal labels.
+ E.g., if label1 is equal to label2, we can translate it to
+
+ if (cond <= 0)
+ goto label1;
+ else
+ goto label3;
*/
tree
/* Pre-evaluate COND. */
gfc_conv_expr_val (&se, code->expr);
+ se.expr = gfc_evaluate_now (se.expr, &se.pre);
/* Build something to compare with. */
zero = gfc_build_const (TREE_TYPE (se.expr), integer_zero_node);
- /* If (cond < 0) take branch1 else take branch2.
- First build jumps to the COND .LT. 0 and the COND .EQ. 0 cases. */
- branch1 = build1_v (GOTO_EXPR, gfc_get_label_decl (code->label));
- branch2 = build1_v (GOTO_EXPR, gfc_get_label_decl (code->label2));
+ if (code->label->value != code->label2->value)
+ {
+ /* If (cond < 0) take branch1 else take branch2.
+ First build jumps to the COND .LT. 0 and the COND .EQ. 0 cases. */
+ branch1 = build1_v (GOTO_EXPR, gfc_get_label_decl (code->label));
+ branch2 = build1_v (GOTO_EXPR, gfc_get_label_decl (code->label2));
+
+ if (code->label->value != code->label3->value)
+ tmp = fold_build2 (LT_EXPR, boolean_type_node, se.expr, zero);
+ else
+ tmp = fold_build2 (NE_EXPR, boolean_type_node, se.expr, zero);
- tmp = build (LT_EXPR, boolean_type_node, se.expr, zero);
- branch1 = build_v (COND_EXPR, tmp, branch1, branch2);
+ branch1 = fold_build3 (COND_EXPR, void_type_node, tmp, branch1, branch2);
+ }
+ else
+ branch1 = build1_v (GOTO_EXPR, gfc_get_label_decl (code->label));
- /* if (cond <= 0) take branch1 else take branch2. */
- branch2 = build1_v (GOTO_EXPR, gfc_get_label_decl (code->label3));
- tmp = build (LE_EXPR, boolean_type_node, se.expr, zero);
- branch1 = build_v (COND_EXPR, tmp, branch1, branch2);
+ if (code->label->value != code->label3->value
+ && code->label2->value != code->label3->value)
+ {
+ /* if (cond <= 0) take branch1 else take branch2. */
+ branch2 = build1_v (GOTO_EXPR, gfc_get_label_decl (code->label3));
+ tmp = fold_build2 (LE_EXPR, boolean_type_node, se.expr, zero);
+ branch1 = fold_build3 (COND_EXPR, void_type_node, tmp, branch1, branch2);
+ }
/* Append the COND_EXPR to the evaluation of COND, and return. */
gfc_add_expr_to_block (&se.pre, branch1);
}
+/* Translate the simple DO construct. This is where the loop variable has
+ integer type and step +-1. We can't use this in the general case
+ because integer overflow and floating point errors could give incorrect
+ results.
+ We translate a do loop from:
+
+ DO dovar = from, to, step
+ body
+ END DO
+
+ to:
+
+ [Evaluate loop bounds and step]
+ dovar = from;
+ if ((step > 0) ? (dovar <= to) : (dovar >= to))
+ {
+ for (;;)
+ {
+ body;
+ cycle_label:
+ cond = (dovar == to);
+ dovar += step;
+ if (cond) goto end_label;
+ }
+ }
+ end_label:
+
+ This helps the optimizers by avoiding the extra induction variable
+ used in the general case. */
+
+static tree
+gfc_trans_simple_do (gfc_code * code, stmtblock_t *pblock, tree dovar,
+ tree from, tree to, tree step)
+{
+ stmtblock_t body;
+ tree type;
+ tree cond;
+ tree tmp;
+ tree cycle_label;
+ tree exit_label;
+
+ type = TREE_TYPE (dovar);
+
+ /* Initialize the DO variable: dovar = from. */
+ gfc_add_modify_expr (pblock, dovar, from);
+
+ /* Cycle and exit statements are implemented with gotos. */
+ cycle_label = gfc_build_label_decl (NULL_TREE);
+ exit_label = gfc_build_label_decl (NULL_TREE);
+
+ /* Put the labels where they can be found later. See gfc_trans_do(). */
+ code->block->backend_decl = tree_cons (cycle_label, exit_label, NULL);
+
+ /* Loop body. */
+ gfc_start_block (&body);
+
+ /* Main loop body. */
+ tmp = gfc_trans_code (code->block->next);
+ gfc_add_expr_to_block (&body, tmp);
+
+ /* Label for cycle statements (if needed). */
+ if (TREE_USED (cycle_label))
+ {
+ tmp = build1_v (LABEL_EXPR, cycle_label);
+ gfc_add_expr_to_block (&body, tmp);
+ }
+
+ /* Evaluate the loop condition from the current dovar, before it is
+ incremented below. */
+ cond = fold_build2 (EQ_EXPR, boolean_type_node, dovar, to);
+ cond = gfc_evaluate_now (cond, &body);
+
+ /* Increment the loop variable. */
+ tmp = fold_build2 (PLUS_EXPR, type, dovar, step);
+ gfc_add_modify_expr (&body, dovar, tmp);
+
+ /* The loop exit. */
+ tmp = build1_v (GOTO_EXPR, exit_label);
+ TREE_USED (exit_label) = 1;
+ tmp = fold_build3 (COND_EXPR, void_type_node,
+ cond, tmp, build_empty_stmt ());
+ gfc_add_expr_to_block (&body, tmp);
+
+ /* Finish the loop body. */
+ tmp = gfc_finish_block (&body);
+ tmp = build1_v (LOOP_EXPR, tmp);
+
+ /* Only execute the loop if the number of iterations is positive. */
+ if (tree_int_cst_sgn (step) > 0)
+ cond = fold_build2 (LE_EXPR, boolean_type_node, dovar, to);
+ else
+ cond = fold_build2 (GE_EXPR, boolean_type_node, dovar, to);
+ tmp = fold_build3 (COND_EXPR, void_type_node,
+ cond, tmp, build_empty_stmt ());
+ gfc_add_expr_to_block (pblock, tmp);
+
+ /* Add the exit label. */
+ tmp = build1_v (LABEL_EXPR, exit_label);
+ gfc_add_expr_to_block (pblock, tmp);
+
+ return gfc_finish_block (pblock);
+}
+
/* Translate the DO construct. This obviously is one of the most
important ones to get right with any compiler, but especially
so for Fortran.
- Currently we calculate the loop count before entering the loop, but
- it may be possible to optimize if step is a constant. The main
- advantage is that the loop test is a single GENERIC node
+ We special case some loop forms as described in gfc_trans_simple_do.
+ For other cases we implement them with a separate loop count,
+ as described in the standard.
We translate a do loop from:
to:
- pre_dovar;
- pre_from;
- pre_to;
- pre_step;
- temp1=to_expr-from_expr;
- step_temp=step_expr;
- range_temp=step_tmp/range_temp;
- for ( ; range_temp > 0 ; range_temp = range_temp - 1)
+ [evaluate loop bounds and step]
+ count = (to + step - from) / step;
+ dovar = from;
+ for (;;)
{
body;
cycle_label:
- dovar_temp = dovar
- dovar=dovar_temp + step_temp;
+ dovar += step
+ count--;
+ if (count <=0) goto exit_label;
}
exit_label:
- Some optimization is done for empty do loops. We can't just let
- dovar=to because it's possible for from+range*loopcount!=to. Anyone
- who writes empty DO deserves sub-optimal (but correct) code anyway.
-
TODO: Large loop counts
- Does not work loop counts which do not fit into a signed integer kind,
- ie. Does not work for loop counts > 2^31 for integer(kind=4) variables
+ The code above assumes the loop count fits into a signed integer kind,
+ i.e. Does not work for loop counts > 2^31 for integer(kind=4) variables
We must support the full range. */
tree
tree to;
tree step;
tree count;
+ tree count_one;
tree type;
tree cond;
tree cycle_label;
gfc_start_block (&block);
- /* Create GIMPLE versions of all expressions in the iterator. */
-
+ /* Evaluate all the expressions in the iterator. */
gfc_init_se (&se, NULL);
gfc_conv_expr_lhs (&se, code->ext.iterator->var);
gfc_add_block_to_block (&block, &se.pre);
type = TREE_TYPE (dovar);
gfc_init_se (&se, NULL);
- gfc_conv_expr_type (&se, code->ext.iterator->start, type);
+ gfc_conv_expr_val (&se, code->ext.iterator->start);
gfc_add_block_to_block (&block, &se.pre);
- from = se.expr;
+ from = gfc_evaluate_now (se.expr, &block);
gfc_init_se (&se, NULL);
- gfc_conv_expr_type (&se, code->ext.iterator->end, type);
+ gfc_conv_expr_val (&se, code->ext.iterator->end);
gfc_add_block_to_block (&block, &se.pre);
- to = se.expr;
+ to = gfc_evaluate_now (se.expr, &block);
gfc_init_se (&se, NULL);
- gfc_conv_expr_type (&se, code->ext.iterator->step, type);
-
- /* We don't want this changing part way through. */
- gfc_make_safe_expr (&se);
+ gfc_conv_expr_val (&se, code->ext.iterator->step);
gfc_add_block_to_block (&block, &se.pre);
- step = se.expr;
-
- /* Initialise loop count. This code is executed before we enter the
+ step = gfc_evaluate_now (se.expr, &block);
+
+ /* Special case simple loops. */
+ if (TREE_CODE (type) == INTEGER_TYPE
+ && (integer_onep (step)
+ || tree_int_cst_equal (step, integer_minus_one_node)))
+ return gfc_trans_simple_do (code, &block, dovar, from, to, step);
+
+ /* Initialize loop count. This code is executed before we enter the
loop body. We generate: count = (to + step - from) / step. */
- tmp = fold (build (MINUS_EXPR, type, step, from));
- tmp = fold (build (PLUS_EXPR, type, to, tmp));
- tmp = fold (build (TRUNC_DIV_EXPR, type, tmp, step));
-
- count = gfc_create_var (type, "count");
+ tmp = fold_build2 (MINUS_EXPR, type, step, from);
+ tmp = fold_build2 (PLUS_EXPR, type, to, tmp);
+ if (TREE_CODE (type) == INTEGER_TYPE)
+ {
+ tmp = fold_build2 (TRUNC_DIV_EXPR, type, tmp, step);
+ count = gfc_create_var (type, "count");
+ }
+ else
+ {
+ /* TODO: We could use the same width as the real type.
+ This would probably cause more problems that it solves
+ when we implement "long double" types. */
+ tmp = fold_build2 (RDIV_EXPR, type, tmp, step);
+ tmp = fold_build1 (FIX_TRUNC_EXPR, gfc_array_index_type, tmp);
+ count = gfc_create_var (gfc_array_index_type, "count");
+ }
gfc_add_modify_expr (&block, count, tmp);
- /* Initialise the DO variable: dovar = from. */
+ count_one = build_int_cst (TREE_TYPE (count), 1);
+
+ /* Initialize the DO variable: dovar = from. */
gfc_add_modify_expr (&block, dovar, from);
/* Loop body. */
exit_label = gfc_build_label_decl (NULL_TREE);
/* Start with the loop condition. Loop until count <= 0. */
- cond = build (LE_EXPR, boolean_type_node, count, integer_zero_node);
+ cond = fold_build2 (LE_EXPR, boolean_type_node, count,
+ build_int_cst (TREE_TYPE (count), 0));
tmp = build1_v (GOTO_EXPR, exit_label);
TREE_USED (exit_label) = 1;
- tmp = build_v (COND_EXPR, cond, tmp, build_empty_stmt ());
+ tmp = fold_build3 (COND_EXPR, void_type_node,
+ cond, tmp, build_empty_stmt ());
gfc_add_expr_to_block (&body, tmp);
/* Put these labels where they can be found later. We put the
}
/* Increment the loop variable. */
- tmp = build (PLUS_EXPR, type, dovar, step);
+ tmp = build2 (PLUS_EXPR, type, dovar, step);
gfc_add_modify_expr (&body, dovar, tmp);
/* Decrement the loop count. */
- tmp = build (MINUS_EXPR, type, count, integer_one_node);
+ tmp = build2 (MINUS_EXPR, TREE_TYPE (count), count, count_one);
gfc_add_modify_expr (&body, count, tmp);
/* End of loop body. */
tmp = gfc_finish_block (&body);
/* The for loop itself. */
- tmp = build_v (LOOP_EXPR, tmp);
+ tmp = build1_v (LOOP_EXPR, tmp);
gfc_add_expr_to_block (&block, tmp);
/* Add the exit label. */
gfc_init_se (&cond, NULL);
gfc_conv_expr_val (&cond, code->expr);
gfc_add_block_to_block (&block, &cond.pre);
- cond.expr = fold (build1 (TRUTH_NOT_EXPR, boolean_type_node, cond.expr));
+ cond.expr = fold_build1 (TRUTH_NOT_EXPR, boolean_type_node, cond.expr);
/* Build "IF (! cond) GOTO exit_label". */
tmp = build1_v (GOTO_EXPR, exit_label);
TREE_USED (exit_label) = 1;
- tmp = build_v (COND_EXPR, cond.expr, tmp, build_empty_stmt ());
+ tmp = fold_build3 (COND_EXPR, void_type_node,
+ cond.expr, tmp, build_empty_stmt ());
gfc_add_expr_to_block (&block, tmp);
/* The main body of the loop. */
gfc_init_block (&block);
/* Build the loop. */
- tmp = build_v (LOOP_EXPR, tmp);
+ tmp = build1_v (LOOP_EXPR, tmp);
gfc_add_expr_to_block (&block, tmp);
/* Add the exit label. */
internal representation of CASE(N).
In the first and second case, we need to set a value for
- high. In the thirth case, we don't because the GCC middle
+ high. In the third case, we don't because the GCC middle
end represents a single case value by just letting high be
a NULL_TREE. We can't do that because we need to be able
to represent unbounded cases. */
}
/* Build a label. */
- label = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
- DECL_CONTEXT (label) = current_function_decl;
+ label = gfc_build_label_decl (NULL_TREE);
/* Add this case label.
Add parameter 'label', make it match GCC backend. */
- tmp = build (CASE_LABEL_EXPR, void_type_node, low, high, label);
+ tmp = build3 (CASE_LABEL_EXPR, void_type_node, low, high, label);
gfc_add_expr_to_block (&body, tmp);
}
}
tmp = gfc_finish_block (&body);
- tmp = build_v (SWITCH_EXPR, se.expr, tmp, NULL_TREE);
+ tmp = build3_v (SWITCH_EXPR, se.expr, tmp, NULL_TREE);
gfc_add_expr_to_block (&block, tmp);
tmp = build1_v (LABEL_EXPR, end_label);
}
else
{
- tree true_tree, false_tree;
+ tree true_tree, false_tree, stmt;
true_tree = build_empty_stmt ();
false_tree = build_empty_stmt ();
if (f != NULL)
false_tree = gfc_trans_code (f->next);
- gfc_add_expr_to_block (&block, build_v (COND_EXPR, se.expr,
- true_tree, false_tree));
+ stmt = fold_build3 (COND_EXPR, void_type_node, se.expr,
+ true_tree, false_tree);
+ gfc_add_expr_to_block (&block, stmt);
}
return gfc_finish_block (&block);
gfc_trans_character_select (gfc_code *code)
{
tree init, node, end_label, tmp, type, args, *labels;
+ tree case_label;
stmtblock_t block, body;
gfc_case *cp, *d;
gfc_code *c;
if (select_struct == NULL)
{
+ tree gfc_int4_type_node = gfc_get_int_type (4);
+
select_struct = make_node (RECORD_TYPE);
TYPE_NAME (select_struct) = get_identifier ("_jump_struct");
{
for (d = c->ext.case_list; d; d = d->next)
{
- tmp = build_v (LABEL_EXPR, labels[d->n]);
+ tmp = build1_v (LABEL_EXPR, labels[d->n]);
gfc_add_expr_to_block (&body, tmp);
}
tmp = gfc_trans_code (c->next);
gfc_add_expr_to_block (&body, tmp);
- tmp = build_v (GOTO_EXPR, end_label);
+ tmp = build1_v (GOTO_EXPR, end_label);
gfc_add_expr_to_block (&body, tmp);
}
tmp = gfc_build_addr_expr (pvoid_type_node, labels[i]);
node = tree_cons (ss_target, tmp, node);
- tmp = build1 (CONSTRUCTOR, select_struct, nreverse (node));
+ tmp = build_constructor_from_list (select_struct, nreverse (node));
init = tree_cons (NULL_TREE, tmp, init);
}
- type = build_array_type (select_struct,
- build_index_type (build_int_2(n - 1, 0)));
+ type = build_array_type (select_struct, build_index_type
+ (build_int_cst (NULL_TREE, n - 1)));
- init = build1 (CONSTRUCTOR, type, nreverse(init));
+ init = build_constructor_from_list (type, nreverse(init));
TREE_CONSTANT (init) = 1;
TREE_INVARIANT (init) = 1;
TREE_STATIC (init) = 1;
TREE_CONSTANT (tmp) = 1;
TREE_INVARIANT (tmp) = 1;
TREE_STATIC (tmp) = 1;
+ TREE_READONLY (tmp) = 1;
DECL_INITIAL (tmp) = init;
init = tmp;
init = gfc_build_addr_expr (pvoid_type_node, init);
args = gfc_chainon_list (NULL_TREE, init);
- tmp = build_int_2 (n, 0);
+ tmp = build_int_cst (NULL_TREE, n);
args = gfc_chainon_list (args, tmp);
tmp = gfc_build_addr_expr (pvoid_type_node, end_label);
gfc_add_block_to_block (&block, &se.pre);
- tmp = gfc_build_function_call (gfor_fndecl_select_string, args);
- tmp = build1 (GOTO_EXPR, void_type_node, tmp);
+ tmp = build_function_call_expr (gfor_fndecl_select_string, args);
+ case_label = gfc_create_var (TREE_TYPE (tmp), "case_label");
+ gfc_add_modify_expr (&block, case_label, tmp);
+
+ gfc_add_block_to_block (&block, &se.post);
+
+ tmp = build1 (GOTO_EXPR, void_type_node, case_label);
gfc_add_expr_to_block (&block, tmp);
tmp = gfc_finish_block (&body);
gfc_add_expr_to_block (&block, tmp);
- tmp = build_v (LABEL_EXPR, end_label);
+ tmp = build1_v (LABEL_EXPR, end_label);
gfc_add_expr_to_block (&block, tmp);
if (n != 0)
tree
gfc_trans_select (gfc_code * code)
{
- assert (code && code->expr);
+ gcc_assert (code && code->expr);
/* Empty SELECT constructs are legal. */
if (code->block == NULL)
}
-/* Generate the loops for a FORALL block. The normal loop format:
+/* Generate the loops for a FORALL block, specified by FORALL_TMP. BODY
+ is the contents of the FORALL block/stmt to be iterated. MASK_FLAG
+ indicates whether we should generate code to test the FORALLs mask
+ array. OUTER is the loop header to be used for initializing mask
+ indices.
+
+ The generated loop format is:
count = (end - start + step) / step
loopvar = start
while (1)
end_of_loop: */
static tree
-gfc_trans_forall_loop (forall_info *forall_tmp, int nvar, tree body, int mask_flag)
+gfc_trans_forall_loop (forall_info *forall_tmp, tree body,
+ int mask_flag, stmtblock_t *outer)
{
- int n;
+ int n, nvar;
tree tmp;
tree cond;
stmtblock_t block;
tree exit_label;
tree count;
- tree var, start, end, step, mask, maskindex;
+ tree var, start, end, step;
iter_info *iter;
+ /* Initialize the mask index outside the FORALL nest. */
+ if (mask_flag && forall_tmp->mask)
+ gfc_add_modify_expr (outer, forall_tmp->maskindex, gfc_index_zero_node);
+
iter = forall_tmp->this_loop;
+ nvar = forall_tmp->nvar;
for (n = 0; n < nvar; n++)
{
var = iter->var;
gfc_init_block (&block);
/* The exit condition. */
- cond = build (LE_EXPR, boolean_type_node, count, integer_zero_node);
+ cond = fold_build2 (LE_EXPR, boolean_type_node,
+ count, build_int_cst (TREE_TYPE (count), 0));
tmp = build1_v (GOTO_EXPR, exit_label);
- tmp = build_v (COND_EXPR, cond, tmp, build_empty_stmt ());
+ tmp = fold_build3 (COND_EXPR, void_type_node,
+ cond, tmp, build_empty_stmt ());
gfc_add_expr_to_block (&block, tmp);
/* The main loop body. */
gfc_add_expr_to_block (&block, body);
/* Increment the loop variable. */
- tmp = build (PLUS_EXPR, TREE_TYPE (var), var, step);
+ tmp = build2 (PLUS_EXPR, TREE_TYPE (var), var, step);
gfc_add_modify_expr (&block, var, tmp);
- /* Advance to the next mask element. */
- if (mask_flag)
- {
- mask = forall_tmp->mask;
- maskindex = forall_tmp->maskindex;
- if (mask)
- {
- tmp = build (PLUS_EXPR, gfc_array_index_type, maskindex,
- integer_one_node);
- gfc_add_modify_expr (&block, maskindex, tmp);
- }
- }
+ /* Advance to the next mask element. Only do this for the
+ innermost loop. */
+ if (n == 0 && mask_flag && forall_tmp->mask)
+ {
+ tree maskindex = forall_tmp->maskindex;
+ tmp = build2 (PLUS_EXPR, gfc_array_index_type,
+ maskindex, gfc_index_one_node);
+ gfc_add_modify_expr (&block, maskindex, tmp);
+ }
+
/* Decrement the loop counter. */
- tmp = build (MINUS_EXPR, TREE_TYPE (var), count, integer_one_node);
+ tmp = build2 (MINUS_EXPR, TREE_TYPE (var), count, gfc_index_one_node);
gfc_add_modify_expr (&block, count, tmp);
body = gfc_finish_block (&block);
gfc_init_block (&block);
gfc_add_modify_expr (&block, var, start);
+
/* Initialize the loop counter. */
- tmp = fold (build (MINUS_EXPR, TREE_TYPE (var), step, start));
- tmp = fold (build (PLUS_EXPR, TREE_TYPE (var), end, tmp));
- tmp = fold (build (TRUNC_DIV_EXPR, TREE_TYPE (var), tmp, step));
+ tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (var), step, start);
+ tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (var), end, tmp);
+ tmp = fold_build2 (TRUNC_DIV_EXPR, TREE_TYPE (var), tmp, step);
gfc_add_modify_expr (&block, count, tmp);
/* The loop expression. */
- tmp = build_v (LOOP_EXPR, body);
+ tmp = build1_v (LOOP_EXPR, body);
gfc_add_expr_to_block (&block, tmp);
/* The exit label. */
}
-/* Generate the body and loops according to MASK_FLAG and NEST_FLAG.
- if MASK_FLAG is non-zero, the body is controlled by maskes in forall
- nest, otherwise, the body is not controlled by maskes.
- if NEST_FLAG is non-zero, generate loops for nested forall, otherwise,
- only generate loops for the current forall level. */
+/* Generate the body and loops according to MASK_FLAG. If MASK_FLAG
+ is nonzero, the body is controlled by all masks in the forall nest.
+ Otherwise, the innermost loop is not controlled by its mask. This
+ is used for initializing that mask. */
static tree
gfc_trans_nested_forall_loop (forall_info * nested_forall_info, tree body,
- int mask_flag, int nest_flag)
+ int mask_flag)
{
tree tmp;
- int nvar;
+ stmtblock_t header;
forall_info *forall_tmp;
- tree pmask, mask, maskindex;
+ tree mask, maskindex;
+
+ gfc_start_block (&header);
forall_tmp = nested_forall_info;
- /* Generate loops for nested forall. */
- if (nest_flag)
+ while (forall_tmp != NULL)
{
- while (forall_tmp->next_nest != NULL)
- forall_tmp = forall_tmp->next_nest;
- while (forall_tmp != NULL)
+ /* Generate body with masks' control. */
+ if (mask_flag)
{
- /* Generate body with masks' control. */
- if (mask_flag)
- {
- pmask = forall_tmp->pmask;
- mask = forall_tmp->mask;
- maskindex = forall_tmp->maskindex;
-
- if (mask)
- {
- /* If a mask was specified make the assignment contitional. */
- if (pmask)
- tmp = gfc_build_indirect_ref (mask);
- else
- tmp = mask;
- tmp = gfc_build_array_ref (tmp, maskindex);
+ mask = forall_tmp->mask;
+ maskindex = forall_tmp->maskindex;
- body = build_v (COND_EXPR, tmp, body, build_empty_stmt ());
- }
+ /* If a mask was specified make the assignment conditional. */
+ if (mask)
+ {
+ tmp = gfc_build_array_ref (mask, maskindex);
+ body = build3_v (COND_EXPR, tmp, body, build_empty_stmt ());
}
- nvar = forall_tmp->nvar;
- body = gfc_trans_forall_loop (forall_tmp, nvar, body, mask_flag);
- forall_tmp = forall_tmp->outer;
}
- }
- else
- {
- nvar = forall_tmp->nvar;
- body = gfc_trans_forall_loop (forall_tmp, nvar, body, mask_flag);
+ body = gfc_trans_forall_loop (forall_tmp, body, mask_flag, &header);
+ forall_tmp = forall_tmp->prev_nest;
+ mask_flag = 1;
}
- return body;
+ gfc_add_expr_to_block (&header, body);
+ return gfc_finish_block (&header);
}
if (INTEGER_CST_P (size))
{
- tmp = fold (build (MINUS_EXPR, gfc_array_index_type, size,
- integer_one_node));
+ tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, size,
+ gfc_index_one_node);
}
else
tmp = NULL_TREE;
- type = build_range_type (gfc_array_index_type, integer_zero_node, tmp);
+ type = build_range_type (gfc_array_index_type, gfc_index_zero_node, tmp);
type = build_array_type (elem_type, type);
if (gfc_can_put_var_on_stack (bytesize))
{
- assert (INTEGER_CST_P (size));
+ gcc_assert (INTEGER_CST_P (size));
tmpvar = gfc_create_var (type, "temp");
*pdata = NULL_TREE;
}
else if (gfc_index_integer_kind == 8)
tmp = gfor_fndecl_internal_malloc64;
else
- abort ();
- tmp = gfc_build_function_call (tmp, args);
+ gcc_unreachable ();
+ tmp = build_function_call_expr (tmp, args);
tmp = convert (TREE_TYPE (tmpvar), tmp);
gfc_add_modify_expr (pblock, tmpvar, tmp);
}
/* Generate codes to copy the temporary to the actual lhs. */
static tree
-generate_loop_for_temp_to_lhs (gfc_expr *expr, tree tmp1, tree size,
- tree count3, tree count1, tree count2, tree wheremask)
+generate_loop_for_temp_to_lhs (gfc_expr *expr, tree tmp1, tree count3,
+ tree count1, tree wheremask, bool invert)
{
gfc_ss *lss;
gfc_se lse, rse;
stmtblock_t block, body;
gfc_loopinfo loop1;
- tree tmp, tmp2;
- tree index;
+ tree tmp;
tree wheremaskexpr;
/* Walk the lhs. */
gfc_add_block_to_block (&block, &lse.post);
/* Increment the count1. */
- tmp = fold (build (PLUS_EXPR, TREE_TYPE (count1), count1, size));
+ tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (count1), count1,
+ gfc_index_one_node);
gfc_add_modify_expr (&block, count1, tmp);
+
tmp = gfc_finish_block (&block);
}
else
gfc_conv_loop_setup (&loop1);
gfc_mark_ss_chain_used (lss, 1);
- /* Initialize count2. */
- gfc_add_modify_expr (&block, count2, integer_zero_node);
/* Start the scalarized loop body. */
gfc_start_scalarized_body (&loop1, &body);
/* Form the expression of the temporary. */
if (lss != gfc_ss_terminator)
- {
- index = fold (build (PLUS_EXPR, gfc_array_index_type,
- count1, count2));
- rse.expr = gfc_build_array_ref (tmp1, index);
- }
+ rse.expr = gfc_build_array_ref (tmp1, count1);
/* Translate expr. */
gfc_conv_expr (&lse, expr);
/* Use the scalar assignment. */
- tmp = gfc_trans_scalar_assign (&lse, &rse, expr->ts.type);
+ rse.string_length = lse.string_length;
+ tmp = gfc_trans_scalar_assign (&lse, &rse, expr->ts, false, false);
- /* Form the mask expression according to the mask tree list. */
- if (wheremask)
- {
- tmp2 = wheremask;
- if (tmp2 != NULL)
- wheremaskexpr = gfc_build_array_ref (tmp2, count3);
- tmp2 = TREE_CHAIN (tmp2);
- while (tmp2)
- {
- tmp1 = gfc_build_array_ref (tmp2, count3);
- wheremaskexpr = build (TRUTH_AND_EXPR, TREE_TYPE (tmp1),
- wheremaskexpr, tmp1);
- tmp2 = TREE_CHAIN (tmp2);
- }
- tmp = build_v (COND_EXPR, wheremaskexpr, tmp, build_empty_stmt ());
+ /* Form the mask expression according to the mask tree list. */
+ if (wheremask)
+ {
+ wheremaskexpr = gfc_build_array_ref (wheremask, count3);
+ if (invert)
+ wheremaskexpr = fold_build1 (TRUTH_NOT_EXPR,
+ TREE_TYPE (wheremaskexpr),
+ wheremaskexpr);
+ tmp = fold_build3 (COND_EXPR, void_type_node,
+ wheremaskexpr, tmp, build_empty_stmt ());
}
gfc_add_expr_to_block (&body, tmp);
- /* Increment count2. */
- tmp = fold (build (PLUS_EXPR, TREE_TYPE (count2), count2,
- integer_one_node));
- gfc_add_modify_expr (&body, count2, tmp);
+ /* Increment count1. */
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count1, gfc_index_one_node);
+ gfc_add_modify_expr (&body, count1, tmp);
/* Increment count3. */
if (count3)
- {
- tmp = fold (build (PLUS_EXPR, TREE_TYPE (count3), count3,
- integer_one_node));
- gfc_add_modify_expr (&body, count3, tmp);
- }
+ {
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count3, gfc_index_one_node);
+ gfc_add_modify_expr (&body, count3, tmp);
+ }
/* Generate the copying loops. */
gfc_trans_scalarizing_loops (&loop1, &body);
gfc_add_block_to_block (&block, &loop1.post);
gfc_cleanup_loop (&loop1);
- /* Increment count1. */
- tmp = fold (build (PLUS_EXPR, TREE_TYPE (count1), count1, size));
- gfc_add_modify_expr (&block, count1, tmp);
tmp = gfc_finish_block (&block);
}
return tmp;
}
-/* Generate codes to copy rhs to the temporary. TMP1 is the address of temporary
- LSS and RSS are formed in function compute_inner_temp_size(), and should
- not be freed. */
+/* Generate codes to copy rhs to the temporary. TMP1 is the address of
+ temporary, LSS and RSS are formed in function compute_inner_temp_size(),
+ and should not be freed. WHEREMASK is the conditional execution mask
+ whose sense may be inverted by INVERT. */
static tree
-generate_loop_for_rhs_to_temp (gfc_expr *expr2, tree tmp1, tree size,
- tree count3, tree count1, tree count2,
- gfc_ss *lss, gfc_ss *rss, tree wheremask)
+generate_loop_for_rhs_to_temp (gfc_expr *expr2, tree tmp1, tree count3,
+ tree count1, gfc_ss *lss, gfc_ss *rss,
+ tree wheremask, bool invert)
{
stmtblock_t block, body1;
gfc_loopinfo loop;
gfc_se lse;
gfc_se rse;
- tree tmp, tmp2, index;
+ tree tmp;
tree wheremaskexpr;
gfc_start_block (&block);
}
else
{
- /* Initilize count2. */
- gfc_add_modify_expr (&block, count2, integer_zero_node);
-
- /* Initiliaze the loop. */
+ /* Initialize the loop. */
gfc_init_loopinfo (&loop);
/* We may need LSS to determine the shape of the expression. */
gfc_conv_expr (&rse, expr2);
/* Form the expression of the temporary. */
- index = fold (build (PLUS_EXPR, gfc_array_index_type, count1, count2));
- lse.expr = gfc_build_array_ref (tmp1, index);
+ lse.expr = gfc_build_array_ref (tmp1, count1);
}
/* Use the scalar assignment. */
- tmp = gfc_trans_scalar_assign (&lse, &rse, expr2->ts.type);
+ lse.string_length = rse.string_length;
+ tmp = gfc_trans_scalar_assign (&lse, &rse, expr2->ts, true,
+ expr2->expr_type == EXPR_VARIABLE);
/* Form the mask expression according to the mask tree list. */
if (wheremask)
{
- tmp2 = wheremask;
- if (tmp2 != NULL)
- wheremaskexpr = gfc_build_array_ref (tmp2, count3);
- tmp2 = TREE_CHAIN (tmp2);
- while (tmp2)
- {
- tmp1 = gfc_build_array_ref (tmp2, count3);
- wheremaskexpr = build (TRUTH_AND_EXPR, TREE_TYPE (tmp1),
- wheremaskexpr, tmp1);
- tmp2 = TREE_CHAIN (tmp2);
- }
- tmp = build_v (COND_EXPR, wheremaskexpr, tmp, build_empty_stmt ());
+ wheremaskexpr = gfc_build_array_ref (wheremask, count3);
+ if (invert)
+ wheremaskexpr = fold_build1 (TRUTH_NOT_EXPR,
+ TREE_TYPE (wheremaskexpr),
+ wheremaskexpr);
+ tmp = fold_build3 (COND_EXPR, void_type_node,
+ wheremaskexpr, tmp, build_empty_stmt ());
}
gfc_add_expr_to_block (&body1, tmp);
if (lss == gfc_ss_terminator)
{
gfc_add_block_to_block (&block, &body1);
+
+ /* Increment count1. */
+ tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (count1), count1,
+ gfc_index_one_node);
+ gfc_add_modify_expr (&block, count1, tmp);
}
else
{
- /* Increment count2. */
- tmp = fold (build (PLUS_EXPR, gfc_array_index_type, count2,
- integer_one_node));
- gfc_add_modify_expr (&body1, count2, tmp);
+ /* Increment count1. */
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count1, gfc_index_one_node);
+ gfc_add_modify_expr (&body1, count1, tmp);
/* Increment count3. */
if (count3)
- {
- tmp = fold (build (PLUS_EXPR, gfc_array_index_type, count3,
- integer_one_node));
- gfc_add_modify_expr (&body1, count3, tmp);
- }
+ {
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count3, gfc_index_one_node);
+ gfc_add_modify_expr (&body1, count3, tmp);
+ }
/* Generate the copying loops. */
gfc_trans_scalarizing_loops (&loop, &body1);
gfc_cleanup_loop (&loop);
/* TODO: Reuse lss and rss when copying temp->lhs. Need to be careful
- as tree nodes in SS may not be valid in different scope. */
+ as tree nodes in SS may not be valid in different scope. */
}
- /* Increment count1. */
- tmp = fold (build (PLUS_EXPR, TREE_TYPE (count1), count1, size));
- gfc_add_modify_expr (&block, count1, tmp);
tmp = gfc_finish_block (&block);
return tmp;
gfc_loopinfo loop;
tree size;
int i;
+ int save_flag;
tree tmp;
*lss = gfc_walk_expr (expr1);
*rss = NULL;
- size = integer_one_node;
+ size = gfc_index_one_node;
if (*lss != gfc_ss_terminator)
{
gfc_init_loopinfo (&loop);
loop.array_parameter = 1;
/* Calculate the bounds of the scalarization. */
+ save_flag = flag_bounds_check;
+ flag_bounds_check = 0;
gfc_conv_ss_startstride (&loop);
+ flag_bounds_check = save_flag;
gfc_conv_loop_setup (&loop);
/* Figure out how many elements we need. */
for (i = 0; i < loop.dimen; i++)
{
- tmp = fold (build (MINUS_EXPR, TREE_TYPE (loop.from[i]),
- integer_one_node, loop.from[i]));
- tmp = fold (build (PLUS_EXPR, TREE_TYPE (tmp), tmp, loop.to[i]));
- size = fold (build (MULT_EXPR, TREE_TYPE (size), size, tmp));
+ tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
+ gfc_index_one_node, loop.from[i]);
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ tmp, loop.to[i]);
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
}
gfc_add_block_to_block (pblock, &loop.pre);
size = gfc_evaluate_now (size, pblock);
static tree
compute_overall_iter_number (forall_info *nested_forall_info, tree inner_size,
- stmtblock_t *block)
+ stmtblock_t *inner_size_body, stmtblock_t *block)
{
tree tmp, number;
stmtblock_t body;
+ /* Optimize the case of unconditional FORALL nests with constant bounds. */
+ if (INTEGER_CST_P (inner_size))
+ {
+ bool all_const_p = true;
+ forall_info *forall_tmp;
+
+ /* First check whether all the bounds are constant. */
+ for (forall_tmp = nested_forall_info;
+ forall_tmp;
+ forall_tmp = forall_tmp->prev_nest)
+ if (forall_tmp->mask || !INTEGER_CST_P (forall_tmp->size))
+ {
+ all_const_p = false;
+ break;
+ }
+
+ if (all_const_p)
+ {
+ tree tmp = inner_size;
+ for (forall_tmp = nested_forall_info;
+ forall_tmp;
+ forall_tmp = forall_tmp->prev_nest)
+ tmp = fold_build2 (MULT_EXPR, gfc_array_index_type,
+ tmp, forall_tmp->size);
+ return tmp;
+ }
+ }
+
/* TODO: optimizing the computing process. */
number = gfc_create_var (gfc_array_index_type, "num");
- gfc_add_modify_expr (block, number, integer_zero_node);
+ gfc_add_modify_expr (block, number, gfc_index_zero_node);
gfc_start_block (&body);
+ if (inner_size_body)
+ gfc_add_block_to_block (&body, inner_size_body);
if (nested_forall_info)
- tmp = build (PLUS_EXPR, gfc_array_index_type, number,
- inner_size);
+ tmp = build2 (PLUS_EXPR, gfc_array_index_type, number,
+ inner_size);
else
tmp = inner_size;
gfc_add_modify_expr (&body, number, tmp);
/* Generate loops. */
if (nested_forall_info != NULL)
- tmp = gfc_trans_nested_forall_loop (nested_forall_info, tmp, 0, 1);
+ tmp = gfc_trans_nested_forall_loop (nested_forall_info, tmp, 1);
gfc_add_expr_to_block (block, tmp);
}
-/* Allocate temporary for forall construct according to the information in
- nested_forall_info. INNER_SIZE is the size of temporary needed in the
- assignment inside forall. PTEMP1 is returned for space free. */
+/* Allocate temporary for forall construct. SIZE is the size of temporary
+ needed. PTEMP1 is returned for space free. */
static tree
-allocate_temp_for_forall_nest (forall_info * nested_forall_info, tree type,
- tree inner_size, stmtblock_t * block,
- tree * ptemp1)
+allocate_temp_for_forall_nest_1 (tree type, tree size, stmtblock_t * block,
+ tree * ptemp1)
{
+ tree bytesize;
tree unit;
- tree temp1;
tree tmp;
- tree bytesize, size;
-
- /* Calculate the total size of temporary needed in forall construct. */
- size = compute_overall_iter_number (nested_forall_info, inner_size, block);
unit = TYPE_SIZE_UNIT (type);
- bytesize = fold (build (MULT_EXPR, gfc_array_index_type, size, unit));
+ if (!integer_onep (unit))
+ bytesize = fold_build2 (MULT_EXPR, gfc_array_index_type, size, unit);
+ else
+ bytesize = size;
*ptemp1 = NULL;
- temp1 = gfc_do_allocate (bytesize, size, ptemp1, block, type);
+ tmp = gfc_do_allocate (bytesize, size, ptemp1, block, type);
if (*ptemp1)
- tmp = gfc_build_indirect_ref (temp1);
- else
- tmp = temp1;
-
+ tmp = build_fold_indirect_ref (tmp);
return tmp;
}
-/* Handle assignments inside forall which need temporary. */
-static void
-gfc_trans_assign_need_temp (gfc_expr * expr1, gfc_expr * expr2, tree wheremask,
- forall_info * nested_forall_info,
- stmtblock_t * block)
-{
+/* Allocate temporary for forall construct according to the information in
+ nested_forall_info. INNER_SIZE is the size of temporary needed in the
+ assignment inside forall. PTEMP1 is returned for space free. */
+
+static tree
+allocate_temp_for_forall_nest (forall_info * nested_forall_info, tree type,
+ tree inner_size, stmtblock_t * inner_size_body,
+ stmtblock_t * block, tree * ptemp1)
+{
+ tree size;
+
+ /* Calculate the total size of temporary needed in forall construct. */
+ size = compute_overall_iter_number (nested_forall_info, inner_size,
+ inner_size_body, block);
+
+ return allocate_temp_for_forall_nest_1 (type, size, block, ptemp1);
+}
+
+
+/* Handle assignments inside forall which need temporary.
+
+ forall (i=start:end:stride; maskexpr)
+ e<i> = f<i>
+ end forall
+ (where e,f<i> are arbitrary expressions possibly involving i
+ and there is a dependency between e<i> and f<i>)
+ Translates to:
+ masktmp(:) = maskexpr(:)
+
+ maskindex = 0;
+ count1 = 0;
+ num = 0;
+ for (i = start; i <= end; i += stride)
+ num += SIZE (f<i>)
+ count1 = 0;
+ ALLOCATE (tmp(num))
+ for (i = start; i <= end; i += stride)
+ {
+ if (masktmp[maskindex++])
+ tmp[count1++] = f<i>
+ }
+ maskindex = 0;
+ count1 = 0;
+ for (i = start; i <= end; i += stride)
+ {
+ if (masktmp[maskindex++])
+ e<i> = tmp[count1++]
+ }
+ DEALLOCATE (tmp)
+ */
+static void
+gfc_trans_assign_need_temp (gfc_expr * expr1, gfc_expr * expr2,
+ tree wheremask, bool invert,
+ forall_info * nested_forall_info,
+ stmtblock_t * block)
+{
tree type;
tree inner_size;
gfc_ss *lss, *rss;
- tree count, count1, count2;
+ tree count, count1;
tree tmp, tmp1;
tree ptemp1;
- tree mask, maskindex;
- forall_info *forall_tmp;
+ stmtblock_t inner_size_body;
- /* Create vars. count1 is the current iterator number of the nested forall.
- count2 is the current iterator number of the inner loops needed in the
- assignment. */
+ /* Create vars. count1 is the current iterator number of the nested
+ forall. */
count1 = gfc_create_var (gfc_array_index_type, "count1");
- count2 = gfc_create_var (gfc_array_index_type, "count2");
/* Count is the wheremask index. */
if (wheremask)
{
count = gfc_create_var (gfc_array_index_type, "count");
- gfc_add_modify_expr (block, count, integer_zero_node);
+ gfc_add_modify_expr (block, count, gfc_index_zero_node);
}
else
count = NULL;
/* Initialize count1. */
- gfc_add_modify_expr (block, count1, integer_zero_node);
+ gfc_add_modify_expr (block, count1, gfc_index_zero_node);
/* Calculate the size of temporary needed in the assignment. Return loop, lss
and rss which are used in function generate_loop_for_rhs_to_temp(). */
- inner_size = compute_inner_temp_size (expr1, expr2, block, &lss, &rss);
+ gfc_init_block (&inner_size_body);
+ inner_size = compute_inner_temp_size (expr1, expr2, &inner_size_body,
+ &lss, &rss);
/* The type of LHS. Used in function allocate_temp_for_forall_nest */
type = gfc_typenode_for_spec (&expr1->ts);
/* Allocate temporary for nested forall construct according to the
- information in nested_forall_info and inner_size. */
- tmp1 = allocate_temp_for_forall_nest (nested_forall_info, type,
- inner_size, block, &ptemp1);
-
- /* Initialize the maskindexes. */
- forall_tmp = nested_forall_info;
- while (forall_tmp != NULL)
- {
- mask = forall_tmp->mask;
- maskindex = forall_tmp->maskindex;
- if (mask)
- gfc_add_modify_expr (block, maskindex, integer_zero_node);
- forall_tmp = forall_tmp->next_nest;
- }
+ information in nested_forall_info and inner_size. */
+ tmp1 = allocate_temp_for_forall_nest (nested_forall_info, type, inner_size,
+ &inner_size_body, block, &ptemp1);
/* Generate codes to copy rhs to the temporary . */
- tmp = generate_loop_for_rhs_to_temp (expr2, tmp1, inner_size, count,
- count1, count2, lss, rss, wheremask);
+ tmp = generate_loop_for_rhs_to_temp (expr2, tmp1, count, count1, lss, rss,
+ wheremask, invert);
- /* Generate body and loops according to the inforamtion in
+ /* Generate body and loops according to the information in
nested_forall_info. */
- tmp = gfc_trans_nested_forall_loop (nested_forall_info, tmp, 1, 1);
+ tmp = gfc_trans_nested_forall_loop (nested_forall_info, tmp, 1);
gfc_add_expr_to_block (block, tmp);
/* Reset count1. */
- gfc_add_modify_expr (block, count1, integer_zero_node);
-
- /* Reset maskindexed. */
- forall_tmp = nested_forall_info;
- while (forall_tmp != NULL)
- {
- mask = forall_tmp->mask;
- maskindex = forall_tmp->maskindex;
- if (mask)
- gfc_add_modify_expr (block, maskindex, integer_zero_node);
- forall_tmp = forall_tmp->next_nest;
- }
+ gfc_add_modify_expr (block, count1, gfc_index_zero_node);
/* Reset count. */
if (wheremask)
- gfc_add_modify_expr (block, count, integer_zero_node);
+ gfc_add_modify_expr (block, count, gfc_index_zero_node);
/* Generate codes to copy the temporary to lhs. */
- tmp = generate_loop_for_temp_to_lhs (expr1, tmp1, inner_size, count,
- count1, count2, wheremask);
+ tmp = generate_loop_for_temp_to_lhs (expr1, tmp1, count, count1,
+ wheremask, invert);
- /* Generate body and loops according to the inforamtion in
+ /* Generate body and loops according to the information in
nested_forall_info. */
- tmp = gfc_trans_nested_forall_loop (nested_forall_info, tmp, 1, 1);
+ tmp = gfc_trans_nested_forall_loop (nested_forall_info, tmp, 1);
gfc_add_expr_to_block (block, tmp);
if (ptemp1)
{
/* Free the temporary. */
tmp = gfc_chainon_list (NULL_TREE, ptemp1);
- tmp = gfc_build_function_call (gfor_fndecl_internal_free, tmp);
+ tmp = build_function_call_expr (gfor_fndecl_internal_free, tmp);
gfc_add_expr_to_block (block, tmp);
}
}
stmtblock_t body;
tree count;
tree tmp, tmp1, ptemp1;
- tree mask, maskindex;
- forall_info *forall_tmp;
count = gfc_create_var (gfc_array_index_type, "count");
- gfc_add_modify_expr (block, count, integer_zero_node);
+ gfc_add_modify_expr (block, count, gfc_index_zero_node);
inner_size = integer_one_node;
lss = gfc_walk_expr (expr1);
/* Allocate temporary for nested forall construct according to the
information in nested_forall_info and inner_size. */
- tmp1 = allocate_temp_for_forall_nest (nested_forall_info,
- type, inner_size, block, &ptemp1);
+ tmp1 = allocate_temp_for_forall_nest (nested_forall_info, type,
+ inner_size, NULL, block, &ptemp1);
gfc_start_block (&body);
gfc_init_se (&lse, NULL);
lse.expr = gfc_build_array_ref (tmp1, count);
rse.want_pointer = 1;
gfc_conv_expr (&rse, expr2);
gfc_add_block_to_block (&body, &rse.pre);
- gfc_add_modify_expr (&body, lse.expr, rse.expr);
+ gfc_add_modify_expr (&body, lse.expr,
+ fold_convert (TREE_TYPE (lse.expr), rse.expr));
gfc_add_block_to_block (&body, &rse.post);
/* Increment count. */
- tmp = fold (build (PLUS_EXPR, TREE_TYPE (count), count,
- integer_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count, gfc_index_one_node);
gfc_add_modify_expr (&body, count, tmp);
tmp = gfc_finish_block (&body);
- /* Initialize the maskindexes. */
- forall_tmp = nested_forall_info;
- while (forall_tmp != NULL)
- {
- mask = forall_tmp->mask;
- maskindex = forall_tmp->maskindex;
- if (mask)
- gfc_add_modify_expr (block, maskindex, integer_zero_node);
- forall_tmp = forall_tmp->next_nest;
- }
-
- /* Generate body and loops according to the inforamtion in
+ /* Generate body and loops according to the information in
nested_forall_info. */
- tmp = gfc_trans_nested_forall_loop (nested_forall_info, tmp, 1, 1);
+ tmp = gfc_trans_nested_forall_loop (nested_forall_info, tmp, 1);
gfc_add_expr_to_block (block, tmp);
/* Reset count. */
- gfc_add_modify_expr (block, count, integer_zero_node);
+ gfc_add_modify_expr (block, count, gfc_index_zero_node);
- /* Reset maskindexes. */
- forall_tmp = nested_forall_info;
- while (forall_tmp != NULL)
- {
- mask = forall_tmp->mask;
- maskindex = forall_tmp->maskindex;
- if (mask)
- gfc_add_modify_expr (block, maskindex, integer_zero_node);
- forall_tmp = forall_tmp->next_nest;
- }
gfc_start_block (&body);
gfc_init_se (&lse, NULL);
gfc_init_se (&rse, NULL);
gfc_add_modify_expr (&body, lse.expr, rse.expr);
gfc_add_block_to_block (&body, &lse.post);
/* Increment count. */
- tmp = fold (build (PLUS_EXPR, TREE_TYPE (count), count,
- integer_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count, gfc_index_one_node);
gfc_add_modify_expr (&body, count, tmp);
tmp = gfc_finish_block (&body);
- /* Generate body and loops according to the inforamtion in
+ /* Generate body and loops according to the information in
nested_forall_info. */
- tmp = gfc_trans_nested_forall_loop (nested_forall_info, tmp, 1, 1);
+ tmp = gfc_trans_nested_forall_loop (nested_forall_info, tmp, 1);
gfc_add_expr_to_block (block, tmp);
}
else
/* Allocate temporary for nested forall construct. */
tmp1 = allocate_temp_for_forall_nest (nested_forall_info, parmtype,
- inner_size, block, &ptemp1);
+ inner_size, NULL, block, &ptemp1);
gfc_start_block (&body);
gfc_init_se (&lse, NULL);
lse.expr = gfc_build_array_ref (tmp1, count);
gfc_add_block_to_block (&body, &lse.post);
/* Increment count. */
- tmp = fold (build (PLUS_EXPR, TREE_TYPE (count), count,
- integer_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count, gfc_index_one_node);
gfc_add_modify_expr (&body, count, tmp);
tmp = gfc_finish_block (&body);
- /* Initialize the maskindexes. */
- forall_tmp = nested_forall_info;
- while (forall_tmp != NULL)
- {
- mask = forall_tmp->mask;
- maskindex = forall_tmp->maskindex;
- if (mask)
- gfc_add_modify_expr (block, maskindex, integer_zero_node);
- forall_tmp = forall_tmp->next_nest;
- }
-
- /* Generate body and loops according to the inforamtion in
+ /* Generate body and loops according to the information in
nested_forall_info. */
- tmp = gfc_trans_nested_forall_loop (nested_forall_info, tmp, 1, 1);
+ tmp = gfc_trans_nested_forall_loop (nested_forall_info, tmp, 1);
gfc_add_expr_to_block (block, tmp);
/* Reset count. */
- gfc_add_modify_expr (block, count, integer_zero_node);
+ gfc_add_modify_expr (block, count, gfc_index_zero_node);
- /* Reset maskindexes. */
- forall_tmp = nested_forall_info;
- while (forall_tmp != NULL)
- {
- mask = forall_tmp->mask;
- maskindex = forall_tmp->maskindex;
- if (mask)
- gfc_add_modify_expr (block, maskindex, integer_zero_node);
- forall_tmp = forall_tmp->next_nest;
- }
parm = gfc_build_array_ref (tmp1, count);
lss = gfc_walk_expr (expr1);
gfc_init_se (&lse, NULL);
gfc_add_block_to_block (&body, &lse.post);
/* Increment count. */
- tmp = fold (build (PLUS_EXPR, TREE_TYPE (count), count,
- integer_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count, gfc_index_one_node);
gfc_add_modify_expr (&body, count, tmp);
tmp = gfc_finish_block (&body);
- tmp = gfc_trans_nested_forall_loop (nested_forall_info, tmp, 1, 1);
+ tmp = gfc_trans_nested_forall_loop (nested_forall_info, tmp, 1);
gfc_add_expr_to_block (block, tmp);
}
/* Free the temporary. */
if (ptemp1)
{
tmp = gfc_chainon_list (NULL_TREE, ptemp1);
- tmp = gfc_build_function_call (gfor_fndecl_internal_free, tmp);
+ tmp = build_function_call_expr (gfor_fndecl_internal_free, tmp);
gfc_add_expr_to_block (block, tmp);
}
}
e<i> = f<i>
g<i> = h<i>
end forall
- (where e,f,g,h<i> are arbitary expressions possibly involving i)
+ (where e,f,g,h<i> are arbitrary expressions possibly involving i)
Translates to:
- count = ((end + 1 - start) / staride)
+ count = ((end + 1 - start) / stride)
masktmp(:) = maskexpr(:)
maskindex = 0;
for (i = start; i <= end; i += stride)
{
if (masktmp[maskindex++])
- e<i> = f<i>
+ g<i> = h<i>
}
Note that this code only works when there are no dependencies.
Forall loop with array assignments and data dependencies are a real pain,
because the size of the temporary cannot always be determined before the
- loop is executed. This problem is compouded by the presence of nested
+ loop is executed. This problem is compounded by the presence of nested
FORALL constructs.
*/
tree tmp;
tree assign;
tree size;
- tree bytesize;
- tree tmpvar;
- tree sizevar;
- tree lenvar;
tree maskindex;
tree mask;
tree pmask;
gfc_forall_iterator *fa;
gfc_se se;
gfc_code *c;
- tree *saved_var_decl;
- symbol_attribute *saved_var_attr;
- iter_info *this_forall, *iter_tmp;
- forall_info *info, *forall_tmp;
- temporary_list *temp;
-
- gfc_start_block (&block);
+ gfc_saved_var *saved_vars;
+ iter_info *this_forall;
+ forall_info *info;
n = 0;
/* Count the FORALL index number. */
end = (tree *) gfc_getmem (nvar * sizeof (tree));
step = (tree *) gfc_getmem (nvar * sizeof (tree));
varexpr = (gfc_expr **) gfc_getmem (nvar * sizeof (gfc_expr *));
- saved_var_decl = (tree *) gfc_getmem (nvar * sizeof (tree));
- saved_var_attr = (symbol_attribute *)
- gfc_getmem (nvar * sizeof (symbol_attribute));
+ saved_vars = (gfc_saved_var *) gfc_getmem (nvar * sizeof (gfc_saved_var));
/* Allocate the space for info. */
info = (forall_info *) gfc_getmem (sizeof (forall_info));
+
+ gfc_start_block (&block);
+
n = 0;
for (fa = code->ext.forall_iterator; fa; fa = fa->next)
{
gfc_symbol *sym = fa->var->symtree->n.sym;
- /* allocate space for this_forall. */
+ /* Allocate space for this_forall. */
this_forall = (iter_info *) gfc_getmem (sizeof (iter_info));
- /* Save the FORALL index's backend_decl. */
- saved_var_decl[n] = sym->backend_decl;
-
- /* Save the attribute. */
- saved_var_attr[n] = sym->attr;
-
- /* Set the proper attributes. */
- gfc_clear_attr (&sym->attr);
- sym->attr.referenced = 1;
- sym->attr.flavor = FL_VARIABLE;
-
/* Create a temporary variable for the FORALL index. */
tmp = gfc_typenode_for_spec (&sym->ts);
var[n] = gfc_create_var (tmp, sym->name);
+ gfc_shadow_sym (sym, var[n], &saved_vars[n]);
+
/* Record it in this_forall. */
this_forall->var = var[n];
/* Set the NEXT field of this_forall to NULL. */
this_forall->next = NULL;
/* Link this_forall to the info construct. */
- if (info->this_loop == NULL)
- info->this_loop = this_forall;
- else
+ if (info->this_loop)
{
- iter_tmp = info->this_loop;
+ iter_info *iter_tmp = info->this_loop;
while (iter_tmp->next != NULL)
iter_tmp = iter_tmp->next;
iter_tmp->next = this_forall;
}
+ else
+ info->this_loop = this_forall;
n++;
}
nvar = n;
- /* Work out the number of elements in the mask array. */
- tmpvar = NULL_TREE;
- lenvar = NULL_TREE;
- size = integer_one_node;
- sizevar = NULL_TREE;
-
+ /* Calculate the size needed for the current forall level. */
+ size = gfc_index_one_node;
for (n = 0; n < nvar; n++)
{
- if (lenvar && TREE_TYPE (lenvar) != TREE_TYPE (start[n]))
- lenvar = NULL_TREE;
-
/* size = (end + step - start) / step. */
- tmp = fold (build (MINUS_EXPR, TREE_TYPE (start[n]), step[n], start[n]));
- tmp = fold (build (PLUS_EXPR, TREE_TYPE (end[n]), end[n], tmp));
+ tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (start[n]),
+ step[n], start[n]);
+ tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (end[n]), end[n], tmp);
- tmp = fold (build (FLOOR_DIV_EXPR, TREE_TYPE (tmp), tmp, step[n]));
+ tmp = fold_build2 (FLOOR_DIV_EXPR, TREE_TYPE (tmp), tmp, step[n]);
tmp = convert (gfc_array_index_type, tmp);
- size = fold (build (MULT_EXPR, gfc_array_index_type, size, tmp));
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
}
/* Record the nvar and size of current forall level. */
info->nvar = nvar;
info->size = size;
- /* Link the current forall level to nested_forall_info. */
- forall_tmp = nested_forall_info;
- if (forall_tmp == NULL)
- nested_forall_info = info;
+ /* First we need to allocate the mask. */
+ if (code->expr)
+ {
+ /* As the mask array can be very big, prefer compact boolean types. */
+ tree mask_type = gfc_get_logical_type (gfc_logical_kinds[0].kind);
+ mask = allocate_temp_for_forall_nest (nested_forall_info, mask_type,
+ size, NULL, &block, &pmask);
+ maskindex = gfc_create_var_np (gfc_array_index_type, "mi");
+
+ /* Record them in the info structure. */
+ info->maskindex = maskindex;
+ info->mask = mask;
+ }
else
{
- while (forall_tmp->next_nest != NULL)
- forall_tmp = forall_tmp->next_nest;
- info->outer = forall_tmp;
- forall_tmp->next_nest = info;
+ /* No mask was specified. */
+ maskindex = NULL_TREE;
+ mask = pmask = NULL_TREE;
}
+ /* Link the current forall level to nested_forall_info. */
+ info->prev_nest = nested_forall_info;
+ nested_forall_info = info;
+
/* Copy the mask into a temporary variable if required.
- For now we assume a mask temporary is needed. */
+ For now we assume a mask temporary is needed. */
if (code->expr)
{
- /* Allocate the mask temporary. */
- bytesize = fold (build (MULT_EXPR, gfc_array_index_type, size,
- TYPE_SIZE_UNIT (boolean_type_node)));
+ /* As the mask array can be very big, prefer compact boolean types. */
+ tree mask_type = gfc_get_logical_type (gfc_logical_kinds[0].kind);
- mask = gfc_do_allocate (bytesize, size, &pmask, &block, boolean_type_node);
-
- maskindex = gfc_create_var_np (gfc_array_index_type, "mi");
- /* Record them in the info structure. */
- info->pmask = pmask;
- info->mask = mask;
- info->maskindex = maskindex;
-
- gfc_add_modify_expr (&block, maskindex, integer_zero_node);
+ gfc_add_modify_expr (&block, maskindex, gfc_index_zero_node);
/* Start of mask assignment loop body. */
gfc_start_block (&body);
gfc_add_block_to_block (&body, &se.pre);
/* Store the mask. */
- se.expr = convert (boolean_type_node, se.expr);
+ se.expr = convert (mask_type, se.expr);
- if (pmask)
- tmp = gfc_build_indirect_ref (mask);
- else
- tmp = mask;
- tmp = gfc_build_array_ref (tmp, maskindex);
+ tmp = gfc_build_array_ref (mask, maskindex);
gfc_add_modify_expr (&body, tmp, se.expr);
/* Advance to the next mask element. */
- tmp = build (PLUS_EXPR, gfc_array_index_type, maskindex,
- integer_one_node);
+ tmp = build2 (PLUS_EXPR, gfc_array_index_type,
+ maskindex, gfc_index_one_node);
gfc_add_modify_expr (&body, maskindex, tmp);
/* Generate the loops. */
tmp = gfc_finish_block (&body);
- tmp = gfc_trans_nested_forall_loop (info, tmp, 0, 0);
+ tmp = gfc_trans_nested_forall_loop (info, tmp, 0);
gfc_add_expr_to_block (&block, tmp);
}
- else
- {
- /* No mask was specified. */
- maskindex = NULL_TREE;
- mask = pmask = NULL_TREE;
- }
c = code->block->next;
switch (c->op)
{
case EXEC_ASSIGN:
- /* A scalar or array assingment. */
- need_temp = gfc_check_dependency (c->expr, c->expr2, varexpr, nvar);
- /* Teporaries due to array assignment data dependencies introduce
+ /* A scalar or array assignment. */
+ need_temp = gfc_check_dependency (c->expr, c->expr2, 0);
+ /* Temporaries due to array assignment data dependencies introduce
no end of problems. */
if (need_temp)
- gfc_trans_assign_need_temp (c->expr, c->expr2, NULL,
+ gfc_trans_assign_need_temp (c->expr, c->expr2, NULL, false,
nested_forall_info, &block);
else
{
/* Use the normal assignment copying routines. */
- assign = gfc_trans_assignment (c->expr, c->expr2);
-
- /* Reset the mask index. */
- if (mask)
- gfc_add_modify_expr (&block, maskindex, integer_zero_node);
+ assign = gfc_trans_assignment (c->expr, c->expr2, false);
/* Generate body and loops. */
- tmp = gfc_trans_nested_forall_loop (nested_forall_info, assign, 1, 1);
+ tmp = gfc_trans_nested_forall_loop (nested_forall_info,
+ assign, 1);
gfc_add_expr_to_block (&block, tmp);
}
break;
case EXEC_WHERE:
-
/* Translate WHERE or WHERE construct nested in FORALL. */
- temp = NULL;
- gfc_trans_where_2 (c, NULL, NULL, nested_forall_info, &block, &temp);
-
- while (temp)
- {
- tree args;
- temporary_list *p;
-
- /* Free the temporary. */
- args = gfc_chainon_list (NULL_TREE, temp->temporary);
- tmp = gfc_build_function_call (gfor_fndecl_internal_free, args);
- gfc_add_expr_to_block (&block, tmp);
-
- p = temp;
- temp = temp->next;
- gfc_free (p);
- }
-
- break;
+ gfc_trans_where_2 (c, NULL, false, nested_forall_info, &block);
+ break;
/* Pointer assignment inside FORALL. */
case EXEC_POINTER_ASSIGN:
- need_temp = gfc_check_dependency (c->expr, c->expr2, varexpr, nvar);
+ need_temp = gfc_check_dependency (c->expr, c->expr2, 0);
if (need_temp)
gfc_trans_pointer_assign_need_temp (c->expr, c->expr2,
nested_forall_info, &block);
/* Use the normal assignment copying routines. */
assign = gfc_trans_pointer_assignment (c->expr, c->expr2);
- /* Reset the mask index. */
- if (mask)
- gfc_add_modify_expr (&block, maskindex, integer_zero_node);
-
/* Generate body and loops. */
- tmp = gfc_trans_nested_forall_loop (nested_forall_info, assign,
- 1, 1);
+ tmp = gfc_trans_nested_forall_loop (nested_forall_info,
+ assign, 1);
gfc_add_expr_to_block (&block, tmp);
}
break;
gfc_add_expr_to_block (&block, tmp);
break;
+ /* Explicit subroutine calls are prevented by the frontend but interface
+ assignments can legitimately produce them. */
+ case EXEC_ASSIGN_CALL:
+ assign = gfc_trans_call (c, true);
+ tmp = gfc_trans_nested_forall_loop (nested_forall_info, assign, 1);
+ gfc_add_expr_to_block (&block, tmp);
+ break;
+
default:
- abort ();
- break;
+ gcc_unreachable ();
}
c = c->next;
}
- /* Restore the index original backend_decl and the attribute. */
- for (fa = code->ext.forall_iterator, n=0; fa; fa = fa->next, n++)
- {
- gfc_symbol *sym = fa->var->symtree->n.sym;
- sym->backend_decl = saved_var_decl[n];
- sym->attr = saved_var_attr[n];
- }
+ /* Restore the original index variables. */
+ for (fa = code->ext.forall_iterator, n = 0; fa; fa = fa->next, n++)
+ gfc_restore_sym (fa->var->symtree->n.sym, &saved_vars[n]);
/* Free the space for var, start, end, step, varexpr. */
gfc_free (var);
gfc_free (end);
gfc_free (step);
gfc_free (varexpr);
- gfc_free (saved_var_decl);
- gfc_free (saved_var_attr);
+ gfc_free (saved_vars);
+
+ /* Free the space for this forall_info. */
+ gfc_free (info);
if (pmask)
{
/* Free the temporary for the mask. */
tmp = gfc_chainon_list (NULL_TREE, pmask);
- tmp = gfc_build_function_call (gfor_fndecl_internal_free, tmp);
+ tmp = build_function_call_expr (gfor_fndecl_internal_free, tmp);
gfc_add_expr_to_block (&block, tmp);
}
if (maskindex)
needed by the WHERE mask expression multiplied by the iterator number of
the nested forall.
ME is the WHERE mask expression.
- MASK is the temporary which value is mask's value.
- NMASK is another temporary which value is !mask.
- TEMP records the temporary's address allocated in this function in order to
- free them outside this function.
- MASK, NMASK and TEMP are all OUT arguments. */
+ MASK is the current execution mask upon input, whose sense may or may
+ not be inverted as specified by the INVERT argument.
+ CMASK is the updated execution mask on output, or NULL if not required.
+ PMASK is the pending execution mask on output, or NULL if not required.
+ BLOCK is the block in which to place the condition evaluation loops. */
-static tree
+static void
gfc_evaluate_where_mask (gfc_expr * me, forall_info * nested_forall_info,
- tree * mask, tree * nmask, temporary_list ** temp,
- stmtblock_t * block)
+ tree mask, bool invert, tree cmask, tree pmask,
+ tree mask_type, stmtblock_t * block)
{
tree tmp, tmp1;
gfc_ss *lss, *rss;
gfc_loopinfo loop;
- tree ptemp1, ntmp, ptemp2;
- tree inner_size;
stmtblock_t body, body1;
+ tree count, cond, mtmp;
gfc_se lse, rse;
- tree count;
- tree tmpexpr;
gfc_init_loopinfo (&loop);
- /* Calculate the size of temporary needed by the mask-expr. */
- inner_size = compute_inner_temp_size (me, me, block, &lss, &rss);
-
- /* Allocate temporary for where mask. */
- tmp = allocate_temp_for_forall_nest (nested_forall_info, boolean_type_node,
- inner_size, block, &ptemp1);
- /* Record the temporary address in order to free it later. */
- if (ptemp1)
- {
- temporary_list *tempo;
- tempo = (temporary_list *) gfc_getmem (sizeof (temporary_list));
- tempo->temporary = ptemp1;
- tempo->next = *temp;
- *temp = tempo;
- }
-
- /* Allocate temporary for !mask. */
- ntmp = allocate_temp_for_forall_nest (nested_forall_info, boolean_type_node,
- inner_size, block, &ptemp2);
- /* Record the temporary in order to free it later. */
- if (ptemp2)
- {
- temporary_list *tempo;
- tempo = (temporary_list *) gfc_getmem (sizeof (temporary_list));
- tempo->temporary = ptemp2;
- tempo->next = *temp;
- *temp = tempo;
- }
+ lss = gfc_walk_expr (me);
+ rss = gfc_walk_expr (me);
/* Variable to index the temporary. */
count = gfc_create_var (gfc_array_index_type, "count");
- /* Initilize count. */
- gfc_add_modify_expr (block, count, integer_zero_node);
+ /* Initialize count. */
+ gfc_add_modify_expr (block, count, gfc_index_zero_node);
gfc_start_block (&body);
}
else
{
- /* Initiliaze the loop. */
+ /* Initialize the loop. */
gfc_init_loopinfo (&loop);
/* We may need LSS to determine the shape of the expression. */
rse.ss = rss;
gfc_conv_expr (&rse, me);
}
- /* Form the expression of the temporary. */
- lse.expr = gfc_build_array_ref (tmp, count);
- tmpexpr = gfc_build_array_ref (ntmp, count);
- /* Use the scalar assignment to fill temporary TMP. */
- tmp1 = gfc_trans_scalar_assign (&lse, &rse, me->ts.type);
- gfc_add_expr_to_block (&body1, tmp1);
+ /* Variable to evaluate mask condition. */
+ cond = gfc_create_var (mask_type, "cond");
+ if (mask && (cmask || pmask))
+ mtmp = gfc_create_var (mask_type, "mask");
+ else mtmp = NULL_TREE;
+
+ gfc_add_block_to_block (&body1, &lse.pre);
+ gfc_add_block_to_block (&body1, &rse.pre);
+
+ gfc_add_modify_expr (&body1, cond, fold_convert (mask_type, rse.expr));
- /* Fill temporary NTMP. */
- tmp1 = build1 (TRUTH_NOT_EXPR, TREE_TYPE (lse.expr), lse.expr);
- gfc_add_modify_expr (&body1, tmpexpr, tmp1);
+ if (mask && (cmask || pmask))
+ {
+ tmp = gfc_build_array_ref (mask, count);
+ if (invert)
+ tmp = fold_build1 (TRUTH_NOT_EXPR, mask_type, tmp);
+ gfc_add_modify_expr (&body1, mtmp, tmp);
+ }
- if (lss == gfc_ss_terminator)
+ if (cmask)
+ {
+ tmp1 = gfc_build_array_ref (cmask, count);
+ tmp = cond;
+ if (mask)
+ tmp = build2 (TRUTH_AND_EXPR, mask_type, mtmp, tmp);
+ gfc_add_modify_expr (&body1, tmp1, tmp);
+ }
+
+ if (pmask)
+ {
+ tmp1 = gfc_build_array_ref (pmask, count);
+ tmp = build1 (TRUTH_NOT_EXPR, mask_type, cond);
+ if (mask)
+ tmp = build2 (TRUTH_AND_EXPR, mask_type, mtmp, tmp);
+ gfc_add_modify_expr (&body1, tmp1, tmp);
+ }
+
+ gfc_add_block_to_block (&body1, &lse.post);
+ gfc_add_block_to_block (&body1, &rse.post);
+
+ if (lss == gfc_ss_terminator)
{
gfc_add_block_to_block (&body, &body1);
}
else
{
/* Increment count. */
- tmp1 = fold (build (PLUS_EXPR, gfc_array_index_type, count,
- integer_one_node));
+ tmp1 = fold_build2 (PLUS_EXPR, gfc_array_index_type, count,
+ gfc_index_one_node);
gfc_add_modify_expr (&body1, count, tmp1);
/* Generate the copying loops. */
tmp1 = gfc_finish_block (&body);
/* If the WHERE construct is inside FORALL, fill the full temporary. */
if (nested_forall_info != NULL)
- tmp1 = gfc_trans_nested_forall_loop (nested_forall_info, tmp1, 1, 1);
-
+ tmp1 = gfc_trans_nested_forall_loop (nested_forall_info, tmp1, 1);
gfc_add_expr_to_block (block, tmp1);
-
- *mask = tmp;
- *nmask = ntmp;
-
- return tmp1;
}
/* Translate an assignment statement in a WHERE statement or construct
statement. The MASK expression is used to control which elements
- of EXPR1 shall be assigned. */
+ of EXPR1 shall be assigned. The sense of MASK is specified by
+ INVERT. */
static tree
-gfc_trans_where_assign (gfc_expr *expr1, gfc_expr *expr2, tree mask,
- tree count1, tree count2)
+gfc_trans_where_assign (gfc_expr *expr1, gfc_expr *expr2,
+ tree mask, bool invert,
+ tree count1, tree count2,
+ gfc_symbol *sym)
{
gfc_se lse;
gfc_se rse;
tree tmp;
stmtblock_t block;
stmtblock_t body;
- tree index, maskexpr, tmp1;
+ tree index, maskexpr;
#if 0
/* TODO: handle this special case.
/* In each where-assign-stmt, the mask-expr and the variable being
defined shall be arrays of the same shape. */
- assert (lss != gfc_ss_terminator);
+ gcc_assert (lss != gfc_ss_terminator);
/* The assignment needs scalarization. */
lss_section = lss;
&& lss_section->type != GFC_SS_SECTION)
lss_section = lss_section->next;
- assert (lss_section != gfc_ss_terminator);
+ gcc_assert (lss_section != gfc_ss_terminator);
/* Initialize the scalarizer. */
gfc_init_loopinfo (&loop);
else
gfc_conv_expr (&lse, expr1);
- /* Form the mask expression according to the mask tree list. */
+ /* Form the mask expression according to the mask. */
index = count1;
- tmp = mask;
- if (tmp != NULL)
- maskexpr = gfc_build_array_ref (tmp, index);
- else
- maskexpr = NULL;
+ maskexpr = gfc_build_array_ref (mask, index);
+ if (invert)
+ maskexpr = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (maskexpr), maskexpr);
- tmp = TREE_CHAIN (tmp);
- while (tmp)
- {
- tmp1 = gfc_build_array_ref (tmp, index);
- maskexpr = build (TRUTH_AND_EXPR, TREE_TYPE (tmp1), maskexpr, tmp1);
- tmp = TREE_CHAIN (tmp);
- }
/* Use the scalar assignment as is. */
- tmp = gfc_trans_scalar_assign (&lse, &rse, expr1->ts.type);
- tmp = build_v (COND_EXPR, maskexpr, tmp, build_empty_stmt ());
+ if (sym == NULL)
+ tmp = gfc_trans_scalar_assign (&lse, &rse, expr1->ts,
+ loop.temp_ss != NULL, false);
+ else
+ tmp = gfc_conv_operator_assign (&lse, &rse, sym);
+
+ tmp = build3_v (COND_EXPR, maskexpr, tmp, build_empty_stmt ());
gfc_add_expr_to_block (&body, tmp);
if (lss == gfc_ss_terminator)
{
/* Increment count1. */
- tmp = fold (build (PLUS_EXPR, TREE_TYPE (count1), count1,
- integer_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count1, gfc_index_one_node);
gfc_add_modify_expr (&body, count1, tmp);
/* Use the scalar assignment as is. */
}
else
{
- if (lse.ss != gfc_ss_terminator)
- abort ();
- if (rse.ss != gfc_ss_terminator)
- abort ();
+ gcc_assert (lse.ss == gfc_ss_terminator
+ && rse.ss == gfc_ss_terminator);
if (loop.temp_ss != NULL)
{
/* Increment count1 before finish the main body of a scalarized
expression. */
- tmp = fold (build (PLUS_EXPR, TREE_TYPE (count1), count1,
- integer_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count1, gfc_index_one_node);
gfc_add_modify_expr (&body, count1, tmp);
gfc_trans_scalarized_loop_boundary (&loop, &body);
gfc_advance_se_ss_chain (&rse);
gfc_conv_expr (&lse, expr1);
- if (lse.ss != gfc_ss_terminator)
- abort ();
-
- if (rse.ss != gfc_ss_terminator)
- abort ();
+ gcc_assert (lse.ss == gfc_ss_terminator
+ && rse.ss == gfc_ss_terminator);
/* Form the mask expression according to the mask tree list. */
index = count2;
- tmp = mask;
- if (tmp != NULL)
- maskexpr = gfc_build_array_ref (tmp, index);
- else
- maskexpr = NULL;
+ maskexpr = gfc_build_array_ref (mask, index);
+ if (invert)
+ maskexpr = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (maskexpr),
+ maskexpr);
- tmp = TREE_CHAIN (tmp);
- while (tmp)
- {
- tmp1 = gfc_build_array_ref (tmp, index);
- maskexpr = build (TRUTH_AND_EXPR, TREE_TYPE (tmp1), maskexpr,
- tmp1);
- tmp = TREE_CHAIN (tmp);
- }
/* Use the scalar assignment as is. */
- tmp = gfc_trans_scalar_assign (&lse, &rse, expr1->ts.type);
- tmp = build_v (COND_EXPR, maskexpr, tmp, build_empty_stmt ());
+ tmp = gfc_trans_scalar_assign (&lse, &rse, expr1->ts, false, false);
+ tmp = build3_v (COND_EXPR, maskexpr, tmp, build_empty_stmt ());
gfc_add_expr_to_block (&body, tmp);
+
/* Increment count2. */
- tmp = fold (build (PLUS_EXPR, TREE_TYPE (count2), count2,
- integer_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count2, gfc_index_one_node);
gfc_add_modify_expr (&body, count2, tmp);
}
else
{
/* Increment count1. */
- tmp = fold (build (PLUS_EXPR, TREE_TYPE (count1), count1,
- integer_one_node));
+ tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
+ count1, gfc_index_one_node);
gfc_add_modify_expr (&body, count1, tmp);
}
/* Translate the WHERE construct or statement.
- This fuction can be called iteratelly to translate the nested WHERE
+ This function can be called iteratively to translate the nested WHERE
construct or statement.
- MASK is the control mask, and PMASK is the pending control mask.
- TEMP records the temporary address which must be freed later. */
+ MASK is the control mask. */
static void
-gfc_trans_where_2 (gfc_code * code, tree mask, tree pmask,
- forall_info * nested_forall_info, stmtblock_t * block,
- temporary_list ** temp)
+gfc_trans_where_2 (gfc_code * code, tree mask, bool invert,
+ forall_info * nested_forall_info, stmtblock_t * block)
{
+ stmtblock_t inner_size_body;
+ tree inner_size, size;
+ gfc_ss *lss, *rss;
+ tree mask_type;
gfc_expr *expr1;
gfc_expr *expr2;
gfc_code *cblock;
gfc_code *cnext;
- tree tmp, tmp1, tmp2;
+ tree tmp;
tree count1, count2;
- tree mask_copy;
+ bool need_cmask;
+ bool need_pmask;
int need_temp;
+ tree pcmask = NULL_TREE;
+ tree ppmask = NULL_TREE;
+ tree cmask = NULL_TREE;
+ tree pmask = NULL_TREE;
+ gfc_actual_arglist *arg;
/* the WHERE statement or the WHERE construct statement. */
cblock = code->block;
+
+ /* As the mask array can be very big, prefer compact boolean types. */
+ mask_type = gfc_get_logical_type (gfc_logical_kinds[0].kind);
+
+ /* Determine which temporary masks are needed. */
+ if (!cblock->block)
+ {
+ /* One clause: No ELSEWHEREs. */
+ need_cmask = (cblock->next != 0);
+ need_pmask = false;
+ }
+ else if (cblock->block->block)
+ {
+ /* Three or more clauses: Conditional ELSEWHEREs. */
+ need_cmask = true;
+ need_pmask = true;
+ }
+ else if (cblock->next)
+ {
+ /* Two clauses, the first non-empty. */
+ need_cmask = true;
+ need_pmask = (mask != NULL_TREE
+ && cblock->block->next != 0);
+ }
+ else if (!cblock->block->next)
+ {
+ /* Two clauses, both empty. */
+ need_cmask = false;
+ need_pmask = false;
+ }
+ /* Two clauses, the first empty, the second non-empty. */
+ else if (mask)
+ {
+ need_cmask = (cblock->block->expr != 0);
+ need_pmask = true;
+ }
+ else
+ {
+ need_cmask = true;
+ need_pmask = false;
+ }
+
+ if (need_cmask || need_pmask)
+ {
+ /* Calculate the size of temporary needed by the mask-expr. */
+ gfc_init_block (&inner_size_body);
+ inner_size = compute_inner_temp_size (cblock->expr, cblock->expr,
+ &inner_size_body, &lss, &rss);
+
+ /* Calculate the total size of temporary needed. */
+ size = compute_overall_iter_number (nested_forall_info, inner_size,
+ &inner_size_body, block);
+
+ /* Allocate temporary for WHERE mask if needed. */
+ if (need_cmask)
+ cmask = allocate_temp_for_forall_nest_1 (mask_type, size, block,
+ &pcmask);
+
+ /* Allocate temporary for !mask if needed. */
+ if (need_pmask)
+ pmask = allocate_temp_for_forall_nest_1 (mask_type, size, block,
+ &ppmask);
+ }
+
while (cblock)
{
+ /* Each time around this loop, the where clause is conditional
+ on the value of mask and invert, which are updated at the
+ bottom of the loop. */
+
/* Has mask-expr. */
if (cblock->expr)
{
- /* Ensure that the WHERE mask be evaluated only once. */
- tmp2 = gfc_evaluate_where_mask (cblock->expr, nested_forall_info,
- &tmp, &tmp1, temp, block);
-
- /* Set the control mask and the pending control mask. */
- /* It's a where-stmt. */
- if (mask == NULL)
- {
- mask = tmp;
- pmask = tmp1;
- }
- /* It's a nested where-stmt. */
- else if (mask && pmask == NULL)
- {
- tree tmp2;
- /* Use the TREE_CHAIN to list the masks. */
- tmp2 = copy_list (mask);
- pmask = chainon (mask, tmp1);
- mask = chainon (tmp2, tmp);
- }
- /* It's a masked-elsewhere-stmt. */
- else if (mask && cblock->expr)
- {
- tree tmp2;
- tmp2 = copy_list (pmask);
+ /* Ensure that the WHERE mask will be evaluated exactly once.
+ If there are no statements in this WHERE/ELSEWHERE clause,
+ then we don't need to update the control mask (cmask).
+ If this is the last clause of the WHERE construct, then
+ we don't need to update the pending control mask (pmask). */
+ if (mask)
+ gfc_evaluate_where_mask (cblock->expr, nested_forall_info,
+ mask, invert,
+ cblock->next ? cmask : NULL_TREE,
+ cblock->block ? pmask : NULL_TREE,
+ mask_type, block);
+ else
+ gfc_evaluate_where_mask (cblock->expr, nested_forall_info,
+ NULL_TREE, false,
+ (cblock->next || cblock->block)
+ ? cmask : NULL_TREE,
+ NULL_TREE, mask_type, block);
- mask = pmask;
- tmp2 = chainon (tmp2, tmp);
- pmask = chainon (mask, tmp1);
- mask = tmp2;
- }
+ invert = false;
}
- /* It's a elsewhere-stmt. No mask-expr is present. */
+ /* It's a final elsewhere-stmt. No mask-expr is present. */
else
- mask = pmask;
+ cmask = mask;
+
+ /* The body of this where clause are controlled by cmask with
+ sense specified by invert. */
/* Get the assignment statement of a WHERE statement, or the first
statement in where-body-construct of a WHERE construct. */
switch (cnext->op)
{
/* WHERE assignment statement. */
+ case EXEC_ASSIGN_CALL:
+
+ arg = cnext->ext.actual;
+ expr1 = expr2 = NULL;
+ for (; arg; arg = arg->next)
+ {
+ if (!arg->expr)
+ continue;
+ if (expr1 == NULL)
+ expr1 = arg->expr;
+ else
+ expr2 = arg->expr;
+ }
+ goto evaluate;
+
case EXEC_ASSIGN:
expr1 = cnext->expr;
expr2 = cnext->expr2;
+ evaluate:
if (nested_forall_info != NULL)
{
- int nvar;
- gfc_expr **varexpr;
-
- nvar = nested_forall_info->nvar;
- varexpr = (gfc_expr **)
- gfc_getmem (nvar * sizeof (gfc_expr *));
- need_temp = gfc_check_dependency (expr1, expr2, varexpr,
- nvar);
- if (need_temp)
- gfc_trans_assign_need_temp (expr1, expr2, mask,
+ need_temp = gfc_check_dependency (expr1, expr2, 0);
+ if (need_temp && cnext->op != EXEC_ASSIGN_CALL)
+ gfc_trans_assign_need_temp (expr1, expr2,
+ cmask, invert,
nested_forall_info, block);
else
{
/* Variables to control maskexpr. */
count1 = gfc_create_var (gfc_array_index_type, "count1");
count2 = gfc_create_var (gfc_array_index_type, "count2");
- gfc_add_modify_expr (block, count1, integer_zero_node);
- gfc_add_modify_expr (block, count2, integer_zero_node);
+ gfc_add_modify_expr (block, count1, gfc_index_zero_node);
+ gfc_add_modify_expr (block, count2, gfc_index_zero_node);
+
+ tmp = gfc_trans_where_assign (expr1, expr2,
+ cmask, invert,
+ count1, count2,
+ cnext->resolved_sym);
- tmp = gfc_trans_where_assign (expr1, expr2, mask, count1,
- count2);
tmp = gfc_trans_nested_forall_loop (nested_forall_info,
- tmp, 1, 1);
+ tmp, 1);
gfc_add_expr_to_block (block, tmp);
}
}
/* Variables to control maskexpr. */
count1 = gfc_create_var (gfc_array_index_type, "count1");
count2 = gfc_create_var (gfc_array_index_type, "count2");
- gfc_add_modify_expr (block, count1, integer_zero_node);
- gfc_add_modify_expr (block, count2, integer_zero_node);
+ gfc_add_modify_expr (block, count1, gfc_index_zero_node);
+ gfc_add_modify_expr (block, count2, gfc_index_zero_node);
- tmp = gfc_trans_where_assign (expr1, expr2, mask, count1,
- count2);
+ tmp = gfc_trans_where_assign (expr1, expr2,
+ cmask, invert,
+ count1, count2,
+ cnext->resolved_sym);
gfc_add_expr_to_block (block, tmp);
}
/* WHERE or WHERE construct is part of a where-body-construct. */
case EXEC_WHERE:
- /* Ensure that MASK is not modified by next gfc_trans_where_2. */
- mask_copy = copy_list (mask);
- gfc_trans_where_2 (cnext, mask_copy, NULL, nested_forall_info,
- block, temp);
- break;
+ gfc_trans_where_2 (cnext, cmask, invert,
+ nested_forall_info, block);
+ break;
default:
- abort ();
+ gcc_unreachable ();
}
/* The next statement within the same where-body-construct. */
}
/* The next masked-elsewhere-stmt, elsewhere-stmt, or end-where-stmt. */
cblock = cblock->block;
+ if (mask == NULL_TREE)
+ {
+ /* If we're the initial WHERE, we can simply invert the sense
+ of the current mask to obtain the "mask" for the remaining
+ ELSEWHEREs. */
+ invert = true;
+ mask = cmask;
+ }
+ else
+ {
+ /* Otherwise, for nested WHERE's we need to use the pending mask. */
+ invert = false;
+ mask = pmask;
+ }
}
+
+ /* If we allocated a pending mask array, deallocate it now. */
+ if (ppmask)
+ {
+ tree args = gfc_chainon_list (NULL_TREE, ppmask);
+ tmp = build_function_call_expr (gfor_fndecl_internal_free, args);
+ gfc_add_expr_to_block (block, tmp);
+ }
+
+ /* If we allocated a current mask array, deallocate it now. */
+ if (pcmask)
+ {
+ tree args = gfc_chainon_list (NULL_TREE, pcmask);
+ tmp = build_function_call_expr (gfor_fndecl_internal_free, args);
+ gfc_add_expr_to_block (block, tmp);
+ }
}
+/* Translate a simple WHERE construct or statement without dependencies.
+ CBLOCK is the "then" clause of the WHERE statement, where CBLOCK->EXPR
+ is the mask condition, and EBLOCK if non-NULL is the "else" clause.
+ Currently both CBLOCK and EBLOCK are restricted to single assignments. */
+
+static tree
+gfc_trans_where_3 (gfc_code * cblock, gfc_code * eblock)
+{
+ stmtblock_t block, body;
+ gfc_expr *cond, *tdst, *tsrc, *edst, *esrc;
+ tree tmp, cexpr, tstmt, estmt;
+ gfc_ss *css, *tdss, *tsss;
+ gfc_se cse, tdse, tsse, edse, esse;
+ gfc_loopinfo loop;
+ gfc_ss *edss = 0;
+ gfc_ss *esss = 0;
+
+ cond = cblock->expr;
+ tdst = cblock->next->expr;
+ tsrc = cblock->next->expr2;
+ edst = eblock ? eblock->next->expr : NULL;
+ esrc = eblock ? eblock->next->expr2 : NULL;
+
+ gfc_start_block (&block);
+ gfc_init_loopinfo (&loop);
+
+ /* Handle the condition. */
+ gfc_init_se (&cse, NULL);
+ css = gfc_walk_expr (cond);
+ gfc_add_ss_to_loop (&loop, css);
+
+ /* Handle the then-clause. */
+ gfc_init_se (&tdse, NULL);
+ gfc_init_se (&tsse, NULL);
+ tdss = gfc_walk_expr (tdst);
+ tsss = gfc_walk_expr (tsrc);
+ /* A scalar rhs produces no SS chain; wrap it in a GFC_SS_SCALAR node
+ so it can participate in the scalarization loop alongside the lhs. */
+ if (tsss == gfc_ss_terminator)
+ {
+ tsss = gfc_get_ss ();
+ tsss->next = gfc_ss_terminator;
+ tsss->type = GFC_SS_SCALAR;
+ tsss->expr = tsrc;
+ }
+ gfc_add_ss_to_loop (&loop, tdss);
+ gfc_add_ss_to_loop (&loop, tsss);
+
+ if (eblock)
+ {
+ /* Handle the else clause. */
+ gfc_init_se (&edse, NULL);
+ gfc_init_se (&esse, NULL);
+ edss = gfc_walk_expr (edst);
+ esss = gfc_walk_expr (esrc);
+ /* Same scalar-rhs wrapping as for the then-clause above. */
+ if (esss == gfc_ss_terminator)
+ {
+ esss = gfc_get_ss ();
+ esss->next = gfc_ss_terminator;
+ esss->type = GFC_SS_SCALAR;
+ esss->expr = esrc;
+ }
+ gfc_add_ss_to_loop (&loop, edss);
+ gfc_add_ss_to_loop (&loop, esss);
+ }
+
+ gfc_conv_ss_startstride (&loop);
+ gfc_conv_loop_setup (&loop);
+
+ /* Mark every SS chain as used in the scalarized loop body. */
+ gfc_mark_ss_chain_used (css, 1);
+ gfc_mark_ss_chain_used (tdss, 1);
+ gfc_mark_ss_chain_used (tsss, 1);
+ if (eblock)
+ {
+ gfc_mark_ss_chain_used (edss, 1);
+ gfc_mark_ss_chain_used (esss, 1);
+ }
+
+ gfc_start_scalarized_body (&loop, &body);
+
+ gfc_copy_loopinfo_to_se (&cse, &loop);
+ gfc_copy_loopinfo_to_se (&tdse, &loop);
+ gfc_copy_loopinfo_to_se (&tsse, &loop);
+ cse.ss = css;
+ tdse.ss = tdss;
+ tsse.ss = tsss;
+ if (eblock)
+ {
+ gfc_copy_loopinfo_to_se (&edse, &loop);
+ gfc_copy_loopinfo_to_se (&esse, &loop);
+ edse.ss = edss;
+ esse.ss = esss;
+ }
+
+ gfc_conv_expr (&cse, cond);
+ gfc_add_block_to_block (&body, &cse.pre);
+ cexpr = cse.expr;
+
+ gfc_conv_expr (&tsse, tsrc);
+ /* If the loop needed a temporary for the destination, reference it;
+ otherwise convert the destination expression directly. */
+ if (tdss != gfc_ss_terminator && loop.temp_ss != NULL)
+ {
+ gfc_conv_tmp_array_ref (&tdse);
+ gfc_advance_se_ss_chain (&tdse);
+ }
+ else
+ gfc_conv_expr (&tdse, tdst);
+
+ if (eblock)
+ {
+ gfc_conv_expr (&esse, esrc);
+ if (edss != gfc_ss_terminator && loop.temp_ss != NULL)
+ {
+ gfc_conv_tmp_array_ref (&edse);
+ gfc_advance_se_ss_chain (&edse);
+ }
+ else
+ gfc_conv_expr (&edse, edst);
+ }
+
+ /* Emit the element-wise conditional assignment:
+ if (cond) then-assignment; else else-assignment (or nothing). */
+ tstmt = gfc_trans_scalar_assign (&tdse, &tsse, tdst->ts, false, false);
+ estmt = eblock ? gfc_trans_scalar_assign (&edse, &esse, edst->ts, false, false)
+ : build_empty_stmt ();
+ tmp = build3_v (COND_EXPR, cexpr, tstmt, estmt);
+ gfc_add_expr_to_block (&body, tmp);
+ gfc_add_block_to_block (&body, &cse.post);
+
+ gfc_trans_scalarizing_loops (&loop, &body);
+ gfc_add_block_to_block (&block, &loop.pre);
+ gfc_add_block_to_block (&block, &loop.post);
+ gfc_cleanup_loop (&loop);
+
+ return gfc_finish_block (&block);
+}
/* As the WHERE or WHERE construct statement can be nested, we call
gfc_trans_where_2 to do the translation, and pass the initial
- NULL values for both the control mask and the pending control mask. */
+ NULL values for both the control mask and the pending control mask. */
tree
gfc_trans_where (gfc_code * code)
{
stmtblock_t block;
- temporary_list *temp, *p;
- tree args;
- tree tmp;
+ gfc_code *cblock;
+ gfc_code *eblock;
- gfc_start_block (&block);
- temp = NULL;
+ cblock = code->block;
+ if (cblock->next
+ && cblock->next->op == EXEC_ASSIGN
+ && !cblock->next->next)
+ {
+ eblock = cblock->block;
+ if (!eblock)
+ {
+ /* A simple "WHERE (cond) x = y" statement or block is
+ dependence free if cond is not dependent upon writing x,
+ and the source y is unaffected by the destination x. */
+ if (!gfc_check_dependency (cblock->next->expr,
+ cblock->expr, 0)
+ && !gfc_check_dependency (cblock->next->expr,
+ cblock->next->expr2, 0))
+ return gfc_trans_where_3 (cblock, NULL);
+ }
+ else if (!eblock->expr
+ && !eblock->block
+ && eblock->next
+ && eblock->next->op == EXEC_ASSIGN
+ && !eblock->next->next)
+ {
+ /* A simple "WHERE (cond) x1 = y1 ELSEWHERE x2 = y2 ENDWHERE"
+ block is dependence free if cond is not dependent on writes
+ to x1 and x2, y1 is not dependent on writes to x2, and y2
+ is not dependent on writes to x1, and both y's are not
+ dependent upon their own x's. */
+ if (!gfc_check_dependency(cblock->next->expr,
+ cblock->expr, 0)
+ && !gfc_check_dependency(eblock->next->expr,
+ cblock->expr, 0)
+ && !gfc_check_dependency(cblock->next->expr,
+ eblock->next->expr2, 0)
+ && !gfc_check_dependency(eblock->next->expr,
+ cblock->next->expr2, 0)
+ && !gfc_check_dependency(cblock->next->expr,
+ cblock->next->expr2, 0)
+ && !gfc_check_dependency(eblock->next->expr,
+ eblock->next->expr2, 0))
+ return gfc_trans_where_3 (cblock, eblock);
+ }
+ }
- gfc_trans_where_2 (code, NULL, NULL, NULL, &block, &temp);
+ gfc_start_block (&block);
- /* Add calls to free temporaries which were dynamically allocated. */
- while (temp)
- {
- args = gfc_chainon_list (NULL_TREE, temp->temporary);
- tmp = gfc_build_function_call (gfor_fndecl_internal_free, args);
- gfc_add_expr_to_block (&block, tmp);
+ gfc_trans_where_2 (code, NULL, false, NULL, &block);
- p = temp;
- temp = temp->next;
- gfc_free (p);
- }
return gfc_finish_block (&block);
}
}
-/* EXIT a DO loop. Similair to CYCLE, but now the label is in
+/* EXIT a DO loop. Similar to CYCLE, but now the label is in
TREE_VALUE (backend_decl) of the gfc_code node at the head of the
loop. */
gfc_se se;
tree tmp;
tree parm;
- gfc_ref *ref;
tree stat;
tree pstat;
tree error_label;
if (code->expr)
{
+ tree gfc_int4_type_node = gfc_get_int_type (4);
+
stat = gfc_create_var (gfc_int4_type_node, "stat");
- pstat = gfc_build_addr_expr (NULL, stat);
+ pstat = build_fold_addr_expr (stat);
error_label = gfc_build_label_decl (NULL_TREE);
TREE_USED (error_label) = 1;
se.descriptor_only = 1;
gfc_conv_expr (&se, expr);
- ref = expr->ref;
-
- /* Find the last reference in the chain. */
- while (ref && ref->next != NULL)
- {
- assert (ref->type != REF_ARRAY || ref->u.ar.type == AR_ELEMENT);
- ref = ref->next;
- }
-
- if (ref != NULL && ref->type == REF_ARRAY)
- {
- /* An array. */
- gfc_array_allocate (&se, ref, pstat);
- }
- else
+ if (!gfc_array_allocate (&se, expr, pstat))
{
/* A scalar or derived type. */
- tree val;
+ tmp = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (se.expr)));
- val = gfc_create_var (ppvoid_type_node, "ptr");
- tmp = gfc_build_addr_expr (ppvoid_type_node, se.expr);
- gfc_add_modify_expr (&se.pre, val, tmp);
+ if (expr->ts.type == BT_CHARACTER && tmp == NULL_TREE)
+ tmp = se.string_length;
- tmp = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (se.expr)));
- parm = gfc_chainon_list (NULL_TREE, val);
- parm = gfc_chainon_list (parm, tmp);
+ parm = gfc_chainon_list (NULL_TREE, tmp);
parm = gfc_chainon_list (parm, pstat);
- tmp = gfc_build_function_call (gfor_fndecl_allocate, parm);
+ tmp = build_function_call_expr (gfor_fndecl_allocate, parm);
+ tmp = build2 (MODIFY_EXPR, void_type_node, se.expr, tmp);
gfc_add_expr_to_block (&se.pre, tmp);
if (code->expr)
{
tmp = build1_v (GOTO_EXPR, error_label);
- parm =
- build (NE_EXPR, boolean_type_node, stat, integer_zero_node);
- tmp = build_v (COND_EXPR, parm, tmp, build_empty_stmt ());
+ parm = fold_build2 (NE_EXPR, boolean_type_node,
+ stat, build_int_cst (TREE_TYPE (stat), 0));
+ tmp = fold_build3 (COND_EXPR, void_type_node,
+ parm, tmp, build_empty_stmt ());
+ gfc_add_expr_to_block (&se.pre, tmp);
+ }
+
+ if (expr->ts.type == BT_DERIVED && expr->ts.derived->attr.alloc_comp)
+ {
+ tmp = build_fold_indirect_ref (se.expr);
+ tmp = gfc_nullify_alloc_comp (expr->ts.derived, tmp, 0);
gfc_add_expr_to_block (&se.pre, tmp);
}
+
}
tmp = gfc_finish_block (&se.pre);
}
+/* Translate a DEALLOCATE statement.
+ There are two cases within the for loop:
+ (1) deallocate(a1, a2, a3) is translated into the following sequence
+ _gfortran_deallocate(a1, 0B)
+ _gfortran_deallocate(a2, 0B)
+ _gfortran_deallocate(a3, 0B)
+ where the STAT= variable is passed a NULL pointer.
 + (2) deallocate(a1, a2, a3, stat=i) is translated into the following sequence
+ astat = 0
+ _gfortran_deallocate(a1, &stat)
+ astat = astat + stat
+ _gfortran_deallocate(a2, &stat)
+ astat = astat + stat
+ _gfortran_deallocate(a3, &stat)
+ astat = astat + stat
+ In case (1), we simply return at the end of the for loop. In case (2)
+ we set STAT= astat. */
tree
gfc_trans_deallocate (gfc_code * code)
{
gfc_se se;
gfc_alloc *al;
gfc_expr *expr;
- tree var;
- tree tmp;
- tree type;
+ tree apstat, astat, parm, pstat, stat, tmp;
stmtblock_t block;
gfc_start_block (&block);
 + /* Set up the optional STAT= variable.  */
+ if (code->expr)
+ {
+ tree gfc_int4_type_node = gfc_get_int_type (4);
+
+ /* Variable used with the library call. */
+ stat = gfc_create_var (gfc_int4_type_node, "stat");
+ pstat = build_fold_addr_expr (stat);
+
+ /* Running total of possible deallocation failures. */
+ astat = gfc_create_var (gfc_int4_type_node, "astat");
+ apstat = build_fold_addr_expr (astat);
+
+ /* Initialize astat to 0. */
+ gfc_add_modify_expr (&block, astat, build_int_cst (TREE_TYPE (astat), 0));
+ }
+ else
+ {
+ pstat = apstat = null_pointer_node;
+ stat = astat = NULL_TREE;
+ }
+
for (al = code->ext.alloc_list; al != NULL; al = al->next)
{
expr = al->expr;
- assert (expr->expr_type == EXPR_VARIABLE);
+ gcc_assert (expr->expr_type == EXPR_VARIABLE);
gfc_init_se (&se, NULL);
gfc_start_block (&se.pre);
se.descriptor_only = 1;
gfc_conv_expr (&se, expr);
- if (expr->symtree->n.sym->attr.dimension)
- {
- tmp = gfc_array_deallocate (se.expr);
- gfc_add_expr_to_block (&se.pre, tmp);
+ if (expr->ts.type == BT_DERIVED
+ && expr->ts.derived->attr.alloc_comp)
+ {
+ gfc_ref *ref;
+ gfc_ref *last = NULL;
+ for (ref = expr->ref; ref; ref = ref->next)
+ if (ref->type == REF_COMPONENT)
+ last = ref;
+
+ /* Do not deallocate the components of a derived type
+ ultimate pointer component. */
+ if (!(last && last->u.c.component->pointer)
+ && !(!last && expr->symtree->n.sym->attr.pointer))
+ {
+ tmp = gfc_deallocate_alloc_comp (expr->ts.derived, se.expr,
+ expr->rank);
+ gfc_add_expr_to_block (&se.pre, tmp);
+ }
}
+
+ if (expr->rank)
+ tmp = gfc_array_deallocate (se.expr, pstat);
else
{
- type = build_pointer_type (TREE_TYPE (se.expr));
- var = gfc_create_var (type, "ptr");
- tmp = gfc_build_addr_expr (type, se.expr);
- gfc_add_modify_expr (&se.pre, var, tmp);
-
- tmp = gfc_chainon_list (NULL_TREE, var);
- tmp = gfc_chainon_list (tmp, integer_zero_node);
- tmp = gfc_build_function_call (gfor_fndecl_deallocate, tmp);
+ parm = gfc_chainon_list (NULL_TREE, se.expr);
+ parm = gfc_chainon_list (parm, pstat);
+ tmp = build_function_call_expr (gfor_fndecl_deallocate, parm);
gfc_add_expr_to_block (&se.pre, tmp);
+
+ tmp = build2 (MODIFY_EXPR, void_type_node,
+ se.expr, build_int_cst (TREE_TYPE (se.expr), 0));
+ }
+
+ gfc_add_expr_to_block (&se.pre, tmp);
+
+ /* Keep track of the number of failed deallocations by adding stat
+ of the last deallocation to the running total. */
+ if (code->expr)
+ {
+ apstat = build2 (PLUS_EXPR, TREE_TYPE (stat), astat, stat);
+ gfc_add_modify_expr (&se.pre, astat, apstat);
}
+
tmp = gfc_finish_block (&se.pre);
gfc_add_expr_to_block (&block, tmp);
+
+ }
+
+ /* Assign the value to the status variable. */
+ if (code->expr)
+ {
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_lhs (&se, code->expr);
+ tmp = convert (TREE_TYPE (se.expr), astat);
+ gfc_add_modify_expr (&block, se.expr, tmp);
}
return gfc_finish_block (&block);