marshalling to implement data sharing and copying clauses.
Contributed by Diego Novillo <dnovillo@redhat.com>
- Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
+ Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
Free Software Foundation, Inc.
This file is part of GCC.
#include "flags.h"
#include "function.h"
#include "expr.h"
-#include "toplev.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
case GIMPLE_TRY: \
case GIMPLE_CATCH: \
case GIMPLE_EH_FILTER: \
+ case GIMPLE_TRANSACTION: \
/* The sub-statements for these should be walked. */ \
*handled_ops_p = false; \
break;
break;
case LE_EXPR:
if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
- loop->n2 = fold_build2_loc (loc,
- POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
- loop->n2, size_one_node);
+ loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
else
loop->n2 = fold_build2_loc (loc,
PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
break;
case GE_EXPR:
if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
- loop->n2 = fold_build2_loc (loc,
- POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
- loop->n2, size_int (-1));
+ loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
else
loop->n2 = fold_build2_loc (loc,
MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
parallel+workshare call. WS_STMT is the workshare directive being
expanded. */
-static tree
+static VEC(tree,gc) *
get_ws_args_for (gimple ws_stmt)
{
tree t;
location_t loc = gimple_location (ws_stmt);
+ VEC(tree,gc) *ws_args;
if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
{
struct omp_for_data fd;
- tree ws_args;
extract_omp_for_data (ws_stmt, &fd, NULL);
- ws_args = NULL_TREE;
- if (fd.chunk_size)
- {
- t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
- ws_args = tree_cons (NULL, t, ws_args);
- }
+ ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));
- t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
- ws_args = tree_cons (NULL, t, ws_args);
+ t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
+ VEC_quick_push (tree, ws_args, t);
t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
- ws_args = tree_cons (NULL, t, ws_args);
+ VEC_quick_push (tree, ws_args, t);
- t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
- ws_args = tree_cons (NULL, t, ws_args);
+ t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
+ VEC_quick_push (tree, ws_args, t);
+
+ if (fd.chunk_size)
+ {
+ t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
+ VEC_quick_push (tree, ws_args, t);
+ }
return ws_args;
}
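For reference, the vector is filled in the order the combined GOMP_parallel_loop_*_start entry points take their trailing arguments: n1, n2, step, then the optional chunk size (assuming the usual libgomp prototypes). A minimal source-level example of the combined parallel+workshare case this serves, compiled with -fopenmp:

    #include <stdio.h>

    int
    main (void)
    {
      int a[100];
    #pragma omp parallel for schedule(static, 4)
      for (int i = 0; i < 100; i++)  /* n1 = 0, n2 = 100, step = 1, chunk = 4 */
        a[i] = i;
      printf ("%d\n", a[99]);        /* prints 99 */
      return 0;
    }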
the exit of the sections region. */
basic_block bb = single_succ (gimple_bb (ws_stmt));
t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
- t = tree_cons (NULL, t, NULL);
- return t;
+ ws_args = VEC_alloc (tree, gc, 1);
+ VEC_quick_push (tree, ws_args, t);
+ return ws_args;
}
gcc_unreachable ();
break;
if (c)
- return true;
+ goto maybe_mark_addressable_and_ret;
}
}
returns, the task hasn't necessarily terminated. */
if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
{
- tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
+ tree outer;
+ maybe_mark_addressable_and_ret:
+ outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
if (is_gimple_reg (outer))
{
/* Taking address of OUTER in lower_send_shared_vars
tree copy = copy_var_decl (var, name, type);
DECL_CONTEXT (copy) = current_function_decl;
- TREE_CHAIN (copy) = ctx->block_vars;
+ DECL_CHAIN (copy) = ctx->block_vars;
ctx->block_vars = copy;
return copy;
return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}
+/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
+ as appropriate. */
+static tree
+omp_build_component_ref (tree obj, tree field)
+{
+ tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
+ if (TREE_THIS_VOLATILE (field))
+ TREE_THIS_VOLATILE (ret) |= 1;
+ if (TREE_READONLY (field))
+ TREE_READONLY (ret) |= 1;
+ return ret;
+}
+
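The reason for the helper: a COMPONENT_REF built with bare build3 drops the volatile and readonly qualifiers of the FIELD_DECL, and a field that mirrors a volatile shared variable must keep volatile accesses. A source-level sketch of the situation being protected (compile with -fopenmp; the exact marshalling through .omp_data_s is an assumption here):

    #include <stdio.h>

    int
    main (void)
    {
      volatile int flag = 0;
    #pragma omp parallel shared(flag)
      {
    #pragma omp master
        /* Reached through a field of the marshalling record; the field
           reference must stay volatile for the store to stay put.  */
        flag = 1;
      }
      printf ("%d\n", flag);  /* prints 1 */
      return 0;
    }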
/* Build tree nodes to access the field for VAR on the receiver side. */
static tree
if (x != NULL)
field = x;
- x = build_fold_indirect_ref (ctx->receiver_decl);
- x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
+ x = build_simple_mem_ref (ctx->receiver_decl);
+ x = omp_build_component_ref (x, field);
if (by_ref)
- x = build_fold_indirect_ref (x);
+ x = build_simple_mem_ref (x);
return x;
}
{
x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
x = build_outer_var_ref (x, ctx);
- x = build_fold_indirect_ref (x);
+ x = build_simple_mem_ref (x);
}
else if (is_taskreg_ctx (ctx))
{
gcc_unreachable ();
if (is_reference (var))
- x = build_fold_indirect_ref (x);
+ x = build_simple_mem_ref (x);
return x;
}
build_sender_ref (tree var, omp_context *ctx)
{
tree field = lookup_sfield (var, ctx);
- return build3 (COMPONENT_REF, TREE_TYPE (field),
- ctx->sender_decl, field, NULL);
+ return omp_build_component_ref (ctx->sender_decl, field);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
{
ctx->cb.src_fn = current_function_decl;
ctx->cb.dst_fn = current_function_decl;
- ctx->cb.src_node = cgraph_node (current_function_decl);
+ ctx->cb.src_node = cgraph_get_node (current_function_decl);
+ gcc_checking_assert (ctx->cb.src_node);
ctx->cb.dst_node = ctx->cb.src_node;
ctx->cb.src_cfun = cfun;
ctx->cb.copy_decl = omp_copy_decl;
old_fn = current_function_decl;
push_cfun (child_cfun);
current_function_decl = child_fn;
- bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
+ bind = gimplify_body (child_fn, false);
seq = gimple_seq_alloc ();
gimple_seq_add_stmt (&seq, bind);
new_seq = maybe_catch_exception (seq);
if (ctx->record_type)
{
tree t;
- for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
+ for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
DECL_ABSTRACT_ORIGIN (t) = NULL;
}
if (ctx->srecord_type)
{
tree t;
- for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
+ for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
DECL_ABSTRACT_ORIGIN (t) = NULL;
}
variably_modified_type_p doesn't work the way we expect for
record types. Testing each field for whether it needs remapping
and creating a new record by hand works, however. */
- for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
+ for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
break;
if (f)
TYPE_DECL, name, type);
TYPE_NAME (type) = name;
- for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
+ for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
{
tree new_f = copy_node (f);
DECL_CONTEXT (new_f) = type;
TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
- TREE_CHAIN (new_f) = new_fields;
+ DECL_CHAIN (new_f) = new_fields;
walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
&ctx->cb, NULL);
ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
break;
+ case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_UNTIED:
+ case OMP_CLAUSE_MERGEABLE:
break;
default:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_UNTIED:
+ case OMP_CLAUSE_FINAL:
+ case OMP_CLAUSE_MERGEABLE:
break;
default:
TREE_STATIC (decl) = 1;
TREE_USED (decl) = 1;
DECL_ARTIFICIAL (decl) = 1;
+ DECL_NAMELESS (decl) = 1;
DECL_IGNORED_P (decl) = 0;
TREE_PUBLIC (decl) = 0;
DECL_UNINLINABLE (decl) = 1;
t = build_decl (DECL_SOURCE_LOCATION (decl),
PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
DECL_ARTIFICIAL (t) = 1;
+ DECL_NAMELESS (t) = 1;
DECL_ARG_TYPE (t) = ptr_type_node;
DECL_CONTEXT (t) = current_function_decl;
TREE_USED (t) = 1;
PARM_DECL, get_identifier (".omp_data_o"),
ptr_type_node);
DECL_ARTIFICIAL (t) = 1;
+ DECL_NAMELESS (t) = 1;
DECL_ARG_TYPE (t) = ptr_type_node;
DECL_CONTEXT (t) = current_function_decl;
TREE_USED (t) = 1;
TREE_ADDRESSABLE (t) = 1;
- TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
+ DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
DECL_ARGUMENTS (decl) = t;
}
name = create_tmp_var_name (".omp_data_s");
name = build_decl (gimple_location (stmt),
TYPE_DECL, name, ctx->record_type);
+ DECL_ARTIFICIAL (name) = 1;
+ DECL_NAMELESS (name) = 1;
TYPE_NAME (ctx->record_type) = name;
create_omp_child_function (ctx, false);
gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
name = create_tmp_var_name (".omp_data_s");
name = build_decl (gimple_location (stmt),
TYPE_DECL, name, ctx->record_type);
+ DECL_ARTIFICIAL (name) = 1;
+ DECL_NAMELESS (name) = 1;
TYPE_NAME (ctx->record_type) = name;
create_omp_child_function (ctx, false);
gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
name = create_tmp_var_name (".omp_data_a");
name = build_decl (gimple_location (stmt),
TYPE_DECL, name, ctx->srecord_type);
+ DECL_ARTIFICIAL (name) = 1;
+ DECL_NAMELESS (name) = 1;
TYPE_NAME (ctx->srecord_type) = name;
create_omp_child_function (ctx, true);
}
q = &TREE_CHAIN (*q);
}
else
- p = &TREE_CHAIN (*p);
+ p = &DECL_CHAIN (*p);
*p = vla_fields;
layout_type (ctx->record_type);
fixup_child_record_type (ctx);
/* Check OpenMP nesting restrictions. */
-static void
-check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
+static bool
+check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
switch (gimple_code (stmt))
{
case GIMPLE_OMP_TASK:
if (is_gimple_call (stmt))
{
- warning (0, "barrier region may not be closely nested inside "
- "of work-sharing, critical, ordered, master or "
- "explicit task region");
- return;
+ error_at (gimple_location (stmt),
+ "barrier region may not be closely nested inside "
+ "of work-sharing, critical, ordered, master or "
+ "explicit task region");
+ return false;
}
- warning (0, "work-sharing region may not be closely nested inside "
- "of work-sharing, critical, ordered, master or explicit "
- "task region");
- return;
+ error_at (gimple_location (stmt),
+ "work-sharing region may not be closely nested inside "
+ "of work-sharing, critical, ordered, master or explicit "
+ "task region");
+ return false;
case GIMPLE_OMP_PARALLEL:
- return;
+ return true;
default:
break;
}
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_TASK:
- warning (0, "master region may not be closely nested inside "
- "of work-sharing or explicit task region");
- return;
+ error_at (gimple_location (stmt),
+ "master region may not be closely nested inside "
+ "of work-sharing or explicit task region");
+ return false;
case GIMPLE_OMP_PARALLEL:
- return;
+ return true;
default:
break;
}
{
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_TASK:
- warning (0, "ordered region may not be closely nested inside "
- "of critical or explicit task region");
- return;
+ error_at (gimple_location (stmt),
+ "ordered region may not be closely nested inside "
+ "of critical or explicit task region");
+ return false;
case GIMPLE_OMP_FOR:
if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_ORDERED) == NULL)
- warning (0, "ordered region must be closely nested inside "
+ {
+ error_at (gimple_location (stmt),
+ "ordered region must be closely nested inside "
"a loop region with an ordered clause");
- return;
+ return false;
+ }
+ return true;
case GIMPLE_OMP_PARALLEL:
- return;
+ return true;
default:
break;
}
&& (gimple_omp_critical_name (stmt)
== gimple_omp_critical_name (ctx->stmt)))
{
- warning (0, "critical region may not be nested inside a critical "
- "region with the same name");
- return;
+ error_at (gimple_location (stmt),
+ "critical region may not be nested inside a critical "
+ "region with the same name");
+ return false;
}
break;
default:
break;
}
+ return true;
}
{
*walk_subtrees = 1;
if (ctx)
- TREE_TYPE (t) = remap_type (TREE_TYPE (t), &ctx->cb);
+ {
+ tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
+ if (tem != TREE_TYPE (t))
+ {
+ if (TREE_CODE (t) == INTEGER_CST)
+ *tp = build_int_cst_wide (tem,
+ TREE_INT_CST_LOW (t),
+ TREE_INT_CST_HIGH (t));
+ else
+ TREE_TYPE (t) = tem;
+ }
+ }
}
break;
}
/* Check the OpenMP nesting restrictions. */
if (ctx != NULL)
{
+ bool remove = false;
if (is_gimple_omp (stmt))
- check_omp_nesting_restrictions (stmt, ctx);
+ remove = !check_omp_nesting_restrictions (stmt, ctx);
else if (is_gimple_call (stmt))
{
tree fndecl = gimple_call_fndecl (stmt);
if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
&& DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
- check_omp_nesting_restrictions (stmt, ctx);
+ remove = !check_omp_nesting_restrictions (stmt, ctx);
+ }
+ if (remove)
+ {
+ stmt = gimple_build_nop ();
+ gsi_replace (gsi, stmt, false);
}
}
*handled_ops_p = false;
if (ctx)
- for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
+ for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
insert_decl_map (&ctx->cb, var, var);
}
break;
static tree
build_omp_barrier (void)
{
- return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
+ return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
}
/* If a context was created for STMT when it was scanned, return it. */
case TRUTH_ORIF_EXPR:
case TRUTH_XOR_EXPR:
case NE_EXPR:
- return fold_convert_loc (loc, type, integer_zero_node);
+ return build_zero_cst (type);
case MULT_EXPR:
case TRUTH_AND_EXPR:
if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
{
gimple stmt;
- tree tmp;
+ tree tmp, atmp;
ptr = DECL_VALUE_EXPR (new_var);
gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
/* void *tmp = __builtin_alloca */
- stmt
- = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
+ atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
+ stmt = gimple_build_call (atmp, 1, x);
tmp = create_tmp_var_raw (ptr_type_node, NULL);
gimple_add_tmp_var (tmp);
gimple_call_set_lhs (stmt, tmp);
}
else
{
- x = build_call_expr_loc (clause_loc,
- built_in_decls[BUILT_IN_ALLOCA], 1, x);
+ tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
+ x = build_call_expr_loc (clause_loc, atmp, 1, x);
}
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_assign (new_var, x, ilist);
- new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
+ new_var = build_simple_mem_ref_loc (clause_loc, new_var);
}
else if (c_kind == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
but it certainly is to C++ operator=. */
if (copyin_seq)
{
- x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
+ x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
+ 0);
x = build2 (NE_EXPR, boolean_type_node, x,
build_int_cst (TREE_TYPE (x), 0));
x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
x = build_outer_var_ref (var, ctx);
if (is_reference (var))
- new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
+ new_var = build_simple_mem_ref_loc (clause_loc, new_var);
x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
gimplify_and_add (x, stmt_list);
}
var = OMP_CLAUSE_DECL (c);
new_var = lookup_decl (var, ctx);
if (is_reference (var))
- new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
+ new_var = build_simple_mem_ref_loc (clause_loc, new_var);
ref = build_outer_var_ref (var, ctx);
code = OMP_CLAUSE_REDUCTION_CODE (c);
}
}
- stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
+ stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
+ 0);
gimple_seq_add_stmt (stmt_seqp, stmt);
gimple_seq_add_seq (stmt_seqp, sub_seq);
- stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
+ stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
+ 0);
gimple_seq_add_stmt (stmt_seqp, stmt);
}
if (is_reference (var))
{
ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
- ref = build_fold_indirect_ref_loc (clause_loc, ref);
- new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
+ ref = build_simple_mem_ref_loc (clause_loc, ref);
+ new_var = build_simple_mem_ref_loc (clause_loc, new_var);
}
x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
gimplify_and_add (x, rlist);
return;
record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
- for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
+ for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
{
ovar = DECL_ABSTRACT_ORIGIN (f);
nvar = maybe_lookup_decl (ovar, ctx);
static void
expand_parallel_call (struct omp_region *region, basic_block bb,
- gimple entry_stmt, tree ws_args)
+ gimple entry_stmt, VEC(tree,gc) *ws_args)
{
tree t, t1, t2, val, cond, c, clauses;
gimple_stmt_iterator gsi;
gimple stmt;
- int start_ix;
+ enum built_in_function start_ix;
+ int start_ix2;
location_t clause_loc;
+ VEC(tree,gc) *args;
clauses = gimple_omp_parallel_clauses (entry_stmt);
{
case GIMPLE_OMP_FOR:
gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
- start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
- + (region->inner->sched_kind
- == OMP_CLAUSE_SCHEDULE_RUNTIME
- ? 3 : region->inner->sched_kind);
+ start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
+ + (region->inner->sched_kind
+ == OMP_CLAUSE_SCHEDULE_RUNTIME
+ ? 3 : region->inner->sched_kind));
+ start_ix = (enum built_in_function)start_ix2;
break;
case GIMPLE_OMP_SECTIONS:
start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
t1 = build_fold_addr_expr (t);
t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
- if (ws_args)
- {
- tree args = tree_cons (NULL, t2,
- tree_cons (NULL, t1,
- tree_cons (NULL, val, ws_args)));
- t = build_function_call_expr (UNKNOWN_LOCATION,
- built_in_decls[start_ix], args);
- }
- else
- t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
+ args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
+ VEC_quick_push (tree, args, t2);
+ VEC_quick_push (tree, args, t1);
+ VEC_quick_push (tree, args, val);
+ VEC_splice (tree, args, ws_args);
+
+ t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
+ builtin_decl_explicit (start_ix), args);
force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
false, GSI_CONTINUE_LINKING);
t = build_call_expr_loc (gimple_location (entry_stmt),
- built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
+ builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
+ 0);
force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
}
static void
expand_task_call (basic_block bb, gimple entry_stmt)
{
- tree t, t1, t2, t3, flags, cond, c, clauses;
+ tree t, t1, t2, t3, flags, cond, c, c2, clauses;
gimple_stmt_iterator gsi;
location_t loc = gimple_location (entry_stmt);
cond = boolean_true_node;
c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
- flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
+ c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
+ flags = build_int_cst (unsigned_type_node,
+ (c ? 1 : 0) + (c2 ? 4 : 0));
+
+ c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
+ if (c)
+ {
+ c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
+ c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
+ build_int_cst (unsigned_type_node, 2),
+ build_int_cst (unsigned_type_node, 0));
+ flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
+ }
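Spelled out, the flag word passed to GOMP_task encodes the clauses as bits: 1 for untied, 2 for final (whose condition may only be known at run time, hence the COND_EXPR fold), 4 for mergeable. The same encoding as a standalone sketch (names are illustrative, not libgomp's):

    #include <stdio.h>

    static unsigned
    task_flags (int untied, int final_cond, int mergeable)
    {
      unsigned flags = (untied ? 1 : 0) + (mergeable ? 4 : 0);
      flags += final_cond ? 2 : 0;  /* FINAL contributes 2 when its condition holds */
      return flags;
    }

    int
    main (void)
    {
      printf ("%u\n", task_flags (1, 1, 1));  /* prints 7 */
      return 0;
    }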
gsi = gsi_last_bb (bb);
t = gimple_omp_task_data_arg (entry_stmt);
else
t3 = build_fold_addr_expr_loc (loc, t);
- t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
+ t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
+ 7, t1, t2, t3,
gimple_omp_task_arg_size (entry_stmt),
gimple_omp_task_arg_align (entry_stmt), cond, flags);
if (!flag_exceptions)
return body;
- if (lang_protect_cleanup_actions)
- decl = lang_protect_cleanup_actions ();
+ if (lang_hooks.eh_protect_cleanup_actions != NULL)
+ decl = lang_hooks.eh_protect_cleanup_actions ();
else
- decl = built_in_decls[BUILT_IN_TRAP];
+ decl = builtin_decl_explicit (BUILT_IN_TRAP);
g = gimple_build_eh_must_not_throw (decl);
g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
-/* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
+/* Chain all the DECLs in vector V by their DECL_CHAIN fields. */
static tree
-list2chain (tree list)
+vec2chain (VEC(tree,gc) *v)
{
- tree t;
+ tree chain = NULL_TREE, t;
+ unsigned ix;
- for (t = list; t; t = TREE_CHAIN (t))
+ FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
{
- tree var = TREE_VALUE (t);
- if (TREE_CHAIN (t))
- TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
- else
- TREE_CHAIN (var) = NULL_TREE;
+ DECL_CHAIN (t) = chain;
+ chain = t;
}
- return list ? TREE_VALUE (list) : NULL_TREE;
+ return chain;
}
{
gimple parallel_stmt = last_stmt (region->entry);
tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
- tree local_decls = DECL_STRUCT_FUNCTION (child_fun)->local_decls;
- tree block;
+ tree local_decls, block, decl;
+ unsigned ix;
any_addressable_vars = 0;
- for (; local_decls; local_decls = TREE_CHAIN (local_decls))
- if (TREE_ADDRESSABLE (TREE_VALUE (local_decls)))
+ FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
+ if (TREE_ADDRESSABLE (decl))
{
any_addressable_vars = 1;
break;
{
for (local_decls = BLOCK_VARS (block);
local_decls;
- local_decls = TREE_CHAIN (local_decls))
+ local_decls = DECL_CHAIN (local_decls))
if (TREE_ADDRESSABLE (local_decls))
{
any_addressable_vars = 1;
{
basic_block bb;
gimple_stmt_iterator gsi;
- tree thr_num_id
- = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
- tree num_thr_id
- = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
+ tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
+ tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
+ tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
+ tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
&& find_omp_clause (gimple_omp_task_clauses (entry_stmt),
OMP_CLAUSE_UNTIED) != NULL);
during the execution of the task region. */
if (untied_task)
continue;
- built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
+ built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
}
else if (DECL_NAME (decl) == num_thr_id)
- built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
+ built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
else
continue;
{
basic_block entry_bb, exit_bb, new_bb;
struct function *child_cfun;
- tree child_fn, block, t, ws_args, *tp;
+ tree child_fn, block, t;
tree save_current;
gimple_stmt_iterator gsi;
gimple entry_stmt, stmt;
edge e;
+ VEC(tree,gc) *ws_args;
entry_stmt = last_stmt (region->entry);
child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
if (is_combined_parallel (region))
ws_args = region->ws_args;
else
- ws_args = NULL_TREE;
+ ws_args = NULL;
if (child_cfun->cfg)
{
}
else
{
+ unsigned srcidx, dstidx, num;
+
/* If the parallel region needs data sent from the parent
function, then the very first statement (except possible
tree profile counter updates) of the parallel body
/* Declare local variables needed in CHILD_CFUN. */
block = DECL_INITIAL (child_fn);
- BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
+ BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
/* The gimplifier could record temporaries in parallel/task block
rather than in containing function's local_decls chain,
which would mean cgraph missed finalizing them. Do it now. */
- for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
+ for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
if (TREE_CODE (t) == VAR_DECL
&& TREE_STATIC (t)
&& !DECL_EXTERNAL (t))
TREE_USED (block) = 1;
/* Reset DECL_CONTEXT on function arguments. */
- for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
+ for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
DECL_CONTEXT (t) = child_fn;
/* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
/* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
- for (tp = &child_cfun->local_decls; *tp; )
- if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
- tp = &TREE_CHAIN (*tp);
- else
- *tp = TREE_CHAIN (*tp);
+ num = VEC_length (tree, child_cfun->local_decls);
+ for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
+ {
+ t = VEC_index (tree, child_cfun->local_decls, srcidx);
+ if (DECL_CONTEXT (t) == cfun->decl)
+ continue;
+ if (srcidx != dstidx)
+ VEC_replace (tree, child_cfun->local_decls, dstidx, t);
+ dstidx++;
+ }
+ if (dstidx != num)
+ VEC_truncate (tree, child_cfun->local_decls, dstidx);
/* Inform the callgraph about the new function. */
DECL_STRUCT_FUNCTION (child_fn)->curr_properties
{
/* In a combined parallel loop, emit a call to
GOMP_loop_foo_next. */
- t = build_call_expr (built_in_decls[next_fn], 2,
+ t = build_call_expr (builtin_decl_explicit (next_fn), 2,
build_fold_addr_expr (istart0),
build_fold_addr_expr (iend0));
}
if (fd->chunk_size)
{
t = fold_convert (fd->iter_type, fd->chunk_size);
- t = build_call_expr (built_in_decls[start_fn], 6,
- t0, t1, t2, t, t3, t4);
+ t = build_call_expr (builtin_decl_explicit (start_fn),
+ 6, t0, t1, t2, t, t3, t4);
}
else
- t = build_call_expr (built_in_decls[start_fn], 5,
- t0, t1, t2, t3, t4);
+ t = build_call_expr (builtin_decl_explicit (start_fn),
+ 5, t0, t1, t2, t3, t4);
}
else
{
tree t5;
tree c_bool_type;
+ tree bfn_decl;
/* The GOMP_loop_ull_*_start functions have an additional boolean
argument, true for < loops and false for > loops.
In Fortran, the C bool type can be different from
boolean_type_node. */
- c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
+ bfn_decl = builtin_decl_explicit (start_fn);
+ c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
t5 = build_int_cst (c_bool_type,
fd->loop.cond_code == LT_EXPR ? 1 : 0);
if (fd->chunk_size)
{
+ tree bfn_decl = builtin_decl_explicit (start_fn);
t = fold_convert (fd->iter_type, fd->chunk_size);
- t = build_call_expr (built_in_decls[start_fn], 7,
- t5, t0, t1, t2, t, t3, t4);
+ t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
}
else
- t = build_call_expr (built_in_decls[start_fn], 6,
- t5, t0, t1, t2, t3, t4);
+ t = build_call_expr (builtin_decl_explicit (start_fn),
+ 6, t5, t0, t1, t2, t3, t4);
}
}
if (TREE_TYPE (t) != boolean_type_node)
t = fold_build2 (MULT_EXPR, itype, t,
fold_convert (itype, fd->loops[i].step));
if (POINTER_TYPE_P (vtype))
- t = fold_build2 (POINTER_PLUS_EXPR, vtype,
- fd->loops[i].n1, fold_convert (sizetype, t));
+ t = fold_build_pointer_plus (fd->loops[i].n1, t);
else
t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
vback = gimple_omp_continue_control_def (stmt);
if (POINTER_TYPE_P (type))
- t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
- fold_convert (sizetype, fd->loop.step));
+ t = fold_build_pointer_plus (vmain, fd->loop.step);
else
t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
if (POINTER_TYPE_P (vtype))
- t = fold_build2 (POINTER_PLUS_EXPR, vtype,
- fd->loops[i].v,
- fold_convert (sizetype, fd->loops[i].step));
+ t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
else
t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
fd->loops[i].step);
/* Emit code to get the next parallel iteration in L2_BB. */
gsi = gsi_start_bb (l2_bb);
- t = build_call_expr (built_in_decls[next_fn], 2,
+ t = build_call_expr (builtin_decl_explicit (next_fn), 2,
build_fold_addr_expr (istart0),
build_fold_addr_expr (iend0));
t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
/* Add the loop cleanup function. */
gsi = gsi_last_bb (exit_bb);
if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
- t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
+ t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
else
- t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
+ t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
stmt = gimple_build_call (t, 0);
gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
gsi_remove (&gsi, true);
else
n = (adj + N2 - N1) / STEP;
q = n / nthreads;
- q += (q * nthreads != n);
- s0 = q * threadid;
- e0 = min(s0 + q, n);
+ tt = n % nthreads;
+ if (threadid < tt) goto L3; else goto L4;
+ L3:
+ tt = 0;
+ q = q + 1;
+ L4:
+ s0 = q * threadid + tt;
+ e0 = s0 + q;
V = s0 * STEP + N1;
if (s0 >= e0) goto L2; else goto L0;
L0:
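The new division gives each of the first n % nthreads threads one extra iteration, so the per-thread counts differ by at most one and the old MIN against n is no longer needed. The same computation as standalone C:

    #include <stdio.h>

    static void
    chunk (long n, long nthreads, long threadid, long *s0, long *e0)
    {
      long q = n / nthreads;
      long tt = n % nthreads;
      if (threadid < tt)
        {
          tt = 0;
          q++;
        }
      *s0 = q * threadid + tt;
      *e0 = *s0 + q;
    }

    int
    main (void)
    {
      for (long tid = 0; tid < 4; tid++)
        {
          long s0, e0;
          chunk (10, 4, tid, &s0, &e0);
          printf ("thread %ld: [%ld, %ld)\n", tid, s0, e0);
        }
      return 0;  /* prints [0,3) [3,6) [6,8) [8,10) */
    }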
expand_omp_for_static_nochunk (struct omp_region *region,
struct omp_for_data *fd)
{
- tree n, q, s0, e0, e, t, nthreads, threadid;
+ tree n, q, s0, e0, e, t, tt, nthreads, threadid;
tree type, itype, vmain, vback;
- basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
+ basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
+ basic_block body_bb, cont_bb;
basic_block fin_bb;
gimple_stmt_iterator gsi;
gimple stmt;
+ edge ep;
itype = type = TREE_TYPE (fd->loop.v);
if (POINTER_TYPE_P (type))
gsi = gsi_last_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
- t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
+ t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
t = fold_convert (itype, t);
nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
true, GSI_SAME_STMT);
- t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
+ t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
t = fold_convert (itype, t);
threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
true, GSI_SAME_STMT);
t = fold_convert (itype, t);
n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
+ q = create_tmp_var (itype, "q");
t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
- q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
+ t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
+ gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
+
+ tt = create_tmp_var (itype, "tt");
+ t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
+ t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
+ gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
+
+ t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
+ stmt = gimple_build_cond_empty (t);
+ gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
- t = fold_build2 (MULT_EXPR, itype, q, nthreads);
- t = fold_build2 (NE_EXPR, itype, t, n);
- t = fold_build2 (PLUS_EXPR, itype, q, t);
- q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
+ second_bb = split_block (entry_bb, stmt)->dest;
+ gsi = gsi_last_bb (second_bb);
+ gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
+
+ gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
+ GSI_SAME_STMT);
+ stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
+ build_int_cst (itype, 1));
+ gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
+
+ third_bb = split_block (second_bb, stmt)->dest;
+ gsi = gsi_last_bb (third_bb);
+ gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
t = build2 (MULT_EXPR, itype, q, threadid);
+ t = build2 (PLUS_EXPR, itype, t, tt);
s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
t = fold_build2 (PLUS_EXPR, itype, s0, q);
- t = fold_build2 (MIN_EXPR, itype, t, n);
e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
t = build2 (GE_EXPR, boolean_type_node, s0, e0);
t = fold_convert (itype, s0);
t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
if (POINTER_TYPE_P (type))
- t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
- fold_convert (sizetype, t));
+ t = fold_build_pointer_plus (fd->loop.n1, t);
else
t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
t = fold_convert (itype, e0);
t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
if (POINTER_TYPE_P (type))
- t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
- fold_convert (sizetype, t));
+ t = fold_build_pointer_plus (fd->loop.n1, t);
else
t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
vback = gimple_omp_continue_control_def (stmt);
if (POINTER_TYPE_P (type))
- t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
- fold_convert (sizetype, fd->loop.step));
+ t = fold_build_pointer_plus (vmain, fd->loop.step);
else
t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
gsi_remove (&gsi, true);
/* Connect all the blocks. */
- find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
- find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
+ ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
+ ep->probability = REG_BR_PROB_BASE / 4 * 3;
+ ep = find_edge (entry_bb, second_bb);
+ ep->flags = EDGE_TRUE_VALUE;
+ ep->probability = REG_BR_PROB_BASE / 4;
+ find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
+ find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
- set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
+ set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
+ set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
+ set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
set_immediate_dominator (CDI_DOMINATORS, body_bb,
recompute_dominator (CDI_DOMINATORS, body_bb));
set_immediate_dominator (CDI_DOMINATORS, fin_bb,
si = gsi_last_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
- t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
+ t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
t = fold_convert (itype, t);
nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
true, GSI_SAME_STMT);
- t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
+ t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
t = fold_convert (itype, t);
threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
true, GSI_SAME_STMT);
t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
if (POINTER_TYPE_P (type))
- t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
- fold_convert (sizetype, t));
+ t = fold_build_pointer_plus (fd->loop.n1, t);
else
t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
t = fold_convert (itype, s0);
t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
if (POINTER_TYPE_P (type))
- t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
- fold_convert (sizetype, t));
+ t = fold_build_pointer_plus (fd->loop.n1, t);
else
t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
t = fold_convert (itype, e0);
t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
if (POINTER_TYPE_P (type))
- t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
- fold_convert (sizetype, t));
+ t = fold_build_pointer_plus (fd->loop.n1, t);
else
t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
v_back = gimple_omp_continue_control_def (stmt);
if (POINTER_TYPE_P (type))
- t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
- fold_convert (sizetype, fd->loop.step));
+ t = fold_build_pointer_plus (v_main, fd->loop.step);
else
t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
stmt = gimple_build_assign (v_back, t);
{
int fn_index, start_ix, next_ix;
+ if (fd.chunk_size == NULL
+ && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
+ fd.chunk_size = integer_zero_node;
gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
? 3 : fd.sched_kind;
fn_index += fd.have_ordered * 4;
- start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
- next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
+ start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
+ next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
if (fd.iter_type == long_long_unsigned_type_node)
{
- start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
- - BUILT_IN_GOMP_LOOP_STATIC_START;
- next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
- - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
+ start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
+ - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
+ next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
+ - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
}
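The casts through int are deliberate: the sum of an enumerator and an int has type int, and int does not implicitly convert back to an enum when the file is compiled as C++, so the offset arithmetic is done in int and converted back explicitly. The pattern in isolation (illustrative enum, not GCC's):

    #include <stdio.h>

    enum sched_fn { FN_STATIC, FN_DYNAMIC, FN_GUIDED, FN_RUNTIME };

    int
    main (void)
    {
      int fn_index = 2;  /* e.g. guided */
      enum sched_fn f = (enum sched_fn) ((int) FN_STATIC + fn_index);
      printf ("%d\n", (int) f);  /* prints 2 */
      return 0;
    }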
expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
(enum built_in_function) next_ix);
unsigned i, casei;
bool exit_reachable = region->cont != NULL;
- gcc_assert (exit_reachable == (region->exit != NULL));
+ gcc_assert (region->exit != NULL);
entry_bb = region->entry;
l0_bb = single_succ (entry_bb);
l1_bb = region->cont;
l2_bb = region->exit;
- if (exit_reachable)
+ if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
+ l2 = gimple_block_label (l2_bb);
+ else
{
- if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
- l2 = gimple_block_label (l2_bb);
+ /* This can happen if there are reductions. */
+ len = EDGE_COUNT (l0_bb->succs);
+ gcc_assert (len > 0);
+ e = EDGE_SUCC (l0_bb, len - 1);
+ si = gsi_last_bb (e->dest);
+ l2 = NULL_TREE;
+ if (gsi_end_p (si)
+ || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
+ l2 = gimple_block_label (e->dest);
else
- {
- /* This can happen if there are reductions. */
- len = EDGE_COUNT (l0_bb->succs);
- gcc_assert (len > 0);
- e = EDGE_SUCC (l0_bb, len - 1);
- si = gsi_last_bb (e->dest);
- l2 = NULL_TREE;
- if (gsi_end_p (si)
- || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
- l2 = gimple_block_label (e->dest);
- else
- FOR_EACH_EDGE (e, ei, l0_bb->succs)
+ FOR_EACH_EDGE (e, ei, l0_bb->succs)
+ {
+ si = gsi_last_bb (e->dest);
+ if (gsi_end_p (si)
+ || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
{
- si = gsi_last_bb (e->dest);
- if (gsi_end_p (si)
- || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
- {
- l2 = gimple_block_label (e->dest);
- break;
- }
+ l2 = gimple_block_label (e->dest);
+ break;
}
- }
- default_bb = create_empty_bb (l1_bb->prev_bb);
+ }
}
+ if (exit_reachable)
+ default_bb = create_empty_bb (l1_bb->prev_bb);
else
- {
- default_bb = create_empty_bb (l0_bb);
- l2 = gimple_block_label (default_bb);
- }
+ default_bb = create_empty_bb (l0_bb);
/* We will build a switch() with enough cases for all the
GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
call GOMP_sections_start. */
t = build_int_cst (unsigned_type_node,
exit_reachable ? len - 1 : len);
- u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
+ u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
stmt = gimple_build_call (u, 1, t);
}
else
{
/* Otherwise, call GOMP_sections_next. */
- u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
+ u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
stmt = gimple_build_call (u, 0);
}
gimple_call_set_lhs (stmt, vin);
vnext = NULL_TREE;
}
- i = 0;
- if (exit_reachable)
- {
- t = build3 (CASE_LABEL_EXPR, void_type_node,
- build_int_cst (unsigned_type_node, 0), NULL, l2);
- VEC_quick_push (tree, label_vec, t);
- i++;
- }
+ t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
+ VEC_quick_push (tree, label_vec, t);
+ i = 1;
/* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
for (inner = region->inner, casei = 1;
t = gimple_block_label (s_entry_bb);
u = build_int_cst (unsigned_type_node, casei);
- u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
+ u = build_case_label (u, NULL, t);
VEC_quick_push (tree, label_vec, u);
si = gsi_last_bb (s_entry_bb);
/* Error handling code goes in DEFAULT_BB. */
t = gimple_block_label (default_bb);
- u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
+ u = build_case_label (NULL, NULL, t);
make_edge (l0_bb, default_bb, 0);
stmt = gimple_build_switch_vec (vmain, u, label_vec);
VEC_free (tree, heap, label_vec);
si = gsi_start_bb (default_bb);
- stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
+ stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
if (exit_reachable)
{
+ tree bfn_decl;
+
/* Code to get the next section goes in L1_BB. */
si = gsi_last_bb (l1_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
- stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
+ bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
+ stmt = gimple_build_call (bfn_decl, 0);
gimple_call_set_lhs (stmt, vnext);
gsi_insert_after (&si, stmt, GSI_SAME_STMT);
gsi_remove (&si, true);
single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
-
- /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
- si = gsi_last_bb (l2_bb);
- if (gimple_omp_return_nowait_p (gsi_stmt (si)))
- t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
- else
- t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
- stmt = gimple_build_call (t, 0);
- gsi_insert_after (&si, stmt, GSI_SAME_STMT);
- gsi_remove (&si, true);
}
+ /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
+ si = gsi_last_bb (l2_bb);
+ if (gimple_omp_return_nowait_p (gsi_stmt (si)))
+ t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
+ else
+ t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
+ stmt = gimple_build_call (t, 0);
+ gsi_insert_after (&si, stmt, GSI_SAME_STMT);
+ gsi_remove (&si, true);
+
set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}
}
/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
- operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
+ operation as a normal volatile load. */
+
+static bool
+expand_omp_atomic_load (basic_block load_bb, tree addr,
+ tree loaded_val, int index)
+{
+ enum built_in_function tmpbase;
+ gimple_stmt_iterator gsi;
+ basic_block store_bb;
+ location_t loc;
+ gimple stmt;
+ tree decl, call, type, itype;
+
+ gsi = gsi_last_bb (load_bb);
+ stmt = gsi_stmt (gsi);
+ gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
+ loc = gimple_location (stmt);
+
+ /* ??? If the target does not implement atomic_load_optab[mode], and mode
+ is smaller than word size, then expand_atomic_load assumes that the load
+ is atomic. We could avoid the builtin entirely in this case. */
+
+ tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
+ decl = builtin_decl_explicit (tmpbase);
+ if (decl == NULL_TREE)
+ return false;
+
+ type = TREE_TYPE (loaded_val);
+ itype = TREE_TYPE (TREE_TYPE (decl));
+
+ call = build_call_expr_loc (loc, decl, 2, addr,
+ build_int_cst (NULL, MEMMODEL_RELAXED));
+ if (!useless_type_conversion_p (type, itype))
+ call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
+ call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
+
+ force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
+ gsi_remove (&gsi, true);
+
+ store_bb = single_succ (load_bb);
+ gsi = gsi_last_bb (store_bb);
+ gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
+ gsi_remove (&gsi, true);
+
+ if (gimple_in_ssa_p (cfun))
+ update_ssa (TODO_update_ssa_no_phi);
+
+ return true;
+}
+
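At the source level, the transformation makes an OpenMP 3.1 atomic read behave like a relaxed __atomic load of the appropriate width; the pass itself emits the sized __atomic_load_N builtin. A semantic sketch (compile with -fopenmp):

    #include <stdio.h>

    int
    main (void)
    {
      int x = 42, v, w;
    #pragma omp atomic read
      v = x;                                       /* handled by expand_omp_atomic_load */
      w = __atomic_load_n (&x, __ATOMIC_RELAXED);  /* the builtin form it maps to */
      printf ("%d %d\n", v, w);                    /* prints 42 42 */
      return 0;
    }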
+/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
+ operation as a normal volatile store. */
+
+static bool
+expand_omp_atomic_store (basic_block load_bb, tree addr,
+ tree loaded_val, tree stored_val, int index)
+{
+ enum built_in_function tmpbase;
+ gimple_stmt_iterator gsi;
+ basic_block store_bb = single_succ (load_bb);
+ location_t loc;
+ gimple stmt;
+ tree decl, call, type, itype;
+ enum machine_mode imode;
+ bool exchange;
+
+ gsi = gsi_last_bb (load_bb);
+ stmt = gsi_stmt (gsi);
+ gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
+
+ /* If the load value is needed, then this isn't a store but an exchange. */
+ exchange = gimple_omp_atomic_need_value_p (stmt);
+
+ gsi = gsi_last_bb (store_bb);
+ stmt = gsi_stmt (gsi);
+ gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
+ loc = gimple_location (stmt);
+
+ /* ??? If the target does not implement atomic_store_optab[mode], and mode
+ is smaller than word size, then expand_atomic_store assumes that the store
+ is atomic. We could avoid the builtin entirely in this case. */
+
+ tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
+ tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
+ decl = builtin_decl_explicit (tmpbase);
+ if (decl == NULL_TREE)
+ return false;
+
+ type = TREE_TYPE (stored_val);
+
+ /* Dig out the type of the function's second argument. */
+ itype = TREE_TYPE (decl);
+ itype = TYPE_ARG_TYPES (itype);
+ itype = TREE_CHAIN (itype);
+ itype = TREE_VALUE (itype);
+ imode = TYPE_MODE (itype);
+
+ if (exchange && !can_atomic_exchange_p (imode, true))
+ return false;
+
+ if (!useless_type_conversion_p (itype, type))
+ stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
+ call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
+ build_int_cst (NULL, MEMMODEL_RELAXED));
+ if (exchange)
+ {
+ if (!useless_type_conversion_p (type, itype))
+ call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
+ call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
+ }
+
+ force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
+ gsi_remove (&gsi, true);
+
+ /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
+ gsi = gsi_last_bb (load_bb);
+ gsi_remove (&gsi, true);
+
+ if (gimple_in_ssa_p (cfun))
+ update_ssa (TODO_update_ssa_no_phi);
+
+ return true;
+}
+
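Correspondingly, an atomic write lowers to a plain __atomic store, while a capture whose stored value is not an update of the location becomes an exchange, as the need_value check above decides. A semantic sketch (compile with -fopenmp):

    #include <stdio.h>

    int
    main (void)
    {
      int x = 0, v;
    #pragma omp atomic write
      x = 7;               /* store path: __atomic_store_N */
    #pragma omp atomic capture
      { v = x; x = 9; }    /* exchange path: __atomic_exchange_N */
      printf ("%d %d\n", v, x);  /* prints 7 9 */
      return 0;
    }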
+/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
+ operation as a __atomic_fetch_op builtin. INDEX is log2 of the
size of the data type, and thus usable to find the index of the builtin
decl. Returns false if the expression is not of the proper form. */
tree addr, tree loaded_val,
tree stored_val, int index)
{
- enum built_in_function base;
+ enum built_in_function oldbase, newbase, tmpbase;
tree decl, itype, call;
- enum insn_code *optab;
- tree rhs;
+ tree lhs, rhs;
basic_block store_bb = single_succ (load_bb);
gimple_stmt_iterator gsi;
gimple stmt;
location_t loc;
+ enum tree_code code;
+ bool need_old, need_new;
+ enum machine_mode imode;
/* We expect to find the following sequences:
gsi_next (&gsi);
if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
return false;
+ need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
+ need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
+ gcc_checking_assert (!need_old || !need_new);
if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
return false;
/* Check for one of the supported fetch-op operations. */
- switch (gimple_assign_rhs_code (stmt))
+ code = gimple_assign_rhs_code (stmt);
+ switch (code)
{
case PLUS_EXPR:
case POINTER_PLUS_EXPR:
- base = BUILT_IN_FETCH_AND_ADD_N;
- optab = sync_add_optab;
+ oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
+ newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
break;
case MINUS_EXPR:
- base = BUILT_IN_FETCH_AND_SUB_N;
- optab = sync_add_optab;
+ oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
+ newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
break;
case BIT_AND_EXPR:
- base = BUILT_IN_FETCH_AND_AND_N;
- optab = sync_and_optab;
+ oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
+ newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
break;
case BIT_IOR_EXPR:
- base = BUILT_IN_FETCH_AND_OR_N;
- optab = sync_ior_optab;
+ oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
+ newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
break;
case BIT_XOR_EXPR:
- base = BUILT_IN_FETCH_AND_XOR_N;
- optab = sync_xor_optab;
+ oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
+ newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
break;
default:
return false;
}
+
/* Make sure the expression is of the proper form. */
if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
rhs = gimple_assign_rhs2 (stmt);
else
return false;
- decl = built_in_decls[base + index + 1];
+ tmpbase = ((enum built_in_function)
+ ((need_new ? newbase : oldbase) + index + 1));
+ decl = builtin_decl_explicit (tmpbase);
+ if (decl == NULL_TREE)
+ return false;
itype = TREE_TYPE (TREE_TYPE (decl));
+ imode = TYPE_MODE (itype);
- if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
+ /* We could test all of the various optabs involved, but the fact of the
+ matter is that (with the exception of i486 vs i586 and xadd) all targets
+ that support any atomic operation optab also implement compare-and-swap.
+ Let optabs.c take care of expanding any compare-and-swap loop. */
+ if (!can_compare_and_swap_p (imode, true))
return false;
gsi = gsi_last_bb (load_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
- call = build_call_expr_loc (loc,
- decl, 2, addr,
- fold_convert_loc (loc, itype, rhs));
- call = fold_convert_loc (loc, void_type_node, call);
+
+ /* OpenMP does not imply any barrier-like semantics on its atomic ops.
+ It only requires that the operation happen atomically. Thus we can
+ use the RELAXED memory model. */
+ call = build_call_expr_loc (loc, decl, 3, addr,
+ fold_convert_loc (loc, itype, rhs),
+ build_int_cst (NULL, MEMMODEL_RELAXED));
+
+ if (need_old || need_new)
+ {
+ lhs = need_old ? loaded_val : stored_val;
+ call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
+ call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
+ }
+ else
+ call = fold_convert_loc (loc, void_type_node, call);
force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
gsi_remove (&gsi, true);
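The net effect for the update form: a single relaxed fetch-op, with the fetched value kept only when a capture clause needs the old or new value. A semantic sketch (compile with -fopenmp):

    #include <stdio.h>

    int
    main (void)
    {
      int x = 0, old;
    #pragma omp atomic
      x += 5;               /* fetch-add; result discarded */
    #pragma omp atomic capture
      { old = x; x += 5; }  /* fetch-add; old value kept */
      printf ("%d %d\n", old, x);  /* prints 5 10 */
      return 0;
    }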
basic_block loop_header = single_succ (load_bb);
gimple phi, stmt;
edge e;
-
- cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
+ enum built_in_function fncode;
+
+ /* ??? We need a non-pointer interface to __atomic_compare_exchange in
+ order to use the RELAXED memory model effectively. */
+ fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
+ + index + 1);
+ cmpxchg = builtin_decl_explicit (fncode);
+ if (cmpxchg == NULL_TREE)
+ return false;
type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
itype = TREE_TYPE (TREE_TYPE (cmpxchg));
- if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
+ if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
return false;
/* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
loadedi = loaded_val;
}
- initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
- true, NULL_TREE, true, GSI_SAME_STMT);
+ initial
+ = force_gimple_operand_gsi (&si,
+ build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
+ iaddr,
+ build_int_cst (TREE_TYPE (iaddr), 0)),
+ true, NULL_TREE, true, GSI_SAME_STMT);
/* Move the value to the LOADEDI temporary. */
if (gimple_in_ssa_p (cfun))
loaded_val = *addr;
and replace
- GIMPLE_OMP_ATOMIC_ATORE (stored_val) with
+ GIMPLE_OMP_ATOMIC_STORE (stored_val) with
*addr = stored_val;
*/
si = gsi_last_bb (load_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
- t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
- t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
+ t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
+ t = build_call_expr (t, 0);
force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
- stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
+ stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
gsi_insert_before (&si, stmt, GSI_SAME_STMT);
gsi_remove (&si, true);
si = gsi_last_bb (store_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
- stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
- stored_val);
+ stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
+ stored_val);
gsi_insert_before (&si, stmt, GSI_SAME_STMT);
- t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
- t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
+ t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
+ t = build_call_expr (t, 0);
force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
gsi_remove (&si, true);
/* __sync builtins require strict data alignment. */
if (exact_log2 (align) >= index)
{
+ /* Atomic load. */
+ if (loaded_val == stored_val
+ && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
+ || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
+ && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
+ && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
+ return;
+
+ /* Atomic store. */
+ if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
+ || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
+ && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
+ && store_bb == single_succ (load_bb)
+ && first_stmt (store_bb) == store
+ && expand_omp_atomic_store (load_bb, addr, loaded_val,
+ stored_val, index))
+ return;
+
/* When possible, use specialized atomic update functions. */
if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
- && store_bb == single_succ (load_bb))
- {
- if (expand_omp_atomic_fetch_op (load_bb, addr,
- loaded_val, stored_val, index))
- return;
- }
+ && store_bb == single_succ (load_bb)
+ && expand_omp_atomic_fetch_op (load_bb, addr,
+ loaded_val, stored_val, index))
+ return;
/* If we don't have specialized __sync builtins, try and implement
as a compare and swap loop. */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func /* todo_flags_finish */
+ 0 /* todo_flags_finish */
}
};
\f
gimple call, cond;
tree lhs, decl;
- decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
+ decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
call = gimple_build_call (decl, 0);
gimple_call_set_lhs (call, lhs);
static void
lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
{
- tree ptr_type, t, l0, l1, l2;
+ tree ptr_type, t, l0, l1, l2, bfn_decl;
gimple_seq copyin_seq;
location_t loc = gimple_location (single_stmt);
l1 = create_artificial_label (loc);
l2 = create_artificial_label (loc);
- t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
+ bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
+ t = build_call_expr_loc (loc, bfn_decl, 0);
t = fold_convert_loc (loc, ptr_type, t);
gimplify_assign (ctx->receiver_decl, t, pre_p);
©in_seq, ctx);
t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
- t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
- 1, t);
+ bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
+ t = build_call_expr_loc (loc, bfn_decl, 1, t);
gimplify_and_add (t, pre_p);
t = build_and_jump (&l2);
static void
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
- tree block, lab = NULL, x;
+ tree block, lab = NULL, x, bfn_decl;
gimple stmt = gsi_stmt (*gsi_p), bind;
location_t loc = gimple_location (stmt);
gimple_seq tseq;
bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
block);
- x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
+ bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
+ x = build_call_expr_loc (loc, bfn_decl, 0);
x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
tseq = NULL;
bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
block);
- x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
+ x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
+ 0);
gimple_bind_add_stmt (bind, x);
lower_omp (gimple_omp_body (stmt), ctx);
gimple_bind_add_seq (bind, gimple_omp_body (stmt));
gimple_omp_set_body (stmt, NULL);
- x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
+ x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
gimple_bind_add_stmt (bind, x);
gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
else
decl = (tree) n->value;
- lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
+ lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
- unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
+ unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
unlock = build_call_expr_loc (loc, unlock, 1,
build_fold_addr_expr_loc (loc, decl));
}
else
{
- lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
+ lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
lock = build_call_expr_loc (loc, lock, 0);
- unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
+ unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
unlock = build_call_expr_loc (loc, unlock, 0);
}
child_fn = gimple_omp_task_copy_fn (task_stmt);
child_cfun = DECL_STRUCT_FUNCTION (child_fn);
gcc_assert (child_cfun->cfg == NULL);
- child_cfun->dont_save_pending_sizes_p = 1;
DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
/* Reset DECL_CONTEXT on function arguments. */
- for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
+ for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
DECL_CONTEXT (t) = child_fn;
/* Populate the function. */
/* Remap src and dst argument types if needed. */
record_type = ctx->record_type;
srecord_type = ctx->srecord_type;
- for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
+ for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
{
record_needs_remap = true;
break;
}
- for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
+ for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
{
srecord_needs_remap = true;
memset (&tcctx, '\0', sizeof (tcctx));
tcctx.cb.src_fn = ctx->cb.src_fn;
tcctx.cb.dst_fn = child_fn;
- tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
+ tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
+ gcc_checking_assert (tcctx.cb.src_node);
tcctx.cb.dst_node = tcctx.cb.src_node;
tcctx.cb.src_cfun = ctx->cb.src_cfun;
tcctx.cb.copy_decl = task_copyfn_copy_decl;
arg = DECL_ARGUMENTS (child_fn);
TREE_TYPE (arg) = build_pointer_type (record_type);
- sarg = TREE_CHAIN (arg);
+ sarg = DECL_CHAIN (arg);
TREE_TYPE (sarg) = build_pointer_type (srecord_type);
/* First pass: initialize temporaries used in record_type and srecord_type
n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
sf = (tree) n->value;
sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
- src = build_fold_indirect_ref_loc (loc, sarg);
- src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
+ src = build_simple_mem_ref_loc (loc, sarg);
+ src = omp_build_component_ref (src, sf);
t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
append_to_statement_list (t, &list);
}
sf = (tree) n->value;
if (tcctx.cb.decl_map)
sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
- src = build_fold_indirect_ref_loc (loc, sarg);
- src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
- dst = build_fold_indirect_ref_loc (loc, arg);
- dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
+ src = build_simple_mem_ref_loc (loc, sarg);
+ src = omp_build_component_ref (src, sf);
+ dst = build_simple_mem_ref_loc (loc, arg);
+ dst = omp_build_component_ref (dst, f);
t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
append_to_statement_list (t, &list);
break;
sf = (tree) n->value;
if (tcctx.cb.decl_map)
sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
- src = build_fold_indirect_ref_loc (loc, sarg);
- src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
+ src = build_simple_mem_ref_loc (loc, sarg);
+ src = omp_build_component_ref (src, sf);
if (use_pointer_for_field (decl, NULL) || is_reference (decl))
- src = build_fold_indirect_ref_loc (loc, src);
+ src = build_simple_mem_ref_loc (loc, src);
}
else
src = decl;
- dst = build_fold_indirect_ref_loc (loc, arg);
- dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
+ dst = build_simple_mem_ref_loc (loc, arg);
+ dst = omp_build_component_ref (dst, f);
t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
append_to_statement_list (t, &list);
break;
sf = (tree) n->value;
if (tcctx.cb.decl_map)
sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
- src = build_fold_indirect_ref_loc (loc, sarg);
- src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
+ src = build_simple_mem_ref_loc (loc, sarg);
+ src = omp_build_component_ref (src, sf);
if (use_pointer_for_field (decl, NULL))
- src = build_fold_indirect_ref_loc (loc, src);
+ src = build_simple_mem_ref_loc (loc, src);
}
else
src = decl;
- dst = build_fold_indirect_ref_loc (loc, arg);
- dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
+ dst = build_simple_mem_ref_loc (loc, arg);
+ dst = omp_build_component_ref (dst, f);
t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
append_to_statement_list (t, &list);
break;
(splay_tree_key) TREE_OPERAND (ind, 0));
sf = (tree) n->value;
sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
- src = build_fold_indirect_ref_loc (loc, sarg);
- src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
- src = build_fold_indirect_ref_loc (loc, src);
- dst = build_fold_indirect_ref_loc (loc, arg);
- dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
+ src = build_simple_mem_ref_loc (loc, sarg);
+ src = omp_build_component_ref (src, sf);
+ src = build_simple_mem_ref_loc (loc, src);
+ dst = build_simple_mem_ref_loc (loc, arg);
+ dst = omp_build_component_ref (dst, f);
t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
append_to_statement_list (t, &list);
n = splay_tree_lookup (ctx->field_map,
(splay_tree_key) TREE_OPERAND (ind, 0));
df = (tree) n->value;
df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
- ptr = build_fold_indirect_ref_loc (loc, arg);
- ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
+ ptr = build_simple_mem_ref_loc (loc, arg);
+ ptr = omp_build_component_ref (ptr, df);
t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
build_fold_addr_expr_loc (loc, dst));
append_to_statement_list (t, &list);
ctx->sender_decl
= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
: ctx->record_type, ".omp_data_o");
+ DECL_NAMELESS (ctx->sender_decl) = 1;
TREE_ADDRESSABLE (ctx->sender_decl) = 1;
gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
}
PROP_gimple_lomp, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func /* todo_flags_finish */
+ 0 /* todo_flags_finish */
}
};
\f