/* Passes for transactional memory support.
- Copyright (C) 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
+ Copyright (C) 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
This file is part of GCC.
unsigned int summary_flags : 8;
unsigned int block_flags : 8;
unsigned int func_flags : 8;
- unsigned int saw_unsafe : 1;
unsigned int saw_volatile : 1;
gimple stmt;
};
"unsafe function call %qD within "
"atomic transaction", fn);
else
- error_at (gimple_location (stmt),
- "unsafe function call %qE within "
- "atomic transaction", fn);
+ {
+ if (!DECL_P (fn) || DECL_NAME (fn))
+ error_at (gimple_location (stmt),
+ "unsafe function call %qE within "
+ "atomic transaction", fn);
+ else
+ error_at (gimple_location (stmt),
+ "unsafe indirect function call within "
+ "atomic transaction");
+ }
}
else
{
"unsafe function call %qD within "
"%<transaction_safe%> function", fn);
else
- error_at (gimple_location (stmt),
- "unsafe function call %qE within "
- "%<transaction_safe%> function", fn);
+ {
+ if (!DECL_P (fn) || DECL_NAME (fn))
+ error_at (gimple_location (stmt),
+ "unsafe function call %qE within "
+ "%<transaction_safe%> function", fn);
+ else
+ error_at (gimple_location (stmt),
+ "unsafe indirect function call within "
+ "%<transaction_safe%> function");
+ }
}
}
}
else if (d->func_flags & DIAG_TM_SAFE)
error_at (gimple_location (stmt),
"asm not allowed in %<transaction_safe%> function");
- else
- d->saw_unsafe = true;
break;
case GIMPLE_TRANSACTION:
else if (d->func_flags & DIAG_TM_SAFE)
error_at (gimple_location (stmt),
"relaxed transaction in %<transaction_safe%> function");
- else
- d->saw_unsafe = true;
inner_flags = DIAG_TM_RELAXED;
}
else if (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER)
else if (d->func_flags & DIAG_TM_SAFE)
error_at (gimple_location (stmt),
"outer transaction in %<transaction_safe%> function");
- else
- d->saw_unsafe = true;
inner_flags |= DIAG_TM_OUTER;
}
walk_gimple_seq (gimple_transaction_body (stmt),
diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
-
- d->saw_unsafe |= d_inner.saw_unsafe;
}
}
break;
walk_gimple_seq (gimple_body (current_function_decl),
diagnose_tm_1, diagnose_tm_1_op, &wi);
- /* If we saw something other than a call that makes this function
- unsafe, remember it so that the IPA pass only needs to scan calls. */
- if (d.saw_unsafe && !is_tm_safe_or_pure (current_function_decl))
- cgraph_local_info (current_function_decl)->tm_may_enter_irr = 1;
-
return 0;
}
special constructors and the like. */
&& !TREE_ADDRESSABLE (type))
{
- lp->save_var = create_tmp_var (TREE_TYPE (lp->addr), "tm_save");
+ lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
add_referenced_var (lp->save_var);
lp->stmts = NULL;
lp->entry_block = entry_block;
during lower_sequence_tm/gimplification, leave the call
to needs_to_live_in_memory until we eliminate
lower_sequence_tm altogether. */
- needs_to_live_in_memory (x)
- /* X escapes. */
- || ptr_deref_may_alias_global_p (x))
+ needs_to_live_in_memory (x))
return true;
else
{
bitmap irr_blocks;
};
+typedef struct tm_region *tm_region_p;
+DEF_VEC_P (tm_region_p);
+DEF_VEC_ALLOC_P (tm_region_p, heap);
+
/* True if there are pending edge statements to be committed for the
current function being scanned in the tmmark pass. */
bool pending_edge_inserts_p;
VEC(basic_block, heap) *queue = NULL;
bitmap visited_blocks = BITMAP_ALLOC (NULL);
struct tm_region *old_region;
+ VEC(tm_region_p, heap) *bb_regions = NULL;
all_tm_regions = region;
bb = single_succ (ENTRY_BLOCK_PTR);
+ /* We could store this information in bb->aux, but we may get called
+ through get_all_tm_blocks() from another pass that may be already
+ using bb->aux. */
+ VEC_safe_grow_cleared (tm_region_p, heap, bb_regions, last_basic_block);
+
VEC_safe_push (basic_block, heap, queue, bb);
- gcc_assert (!bb->aux); /* FIXME: Remove me. */
- bb->aux = region;
+ VEC_replace (tm_region_p, bb_regions, bb->index, region);
do
{
bb = VEC_pop (basic_block, queue);
- region = (struct tm_region *)bb->aux;
- bb->aux = NULL;
+ region = VEC_index (tm_region_p, bb_regions, bb->index);
+ VEC_replace (tm_region_p, bb_regions, bb->index, NULL);
/* Record exit and irrevocable blocks. */
region = tm_region_init_1 (region, bb);
{
bitmap_set_bit (visited_blocks, e->dest->index);
VEC_safe_push (basic_block, heap, queue, e->dest);
- gcc_assert (!e->dest->aux); /* FIXME: Remove me. */
/* If the current block started a new region, make sure that only
the entry block of the new region is associated with this region.
Other successors are still part of the old region. */
if (old_region != region && e->dest != region->entry_block)
- e->dest->aux = old_region;
+ VEC_replace (tm_region_p, bb_regions, e->dest->index, old_region);
else
- e->dest->aux = region;
+ VEC_replace (tm_region_p, bb_regions, e->dest->index, region);
}
}
while (!VEC_empty (basic_block, queue));
VEC_free (basic_block, heap, queue);
BITMAP_FREE (visited_blocks);
+ VEC_free (tm_region_p, heap, bb_regions);
}
/* The "gate" function for all transactional memory expansion and optimization
}
if (!gcall)
{
- tree lhs_addr, rhs_addr;
+ tree lhs_addr, rhs_addr, tmp;
if (load_p)
transaction_subcode_ior (region, GTMA_HAVE_LOAD);
/* ??? Figure out if there's any possible overlap between the LHS
and the RHS and if not, use MEMCPY. */
- lhs_addr = gimplify_addr (gsi, lhs);
+
+ if (load_p && is_gimple_reg (lhs))
+ {
+ tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
+ lhs_addr = build_fold_addr_expr (tmp);
+ }
+ else
+ {
+ tmp = NULL_TREE;
+ lhs_addr = gimplify_addr (gsi, lhs);
+ }
rhs_addr = gimplify_addr (gsi, rhs);
gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
3, lhs_addr, rhs_addr,
TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
gimple_set_location (gcall, loc);
gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
+
+ if (tmp)
+ {
+ gcall = gimple_build_assign (lhs, tmp);
+ gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
+ }
}
/* Now that we have the load/store in its instrumented form, add
}
node = cgraph_get_node (fn_decl);
+ /* All calls should have cgraph here. */
+ gcc_assert (node);
if (node->local.tm_may_enter_irr)
transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
{
case GIMPLE_ASSIGN:
/* Only memory reads/writes need to be instrumented. */
- if (gimple_assign_single_p (stmt))
+ if (gimple_assign_single_p (stmt)
+ && !gimple_clobber_p (stmt))
{
expand_assign_tm (region, &gsi);
continue;
return bbs;
}
+/* Set the IN_TRANSACTION for all gimple statements that appear in a
+ transaction. */
+
+/* Note: the flag actually lives on basic blocks (BB_IN_TRANSACTION);
+ every statement in a flagged block is considered inside a
+ transaction. */
+
+void
+compute_transaction_bits (void)
+{
+ struct tm_region *region;
+ VEC (basic_block, heap) *queue;
+ unsigned int i;
+ basic_block bb;
+
+ /* ?? Perhaps we need to abstract gate_tm_init further, because we
+ certainly don't need it to calculate CDI_DOMINATOR info. */
+ gate_tm_init ();
+
+ /* Start from a clean slate: clear the flag on every block first,
+ since this may be re-run after the CFG has changed. */
+ FOR_EACH_BB (bb)
+ bb->flags &= ~BB_IN_TRANSACTION;
+
+ /* Walk each TM region and mark the blocks it covers, stopping at
+ irrevocable blocks (stop_at_irr_p=true). */
+ for (region = all_tm_regions; region; region = region->next)
+ {
+ queue = get_tm_region_blocks (region->entry_block,
+ region->exit_blocks,
+ region->irr_blocks,
+ NULL,
+ /*stop_at_irr_p=*/true);
+ for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
+ bb->flags |= BB_IN_TRANSACTION;
+ VEC_free (basic_block, heap, queue);
+ }
+
+ /* Release the obstack presumably set up by gate_tm_init when
+ regions were found -- TODO confirm against gate_tm_init. */
+ if (all_tm_regions)
+ bitmap_obstack_release (&tm_obstack);
+}
+
/* Entry point to the MARK phase of TM expansion. Here we replace
transactional memory statements with calls to builtins, and function
calls with their transactional clones (if available). But we don't
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
{
+ bool do_next = true;
gimple stmt = gsi_stmt (gsi);
/* ??? TM_COMMIT (and any other tm builtin function) in a nested
make_tm_edge (stmt, bb, region);
bb = e->dest;
gsi = gsi_start_bb (bb);
+ do_next = false;
}
/* Delete any tail-call annotation that may have been added.
gimple_call_set_tail (stmt, false);
}
- gsi_next (&gsi);
+ if (do_next)
+ gsi_next (&gsi);
}
}
typedef VEC (cgraph_node_p, heap) *cgraph_node_queue;
/* Return the ipa data associated with NODE, allocating zeroed memory
- if necessary. */
+ if necessary. TRAVERSE_ALIASES is true if we must traverse aliases
+ and set *NODE accordingly. */
static struct tm_ipa_cg_data *
-get_cg_data (struct cgraph_node *node)
+get_cg_data (struct cgraph_node **node, bool traverse_aliases)
{
- struct tm_ipa_cg_data *d = (struct tm_ipa_cg_data *) node->aux;
+ struct tm_ipa_cg_data *d;
+
+ if (traverse_aliases && (*node)->alias)
+ *node = cgraph_get_node ((*node)->thunk.alias);
+
+ d = (struct tm_ipa_cg_data *) (*node)->aux;
if (d == NULL)
{
d = (struct tm_ipa_cg_data *)
obstack_alloc (&tm_obstack.obstack, sizeof (*d));
- node->aux = (void *) d;
+ (*node)->aux = (void *) d;
memset (d, 0, sizeof (*d));
}
node = cgraph_get_node (fndecl);
gcc_assert (node != NULL);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
pcallers = (for_clone ? &d->tm_callers_clone
: &d->tm_callers_normal);
ipa_tm_note_irrevocable (struct cgraph_node *node,
cgraph_node_queue *worklist_p)
{
- struct tm_ipa_cg_data *d = get_cg_data (node);
+ struct tm_ipa_cg_data *d = get_cg_data (&node, true);
struct cgraph_edge *e;
d->is_irrevocable = true;
for (e = node->callers; e ; e = e->next_caller)
{
basic_block bb;
+ struct cgraph_node *caller;
/* Don't examine recursive calls. */
if (e->caller == node)
if (is_tm_safe_or_pure (e->caller->decl))
continue;
- d = get_cg_data (e->caller);
+ caller = e->caller;
+ d = get_cg_data (&caller, true);
/* Check if the callee is in a transactional region. If so,
schedule the function for normal re-scan as well. */
&& bitmap_bit_p (d->transaction_blocks_normal, bb->index))
d->want_irr_scan_normal = true;
- maybe_push_queue (e->caller, worklist_p, &d->in_worklist);
+ maybe_push_queue (caller, worklist_p, &d->in_worklist);
}
}
if (TREE_CODE (fn) == ADDR_EXPR)
{
struct tm_ipa_cg_data *d;
+ struct cgraph_node *node;
fn = TREE_OPERAND (fn, 0);
if (is_tm_ending_fndecl (fn))
if (find_tm_replacement_function (fn))
break;
- d = get_cg_data (cgraph_get_node (fn));
- if (d->is_irrevocable)
+ node = cgraph_get_node(fn);
+ d = get_cg_data (&node, true);
+
+ /* Return true if irrevocable, but above all, believe
+ the user. */
+ if (d->is_irrevocable
+ && !is_tm_safe_or_pure (fn))
return true;
}
break;
assembly statement is not relevant to the transaction
is to wrap it in a __tm_waiver block. This is not
yet implemented, so we can't check for it. */
+ if (is_tm_safe (current_function_decl))
+ {
+ tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
+ SET_EXPR_LOCATION (t, gimple_location (stmt));
+ TREE_BLOCK (t) = gimple_block (stmt);
+ error ("%Kasm not allowed in %<transaction_safe%> function", t);
+ }
return true;
default:
{
struct tm_ipa_cg_data *d;
unsigned *pcallers;
+ struct cgraph_node *tnode;
if (is_tm_ending_fndecl (fndecl))
continue;
if (find_tm_replacement_function (fndecl))
continue;
- d = get_cg_data (cgraph_get_node (fndecl));
+ tnode = cgraph_get_node (fndecl);
+ d = get_cg_data (&tnode, true);
+
pcallers = (for_clone ? &d->tm_callers_clone
: &d->tm_callers_normal);
VEC (basic_block, heap) *queue;
bool ret = false;
+ /* Builtin operators (operator new, and such). */
+ if (DECL_STRUCT_FUNCTION (node->decl) == NULL
+ || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
+ return false;
+
current_function_decl = node->decl;
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
calculate_dominance_info (CDI_DOMINATORS);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
queue = VEC_alloc (basic_block, heap, 10);
new_irr = BITMAP_ALLOC (&tm_obstack);
static bool
ipa_tm_mayenterirr_function (struct cgraph_node *node)
{
- struct tm_ipa_cg_data *d = get_cg_data (node);
- tree decl = node->decl;
- unsigned flags = flags_from_decl_or_type (decl);
+ struct tm_ipa_cg_data *d;
+ tree decl;
+ unsigned flags;
+
+ d = get_cg_data (&node, true);
+ decl = node->decl;
+ flags = flags_from_decl_or_type (decl);
/* Handle some TM builtins. Ordinarily these aren't actually generated
at this point, but handling these functions when written in by the
tree new_decl;
};
-/* A subrontine of ipa_tm_create_version, called via
+/* A subroutine of ipa_tm_create_version, called via
cgraph_for_node_and_aliases. Create new tm clones for each of
the existing aliases. */
static bool
/* Based loosely on C++'s make_alias_for(). */
TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
- DECL_CONTEXT (new_decl) = NULL;
+ DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
+ DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
DECL_EXTERNAL (new_decl) = 0;
DECL_ARTIFICIAL (new_decl) = 1;
TREE_SYMBOL_REFERENCED (tm_name) = 1;
/* Perform the same remapping to the comdat group. */
- if (DECL_COMDAT (new_decl))
+ if (DECL_ONE_ONLY (new_decl))
DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
new_node = cgraph_same_body_alias (NULL, new_decl, info->new_decl);
new_node->tm_clone = true;
- get_cg_data (node)->clone = new_node;
+ new_node->local.externally_visible = info->old_node->local.externally_visible;
+ /* ?? Do not traverse aliases here. */
+ get_cg_data (&node, false)->clone = new_node;
record_tm_clone_pair (old_decl, new_decl);
- if (info->old_node->needed)
+ if (info->old_node->needed
+ || ipa_ref_list_first_refering (&info->old_node->ref_list))
ipa_tm_mark_needed_node (new_node);
return false;
}
TREE_SYMBOL_REFERENCED (tm_name) = 1;
/* Perform the same remapping to the comdat group. */
- if (DECL_COMDAT (new_decl))
+ if (DECL_ONE_ONLY (new_decl))
DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
new_node = cgraph_copy_node_for_versioning (old_node, new_decl, NULL, NULL);
+ new_node->local.externally_visible = old_node->local.externally_visible;
new_node->lowered = true;
new_node->tm_clone = 1;
- get_cg_data (old_node)->clone = new_node;
+ get_cg_data (&old_node, true)->clone = new_node;
if (cgraph_function_body_availability (old_node) >= AVAIL_OVERWRITABLE)
{
{
DECL_EXTERNAL (new_decl) = 0;
TREE_PUBLIC (new_decl) = 0;
+ DECL_WEAK (new_decl) = 0;
}
- tree_function_versioning (old_decl, new_decl, NULL, false, NULL,
+ tree_function_versioning (old_decl, new_decl, NULL, false, NULL, false,
NULL, NULL);
}
record_tm_clone_pair (old_decl, new_decl);
cgraph_call_function_insertion_hooks (new_node);
- if (old_node->needed)
+ if (old_node->needed
+ || ipa_ref_list_first_refering (&old_node->ref_list))
ipa_tm_mark_needed_node (new_node);
/* Do the same thing, but for any aliases of the original node. */
}
else
{
- struct tm_ipa_cg_data *d = get_cg_data (e->callee);
+ struct tm_ipa_cg_data *d;
+ struct cgraph_node *tnode = e->callee;
+
+ d = get_cg_data (&tnode, true);
new_node = d->clone;
/* As we've already skipped pure calls and appropriate builtins,
{
*need_ssa_rename_p |=
ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
- cgraph_remove_edge (e);
return;
}
static void
ipa_tm_transform_transaction (struct cgraph_node *node)
{
- struct tm_ipa_cg_data *d = get_cg_data (node);
+ struct tm_ipa_cg_data *d;
struct tm_region *region;
bool need_ssa_rename = false;
+ d = get_cg_data (&node, true);
+
current_function_decl = node->decl;
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
calculate_dominance_info (CDI_DOMINATORS);
static void
ipa_tm_transform_clone (struct cgraph_node *node)
{
- struct tm_ipa_cg_data *d = get_cg_data (node);
+ struct tm_ipa_cg_data *d;
bool need_ssa_rename;
+ d = get_cg_data (&node, true);
+
/* If this function makes no calls and has no irrevocable blocks,
then there's nothing to do. */
/* ??? Remove non-aborting top-level transactions. */
- if (!node->callees && !d->irrevocable_blocks_clone)
+ if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
return;
current_function_decl = d->clone->decl;
if (is_tm_callable (node->decl)
&& cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
}
tm_region_init (NULL);
if (all_tm_regions)
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
/* Scan for calls that are in each transaction. */
ipa_tm_scan_calls_transaction (d, &tm_callees);
- /* If we saw something that will make us go irrevocable, put it
- in the worklist so we can scan the function later
- (ipa_tm_scan_irr_function) and mark the irrevocable blocks. */
- if (node->local.tm_may_enter_irr)
- {
- maybe_push_queue (node, &irr_worklist, &d->in_worklist);
- d->want_irr_scan_normal = true;
- }
+ /* Put it in the worklist so we can scan the function
+ later (ipa_tm_scan_irr_function) and mark the
+ irrevocable blocks. */
+ maybe_push_queue (node, &irr_worklist, &d->in_worklist);
+ d->want_irr_scan_normal = true;
}
pop_cfun ();
{
node = VEC_index (cgraph_node_p, tm_callees, i);
a = cgraph_function_body_availability (node);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
- /* If we saw something that will make us go irrevocable, put it
- in the worklist so we can scan the function later
- (ipa_tm_scan_irr_function) and mark the irrevocable blocks. */
- if (node->local.tm_may_enter_irr)
- maybe_push_queue (node, &irr_worklist, &d->in_worklist);
+ /* Put it in the worklist so we can scan the function later
+ (ipa_tm_scan_irr_function) and mark the irrevocable
+ blocks. */
+ maybe_push_queue (node, &irr_worklist, &d->in_worklist);
/* Some callees cannot be arbitrarily cloned. These will always be
irrevocable. Mark these now, so that we need not scan them. */
if (node->alias)
{
node = cgraph_get_node (node->thunk.alias);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
continue;
}
}
node = VEC_index (cgraph_node_p, irr_worklist, i);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
d->in_worklist = false;
if (d->want_irr_scan_normal)
node = VEC_index (cgraph_node_p, tm_callees, i);
if (ipa_tm_mayenterirr_function (node))
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
gcc_assert (d->in_worklist == false);
maybe_push_queue (node, &irr_worklist, &d->in_worklist);
}
}
node = VEC_index (cgraph_node_p, irr_worklist, i);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
d->in_worklist = false;
node->local.tm_may_enter_irr = true;
if (!is_tm_safe_or_pure (caller->decl)
&& !caller->local.tm_may_enter_irr)
{
- d = get_cg_data (caller);
+ d = get_cg_data (&caller, true);
maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
}
}
if (ref->use == IPA_REF_ALIAS
&& !caller->local.tm_may_enter_irr)
{
- d = get_cg_data (caller);
+ /* ?? Do not traverse aliases here. */
+ d = get_cg_data (&caller, false);
maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
}
}
if (node->reachable && node->lowered
&& cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
if (is_tm_safe (node->decl))
ipa_tm_diagnose_tm_safe (node);
else if (d->all_tm_regions)
continue;
a = cgraph_function_body_availability (node);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
if (a <= AVAIL_NOT_AVAILABLE)
doit = is_tm_callable (node->decl);
node = VEC_index (cgraph_node_p, tm_callees, i);
if (node->analyzed)
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
if (d->clone)
ipa_tm_transform_clone (node);
}
if (node->reachable && node->lowered
&& cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
if (d->all_tm_regions)
ipa_tm_transform_transaction (node);
}