/* Passes for transactional memory support.
- Copyright (C) 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
+ Copyright (C) 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
This file is part of GCC.
}
if (is_global_var (x))
- return !TREE_READONLY (x);
+ {
+ if (DECL_THREAD_LOCAL_P (x))
+ goto thread_local;
+ if (DECL_HAS_VALUE_EXPR_P (x))
+ {
+ tree value = get_base_address (DECL_VALUE_EXPR (x));
+
+ if (value && DECL_P (value) && DECL_THREAD_LOCAL_P (value))
+ goto thread_local;
+ }
+ return !TREE_READONLY (x);
+ }
if (/* FIXME: This condition should actually go below in the
tm_log_add() call, however is_call_clobbered() depends on
aliasing info which is not available during
lower_sequence_tm/gimplification, leave the call
to needs_to_live_in_memory until we eliminate
lower_sequence_tm altogether. */
- needs_to_live_in_memory (x)
- /* X escapes. */
- || ptr_deref_may_alias_global_p (x))
+ needs_to_live_in_memory (x))
return true;
- else
- {
- /* For local memory that doesn't escape (aka thread private
- memory), we can either save the value at the beginning of
- the transaction and restore on restart, or call a tm
- function to dynamically save and restore on restart
- (ITM_L*). */
- if (stmt)
- tm_log_add (entry_block, orig, stmt);
- return false;
- }
+ thread_local:
+ /* For local memory that doesn't escape (aka thread private memory),
+ we can either save the value at the beginning of the transaction and
+ restore on restart, or call a tm function to dynamically save and
+ restore on restart (ITM_L*). */
+ if (stmt)
+ tm_log_add (entry_block, orig, stmt);
+ return false;
default:
return false;
}
if (!gcall)
{
- tree lhs_addr, rhs_addr;
+ tree lhs_addr, rhs_addr, tmp;
if (load_p)
transaction_subcode_ior (region, GTMA_HAVE_LOAD);
/* ??? Figure out if there's any possible overlap between the LHS
and the RHS and if not, use MEMCPY. */
- lhs_addr = gimplify_addr (gsi, lhs);
+
+ if (load_p && is_gimple_reg (lhs))
+ {
+ tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
+ lhs_addr = build_fold_addr_expr (tmp);
+ }
+ else
+ {
+ tmp = NULL_TREE;
+ lhs_addr = gimplify_addr (gsi, lhs);
+ }
rhs_addr = gimplify_addr (gsi, rhs);
gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
3, lhs_addr, rhs_addr,
TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
gimple_set_location (gcall, loc);
gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
+
+ if (tmp)
+ {
+ gcall = gimple_build_assign (lhs, tmp);
+ gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
+ }
}
/* Now that we have the load/store in its instrumented form, add
typedef VEC (cgraph_node_p, heap) *cgraph_node_queue;
/* Return the ipa data associated with NODE, allocating zeroed memory
- if necessary. */
+ if necessary. TRAVERSE_ALIASES is true if we must traverse aliases
+ and set *NODE accordingly. */
static struct tm_ipa_cg_data *
-get_cg_data (struct cgraph_node *node)
+get_cg_data (struct cgraph_node **node, bool traverse_aliases)
{
- struct tm_ipa_cg_data *d = (struct tm_ipa_cg_data *) node->aux;
+ struct tm_ipa_cg_data *d;
+
+ if (traverse_aliases && (*node)->alias)
+ *node = cgraph_get_node ((*node)->thunk.alias);
+
+ d = (struct tm_ipa_cg_data *) (*node)->aux;
if (d == NULL)
{
d = (struct tm_ipa_cg_data *)
obstack_alloc (&tm_obstack.obstack, sizeof (*d));
- node->aux = (void *) d;
+ (*node)->aux = (void *) d;
memset (d, 0, sizeof (*d));
}
node = cgraph_get_node (fndecl);
gcc_assert (node != NULL);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
pcallers = (for_clone ? &d->tm_callers_clone
: &d->tm_callers_normal);
ipa_tm_note_irrevocable (struct cgraph_node *node,
cgraph_node_queue *worklist_p)
{
- struct tm_ipa_cg_data *d = get_cg_data (node);
+ struct tm_ipa_cg_data *d = get_cg_data (&node, true);
struct cgraph_edge *e;
d->is_irrevocable = true;
for (e = node->callers; e ; e = e->next_caller)
{
basic_block bb;
+ struct cgraph_node *caller;
/* Don't examine recursive calls. */
if (e->caller == node)
if (is_tm_safe_or_pure (e->caller->decl))
continue;
- d = get_cg_data (e->caller);
+ caller = e->caller;
+ d = get_cg_data (&caller, true);
/* Check if the callee is in a transactional region. If so,
schedule the function for normal re-scan as well. */
&& bitmap_bit_p (d->transaction_blocks_normal, bb->index))
d->want_irr_scan_normal = true;
- maybe_push_queue (e->caller, worklist_p, &d->in_worklist);
+ maybe_push_queue (caller, worklist_p, &d->in_worklist);
}
}
if (TREE_CODE (fn) == ADDR_EXPR)
{
struct tm_ipa_cg_data *d;
+ struct cgraph_node *node;
fn = TREE_OPERAND (fn, 0);
if (is_tm_ending_fndecl (fn))
if (find_tm_replacement_function (fn))
break;
- d = get_cg_data (cgraph_get_node (fn));
+ node = cgraph_get_node(fn);
+ d = get_cg_data (&node, true);
/* Return true if irrevocable, but above all, believe
the user. */
{
struct tm_ipa_cg_data *d;
unsigned *pcallers;
+ struct cgraph_node *tnode;
if (is_tm_ending_fndecl (fndecl))
continue;
if (find_tm_replacement_function (fndecl))
continue;
- d = get_cg_data (cgraph_get_node (fndecl));
+ tnode = cgraph_get_node (fndecl);
+ d = get_cg_data (&tnode, true);
+
pcallers = (for_clone ? &d->tm_callers_clone
: &d->tm_callers_normal);
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
calculate_dominance_info (CDI_DOMINATORS);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
queue = VEC_alloc (basic_block, heap, 10);
new_irr = BITMAP_ALLOC (&tm_obstack);
static bool
ipa_tm_mayenterirr_function (struct cgraph_node *node)
{
- struct tm_ipa_cg_data *d = get_cg_data (node);
- tree decl = node->decl;
- unsigned flags = flags_from_decl_or_type (decl);
+ struct tm_ipa_cg_data *d;
+ tree decl;
+ unsigned flags;
+
+ d = get_cg_data (&node, true);
+ decl = node->decl;
+ flags = flags_from_decl_or_type (decl);
/* Handle some TM builtins. Ordinarily these aren't actually generated
at this point, but handling these functions when written in by the
tree new_decl;
};
-/* A subrontine of ipa_tm_create_version, called via
+/* A subroutine of ipa_tm_create_version, called via
cgraph_for_node_and_aliases. Create new tm clones for each of
the existing aliases. */
static bool
new_node = cgraph_same_body_alias (NULL, new_decl, info->new_decl);
new_node->tm_clone = true;
- get_cg_data (node)->clone = new_node;
+ new_node->local.externally_visible = info->old_node->local.externally_visible;
+ /* ??? Do not traverse aliases here.  */
+ get_cg_data (&node, false)->clone = new_node;
record_tm_clone_pair (old_decl, new_decl);
DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
new_node = cgraph_copy_node_for_versioning (old_node, new_decl, NULL, NULL);
+ new_node->local.externally_visible = old_node->local.externally_visible;
new_node->lowered = true;
new_node->tm_clone = 1;
- get_cg_data (old_node)->clone = new_node;
+ get_cg_data (&old_node, true)->clone = new_node;
if (cgraph_function_body_availability (old_node) >= AVAIL_OVERWRITABLE)
{
DECL_WEAK (new_decl) = 0;
}
- tree_function_versioning (old_decl, new_decl, NULL, false, NULL,
+ tree_function_versioning (old_decl, new_decl, NULL, false, NULL, false,
NULL, NULL);
}
}
else
{
- struct tm_ipa_cg_data *d = get_cg_data (e->callee);
+ struct tm_ipa_cg_data *d;
+ struct cgraph_node *tnode = e->callee;
+
+ d = get_cg_data (&tnode, true);
new_node = d->clone;
/* As we've already skipped pure calls and appropriate builtins,
static void
ipa_tm_transform_transaction (struct cgraph_node *node)
{
- struct tm_ipa_cg_data *d = get_cg_data (node);
+ struct tm_ipa_cg_data *d;
struct tm_region *region;
bool need_ssa_rename = false;
+ d = get_cg_data (&node, true);
+
current_function_decl = node->decl;
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
calculate_dominance_info (CDI_DOMINATORS);
static void
ipa_tm_transform_clone (struct cgraph_node *node)
{
- struct tm_ipa_cg_data *d = get_cg_data (node);
+ struct tm_ipa_cg_data *d;
bool need_ssa_rename;
+ d = get_cg_data (&node, true);
+
/* If this function makes no calls and has no irrevocable blocks,
then there's nothing to do. */
/* ??? Remove non-aborting top-level transactions. */
if (is_tm_callable (node->decl)
&& cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
}
tm_region_init (NULL);
if (all_tm_regions)
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
/* Scan for calls that are in each transaction. */
ipa_tm_scan_calls_transaction (d, &tm_callees);
{
node = VEC_index (cgraph_node_p, tm_callees, i);
a = cgraph_function_body_availability (node);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
/* Put it in the worklist so we can scan the function later
(ipa_tm_scan_irr_function) and mark the irrevocable
if (node->alias)
{
node = cgraph_get_node (node->thunk.alias);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
continue;
}
}
node = VEC_index (cgraph_node_p, irr_worklist, i);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
d->in_worklist = false;
if (d->want_irr_scan_normal)
node = VEC_index (cgraph_node_p, tm_callees, i);
if (ipa_tm_mayenterirr_function (node))
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
gcc_assert (d->in_worklist == false);
maybe_push_queue (node, &irr_worklist, &d->in_worklist);
}
}
node = VEC_index (cgraph_node_p, irr_worklist, i);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
d->in_worklist = false;
node->local.tm_may_enter_irr = true;
if (!is_tm_safe_or_pure (caller->decl)
&& !caller->local.tm_may_enter_irr)
{
- d = get_cg_data (caller);
+ d = get_cg_data (&caller, true);
maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
}
}
if (ref->use == IPA_REF_ALIAS
&& !caller->local.tm_may_enter_irr)
{
- d = get_cg_data (caller);
+ /* ??? Do not traverse aliases here.  */
+ d = get_cg_data (&caller, false);
maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
}
}
if (node->reachable && node->lowered
&& cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
if (is_tm_safe (node->decl))
ipa_tm_diagnose_tm_safe (node);
else if (d->all_tm_regions)
continue;
a = cgraph_function_body_availability (node);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
if (a <= AVAIL_NOT_AVAILABLE)
doit = is_tm_callable (node->decl);
node = VEC_index (cgraph_node_p, tm_callees, i);
if (node->analyzed)
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
if (d->clone)
ipa_tm_transform_clone (node);
}
if (node->reachable && node->lowered
&& cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
if (d->all_tm_regions)
ipa_tm_transform_transaction (node);
}