return acc && acc->first_child;
}
+/* Return true iff ACC is (partly) covered by at least one replacement. */
+
+static bool
+access_has_replacements_p (struct access *acc)
+{
+ struct access *child;
+ if (acc->grp_to_be_replaced)
+ return true;
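+ /* Otherwise see whether any of the children is (partly) replaced. */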
+ for (child = acc->first_child; child; child = child->next_sibling)
+ if (access_has_replacements_p (child))
+ return true;
+ return false;
+}
+
/* Return a vector of pointers to accesses for the variable given in BASE or
NULL if there is none. */
we can extract more optimistic alignment information
by looking at the access mode. That would constrain the
alignment of base + base_offset which we would need to
- adjust according to offset.
- ??? But it is not at all clear that prev_base is an access
- that was in the IL that way, so be conservative for now. */
+ adjust according to offset. */
align = get_pointer_alignment_1 (base, &misalign);
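+ /* If there is no known misalignment, a (TARGET_)MEM_REF on the base
+ guarantees at least the alignment of its access type. */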
+ if (misalign == 0
+ && (TREE_CODE (prev_base) == MEM_REF
+ || TREE_CODE (prev_base) == TARGET_MEM_REF))
+ align = MAX (align, TYPE_ALIGN (TREE_TYPE (prev_base)));
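+ /* Adjust the misalignment by the constant byte offset OFF,
+ sign-extended and converted to bits. */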
misalign += (double_int_sext (tree_to_double_int (off),
TYPE_PRECISION (TREE_TYPE (off))).low
* BITS_PER_UNIT);
&& (root->grp_scalar_write || root->grp_assignment_write))))
{
bool new_integer_type;
- if (TREE_CODE (root->type) == ENUMERAL_TYPE)
+ /* Always create access replacements that cover the whole access.
+ For integral types this means the precision has to match.
+ Avoid assumptions based on the integral type kind, too. */
+ if (INTEGRAL_TYPE_P (root->type)
+ && (TREE_CODE (root->type) != INTEGER_TYPE
+ || TYPE_PRECISION (root->type) != root->size)
+ /* But leave bitfield accesses alone. */
+ && (TREE_CODE (root->expr) != COMPONENT_REF
+ || !DECL_BIT_FIELD (TREE_OPERAND (root->expr, 1))))
{
tree rt = root->type;
- root->type = build_nonstandard_integer_type (TYPE_PRECISION (rt),
+ gcc_assert ((root->offset % BITS_PER_UNIT) == 0
+ && (root->size % BITS_PER_UNIT) == 0);
+ root->type = build_nonstandard_integer_type (root->size,
TYPE_UNSIGNED (rt));
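+ /* Re-build the access expression so that it matches the new
+ integer type. */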
+ root->expr = build_ref_for_offset (UNKNOWN_LOCATION,
+ root->base, root->offset,
+ root->type, NULL, false);
new_integer_type = true;
}
else
sra_stats.exprs++;
}
else if (racc
- && !access_has_children_p (racc)
- && !racc->grp_to_be_replaced
&& !racc->grp_unscalarized_data
- && TREE_CODE (lhs) == SSA_NAME)
+ && TREE_CODE (lhs) == SSA_NAME
+ && !access_has_replacements_p (racc))
{
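+ /* The RHS has neither unscalarized data nor any replacements, so
+ the load can only yield an uninitialized value; represent it by
+ a new default-definition SSA_NAME. */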
rhs = get_repl_default_def_ssa_name (racc);
modify_this_stmt = true;
}
else
{
- if (access_has_children_p (lacc) && access_has_children_p (racc))
+ if (access_has_children_p (lacc)
+ && access_has_children_p (racc)
+ /* When an access represents an unscalarizable region, it usually
+ covers accesses with a variable offset and thus must not be used
+ to generate new memory accesses. */
+ && !lacc->grp_unscalarizable_region
+ && !racc->grp_unscalarizable_region)
{
gimple_stmt_iterator orig_gsi = *gsi;
enum unscalarized_data_handling refreshed;
sra_stats.deleted++;
return SRA_AM_REMOVED;
}
+ /* Restore the aggregate RHS from its components so the
+ prevailing aggregate copy does the right thing. */
if (access_has_children_p (racc))
- generate_subtree_copies (racc->first_child, lhs, racc->offset,
- 0, 0, gsi, false, true, loc);
+ generate_subtree_copies (racc->first_child, racc->base, 0, 0, 0,
+ gsi, false, false, loc);
+ /* Re-load the components of the aggregate copy destination, but
+ load them from the RHS aggregate to expose more optimization
+ opportunities. */
if (access_has_children_p (lacc))
generate_subtree_copies (lacc->first_child, rhs, lacc->offset,
0, 0, gsi, true, true, loc);
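+ /* cgraph_node_name may return a pointer into a shared buffer, so
+ copy the strings to have both names printed correctly in one
+ call. */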
if (dump_file)
fprintf (dump_file, "Adjusting call (%i -> %i) %s -> %s\n",
cs->caller->uid, cs->callee->uid,
- cgraph_node_name (cs->caller),
- cgraph_node_name (cs->callee));
+ xstrdup (cgraph_node_name (cs->caller)),
+ xstrdup (cgraph_node_name (cs->callee)));
ipa_modify_call_arguments (cs, cs->call_stmt, adjustments);
new_node = cgraph_function_versioning (node, redirect_callers, NULL, NULL,
false, NULL, NULL, "isra");
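+ /* The vector of callers to redirect is no longer needed; release it. */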
+ VEC_free (cgraph_edge_p, heap, redirect_callers);
+
current_function_decl = new_node->decl;
push_cfun (DECL_STRUCT_FUNCTION (new_node->decl));