{
*msg = "structure field size not fixed";
return true;
- }
+ }
+ if (!host_integerp (bit_position (fld), 0))
+ {
+ *msg = "structure field size too big";
+ return true;
+ }
if (AGGREGATE_TYPE_P (ft)
&& int_bit_position (fld) % BITS_PER_UNIT != 0)
{
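
A hypothetical illustration of what the new bit_position check rejects,
assuming a host where HOST_WIDE_INT is only 32 bits wide:

    /* Sketch: the bit position of `tail' is 8 * 0x20000000 == 2^32,
       which does not fit in a signed 32-bit HOST_WIDE_INT, so the new
       check disqualifies the whole structure rather than letting later
       code compute with a truncated field offset.  */
    struct huge
    {
      char pad[0x20000000];   /* 2^29 bytes == 2^32 bits of padding.  */
      int tail;
    };
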
disqualify_base_of_expr (tree t, const char *reason)
{
t = get_base_address (t);
- if (sra_mode == SRA_MODE_EARLY_IPA
+ if (t
+ && sra_mode == SRA_MODE_EARLY_IPA
&& TREE_CODE (t) == MEM_REF)
t = get_ssa_base_param (TREE_OPERAND (t, 0));
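
get_base_address can return NULL_TREE when the base is not one of the
forms it recognizes, so t must now be tested before TREE_CODE (t) is
inspected.  A minimal sketch of the guarded pattern, with the tail of
the function reconstructed from context:

    /* Sketch (function tail assumed): never look at TREE_CODE of the
       result of get_base_address before checking it for NULL_TREE.  */
    t = get_base_address (t);
    if (t
        && sra_mode == SRA_MODE_EARLY_IPA
        && TREE_CODE (t) == MEM_REF)
      t = get_ssa_base_param (TREE_OPERAND (t, 0));

    if (t && DECL_P (t))
      disqualify_candidate (t, reason);
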
we can extract more optimistic alignment information
by looking at the access mode. That would constrain the
alignment of base + base_offset which we would need to
- adjust according to offset.
- ??? But it is not at all clear that prev_base is an access
- that was in the IL that way, so be conservative for now. */
+ adjust according to offset. */
align = get_pointer_alignment_1 (base, &misalign);
+ if (misalign == 0
+ && (TREE_CODE (prev_base) == MEM_REF
+ || TREE_CODE (prev_base) == TARGET_MEM_REF))
+ align = MAX (align, TYPE_ALIGN (TREE_TYPE (prev_base)));
misalign += (double_int_sext (tree_to_double_int (off),
TYPE_PRECISION (TREE_TYPE (off))).low
* BITS_PER_UNIT);
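
The conservative ??? note is dropped: when misalign is zero and the
original base is a (TARGET_)MEM_REF, the type of that access is now
trusted as an alignment lower bound.  A worked example with assumed
values:

    /* Assumed illustration: nothing is known about `p' itself, so
       get_pointer_alignment_1 yields align == BITS_PER_UNIT and
       misalign == 0; but the MEM_REF of type `int' below implies
       TYPE_ALIGN (int), i.e. 32 on a typical target, and the MAX
       raises align to that before `off' is folded into misalign.  */
    int
    load_int (void *p)
    {
      return *(int *) p;   /* a MEM_REF of type `int' in GIMPLE */
    }
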
&& (TREE_CODE (root->type) != INTEGER_TYPE
|| TYPE_PRECISION (root->type) != root->size)
/* But leave bitfield accesses alone. */
- && (root->offset % BITS_PER_UNIT) == 0)
+ && (TREE_CODE (root->expr) != COMPONENT_REF
+ || !DECL_BIT_FIELD (TREE_OPERAND (root->expr, 1))))
{
tree rt = root->type;
+ gcc_assert ((root->offset % BITS_PER_UNIT) == 0
+ && (root->size % BITS_PER_UNIT) == 0);
root->type = build_nonstandard_integer_type (root->size,
TYPE_UNSIGNED (rt));
root->expr = build_ref_for_offset (UNKNOWN_LOCATION,
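
The byte-alignment test was only a proxy: a bit-field access can be
perfectly byte-aligned and still must not be rewritten to a
nonstandard integer type, so the code now checks DECL_BIT_FIELD on the
COMPONENT_REF directly and merely asserts that offset and size are
byte-aligned.  A hypothetical example of a field the old test let
through:

    /* Hypothetical example: `b' sits at bit offset 8 and is 8 bits
       wide, so root->offset % BITS_PER_UNIT == 0 held, yet
       DECL_BIT_FIELD is set on its FIELD_DECL and the access must be
       left alone.  */
    struct s
    {
      unsigned int a : 8;
      unsigned int b : 8;
    };
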
??? This should move to fold_stmt which we simply should
call after building a VIEW_CONVERT_EXPR here. */
if (AGGREGATE_TYPE_P (TREE_TYPE (lhs))
- && !contains_bitfld_comp_ref_p (lhs)
- && !access_has_children_p (lacc))
+ && !contains_bitfld_comp_ref_p (lhs))
{
lhs = build_ref_for_model (loc, lhs, 0, racc, gsi, false);
gimple_assign_set_lhs (*stmt, lhs);
}
else if (AGGREGATE_TYPE_P (TREE_TYPE (rhs))
- && !contains_vce_or_bfcref_p (rhs)
- && !access_has_children_p (racc))
+ && !contains_vce_or_bfcref_p (rhs))
rhs = build_ref_for_model (loc, rhs, 0, lacc, gsi, false);
if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
}
else
{
- if (access_has_children_p (lacc) && access_has_children_p (racc))
+ if (access_has_children_p (lacc)
+ && access_has_children_p (racc)
+ /* When an access represents an unscalarizable region, it usually
+ represents accesses with variable offset and thus must not be used
+ to generate new memory accesses. */
+ && !lacc->grp_unscalarizable_region
+ && !racc->grp_unscalarizable_region)
{
gimple_stmt_iterator orig_gsi = *gsi;
enum unscalarized_data_handling refreshed;
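
Two related adjustments: build_ref_for_model is now used on the
aggregate side even when its access has children, and the path that
pairs up child accesses of the two sides refuses unscalarizable
regions, whose offsets are not compile-time constants.  A hypothetical
sketch of the latter situation:

    /* Hypothetical sketch: the variable index gives the store to `b'
       a non-constant offset, marking part of `b's access tree as an
       unscalarizable region; the aggregate copy below must then not
       be expanded into per-field loads and stores based on that
       tree.  */
    struct pair { int x; int y; };
    struct wrap { struct pair p[4]; };

    int
    foo (int i, int v)
    {
      struct wrap a, b = { { { 0, 0 } } };

      b.p[i].x = v;   /* non-constant offset into `b' */
      a = b;          /* aggregate copy seen by sra_modify_assign */
      return a.p[0].x;
    }
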
if (dump_file)
fprintf (dump_file, "Adjusting call (%i -> %i) %s -> %s\n",
cs->caller->uid, cs->callee->uid,
- cgraph_node_name (cs->caller),
- cgraph_node_name (cs->callee));
+ xstrdup (cgraph_node_name (cs->caller)),
+ xstrdup (cgraph_node_name (cs->callee)));
ipa_modify_call_arguments (cs, cs->call_stmt, adjustments);
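
cgraph_node_name goes through the front end's decl_printable_name
hook, which may format into a buffer that a later call reuses; with
two calls as arguments to one fprintf, both %s conversions can end up
printing the same name.  Duplicating the strings with xstrdup makes
the arguments independent.  A standalone illustration of the failure
mode, with name_of standing in for such a buffer-reusing function:

    #include <stdio.h>

    /* Hypothetical stand-in for a function that formats into a shared
       static buffer.  */
    static const char *
    name_of (int id)
    {
      static char buf[32];
      snprintf (buf, sizeof buf, "node_%d", id);
      return buf;
    }

    int
    main (void)
    {
      /* Both calls finish before printf reads its arguments, so both
         %s conversions see the same buffer and one of the two names
         is printed twice.  */
      printf ("%s -> %s\n", name_of (1), name_of (2));
      return 0;
    }
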
new_node = cgraph_function_versioning (node, redirect_callers, NULL, NULL,
false, NULL, NULL, "isra");
+ VEC_free (cgraph_edge_p, heap, redirect_callers);
+
current_function_decl = new_node->decl;
push_cfun (DECL_STRUCT_FUNCTION (new_node->decl));
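
cgraph_function_versioning only reads the redirect_callers vector, so
the heap-allocated VEC must be freed by its creator once the clone
exists; the added VEC_free plugs that leak.  A sketch of the lifetime
pattern under the 4.7-era VEC API, with the vector built by hand from
node->callers:

    /* Sketch: a `heap' VEC is released with VEC_free by whoever
       allocated it; the versioning machinery does not take
       ownership.  */
    VEC (cgraph_edge_p, heap) *redirect_callers = NULL;
    struct cgraph_edge *cs;

    for (cs = node->callers; cs; cs = cs->next_caller)
      VEC_safe_push (cgraph_edge_p, heap, redirect_callers, cs);

    new_node = cgraph_function_versioning (node, redirect_callers,
                                           NULL, NULL, false, NULL,
                                           NULL, "isra");
    VEC_free (cgraph_edge_p, heap, redirect_callers);
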