/* Scalar Replacement of Aggregates (SRA) converts some structure
references into scalar references, exposing them to the scalar
optimizers.
- Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
+ Copyright (C) 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
Contributed by Martin Jambor <mjambor@suse.cz>
This file is part of GCC.
#include "dbgcnt.h"
#include "tree-inline.h"
#include "gimple-pretty-print.h"
+#include "ipa-inline.h"
/* Enumeration of all aggregate reductions we can do. */
enum sra_mode { SRA_MODE_EARLY_IPA, /* early call regularization */
/* Is this particular access a write access? */
unsigned write : 1;
- /* Is this access an artificial one created to scalarize some record
- entirely? */
- unsigned total_scalarization : 1;
-
/* Is this access an access to a non-addressable field? */
unsigned non_addressable : 1;
statement? This flag is propagated down the access tree. */
unsigned grp_assignment_write : 1;
+ /* Does this group contain a read access through a scalar type? This flag is
+ not propagated in the access tree in any direction. */
+ unsigned grp_scalar_read : 1;
+
+ /* Does this group contain a write access through a scalar type? This flag
+ is not propagated in the access tree in any direction. */
+ unsigned grp_scalar_write : 1;
+
+ /* Is this access an artificial one created to scalarize some record
+ entirely? */
+ unsigned grp_total_scalarization : 1;
+
/* Other passes of the analysis use this bit to make function
analyze_access_subtree create scalar replacements for this group if
possible. */
fprintf (f, ", type = ");
print_generic_expr (f, access->type, 0);
if (grp)
- fprintf (f, ", grp_write = %d, total_scalarization = %d, "
- "grp_read = %d, grp_hint = %d, grp_assignment_read = %d,"
- "grp_assignment_write = %d, grp_covered = %d, "
+ fprintf (f, ", grp_read = %d, grp_write = %d, grp_assignment_read = %d, "
+ "grp_assignment_write = %d, grp_scalar_read = %d, "
+ "grp_scalar_write = %d, grp_total_scalarization = %d, "
+ "grp_hint = %d, grp_covered = %d, "
"grp_unscalarizable_region = %d, grp_unscalarized_data = %d, "
"grp_partial_lhs = %d, grp_to_be_replaced = %d, "
"grp_maybe_modified = %d, "
"grp_not_necessarilly_dereferenced = %d\n",
- access->grp_write, access->total_scalarization,
- access->grp_read, access->grp_hint, access->grp_assignment_read,
- access->grp_assignment_write, access->grp_covered,
+ access->grp_read, access->grp_write, access->grp_assignment_read,
+ access->grp_assignment_write, access->grp_scalar_read,
+ access->grp_scalar_write, access->grp_total_scalarization,
+ access->grp_hint, access->grp_covered,
access->grp_unscalarizable_region, access->grp_unscalarized_data,
access->grp_partial_lhs, access->grp_to_be_replaced,
access->grp_maybe_modified,
access->grp_not_necessarilly_dereferenced);
else
- fprintf (f, ", write = %d, total_scalarization = %d, "
+ fprintf (f, ", write = %d, grp_total_scalarization = %d, "
"grp_partial_lhs = %d\n",
- access->write, access->total_scalarization,
+ access->write, access->grp_total_scalarization,
access->grp_partial_lhs);
}
return acc && acc->first_child;
}
+/* Return true iff ACC is (partly) covered by at least one replacement. */
+
+static bool
+access_has_replacements_p (struct access *acc)
+{
+ struct access *child;
+ if (acc->grp_to_be_replaced)
+ return true;
+ for (child = acc->first_child; child; child = child->next_sibling)
+ if (access_has_replacements_p (child))
+ return true;
+ return false;
+}
+
/* Return a vector of pointers to accesses for the variable given in BASE or
NULL if there is none. */
scalarization. */
static bool
-type_internals_preclude_sra_p (tree type)
+type_internals_preclude_sra_p (tree type, const char **msg)
{
tree fld;
tree et;
{
tree ft = TREE_TYPE (fld);
- if (TREE_THIS_VOLATILE (fld)
- || !DECL_FIELD_OFFSET (fld) || !DECL_SIZE (fld)
- || !host_integerp (DECL_FIELD_OFFSET (fld), 1)
- || !host_integerp (DECL_SIZE (fld), 1)
- || (AGGREGATE_TYPE_P (ft)
- && int_bit_position (fld) % BITS_PER_UNIT != 0))
- return true;
-
+ if (TREE_THIS_VOLATILE (fld))
+ {
+ *msg = "volatile structure field";
+ return true;
+ }
+ if (!DECL_FIELD_OFFSET (fld))
+ {
+ *msg = "no structure field offset";
+ return true;
+ }
+ if (!DECL_SIZE (fld))
+ {
+ *msg = "zero structure field size";
+ return true;
+ }
+ if (!host_integerp (DECL_FIELD_OFFSET (fld), 1))
+ {
+ *msg = "structure field offset not fixed";
+ return true;
+ }
+ if (!host_integerp (DECL_SIZE (fld), 1))
+ {
+ *msg = "structure field size not fixed";
+ return true;
+ }
+ if (!host_integerp (bit_position (fld), 0))
+ {
+ *msg = "structure field size too big";
+ return true;
+ }
if (AGGREGATE_TYPE_P (ft)
- && type_internals_preclude_sra_p (ft))
+ && int_bit_position (fld) % BITS_PER_UNIT != 0)
+ {
+ *msg = "structure field is bit field";
+ return true;
+ }
+
+ if (AGGREGATE_TYPE_P (ft) && type_internals_preclude_sra_p (ft, msg))
return true;
}
case ARRAY_TYPE:
et = TREE_TYPE (type);
- if (AGGREGATE_TYPE_P (et))
- return type_internals_preclude_sra_p (et);
- else
- return false;
+ if (TYPE_VOLATILE (et))
+ {
+ *msg = "element type is volatile";
+ return true;
+ }
+
+ if (AGGREGATE_TYPE_P (et) && type_internals_preclude_sra_p (et, msg))
+ return true;
+
+ return false;
default:
return false;
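
/* Editorial illustration, not part of the patch: a minimal sketch of
   types the predicate above rejects, matched to the new dump messages.
   All names are made up for the example.  */

struct has_volatile_field
{
  volatile int v;               /* -> "volatile structure field" */
};

struct elt { volatile int v; };

struct has_volatile_in_array
{
  struct elt a[4];              /* recursion through the array's element
                                   type reaches the volatile field */
};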
access = create_access_1 (base, pos, size);
access->expr = nref;
access->type = ft;
- access->total_scalarization = 1;
+ access->grp_total_scalarization = 1;
/* Accesses for intraprocedural SRA can have their stmt NULL. */
}
else
}
}
+/* Create total_scalarization accesses for all scalar type fields in VAR and
+ for VAR as a whole. VAR must be of a RECORD_TYPE conforming to
+ type_consists_of_records_p. */
+
+static void
+completely_scalarize_var (tree var)
+{
+ HOST_WIDE_INT size = tree_low_cst (DECL_SIZE (var), 1);
+ struct access *access;
+
+ access = create_access_1 (var, 0, size);
+ access->expr = var;
+ access->type = TREE_TYPE (var);
+ access->grp_total_scalarization = 1;
+
+ completely_scalarize_record (var, var, 0, var);
+}
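
/* Editorial illustration, not part of the patch: the kind of variable
   completely_scalarize_var handles.  'p' consists only of scalar
   fields, so one access is created for the whole of 'p' plus one per
   field; the replacement names in the comment are hypothetical.  */

struct point { int x; int y; };

int
centroid_sum (int a, int b)
{
  struct point p = { a, b };
  /* After SRA, p.x and p.y live in scalar replacements (say p$x and
     p$y) and the aggregate 'p' itself disappears.  */
  return p.x + p.y;
}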
/* Search the given tree for a declaration by skipping handled components and
exclude it from the candidates. */
disqualify_base_of_expr (tree t, const char *reason)
{
t = get_base_address (t);
- if (sra_mode == SRA_MODE_EARLY_IPA
+ if (t
+ && sra_mode == SRA_MODE_EARLY_IPA
&& TREE_CODE (t) == MEM_REF)
t = get_ssa_base_param (TREE_OPERAND (t, 0));
return false;
}
+/* Return true if EXP is a memory reference less aligned than ALIGN. This is
+ invoked only on strict-alignment targets. */
+
+static bool
+tree_non_aligned_mem_p (tree exp, unsigned int align)
+{
+ unsigned int exp_align;
+
+ if (TREE_CODE (exp) == VIEW_CONVERT_EXPR)
+ exp = TREE_OPERAND (exp, 0);
+
+ if (TREE_CODE (exp) == SSA_NAME || is_gimple_min_invariant (exp))
+ return false;
+
+ /* get_object_alignment will fall back to BITS_PER_UNIT if it cannot
+ compute an explicit alignment. Pretend that dereferenced pointers
+ are always aligned on strict-alignment targets. */
+ if (TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == TARGET_MEM_REF)
+ exp_align = get_object_or_type_alignment (exp);
+ else
+ exp_align = get_object_alignment (exp);
+
+ if (exp_align < align)
+ return true;
+
+ return false;
+}
+
+/* Return true if EXP is a memory reference less aligned than what the access
+ ACC would require. This is invoked only on strict-alignment targets. */
+
+static bool
+tree_non_aligned_mem_for_access_p (tree exp, struct access *acc)
+{
+ unsigned int acc_align;
+
+ /* The alignment of the access is that of its expression. However, it may
+ have been artificially increased, e.g. by a local alignment promotion,
+ so we cap it to the alignment of the type of the base, on the grounds
+ that valid sub-accesses cannot be more aligned than that. */
+ acc_align = get_object_alignment (acc->expr);
+ if (acc->base && acc_align > TYPE_ALIGN (TREE_TYPE (acc->base)))
+ acc_align = TYPE_ALIGN (TREE_TYPE (acc->base));
+
+ return tree_non_aligned_mem_p (exp, acc_align);
+}
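
/* Editorial illustration, not part of the patch: the kind of reference
   these predicates flag on STRICT_ALIGNMENT targets.  The packed field
   below is only byte-aligned, less than its scalar type requires, so an
   access to it must not seed an ordinary scalar replacement.  */

struct __attribute__ ((packed)) wire_header
{
  char tag;
  int value;                    /* 1-byte aligned inside the struct */
};

int
read_value (struct wire_header *h)
{
  return h->value;              /* needs an unaligned load sequence */
}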
+
/* Scan expressions occurring in STMT, create access structures for all accesses
to candidates for scalarization and remove those candidates which occur in
statements or expressions that prevent them from being split apart. Return
tree lhs, rhs;
struct access *lacc, *racc;
- if (!gimple_assign_single_p (stmt))
+ if (!gimple_assign_single_p (stmt)
+ /* Scope clobbers don't influence scalarization. */
+ || gimple_clobber_p (stmt))
return false;
lhs = gimple_assign_lhs (stmt);
lacc = build_access_from_expr_1 (lhs, stmt, true);
if (lacc)
- lacc->grp_assignment_write = 1;
+ {
+ lacc->grp_assignment_write = 1;
+ if (STRICT_ALIGNMENT && tree_non_aligned_mem_for_access_p (rhs, lacc))
+ lacc->grp_unscalarizable_region = 1;
+ }
if (racc)
{
if (should_scalarize_away_bitmap && !gimple_has_volatile_ops (stmt)
&& !is_gimple_reg_type (racc->type))
bitmap_set_bit (should_scalarize_away_bitmap, DECL_UID (racc->base));
+ if (STRICT_ALIGNMENT && tree_non_aligned_mem_for_access_p (lhs, racc))
+ racc->grp_unscalarizable_region = 1;
}
if (lacc && racc
tree prev_base = base;
tree off;
HOST_WIDE_INT base_offset;
+ unsigned HOST_WIDE_INT misalign;
+ unsigned int align;
gcc_checking_assert (offset % BITS_PER_UNIT == 0);
add_referenced_var (tmp);
tmp = make_ssa_name (tmp, NULL);
addr = build_fold_addr_expr (unshare_expr (prev_base));
+ STRIP_USELESS_TYPE_CONVERSION (addr);
stmt = gimple_build_assign (tmp, addr);
gimple_set_location (stmt, loc);
SSA_NAME_DEF_STMT (tmp) = stmt;
{
off = build_int_cst (TREE_TYPE (TREE_OPERAND (base, 1)),
base_offset + offset / BITS_PER_UNIT);
- off = int_const_binop (PLUS_EXPR, TREE_OPERAND (base, 1), off, 0);
+ off = int_const_binop (PLUS_EXPR, TREE_OPERAND (base, 1), off);
base = unshare_expr (TREE_OPERAND (base, 0));
}
else
base = build_fold_addr_expr (unshare_expr (base));
}
+ /* If prev_base were always an originally performed access
+ we can extract more optimistic alignment information
+ by looking at the access mode. That would constrain the
+ alignment of base + base_offset which we would need to
+ adjust according to offset. */
+ align = get_pointer_alignment_1 (base, &misalign);
+ if (misalign == 0
+ && (TREE_CODE (prev_base) == MEM_REF
+ || TREE_CODE (prev_base) == TARGET_MEM_REF))
+ align = MAX (align, TYPE_ALIGN (TREE_TYPE (prev_base)));
+ misalign += (double_int_sext (tree_to_double_int (off),
+ TYPE_PRECISION (TREE_TYPE (off))).low
+ * BITS_PER_UNIT);
+ misalign = misalign & (align - 1);
+ if (misalign != 0)
+ align = (misalign & -misalign);
+ if (align < TYPE_ALIGN (exp_type))
+ exp_type = build_aligned_type (exp_type, align);
+
return fold_build2_loc (loc, MEM_REF, exp_type, base, off);
}
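
/* Editorial illustration, not part of the patch: the misalignment
   arithmetic above reduced to plain integers (units are bits, and
   BITS_PER_UNIT is assumed to be 8).  A 64-bit-aligned base accessed
   6 bytes in can only be proven 16-bit aligned.  */

#include <stdio.h>

static unsigned int
derived_align (unsigned int align, unsigned int misalign,
               unsigned int offset_bytes)
{
  misalign += offset_bytes * 8;
  misalign &= align - 1;          /* align is a power of two */
  if (misalign != 0)
    align = misalign & -misalign; /* lowest set bit bounds the result */
  return align;
}

int
main (void)
{
  printf ("%u\n", derived_align (64, 0, 6));    /* prints 16 */
  return 0;
}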
+DEF_VEC_ALLOC_P_STACK (tree);
+#define VEC_tree_stack_alloc(alloc) VEC_stack_alloc (tree, alloc)
+
/* Construct a memory reference to a part of an aggregate BASE at the given
- OFFSET and of the same type as MODEL. In case this is a reference to a
- component, the function will replicate the last COMPONENT_REF of model's
- expr to access it. GSI and INSERT_AFTER have the same meaning as in
- build_ref_for_offset. */
+ OFFSET and of the type of MODEL. In case this is a chain of references
+ to component, the function will replicate the chain of COMPONENT_REFs of
+ the expression of MODEL to access it. GSI and INSERT_AFTER have the same
+ meaning as in build_ref_for_offset. */
static tree
build_ref_for_model (location_t loc, tree base, HOST_WIDE_INT offset,
struct access *model, gimple_stmt_iterator *gsi,
bool insert_after)
{
+ tree type = model->type, t;
+ VEC(tree,stack) *cr_stack = NULL;
+
if (TREE_CODE (model->expr) == COMPONENT_REF)
{
- tree t, exp_type;
- offset -= int_bit_position (TREE_OPERAND (model->expr, 1));
- exp_type = TREE_TYPE (TREE_OPERAND (model->expr, 0));
- t = build_ref_for_offset (loc, base, offset, exp_type, gsi, insert_after);
- return fold_build3_loc (loc, COMPONENT_REF, model->type, t,
- TREE_OPERAND (model->expr, 1), NULL_TREE);
+ tree expr = model->expr;
+
+ /* Create a stack of the COMPONENT_REFs so later we can walk them in
+ order from inner to outer. */
+ cr_stack = VEC_alloc (tree, stack, 6);
+
+ do {
+ tree field = TREE_OPERAND (expr, 1);
+ tree cr_offset = component_ref_field_offset (expr);
+ HOST_WIDE_INT bit_pos
+ = tree_low_cst (cr_offset, 1) * BITS_PER_UNIT
+ + TREE_INT_CST_LOW (DECL_FIELD_BIT_OFFSET (field));
+
+ /* We can be called with a model different from the one associated
+ with BASE so we need to avoid going up the chain too far. */
+ if (offset - bit_pos < 0)
+ break;
+
+ offset -= bit_pos;
+ VEC_safe_push (tree, stack, cr_stack, expr);
+
+ expr = TREE_OPERAND (expr, 0);
+ type = TREE_TYPE (expr);
+ } while (TREE_CODE (expr) == COMPONENT_REF);
}
- else
- return build_ref_for_offset (loc, base, offset, model->type,
- gsi, insert_after);
+
+ t = build_ref_for_offset (loc, base, offset, type, gsi, insert_after);
+
+ if (TREE_CODE (model->expr) == COMPONENT_REF)
+ {
+ unsigned i;
+ tree expr;
+
+ /* Now replicate the chain of COMPONENT_REFs from inner to outer. */
+ FOR_EACH_VEC_ELT_REVERSE (tree, cr_stack, i, expr)
+ {
+ tree field = TREE_OPERAND (expr, 1);
+ t = fold_build3_loc (loc, COMPONENT_REF, TREE_TYPE (field), t, field,
+ TREE_OPERAND (expr, 2));
+ }
+
+ VEC_free (tree, stack, cr_stack);
+ }
+
+ return t;
}
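
/* Editorial illustration, not part of the patch: the source-level shape
   of what build_ref_for_model now replicates.  When the model's
   expression is a chain like src->m.mid.lo, the whole chain of
   COMPONENT_REFs is replayed, innermost first, on top of a reference
   built for the new base.  */

struct lo_t  { int lo; };
struct mid_t { struct lo_t mid; };
struct outer { struct mid_t m; };

int
copy_nested (struct outer *dst, const struct outer *src)
{
  /* Re-basing the model access src->m.mid.lo onto 'dst' yields
     dst->m.mid.lo, with the same field chain.  */
  dst->m.mid.lo = src->m.mid.lo;
  return dst->m.mid.lo;
}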
/* Construct a memory reference consisting of component_refs and array_refs to
return false;
index = build_int_cst (TYPE_DOMAIN (type), offset / el_size);
if (!integer_zerop (minidx))
- index = int_const_binop (PLUS_EXPR, index, minidx, 0);
+ index = int_const_binop (PLUS_EXPR, index, minidx);
*res = build4 (ARRAY_REF, TREE_TYPE (type), *res, index,
NULL_TREE, NULL_TREE);
offset = offset % el_size;
return TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (va_list_type_node);
}
+/* Print a message to the dump file explaining why a variable was rejected. */
+
+static void
+reject (tree var, const char *msg)
+{
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Rejected (%d): %s: ", DECL_UID (var), msg);
+ print_generic_expr (dump_file, var, 0);
+ fprintf (dump_file, "\n");
+ }
+}
+
/* The very first phase of intraprocedural SRA. It marks in candidate_bitmap
those variables whose types are suitable for scalarization. */
tree var, type;
referenced_var_iterator rvi;
bool ret = false;
+ const char *msg;
- FOR_EACH_REFERENCED_VAR (var, rvi)
+ FOR_EACH_REFERENCED_VAR (cfun, var, rvi)
{
if (TREE_CODE (var) != VAR_DECL && TREE_CODE (var) != PARM_DECL)
continue;
type = TREE_TYPE (var);
- if (!AGGREGATE_TYPE_P (type)
- || needs_to_live_in_memory (var)
- || TREE_THIS_VOLATILE (var)
- || !COMPLETE_TYPE_P (type)
- || !host_integerp (TYPE_SIZE (type), 1)
- || tree_low_cst (TYPE_SIZE (type), 1) == 0
- || type_internals_preclude_sra_p (type)
- /* Fix for PR 41089. tree-stdarg.c needs to have va_lists intact but
+ if (!AGGREGATE_TYPE_P (type))
+ {
+ reject (var, "not aggregate");
+ continue;
+ }
+ if (needs_to_live_in_memory (var))
+ {
+ reject (var, "needs to live in memory");
+ continue;
+ }
+ if (TREE_THIS_VOLATILE (var))
+ {
+ reject (var, "is volatile");
+ continue;
+ }
+ if (!COMPLETE_TYPE_P (type))
+ {
+ reject (var, "has incomplete type");
+ continue;
+ }
+ if (!host_integerp (TYPE_SIZE (type), 1))
+ {
+ reject (var, "type size not fixed");
+ continue;
+ }
+ if (tree_low_cst (TYPE_SIZE (type), 1) == 0)
+ {
+ reject (var, "type size is zero");
+ continue;
+ }
+ if (type_internals_preclude_sra_p (type, &msg))
+ {
+ reject (var, msg);
+ continue;
+ }
+ if (/* Fix for PR 41089. tree-stdarg.c needs to have va_lists intact but
we also want to schedule it rather late. Thus we ignore it in
the early pass. */
- || (sra_mode == SRA_MODE_EARLY_INTRA
+ (sra_mode == SRA_MODE_EARLY_INTRA
&& is_va_list_type (type)))
- continue;
+ {
+ reject (var, "is va_list");
+ continue;
+ }
bitmap_set_bit (candidate_bitmap, DECL_UID (var));
struct access *access = VEC_index (access_p, access_vec, i);
bool grp_write = access->write;
bool grp_read = !access->write;
+ bool grp_scalar_write = access->write
+ && is_gimple_reg_type (access->type);
+ bool grp_scalar_read = !access->write
+ && is_gimple_reg_type (access->type);
bool grp_assignment_read = access->grp_assignment_read;
bool grp_assignment_write = access->grp_assignment_write;
- bool multiple_reads = false;
- bool total_scalarization = access->total_scalarization;
+ bool multiple_scalar_reads = false;
+ bool total_scalarization = access->grp_total_scalarization;
bool grp_partial_lhs = access->grp_partial_lhs;
bool first_scalar = is_gimple_reg_type (access->type);
bool unscalarizable_region = access->grp_unscalarizable_region;
if (ac2->offset != access->offset || ac2->size != access->size)
break;
if (ac2->write)
- grp_write = true;
+ {
+ grp_write = true;
+ grp_scalar_write = (grp_scalar_write
+ || is_gimple_reg_type (ac2->type));
+ }
else
{
- if (grp_read)
- multiple_reads = true;
- else
- grp_read = true;
+ grp_read = true;
+ if (is_gimple_reg_type (ac2->type))
+ {
+ if (grp_scalar_read)
+ multiple_scalar_reads = true;
+ else
+ grp_scalar_read = true;
+ }
}
grp_assignment_read |= ac2->grp_assignment_read;
grp_assignment_write |= ac2->grp_assignment_write;
grp_partial_lhs |= ac2->grp_partial_lhs;
unscalarizable_region |= ac2->grp_unscalarizable_region;
- total_scalarization |= ac2->total_scalarization;
+ total_scalarization |= ac2->grp_total_scalarization;
relink_to_new_repr (access, ac2);
/* If there are both aggregate-type and scalar-type accesses with
access->group_representative = access;
access->grp_write = grp_write;
access->grp_read = grp_read;
+ access->grp_scalar_read = grp_scalar_read;
+ access->grp_scalar_write = grp_scalar_write;
access->grp_assignment_read = grp_assignment_read;
access->grp_assignment_write = grp_assignment_write;
- access->grp_hint = multiple_reads || total_scalarization;
+ access->grp_hint = multiple_scalar_reads || total_scalarization;
+ access->grp_total_scalarization = total_scalarization;
access->grp_partial_lhs = grp_partial_lhs;
access->grp_unscalarizable_region = unscalarizable_region;
if (access->first_link)
tree repl;
repl = create_tmp_var (access->type, "SR");
- get_var_ann (repl);
add_referenced_var (repl);
if (rename)
mark_sym_for_renaming (repl);
return false;
}
-enum mark_rw_status { SRA_MRRW_NOTHING, SRA_MRRW_DIRECT, SRA_MRRW_ASSIGN};
-
/* Analyze the subtree of accesses rooted in ROOT, scheduling replacements when
they seem beneficial and when ALLOW_REPLACEMENTS allows it. Also set all
sorts of access flags appropriately along the way, notably always set
there is more than one direct read access) or according to the following
table:
- Access written to individually (once or more times)
+ Access written to through a scalar type (once or more times)
|
- | Parent written to in an assignment statement
+ | Written to in an assignment statement
| |
- | | Access read individually _once_
+ | | Access read as scalar _once_
| | |
- | | | Parent read in an assignment statement
+ | | | Read in an assignment statement
| | | |
| | | | Scalarize Comment
-----------------------------------------------------------------------------
1 1 1 1 Yes Any of the above yeses */
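
/* Editorial illustration, not part of the patch: accesses that satisfy
   the table above.  's' as a whole is written in an assignment
   statement, and s.a is additionally read and written through its
   scalar type, so s.a gets a replacement.  */

struct pair { int a; int b; };

int
bump_first (struct pair in)
{
  struct pair s;
  s = in;               /* assignment write covering all of s */
  s.a = s.a + 1;        /* scalar read and write of s.a */
  return s.a;
}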
static bool
-analyze_access_subtree (struct access *root, bool allow_replacements,
- enum mark_rw_status mark_read,
- enum mark_rw_status mark_write)
+analyze_access_subtree (struct access *root, struct access *parent,
+ bool allow_replacements)
{
struct access *child;
HOST_WIDE_INT limit = root->offset + root->size;
HOST_WIDE_INT covered_to = root->offset;
bool scalar = is_gimple_reg_type (root->type);
bool hole = false, sth_created = false;
- bool direct_read = root->grp_read;
- bool direct_write = root->grp_write;
- if (root->grp_assignment_read)
- mark_read = SRA_MRRW_ASSIGN;
- else if (mark_read == SRA_MRRW_ASSIGN)
- {
- root->grp_read = 1;
- root->grp_assignment_read = 1;
- }
- else if (mark_read == SRA_MRRW_DIRECT)
- root->grp_read = 1;
- else if (root->grp_read)
- mark_read = SRA_MRRW_DIRECT;
-
- if (root->grp_assignment_write)
- mark_write = SRA_MRRW_ASSIGN;
- else if (mark_write == SRA_MRRW_ASSIGN)
+ if (parent)
{
- root->grp_write = 1;
- root->grp_assignment_write = 1;
+ if (parent->grp_read)
+ root->grp_read = 1;
+ if (parent->grp_assignment_read)
+ root->grp_assignment_read = 1;
+ if (parent->grp_write)
+ root->grp_write = 1;
+ if (parent->grp_assignment_write)
+ root->grp_assignment_write = 1;
+ if (parent->grp_total_scalarization)
+ root->grp_total_scalarization = 1;
}
- else if (mark_write == SRA_MRRW_DIRECT)
- root->grp_write = 1;
- else if (root->grp_write)
- mark_write = SRA_MRRW_DIRECT;
if (root->grp_unscalarizable_region)
allow_replacements = false;
for (child = root->first_child; child; child = child->next_sibling)
{
- if (!hole && child->offset < covered_to)
- hole = true;
- else
- covered_to += child->size;
-
- sth_created |= analyze_access_subtree (child,
- allow_replacements && !scalar,
- mark_read, mark_write);
+ hole |= covered_to < child->offset;
+ sth_created |= analyze_access_subtree (child, root,
+ allow_replacements && !scalar);
root->grp_unscalarized_data |= child->grp_unscalarized_data;
- hole |= !child->grp_covered;
+ root->grp_total_scalarization &= child->grp_total_scalarization;
+ if (child->grp_covered)
+ covered_to += child->size;
+ else
+ hole = true;
}
if (allow_replacements && scalar && !root->first_child
&& (root->grp_hint
- || ((direct_write || root->grp_assignment_write)
- && (direct_read || root->grp_assignment_read))))
+ || ((root->grp_scalar_read || root->grp_assignment_read)
+ && (root->grp_scalar_write || root->grp_assignment_write))))
{
+ bool new_integer_type;
+ /* Always create access replacements that cover the whole access.
+ For integral types this means the precision has to match.
+ Avoid assumptions based on the integral type kind, too. */
+ if (INTEGRAL_TYPE_P (root->type)
+ && (TREE_CODE (root->type) != INTEGER_TYPE
+ || TYPE_PRECISION (root->type) != root->size)
+ /* But leave bitfield accesses alone. */
+ && (TREE_CODE (root->expr) != COMPONENT_REF
+ || !DECL_BIT_FIELD (TREE_OPERAND (root->expr, 1))))
+ {
+ tree rt = root->type;
+ gcc_assert ((root->offset % BITS_PER_UNIT) == 0
+ && (root->size % BITS_PER_UNIT) == 0);
+ root->type = build_nonstandard_integer_type (root->size,
+ TYPE_UNSIGNED (rt));
+ root->expr = build_ref_for_offset (UNKNOWN_LOCATION,
+ root->base, root->offset,
+ root->type, NULL, false);
+ new_integer_type = true;
+ }
+ else
+ new_integer_type = false;
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Marking ");
print_generic_expr (dump_file, root->base, 0);
- fprintf (dump_file, " offset: %u, size: %u: ",
+ fprintf (dump_file, " offset: %u, size: %u ",
(unsigned) root->offset, (unsigned) root->size);
- fprintf (dump_file, " to be replaced.\n");
+ fprintf (dump_file, " to be replaced%s.\n",
+ new_integer_type ? " with an integer": "");
}
root->grp_to_be_replaced = 1;
sth_created = true;
hole = false;
}
- else if (covered_to < limit)
- hole = true;
+ else
+ {
+ if (covered_to < limit)
+ hole = true;
+ if (scalar)
+ root->grp_total_scalarization = 0;
+ }
- if (sth_created && !hole)
+ if (sth_created
+ && (!hole || root->grp_total_scalarization))
{
root->grp_covered = 1;
return true;
while (access)
{
- if (analyze_access_subtree (access, true,
- SRA_MRRW_NOTHING, SRA_MRRW_NOTHING))
+ if (analyze_access_subtree (access, NULL, true))
ret = true;
access = access->next_grp;
}
|| racc->grp_unscalarizable_region)
return false;
- if (!lacc->first_child && !racc->first_child
- && is_gimple_reg_type (racc->type))
+ if (is_gimple_reg_type (racc->type))
{
- tree t = lacc->base;
-
- lacc->type = racc->type;
- if (build_user_friendly_ref_for_offset (&t, TREE_TYPE (t), lacc->offset,
- racc->type))
- lacc->expr = t;
- else
+ if (!lacc->first_child && !racc->first_child)
{
- lacc->expr = build_ref_for_model (EXPR_LOCATION (lacc->base),
- lacc->base, lacc->offset,
- racc, NULL, false);
- lacc->grp_no_warning = true;
+ tree t = lacc->base;
+
+ lacc->type = racc->type;
+ if (build_user_friendly_ref_for_offset (&t, TREE_TYPE (t),
+ lacc->offset, racc->type))
+ lacc->expr = t;
+ else
+ {
+ lacc->expr = build_ref_for_model (EXPR_LOCATION (lacc->base),
+ lacc->base, lacc->offset,
+ racc, NULL, false);
+ lacc->grp_no_warning = true;
+ }
}
return false;
}
tree var = referenced_var (i);
if (TREE_CODE (var) == VAR_DECL
- && ((unsigned) tree_low_cst (TYPE_SIZE (TREE_TYPE (var)), 1)
- <= max_total_scalarization_size)
&& type_consists_of_records_p (TREE_TYPE (var)))
{
- completely_scalarize_record (var, var, 0, var);
- if (dump_file && (dump_flags & TDF_DETAILS))
+ if ((unsigned) tree_low_cst (TYPE_SIZE (TREE_TYPE (var)), 1)
+ <= max_total_scalarization_size)
{
- fprintf (dump_file, "Will attempt to totally scalarize ");
+ completely_scalarize_var (var);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Will attempt to totally scalarize ");
+ print_generic_expr (dump_file, var, 0);
+ fprintf (dump_file, " (UID: %u): \n", DECL_UID (var));
+ }
+ }
+ else if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Too big to totally scalarize: ");
print_generic_expr (dump_file, var, 0);
- fprintf (dump_file, " (UID: %u): \n", DECL_UID (var));
+ fprintf (dump_file, " (UID: %u)\n", DECL_UID (var));
}
}
}
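
/* Editorial illustration, not part of the patch: the size gate in
   action.  Both records satisfy type_consists_of_records_p, but only
   the small one fits under the MOVE_RATIO-derived limit on a typical
   64-bit target; the large one now gets the "Too big to totally
   scalarize" dump note instead.  */

struct quad  { double a, b, c, d; };
struct small { int x, y; };                                    /* scalarized */
struct large { struct quad q0, q1, q2, q3, q4, q5, q6, q7; };  /* 256 bytes */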
rhs = get_access_replacement (racc);
if (!useless_type_conversion_p (lacc->type, racc->type))
rhs = fold_build1_loc (loc, VIEW_CONVERT_EXPR, lacc->type, rhs);
+
+ if (racc->grp_partial_lhs && lacc->grp_partial_lhs)
+ rhs = force_gimple_operand_gsi (old_gsi, rhs, true, NULL_TREE,
+ true, GSI_SAME_STMT);
}
else
{
else
rhs = build_ref_for_model (loc, top_racc->base, offset, lacc,
new_gsi, true);
+ if (lacc->grp_partial_lhs)
+ rhs = force_gimple_operand_gsi (new_gsi, rhs, true, NULL_TREE,
+ false, GSI_NEW_STMT);
}
stmt = gimple_build_assign (get_access_replacement (lacc), rhs);
if (!acc)
return SRA_AM_NONE;
+ if (gimple_clobber_p (*stmt))
+ {
+ /* Remove clobbers of fully scalarized variables, otherwise
+ do nothing. */
+ if (acc->grp_covered)
+ {
+ unlink_stmt_vdef (*stmt);
+ gsi_remove (gsi, true);
+ return SRA_AM_REMOVED;
+ }
+ else
+ return SRA_AM_NONE;
+ }
+
loc = gimple_location (*stmt);
if (VEC_length (constructor_elt,
CONSTRUCTOR_ELTS (gimple_assign_rhs1 (*stmt))) > 0)
force_gimple_rhs = true;
sra_stats.exprs++;
}
+ else if (racc
+ && !racc->grp_unscalarized_data
+ && TREE_CODE (lhs) == SSA_NAME
+ && !access_has_replacements_p (racc))
+ {
+ rhs = get_repl_default_def_ssa_name (racc);
+ modify_this_stmt = true;
+ sra_stats.exprs++;
+ }
if (modify_this_stmt)
{
??? This should move to fold_stmt which we simply should
call after building a VIEW_CONVERT_EXPR here. */
if (AGGREGATE_TYPE_P (TREE_TYPE (lhs))
- && !contains_bitfld_comp_ref_p (lhs)
- && !access_has_children_p (lacc))
+ && !contains_bitfld_comp_ref_p (lhs))
{
- lhs = build_ref_for_offset (loc, lhs, 0, TREE_TYPE (rhs),
- gsi, false);
+ lhs = build_ref_for_model (loc, lhs, 0, racc, gsi, false);
gimple_assign_set_lhs (*stmt, lhs);
}
else if (AGGREGATE_TYPE_P (TREE_TYPE (rhs))
- && !contains_vce_or_bfcref_p (rhs)
- && !access_has_children_p (racc))
- rhs = build_ref_for_offset (loc, rhs, 0, TREE_TYPE (lhs),
- gsi, false);
+ && !contains_vce_or_bfcref_p (rhs))
+ rhs = build_ref_for_model (loc, rhs, 0, lacc, gsi, false);
if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
{
there to do the copying and then load the scalar replacements of the LHS.
This is what the first branch does. */
- if (gimple_has_volatile_ops (*stmt)
+ if (modify_this_stmt
+ || gimple_has_volatile_ops (*stmt)
|| contains_vce_or_bfcref_p (rhs)
|| contains_vce_or_bfcref_p (lhs))
{
generate_subtree_copies (lacc->first_child, lacc->base, 0, 0, 0,
gsi, true, true, loc);
sra_stats.separate_lhs_rhs_handling++;
+
+ /* This gimplification must be done after generate_subtree_copies,
+ lest we insert the subtree copies in the middle of the gimplified
+ sequence. */
+ if (force_gimple_rhs)
+ rhs = force_gimple_operand_gsi (&orig_gsi, rhs, true, NULL_TREE,
+ true, GSI_SAME_STMT);
+ if (gimple_assign_rhs1 (*stmt) != rhs)
+ {
+ modify_this_stmt = true;
+ gimple_assign_set_rhs_from_tree (&orig_gsi, rhs);
+ gcc_assert (*stmt == gsi_stmt (orig_gsi));
+ }
+
+ return modify_this_stmt ? SRA_AM_MODIFIED : SRA_AM_NONE;
}
else
{
- if (access_has_children_p (lacc) && access_has_children_p (racc))
+ if (access_has_children_p (lacc)
+ && access_has_children_p (racc)
+ /* When an access represents an unscalarizable region, it usually
+ represents accesses with variable offset and thus must not be used
+ to generate new memory accesses. */
+ && !lacc->grp_unscalarizable_region
+ && !racc->grp_unscalarizable_region)
{
gimple_stmt_iterator orig_gsi = *gsi;
enum unscalarized_data_handling refreshed;
}
else
{
- if (racc)
+ if (access_has_children_p (racc)
+ && !racc->grp_unscalarized_data)
{
- if (!racc->grp_to_be_replaced && !racc->grp_unscalarized_data)
+ if (dump_file)
{
- if (dump_file)
- {
- fprintf (dump_file, "Removing load: ");
- print_gimple_stmt (dump_file, *stmt, 0, 0);
- }
-
- if (TREE_CODE (lhs) == SSA_NAME)
- {
- rhs = get_repl_default_def_ssa_name (racc);
- if (!useless_type_conversion_p (TREE_TYPE (lhs),
- TREE_TYPE (rhs)))
- rhs = fold_build1_loc (loc, VIEW_CONVERT_EXPR,
- TREE_TYPE (lhs), rhs);
- }
- else
- {
- if (racc->first_child)
- generate_subtree_copies (racc->first_child, lhs,
- racc->offset, 0, 0, gsi,
- false, false, loc);
-
- gcc_assert (*stmt == gsi_stmt (*gsi));
- unlink_stmt_vdef (*stmt);
- gsi_remove (gsi, true);
- sra_stats.deleted++;
- return SRA_AM_REMOVED;
- }
+ fprintf (dump_file, "Removing load: ");
+ print_gimple_stmt (dump_file, *stmt, 0, 0);
}
- else if (racc->first_child)
- generate_subtree_copies (racc->first_child, lhs, racc->offset,
- 0, 0, gsi, false, true, loc);
+ generate_subtree_copies (racc->first_child, lhs,
+ racc->offset, 0, 0, gsi,
+ false, false, loc);
+ gcc_assert (*stmt == gsi_stmt (*gsi));
+ unlink_stmt_vdef (*stmt);
+ gsi_remove (gsi, true);
+ sra_stats.deleted++;
+ return SRA_AM_REMOVED;
}
+ /* Restore the aggregate RHS from its components so the
+ prevailing aggregate copy does the right thing. */
+ if (access_has_children_p (racc))
+ generate_subtree_copies (racc->first_child, racc->base, 0, 0, 0,
+ gsi, false, false, loc);
+ /* Re-load the components of the aggregate copy destination.
+ But use the RHS aggregate to load from to expose more
+ optimization opportunities. */
if (access_has_children_p (lacc))
generate_subtree_copies (lacc->first_child, rhs, lacc->offset,
0, 0, gsi, true, true, loc);
}
- }
- /* This gimplification must be done after generate_subtree_copies, lest we
- insert the subtree copies in the middle of the gimplified sequence. */
- if (force_gimple_rhs)
- rhs = force_gimple_operand_gsi (&orig_gsi, rhs, true, NULL_TREE,
- true, GSI_SAME_STMT);
- if (gimple_assign_rhs1 (*stmt) != rhs)
- {
- modify_this_stmt = true;
- gimple_assign_set_rhs_from_tree (&orig_gsi, rhs);
- gcc_assert (*stmt == gsi_stmt (orig_gsi));
+ return SRA_AM_NONE;
}
-
- return modify_this_stmt ? SRA_AM_MODIFIED : SRA_AM_NONE;
}
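
/* Editorial illustration, not part of the patch: the "Removing load"
   path above.  The RHS aggregate 't' is fully covered by scalar
   replacements and carries no unscalarized data, so the aggregate copy
   is deleted and replaced by per-field stores; replacement names are
   hypothetical.  */

struct duo { int a; int b; };

int
f (struct duo *out, int x)
{
  struct duo t;
  t.a = x;
  t.b = x + 1;
  *out = t;     /* becomes out->a = t$a; out->b = t$b; and goes away */
  return t.a;
}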
/* Traverse the function body and all modifications as decided in
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func
- | TODO_update_ssa
+ TODO_update_ssa
| TODO_ggc_collect
| TODO_verify_ssa /* todo_flags_finish */
}
0, /* properties_provided */
0, /* properties_destroyed */
TODO_update_address_taken, /* todo_flags_start */
- TODO_dump_func
- | TODO_update_ssa
+ TODO_update_ssa
| TODO_ggc_collect
| TODO_verify_ssa /* todo_flags_finish */
}
&& TREE_OPERAND (lhs, 0) == name
&& integer_zerop (TREE_OPERAND (lhs, 1))
&& types_compatible_p (TREE_TYPE (lhs),
- TREE_TYPE (TREE_TYPE (name))))
+ TREE_TYPE (TREE_TYPE (name)))
+ && !TREE_THIS_VOLATILE (lhs))
uses_ok++;
}
if (gimple_assign_single_p (stmt))
&& TREE_OPERAND (rhs, 0) == name
&& integer_zerop (TREE_OPERAND (rhs, 1))
&& types_compatible_p (TREE_TYPE (rhs),
- TREE_TYPE (TREE_TYPE (name))))
+ TREE_TYPE (TREE_TYPE (name)))
+ && !TREE_THIS_VOLATILE (rhs))
uses_ok++;
}
else if (is_gimple_call (stmt))
&& TREE_OPERAND (arg, 0) == name
&& integer_zerop (TREE_OPERAND (arg, 1))
&& types_compatible_p (TREE_TYPE (arg),
- TREE_TYPE (TREE_TYPE (name))))
+ TREE_TYPE (TREE_TYPE (name)))
+ && !TREE_THIS_VOLATILE (arg))
uses_ok++;
}
}
tree parm;
int count = 0;
bool ret = false;
+ const char *msg;
for (parm = DECL_ARGUMENTS (current_function_decl);
parm;
|| !host_integerp (TYPE_SIZE (type), 1)
|| tree_low_cst (TYPE_SIZE (type), 1) == 0
|| (AGGREGATE_TYPE_P (type)
- && type_internals_preclude_sra_p (type)))
+ && type_internals_preclude_sra_p (type, &msg)))
continue;
bitmap_set_bit (candidate_bitmap, DECL_UID (parm));
|| gimple_code (access->stmt) == GIMPLE_ASM))
return true;
+ if (STRICT_ALIGNMENT
+ && tree_non_aligned_mem_p (access->expr, TYPE_ALIGN (access->type)))
+ return true;
+
return false;
}
if (by_ref && repr->non_addressable)
return 0;
+ /* Do not decompose a non-BLKmode param in a way that would
+ create BLKmode params. Especially for by-reference passing
+ (thus, pointer-type param) this is hardly worthwhile. */
+ if (DECL_MODE (parm) != BLKmode
+ && TYPE_MODE (repr->type) == BLKmode)
+ return 0;
+
if (!by_ref || (!repr->grp_maybe_modified
&& !repr->grp_not_necessarilly_dereferenced))
total_size += repr->size;
DECL_NAME (repl) = get_identifier (pretty_name);
obstack_free (&name_obstack, pretty_name);
- get_var_ann (repl);
add_referenced_var (repl);
adj->new_ssa_base = repl;
}
sra_ipa_reset_debug_stmts (ipa_parm_adjustment_vec adjustments)
{
int i, len;
+ gimple_stmt_iterator *gsip = NULL, gsi;
+ if (MAY_HAVE_DEBUG_STMTS && single_succ_p (ENTRY_BLOCK_PTR))
+ {
+ gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));
+ gsip = &gsi;
+ }
len = VEC_length (ipa_parm_adjustment_t, adjustments);
for (i = 0; i < len; i++)
{
struct ipa_parm_adjustment *adj;
imm_use_iterator ui;
- gimple stmt;
- tree name;
+ gimple stmt, def_temp;
+ tree name, vexpr, copy = NULL_TREE;
+ use_operand_p use_p;
adj = VEC_index (ipa_parm_adjustment_t, adjustments, i);
if (adj->copy_param || !is_gimple_reg (adj->base))
continue;
name = gimple_default_def (cfun, adj->base);
- if (!name)
- continue;
- FOR_EACH_IMM_USE_STMT (stmt, ui, name)
+ vexpr = NULL;
+ if (name)
+ FOR_EACH_IMM_USE_STMT (stmt, ui, name)
+ {
+ /* All other users must have been removed by
+ ipa_sra_modify_function_body. */
+ gcc_assert (is_gimple_debug (stmt));
+ if (vexpr == NULL && gsip != NULL)
+ {
+ gcc_assert (TREE_CODE (adj->base) == PARM_DECL);
+ vexpr = make_node (DEBUG_EXPR_DECL);
+ def_temp = gimple_build_debug_source_bind (vexpr, adj->base,
+ NULL);
+ DECL_ARTIFICIAL (vexpr) = 1;
+ TREE_TYPE (vexpr) = TREE_TYPE (name);
+ DECL_MODE (vexpr) = DECL_MODE (adj->base);
+ gsi_insert_before (gsip, def_temp, GSI_SAME_STMT);
+ }
+ if (vexpr)
+ {
+ FOR_EACH_IMM_USE_ON_STMT (use_p, ui)
+ SET_USE (use_p, vexpr);
+ }
+ else
+ gimple_debug_bind_reset_value (stmt);
+ update_stmt (stmt);
+ }
+ /* Create a VAR_DECL for debug info purposes. */
+ if (!DECL_IGNORED_P (adj->base))
{
- /* All other users must have been removed by
- ipa_sra_modify_function_body. */
- gcc_assert (is_gimple_debug (stmt));
- gimple_debug_bind_reset_value (stmt);
- update_stmt (stmt);
+ copy = build_decl (DECL_SOURCE_LOCATION (current_function_decl),
+ VAR_DECL, DECL_NAME (adj->base),
+ TREE_TYPE (adj->base));
+ if (DECL_PT_UID_SET_P (adj->base))
+ SET_DECL_PT_UID (copy, DECL_PT_UID (adj->base));
+ TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (adj->base);
+ TREE_READONLY (copy) = TREE_READONLY (adj->base);
+ TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (adj->base);
+ DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (adj->base);
+ DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (adj->base);
+ DECL_IGNORED_P (copy) = DECL_IGNORED_P (adj->base);
+ DECL_ABSTRACT_ORIGIN (copy) = DECL_ORIGIN (adj->base);
+ DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
+ SET_DECL_RTL (copy, 0);
+ TREE_USED (copy) = 1;
+ DECL_CONTEXT (copy) = current_function_decl;
+ add_referenced_var (copy);
+ add_local_decl (cfun, copy);
+ DECL_CHAIN (copy) =
+ BLOCK_VARS (DECL_INITIAL (current_function_decl));
+ BLOCK_VARS (DECL_INITIAL (current_function_decl)) = copy;
+ }
+ if (gsip != NULL && copy && target_for_debug_bind (adj->base))
+ {
+ gcc_assert (TREE_CODE (adj->base) == PARM_DECL);
+ if (vexpr)
+ def_temp = gimple_build_debug_bind (copy, vexpr, NULL);
+ else
+ def_temp = gimple_build_debug_source_bind (copy, adj->base,
+ NULL);
+ gsi_insert_before (gsip, def_temp, GSI_SAME_STMT);
}
}
}
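
/* Editorial illustration, not part of the patch: a parameter IPA-SRA
   removes outright.  The code above now emits a debug source bind and
   a debug-only VAR_DECL copy so 'unused' stays inspectable in the
   debugger instead of showing as <optimized out>.  */

static int
callee (int used, int unused)
{
  return used * 2;      /* 'unused' is dead, so its parameter is dropped */
}

int
caller (int x)
{
  return callee (x, x + 1);
}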
-/* Return true iff all callers have at least as many actual arguments as there
+/* Return false iff all callers have at least as many actual arguments as there
are formal parameters in the current function. */
static bool
-all_callers_have_enough_arguments_p (struct cgraph_node *node)
+not_all_callers_have_enough_arguments_p (struct cgraph_node *node,
+ void *data ATTRIBUTE_UNUSED)
{
struct cgraph_edge *cs;
for (cs = node->callers; cs; cs = cs->next_caller)
if (!callsite_has_enough_arguments_p (cs->call_stmt))
- return false;
+ return true;
- return true;
+ return false;
}
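
/* Editorial illustration, not part of the patch: the call pattern the
   new predicate detects.  Through the unprototyped declaration, the
   caller passes fewer actual arguments than the callee has formal
   parameters, so IPA-SRA must leave the signature alone.  */

extern int takes_two ();        /* no prototype visible at the call site */

int
short_call (void)
{
  return takes_two (1);         /* the definition actually has two formals */
}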
+/* Convert all callers of NODE. */
-/* Convert all callers of NODE to pass parameters as given in ADJUSTMENTS. */
-
-static void
-convert_callers (struct cgraph_node *node, tree old_decl,
- ipa_parm_adjustment_vec adjustments)
+static bool
+convert_callers_for_node (struct cgraph_node *node,
+ void *data)
{
- tree old_cur_fndecl = current_function_decl;
- struct cgraph_edge *cs;
- basic_block this_block;
+ ipa_parm_adjustment_vec adjustments = (ipa_parm_adjustment_vec)data;
bitmap recomputed_callers = BITMAP_ALLOC (NULL);
+ struct cgraph_edge *cs;
for (cs = node->callers; cs; cs = cs->next_caller)
{
if (dump_file)
fprintf (dump_file, "Adjusting call (%i -> %i) %s -> %s\n",
cs->caller->uid, cs->callee->uid,
- cgraph_node_name (cs->caller),
- cgraph_node_name (cs->callee));
+ xstrdup (cgraph_node_name (cs->caller)),
+ xstrdup (cgraph_node_name (cs->callee)));
ipa_modify_call_arguments (cs, cs->call_stmt, adjustments);
}
for (cs = node->callers; cs; cs = cs->next_caller)
- if (bitmap_set_bit (recomputed_callers, cs->caller->uid))
- compute_inline_parameters (cs->caller);
+ if (bitmap_set_bit (recomputed_callers, cs->caller->uid)
+ && gimple_in_ssa_p (DECL_STRUCT_FUNCTION (cs->caller->decl)))
+ compute_inline_parameters (cs->caller, true);
BITMAP_FREE (recomputed_callers);
+ return true;
+}
+
+/* Convert all callers of NODE to pass parameters as given in ADJUSTMENTS. */
+
+static void
+convert_callers (struct cgraph_node *node, tree old_decl,
+ ipa_parm_adjustment_vec adjustments)
+{
+ tree old_cur_fndecl = current_function_decl;
+ basic_block this_block;
+
+ cgraph_for_node_and_aliases (node, convert_callers_for_node,
+ adjustments, false);
+
current_function_decl = old_cur_fndecl;
if (!encountered_recursive_call)
modify_function (struct cgraph_node *node, ipa_parm_adjustment_vec adjustments)
{
struct cgraph_node *new_node;
- struct cgraph_edge *cs;
bool cfg_changed;
- VEC (cgraph_edge_p, heap) * redirect_callers;
- int node_callers;
-
- node_callers = 0;
- for (cs = node->callers; cs != NULL; cs = cs->next_caller)
- node_callers++;
- redirect_callers = VEC_alloc (cgraph_edge_p, heap, node_callers);
- for (cs = node->callers; cs != NULL; cs = cs->next_caller)
- VEC_quick_push (cgraph_edge_p, redirect_callers, cs);
+ VEC (cgraph_edge_p, heap) * redirect_callers = collect_callers_of_node (node);
rebuild_cgraph_edges ();
+ free_dominance_info (CDI_DOMINATORS);
pop_cfun ();
current_function_decl = NULL_TREE;
new_node = cgraph_function_versioning (node, redirect_callers, NULL, NULL,
- NULL, NULL, "isra");
+ false, NULL, NULL, "isra");
+ VEC_free (cgraph_edge_p, heap, redirect_callers);
+
current_function_decl = new_node->decl;
push_cfun (DECL_STRUCT_FUNCTION (new_node->decl));
return false;
}
+ if (!node->local.can_change_signature)
+ {
+ if (dump_file)
+ fprintf (dump_file, "Function can not change signature.\n");
+ return false;
+ }
+
if (!tree_versionable_function_p (node->decl))
{
if (dump_file)
}
if ((DECL_COMDAT (node->decl) || DECL_EXTERNAL (node->decl))
- && node->global.size >= MAX_INLINE_INSNS_AUTO)
+ && inline_summary (node)->size >= MAX_INLINE_INSNS_AUTO)
{
if (dump_file)
fprintf (dump_file, "Function too big to be made truly local.\n");
static unsigned int
ipa_early_sra (void)
{
- struct cgraph_node *node = cgraph_node (current_function_decl);
+ struct cgraph_node *node = cgraph_get_node (current_function_decl);
ipa_parm_adjustment_vec adjustments;
int ret = 0;
goto simple_out;
}
- if (!all_callers_have_enough_arguments_p (node))
+ if (cgraph_for_node_and_aliases (node, not_all_callers_have_enough_arguments_p,
+ NULL, true))
{
if (dump_file)
fprintf (dump_file, "There are callers with insufficient number of "
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func | TODO_dump_cgraph /* todo_flags_finish */
+ TODO_dump_cgraph /* todo_flags_finish */
}
};
-
-