NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
- TV_NONE, /* tv_id */
+ TV_OPTIMIZE, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
return (!seen_error () && !in_lto_p);
}
+static unsigned int
+execute_all_early_local_passes (void)
+{
+ /* Once this pass (and its sub-passes) are complete, all functions
+ will be in SSA form. Technically this state change is happening
+ a tad early, since the sub-passes have not yet run, but since
+ none of the sub-passes are IPA passes and none create new
+ functions, this is OK. We're setting this value for the benefit
+ of IPA passes that follow. */
+ if (cgraph_state < CGRAPH_STATE_IPA_SSA)
+ cgraph_state = CGRAPH_STATE_IPA_SSA;
+ return 0;
+}
+
struct simple_ipa_opt_pass pass_early_local_passes =
{
{
SIMPLE_IPA_PASS,
"early_local_cleanups", /* name */
gate_all_early_local_passes, /* gate */
- NULL, /* execute */
+ execute_all_early_local_passes, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
- TV_NONE, /* tv_id */
+ TV_EARLY_LOCAL, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
}
};
-static unsigned int
-execute_early_local_optimizations (void)
-{
- /* First time we start with early optimization we need to advance
- cgraph state so newly inserted functions are also early optimized.
- However we execute early local optimizations for lately inserted
- functions, in that case don't reset cgraph state back to IPA_SSA. */
- if (cgraph_state < CGRAPH_STATE_IPA_SSA)
- cgraph_state = CGRAPH_STATE_IPA_SSA;
- return 0;
-}
-
/* Gate: execute, or not, all of the non-trivial optimizations. */
static bool
GIMPLE_PASS,
"early_optimizations", /* name */
gate_all_early_optimizations, /* gate */
- execute_early_local_optimizations, /* execute */
+ NULL, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
}
};
-/* Pass: cleanup the CFG just before expanding trees to RTL.
- This is just a round of label cleanups and case node grouping
- because after the tree optimizers have run such cleanups may
- be necessary. */
-
-static unsigned int
-execute_cleanup_cfg_pre_ipa (void)
-{
- cleanup_tree_cfg ();
- return 0;
-}
-
-struct gimple_opt_pass pass_cleanup_cfg =
-{
- {
- GIMPLE_PASS,
- "cleanup_cfg", /* name */
- NULL, /* gate */
- execute_cleanup_cfg_pre_ipa, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- PROP_cfg, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_dump_func /* todo_flags_finish */
- }
-};
-
/* Pass: cleanup the CFG just before expanding trees to RTL.
This is just a round of label cleanups and case node grouping
static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
- fold_cond_expr_cond ();
cleanup_tree_cfg ();
cleanup_dead_labels ();
group_case_labels ();
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
- TV_NONE, /* tv_id */
+ TV_TREE_CLEANUP_CFG, /* tv_id */
PROP_cfg, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
return 0;
}
-/* Pass: fixup_cfg. IPA passes, compilation of earlier functions or inlining
+/* IPA passes, compilation of earlier functions or inlining
might have changed some properties, such as marked functions nothrow,
pure, const or noreturn.
Remove redundant edges and basic blocks, and create new ones if necessary.
int flags = gimple_call_flags (stmt);
if (flags & (ECF_CONST | ECF_PURE | ECF_LOOPING_CONST_OR_PURE))
{
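+ /* A call now known to be const or pure has no side effects and
+ so cannot make an abnormal transfer of control; any abnormal
+ call edges out of this block are therefore dead. */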
+ if (gimple_purge_dead_abnormal_call_edges (bb))
+ todo |= TODO_cleanup_cfg;
+
if (gimple_in_ssa_p (cfun))
{
todo |= TODO_update_ssa | TODO_cleanup_cfg;
- mark_symbols_for_renaming (stmt);
update_stmt (stmt);
}
}
-
+
if (flags & ECF_NORETURN
&& fixup_noreturn_call (stmt))
todo |= TODO_cleanup_cfg;
}
- maybe_clean_eh_stmt (stmt);
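+ /* If the statement can no longer throw, remove it from the EH
+ tables; when that succeeds, any EH edges leaving this block
+ may have become dead as well. */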
+ if (maybe_clean_eh_stmt (stmt)
+ && gimple_purge_dead_eh_edges (bb))
+ todo |= TODO_cleanup_cfg;
}
- if (gimple_purge_dead_eh_edges (bb))
- todo |= TODO_cleanup_cfg;
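+ /* Scale each outgoing edge count; adding REG_BR_PROB_BASE / 2
+ before the division rounds to the nearest count instead of
+ truncating. */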
FOR_EACH_EDGE (e, ei, bb->succs)
e->count = (e->count * count_scale
+ REG_BR_PROB_BASE / 2) / REG_BR_PROB_BASE;
{
location_t saved_loc;
- timevar_push (TV_EXPAND);
+ timevar_push (TV_REST_OF_COMPILATION);
gcc_assert (cgraph_global_info_ready);
= TREE_INT_CST_LOW (TYPE_SIZE_UNIT (ret_type));
if (compare_tree_int (TYPE_SIZE_UNIT (ret_type), size_as_int) == 0)
- warning (OPT_Wlarger_than_eq, "size of return value of %q+D is %u bytes",
+ warning (OPT_Wlarger_than_, "size of return value of %q+D is %u bytes",
fndecl, size_as_int);
else
- warning (OPT_Wlarger_than_eq, "size of return value of %q+D is larger than %wd bytes",
+ warning (OPT_Wlarger_than_, "size of return value of %q+D is larger than %wd bytes",
fndecl, larger_than_size);
}
}
input_location = saved_loc;
ggc_collect ();
- timevar_pop (TV_EXPAND);
+ timevar_pop (TV_REST_OF_COMPILATION);
}