X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fopts.c;h=9bae6ea834904b9bbf6fcf8407f8d91994edbaf1;hb=cd8171dd88ae95d9e06c4e9a22bf445ba14babd6;hp=8c46dfa9cf734c989b4cfde28f0689285f89c26d;hpb=0bfd8d5c472a9e897b1f5636d4bc1a69a0fe6251;p=pf3gnuchains%2Fgcc-fork.git

diff --git a/gcc/opts.c b/gcc/opts.c
index 8c46dfa9cf7..9bae6ea8349 100644
--- a/gcc/opts.c
+++ b/gcc/opts.c
@@ -47,6 +47,9 @@ along with GCC; see the file COPYING3.  If not see
 unsigned HOST_WIDE_INT g_switch_value;
 bool g_switch_set;
 
+/* Same for selective scheduling.  */
+bool sel_sched_switch_set;
+
 /* True if we should exit after parsing options.  */
 bool exit_after_options;
 
@@ -347,7 +350,8 @@ static bool profile_arc_flag_set, flag_profile_values_set;
 static bool flag_unroll_loops_set, flag_tracer_set;
 static bool flag_value_profile_transformations_set;
 static bool flag_peel_loops_set, flag_branch_probabilities_set;
-static bool flag_inline_functions_set;
+static bool flag_inline_functions_set, flag_ipa_cp_set, flag_ipa_cp_clone_set;
+static bool flag_predictive_commoning_set, flag_unswitch_loops_set, flag_gcse_after_reload_set;
 
 /* Functions excluded from profiling.  */
 
@@ -866,39 +870,11 @@ decode_options (unsigned int argc, const char **argv)
 	}
     }
 
-  if (!flag_unit_at_a_time)
-    {
-      flag_section_anchors = 0;
-      flag_toplevel_reorder = 0;
-    }
-  if (!flag_toplevel_reorder)
-    {
-      if (flag_section_anchors == 1)
-	error ("Section anchors must be disabled when toplevel reorder is disabled.");
-      flag_section_anchors = 0;
-    }
-
 #ifdef IRA_COVER_CLASSES
   /* Use IRA if it is implemented for the target.  */
   flag_ira = 1;
 #endif
 
-  /* Originally we just set the variables if a particular optimization level,
-     but with the advent of being able to change the optimization level for a
-     function, we need to reset optimizations.  */
-  if (!optimize)
-    {
-      flag_merge_constants = 0;
-
-      /* We disable toplevel reordering at -O0 to disable transformations that
-	 might be surprising to end users and to get -fno-toplevel-reorder
-	 tested, but we keep section anchors.  */
-      if (flag_toplevel_reorder == 2)
-	flag_toplevel_reorder = 0;
-    }
-  else
-    flag_merge_constants = 1;
-
   /* -O1 optimizations.  */
   opt1 = (optimize >= 1);
   flag_defer_pop = opt1;
@@ -914,6 +890,7 @@ decode_options (unsigned int argc, const char **argv)
   flag_if_conversion2 = opt1;
   flag_ipa_pure_const = opt1;
   flag_ipa_reference = opt1;
+  flag_merge_constants = opt1;
   flag_split_wide_types = opt1;
   flag_tree_ccp = opt1;
   flag_tree_dce = opt1;
@@ -951,13 +928,13 @@ decode_options (unsigned int argc, const char **argv)
   flag_delete_null_pointer_checks = opt2;
   flag_reorder_blocks = opt2;
   flag_reorder_functions = opt2;
-  flag_tree_store_ccp = opt2;
   flag_tree_vrp = opt2;
   flag_tree_builtin_call_dce = opt2;
   flag_tree_pre = opt2;
-  flag_tree_switch_conversion = 1;
+  flag_tree_switch_conversion = 1;
+  flag_ipa_cp = opt2;
 
-  /* Allow more virtual operators to increase alias precision. */
+  /* Allow more virtual operators to increase alias precision.  */
   set_param_value ("max-aliased-vops",
 		   (opt2) ? 500 : initial_max_aliased_vops);
 
@@ -973,6 +950,9 @@ decode_options (unsigned int argc, const char **argv)
   flag_unswitch_loops = opt3;
   flag_gcse_after_reload = opt3;
   flag_tree_vectorize = opt3;
+  flag_ipa_cp_clone = opt3;
+  if (flag_ipa_cp_clone)
+    flag_ipa_cp = 1;
 
   /* Allow even more virtual operators.  Max-aliased-vops was set above for
      -O2, so don't reset it unless we are at -O3.  */
@@ -990,37 +970,10 @@ decode_options (unsigned int argc, const char **argv)
 
   if (optimize_size)
     {
-      /* Conditional DCE generates bigger code.  */
-      flag_tree_builtin_call_dce = 0;
-
-      /* PRE tends to generate bigger code.  */
-      flag_tree_pre = 0;
-
-      /* These options are set with -O3, so reset for -Os */
-      flag_predictive_commoning = 0;
-      flag_gcse_after_reload = 0;
-      flag_tree_vectorize = 0;
-
-      /* Don't reorder blocks when optimizing for size because extra jump insns may
-	 be created; also barrier may create extra padding.
-
-	 More correctly we should have a block reordering mode that tried to
-	 minimize the combined size of all the jumps.  This would more or less
-	 automatically remove extra jumps, but would also try to use more short
-	 jumps instead of long jumps.  */
-      flag_reorder_blocks = 0;
-      flag_reorder_blocks_and_partition = 0;
-
       /* Inlining of functions reducing size is a good idea regardless of them
	 being declared inline.  */
       flag_inline_functions = 1;
 
-      /* Don't align code.  */
-      align_loops = 1;
-      align_jumps = 1;
-      align_labels = 1;
-      align_functions = 1;
-
       /* Basic optimization options.  */
       optimize_size = 1;
       if (optimize > 2)
@@ -1055,6 +1008,24 @@ decode_options (unsigned int argc, const char **argv)
 
   handle_options (argc, argv, lang_mask);
 
+  /* -fno-unit-at-a-time and -fno-toplevel-reorder handling.  */
+  if (!flag_unit_at_a_time)
+    {
+      flag_section_anchors = 0;
+      flag_toplevel_reorder = 0;
+    }
+  else if (!optimize && flag_toplevel_reorder == 2)
+    /* We disable toplevel reordering at -O0 to disable transformations that
+       might be surprising to end users and to get -fno-toplevel-reorder
+       tested, but we keep section anchors.  */
+    flag_toplevel_reorder = 0;
+  else if (!flag_toplevel_reorder)
+    {
+      if (flag_section_anchors == 1)
+	error ("section anchors must be disabled when toplevel reorder is disabled");
+      flag_section_anchors = 0;
+    }
+
   if (first_time_p)
     {
       if (flag_pie)
@@ -1110,6 +1081,11 @@ decode_options (unsigned int argc, const char **argv)
       flag_reorder_blocks = 1;
     }
 
+  /* Pipelining of outer loops is only possible when general pipelining
+     capabilities are requested.  */
+  if (!flag_sel_sched_pipelining)
+    flag_sel_sched_pipelining_outer_loops = 0;
+
 #ifndef IRA_COVER_CLASSES
   if (flag_ira)
     {
@@ -1831,6 +1807,17 @@ common_handle_option (size_t scode, const char *arg, int value,
 	flag_value_profile_transformations = value;
       if (!flag_inline_functions_set)
 	flag_inline_functions = value;
+      if (!flag_ipa_cp_set)
+	flag_ipa_cp = value;
+      if (!flag_ipa_cp_clone_set
+	  && value && flag_ipa_cp)
+	flag_ipa_cp_clone = value;
+      if (!flag_predictive_commoning_set)
+	flag_predictive_commoning = value;
+      if (!flag_unswitch_loops_set)
+	flag_unswitch_loops = value;
+      if (!flag_gcse_after_reload_set)
+	flag_gcse_after_reload = value;
       break;
 
     case OPT_fprofile_generate_:
@@ -1882,6 +1869,11 @@ common_handle_option (size_t scode, const char *arg, int value,
       set_random_seed (arg);
      break;
 
+    case OPT_fselective_scheduling:
+    case OPT_fselective_scheduling2:
+      sel_sched_switch_set = true;
+      break;
+
     case OPT_fsched_verbose_:
 #ifdef INSN_SCHEDULING
       fix_sched_param ("verbose", arg);
@@ -1988,6 +1980,26 @@ common_handle_option (size_t scode, const char *arg, int value,
       flag_tracer_set = true;
       break;
 
+    case OPT_fipa_cp:
+      flag_ipa_cp_set = true;
+      break;
+
+    case OPT_fipa_cp_clone:
+      flag_ipa_cp_clone_set = true;
+      break;
+
+    case OPT_fpredictive_commoning:
+      flag_predictive_commoning_set = true;
+      break;
+
+    case OPT_funswitch_loops:
+      flag_unswitch_loops_set = true;
+      break;
+
+    case OPT_fgcse_after_reload:
+      flag_gcse_after_reload_set = true;
+      break;
+
     case OPT_funroll_loops:
       flag_unroll_loops_set = true;
      break;
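
Note on the recurring pattern above: each option that -fprofile-use may toggle gains a
companion flag_*_set boolean, recorded in common_handle_option when the user passes the
option explicitly, so the umbrella option only changes defaults and never overrides an
explicit choice. The following is a minimal, self-contained C sketch of that idea;
handle_option and the two-flag layout here are illustrative stand-ins, not GCC's actual
interfaces.

/* Sketch (assumed, simplified): an optimization flag paired with a
   "user set this explicitly" marker, as in the diff above.  */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool flag_ipa_cp;        /* The optimization flag itself.  */
static bool flag_ipa_cp_set;    /* True once the user touched it.  */

static void
handle_option (const char *opt, bool value)
{
  if (strcmp (opt, "ipa-cp") == 0)
    {
      flag_ipa_cp = value;
      flag_ipa_cp_set = true;   /* Remember the explicit choice.  */
    }
  else if (strcmp (opt, "profile-use") == 0)
    {
      /* Umbrella option: only flip flags still at their default.  */
      if (!flag_ipa_cp_set)
	flag_ipa_cp = value;
    }
}

int
main (void)
{
  handle_option ("ipa-cp", false);      /* User said -fno-ipa-cp...  */
  handle_option ("profile-use", true);  /* ...so -fprofile-use must not re-enable it.  */
  printf ("flag_ipa_cp = %d\n", flag_ipa_cp);  /* Prints 0.  */
  return 0;
}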