X-Git-Url: http://git.sourceforge.jp/view?p=pf3gnuchains%2Fgcc-fork.git;a=blobdiff_plain;f=gcc%2Fsched-deps.c;h=33a6996fe4fca2b7bbfde0aaf187b23d94f051f0;hp=556c38a4fb0faa42bd97dda4b033b4a621418b07;hb=2af89801635506744c58a179cebbd0c1ad2225d2;hpb=4b987facd8ba658d00c277a7e9c46548b492854f diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c index 556c38a4fb0..33a6996fe4f 100644 --- a/gcc/sched-deps.c +++ b/gcc/sched-deps.c @@ -1,7 +1,8 @@ /* Instruction scheduling pass. This file computes dependencies between instructions. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, - 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 + 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, + 2011, 2012 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com) @@ -26,7 +27,7 @@ along with GCC; see the file COPYING3. If not see #include "system.h" #include "coretypes.h" #include "tm.h" -#include "toplev.h" +#include "diagnostic-core.h" #include "rtl.h" #include "tm_p.h" #include "hard-reg-set.h" @@ -36,7 +37,6 @@ along with GCC; see the file COPYING3. If not see #include "insn-config.h" #include "insn-attr.h" #include "except.h" -#include "toplev.h" #include "recog.h" #include "sched-int.h" #include "params.h" @@ -68,6 +68,9 @@ ds_to_dk (ds_t ds) if (ds & DEP_OUTPUT) return REG_DEP_OUTPUT; + if (ds & DEP_CONTROL) + return REG_DEP_CONTROL; + gcc_assert (ds & DEP_ANTI); return REG_DEP_ANTI; @@ -85,6 +88,9 @@ dk_to_ds (enum reg_note dk) case REG_DEP_OUTPUT: return DEP_OUTPUT; + case REG_DEP_CONTROL: + return DEP_CONTROL; + default: gcc_assert (dk == REG_DEP_ANTI); return DEP_ANTI; @@ -101,6 +107,7 @@ init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds) DEP_CON (dep) = con; DEP_TYPE (dep) = type; DEP_STATUS (dep) = ds; + DEP_COST (dep) = UNKNOWN_DEP_COST; } /* Init DEP with the arguments. @@ -114,7 +121,7 @@ init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind) if ((current_sched_info->flags & USE_DEPS_LIST)) ds = dk_to_ds (kind); else - ds = -1; + ds = 0; init_dep_1 (dep, pro, con, kind, ds); } @@ -180,6 +187,10 @@ dump_dep (FILE *dump, dep_t dep, int flags) t = 'o'; break; + case REG_DEP_CONTROL: + t = 'c'; + break; + case REG_DEP_ANTI: t = 'a'; break; @@ -407,9 +418,28 @@ clear_deps_list (deps_list_t l) while (1); } +/* Decide whether a dependency should be treated as a hard or a speculative + dependency. 
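+   A control dependence is treated as speculative only when DO_PREDICATION
+   is enabled, since only then can it be eliminated by predicating the
+   consumer.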
*/ +static bool +dep_spec_p (dep_t dep) +{ + if (current_sched_info->flags & DO_SPECULATION) + { + if (DEP_STATUS (dep) & SPECULATIVE) + return true; + } + if (current_sched_info->flags & DO_PREDICATION) + { + if (DEP_TYPE (dep) == REG_DEP_CONTROL) + return true; + } + return false; +} + static regset reg_pending_sets; static regset reg_pending_clobbers; static regset reg_pending_uses; +static regset reg_pending_control_uses; static enum reg_pending_barrier_mode reg_pending_barrier; /* Hard registers implicitly clobbered or used (or may be implicitly @@ -437,10 +467,12 @@ static HARD_REG_SET implicit_reg_pending_uses; static bitmap_head *true_dependency_cache = NULL; static bitmap_head *output_dependency_cache = NULL; static bitmap_head *anti_dependency_cache = NULL; +static bitmap_head *control_dependency_cache = NULL; static bitmap_head *spec_dependency_cache = NULL; static int cache_size; static int deps_may_trap_p (const_rtx); +static void add_dependence_1 (rtx, rtx, enum reg_note); static void add_dependence_list (rtx, rtx, int, enum reg_note); static void add_dependence_list_and_free (struct deps_desc *, rtx, rtx *, int, enum reg_note); @@ -484,14 +516,11 @@ deps_may_trap_p (const_rtx mem) it is set to TRUE when the returned comparison should be reversed to get the actual condition. */ static rtx -sched_get_condition_with_rev (const_rtx insn, bool *rev) +sched_get_condition_with_rev_uncached (const_rtx insn, bool *rev) { rtx pat = PATTERN (insn); rtx src; - if (pat == 0) - return 0; - if (rev) *rev = false; @@ -521,6 +550,62 @@ sched_get_condition_with_rev (const_rtx insn, bool *rev) return 0; } +/* Return the condition under which INSN does not execute (i.e. the + not-taken condition for a conditional branch), or NULL if we cannot + find such a condition. The caller should make a copy of the condition + before using it. */ +rtx +sched_get_reverse_condition_uncached (const_rtx insn) +{ + bool rev; + rtx cond = sched_get_condition_with_rev_uncached (insn, &rev); + if (cond == NULL_RTX) + return cond; + if (!rev) + { + enum rtx_code revcode = reversed_comparison_code (cond, insn); + cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond), + XEXP (cond, 0), + XEXP (cond, 1)); + } + return cond; +} + +/* Caching variant of sched_get_condition_with_rev_uncached. + We only do actual work the first time we come here for an insn; the + results are cached in INSN_CACHED_COND and INSN_REVERSE_COND. */ +static rtx +sched_get_condition_with_rev (const_rtx insn, bool *rev) +{ + bool tmp; + + if (INSN_LUID (insn) == 0) + return sched_get_condition_with_rev_uncached (insn, rev); + + if (INSN_CACHED_COND (insn) == const_true_rtx) + return NULL_RTX; + + if (INSN_CACHED_COND (insn) != NULL_RTX) + { + if (rev) + *rev = INSN_REVERSE_COND (insn); + return INSN_CACHED_COND (insn); + } + + INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp); + INSN_REVERSE_COND (insn) = tmp; + + if (INSN_CACHED_COND (insn) == NULL_RTX) + { + INSN_CACHED_COND (insn) = const_true_rtx; + return NULL_RTX; + } + + if (rev) + *rev = INSN_REVERSE_COND (insn); + return INSN_CACHED_COND (insn); +} + /* True when we can find a condition under which INSN is executed. */ static bool sched_has_condition_p (const_rtx insn) @@ -540,7 +625,7 @@ conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2) (rev1==rev2 ? 
reversed_comparison_code (cond2, NULL) : GET_CODE (cond2)) - && XEXP (cond1, 0) == XEXP (cond2, 0) + && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0)) && XEXP (cond1, 1) == XEXP (cond2, 1)) return 1; return 0; @@ -597,8 +682,8 @@ sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds) /* The following instructions, which depend on a speculatively scheduled instruction, cannot be speculatively scheduled along. */ { - if (may_trap_p (PATTERN (insn))) - /* If instruction might trap, it cannot be speculatively scheduled. + if (may_trap_or_fault_p (PATTERN (insn))) + /* If instruction might fault, it cannot be speculatively scheduled. For control speculation it's obvious why and for data speculation it's because the insn might get wrong input if speculation wasn't successful. */ @@ -710,9 +795,6 @@ sd_init_insn (rtx insn) INSN_FORW_DEPS (insn) = create_deps_list (); INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list (); - if (DEBUG_INSN_P (insn)) - DEBUG_INSN_SCHED_P (insn) = TRUE; - /* ??? It would be nice to allocate dependency caches here. */ } @@ -722,12 +804,6 @@ sd_finish_insn (rtx insn) { /* ??? It would be nice to deallocate dependency caches here. */ - if (DEBUG_INSN_P (insn)) - { - gcc_assert (DEBUG_INSN_SCHED_P (insn)); - DEBUG_INSN_SCHED_P (insn) = FALSE; - } - free_deps_list (INSN_HARD_BACK_DEPS (insn)); INSN_HARD_BACK_DEPS (insn) = NULL; @@ -818,12 +894,10 @@ sd_find_dep_between (rtx pro, rtx con, bool resolved_p) int elem_luid = INSN_LUID (pro); int insn_luid = INSN_LUID (con); - gcc_assert (output_dependency_cache != NULL - && anti_dependency_cache != NULL); - if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid) && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid) - && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)) + && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid) + && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid)) return NULL; } @@ -876,7 +950,8 @@ ask_dependency_caches (dep_t dep) gcc_assert (true_dependency_cache != NULL && output_dependency_cache != NULL - && anti_dependency_cache != NULL); + && anti_dependency_cache != NULL + && control_dependency_cache != NULL); if (!(current_sched_info->flags & USE_DEPS_LIST)) { @@ -888,6 +963,8 @@ ask_dependency_caches (dep_t dep) present_dep_type = REG_DEP_OUTPUT; else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)) present_dep_type = REG_DEP_ANTI; + else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid)) + present_dep_type = REG_DEP_CONTROL; else /* There is no existing dep so it should be created. */ return DEP_CREATED; @@ -906,6 +983,8 @@ ask_dependency_caches (dep_t dep) present_dep_types |= DEP_OUTPUT; if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)) present_dep_types |= DEP_ANTI; + if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid)) + present_dep_types |= DEP_CONTROL; if (present_dep_types == 0) /* There is no existing dep so it should be created. 
*/ @@ -959,6 +1038,10 @@ set_dependency_caches (dep_t dep) bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid); break; + case REG_DEP_CONTROL: + bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid); + break; + default: gcc_unreachable (); } @@ -973,6 +1056,8 @@ set_dependency_caches (dep_t dep) bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid); if (ds & DEP_ANTI) bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid); + if (ds & DEP_CONTROL) + bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid); if (ds & SPECULATIVE) { @@ -1004,6 +1089,10 @@ update_dependency_caches (dep_t dep, enum reg_note old_type) bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid); break; + case REG_DEP_CONTROL: + bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid); + break; + default: gcc_unreachable (); } @@ -1044,6 +1133,7 @@ update_dep (dep_t dep, dep_t new_dep, { enum DEPS_ADJUST_RESULT res = DEP_PRESENT; enum reg_note old_type = DEP_TYPE (dep); + bool was_spec = dep_spec_p (dep); /* If this is a more restrictive type of dependence than the existing one, then change the existing dependence to this @@ -1062,20 +1152,13 @@ update_dep (dep_t dep, dep_t new_dep, ds_t new_status = ds | dep_status; if (new_status & SPECULATIVE) - /* Either existing dep or a dep we're adding or both are - speculative. */ { + /* Either existing dep or a dep we're adding or both are + speculative. */ if (!(ds & SPECULATIVE) || !(dep_status & SPECULATIVE)) /* The new dep can't be speculative. */ - { - new_status &= ~SPECULATIVE; - - if (dep_status & SPECULATIVE) - /* The old dep was speculative, but now it - isn't. */ - change_spec_dep_to_hard (sd_it); - } + new_status &= ~SPECULATIVE; else { /* Both are speculative. Merge probabilities. */ @@ -1100,6 +1183,10 @@ update_dep (dep_t dep, dep_t new_dep, } } + if (was_spec && !dep_spec_p (dep)) + /* The old dep was speculative, but now it isn't. */ + change_spec_dep_to_hard (sd_it); + if (true_dependency_cache != NULL && res == DEP_CHANGED) update_dependency_caches (dep, old_type); @@ -1200,8 +1287,7 @@ get_back_and_forw_lists (dep_t dep, bool resolved_p, if (!resolved_p) { - if ((current_sched_info->flags & DO_SPECULATION) - && (DEP_STATUS (dep) & SPECULATIVE)) + if (dep_spec_p (dep)) *back_list_ptr = INSN_SPEC_BACK_DEPS (con); else *back_list_ptr = INSN_HARD_BACK_DEPS (con); @@ -1228,8 +1314,8 @@ sd_add_dep (dep_t dep, bool resolved_p) gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem); - if ((current_sched_info->flags & DO_SPECULATION) - && !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep))) + if ((current_sched_info->flags & DO_SPECULATION) == 0 + || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep))) DEP_STATUS (dep) &= ~SPECULATIVE; copy_dep (DEP_NODE_DEP (n), dep); @@ -1269,8 +1355,7 @@ sd_resolve_dep (sd_iterator_def sd_it) rtx pro = DEP_PRO (dep); rtx con = DEP_CON (dep); - if ((current_sched_info->flags & DO_SPECULATION) - && (DEP_STATUS (dep) & SPECULATIVE)) + if (dep_spec_p (dep)) move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con), INSN_RESOLVED_BACK_DEPS (con)); else @@ -1281,6 +1366,27 @@ sd_resolve_dep (sd_iterator_def sd_it) INSN_RESOLVED_FORW_DEPS (pro)); } +/* Perform the inverse operation of sd_resolve_dep. Restore the dependence + pointed to by SD_IT to unresolved state. 
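+   The dep is moved from the resolved back and forward lists back to the
+   appropriate unresolved lists.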
*/ +void +sd_unresolve_dep (sd_iterator_def sd_it) +{ + dep_node_t node = DEP_LINK_NODE (*sd_it.linkp); + dep_t dep = DEP_NODE_DEP (node); + rtx pro = DEP_PRO (dep); + rtx con = DEP_CON (dep); + + if (dep_spec_p (dep)) + move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con), + INSN_SPEC_BACK_DEPS (con)); + else + move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con), + INSN_HARD_BACK_DEPS (con)); + + move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro), + INSN_FORW_DEPS (pro)); +} + /* Make TO depend on all the FROM's producers. If RESOLVED_P is true add dependencies to the resolved lists. */ void @@ -1321,6 +1427,7 @@ sd_delete_dep (sd_iterator_def sd_it) bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid); bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid); + bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid); bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid); if (current_sched_info->flags & DO_SPECULATION) @@ -1386,6 +1493,57 @@ sd_debug_lists (rtx insn, sd_list_types_def types) fprintf (stderr, "\n"); } +/* A wrapper around add_dependence_1, to add a dependence of CON on + PRO, with type DEP_TYPE. This function implements special handling + for REG_DEP_CONTROL dependencies. For these, we optionally promote + the type to REG_DEP_ANTI if we can determine that predication is + impossible; otherwise we add additional true dependencies on the + INSN_COND_DEPS list of the jump (which PRO must be). */ +void +add_dependence (rtx con, rtx pro, enum reg_note dep_type) +{ + if (dep_type == REG_DEP_CONTROL + && !(current_sched_info->flags & DO_PREDICATION)) + dep_type = REG_DEP_ANTI; + + /* A REG_DEP_CONTROL dependence may be eliminated through predication, + so we must also make the insn dependent on the setter of the + condition. */ + if (dep_type == REG_DEP_CONTROL) + { + rtx real_pro = pro; + rtx other = real_insn_for_shadow (real_pro); + rtx cond; + + if (other != NULL_RTX) + real_pro = other; + cond = sched_get_reverse_condition_uncached (real_pro); + /* Verify that the insn does not use a different value in + the condition register than the one that was present at + the jump. */ + if (cond == NULL_RTX) + dep_type = REG_DEP_ANTI; + else if (INSN_CACHED_COND (real_pro) == const_true_rtx) + { + HARD_REG_SET uses; + CLEAR_HARD_REG_SET (uses); + note_uses (&PATTERN (con), record_hard_reg_uses, &uses); + if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0)))) + dep_type = REG_DEP_ANTI; + } + if (dep_type == REG_DEP_CONTROL) + { + if (sched_verbose >= 5) + fprintf (sched_dump, "making DEP_CONTROL for %d\n", + INSN_UID (real_pro)); + add_dependence_list (con, INSN_COND_DEPS (real_pro), 0, + REG_DEP_TRUE); + } + } + + add_dependence_1 (con, pro, dep_type); +} + /* A convenience wrapper to operate on an entire list. */ static void @@ -1407,7 +1565,10 @@ add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp, { rtx list, next; - if (deps->readonly) + /* We don't want to short-circuit dependencies involving debug + insns, because they may cause actual dependencies to be + disregarded. 
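+     Freeing the list here would make its entries unavailable when the
+     real dependencies of later nondebug insns are computed.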
*/ + if (deps->readonly || DEBUG_INSN_P (insn)) { add_dependence_list (insn, *listp, uncond, dep_type); return; @@ -1517,9 +1678,7 @@ fixup_sched_groups (rtx insn) delete_all_dependences (insn); - prev_nonnote = prev_nonnote_insn (insn); - while (DEBUG_INSN_P (prev_nonnote)) - prev_nonnote = prev_nonnote_insn (prev_nonnote); + prev_nonnote = prev_nonnote_nondebug_insn (insn); if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote) && ! sched_insns_conditions_mutex_p (insn, prev_nonnote)) add_dependence (insn, prev_nonnote, REG_DEP_ANTI); @@ -1569,7 +1728,8 @@ add_insn_mem_dependence (struct deps_desc *deps, bool read_p, if (sched_deps_info->use_cselib) { mem = shallow_copy_rtx (mem); - XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0)); + XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0), + GET_MODE (mem), insn); } link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list); *mem_list = link; @@ -1600,6 +1760,10 @@ flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read, add_dependence_list_and_free (deps, insn, &deps->last_pending_memory_flush, 1, for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT); + + add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1, + REG_DEP_ANTI); + if (!deps->readonly) { free_EXPR_LIST_list (&deps->pending_write_mems); @@ -1662,7 +1826,7 @@ haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds) dep_def _dep, *dep = &_dep; init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds), - current_sched_info->flags & USE_DEPS_LIST ? ds : -1); + current_sched_info->flags & USE_DEPS_LIST ? ds : 0); maybe_add_or_update_dep_1 (dep, false, pending_mem, mem); } @@ -1721,10 +1885,12 @@ ds_to_dt (ds_t ds) return REG_DEP_TRUE; else if (ds & DEP_OUTPUT) return REG_DEP_OUTPUT; + else if (ds & DEP_ANTI) + return REG_DEP_ANTI; else { - gcc_assert (ds & DEP_ANTI); - return REG_DEP_ANTI; + gcc_assert (ds & DEP_CONTROL); + return REG_DEP_CONTROL; } } @@ -1824,10 +1990,10 @@ mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p) enum reg_class cl; gcc_assert (regno >= FIRST_PSEUDO_REGISTER); - cl = sched_regno_cover_class[regno]; + cl = sched_regno_pressure_class[regno]; if (cl != NO_REGS) { - incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)]; + incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)]; if (clobber_p) { new_incr = reg_pressure_info[cl].clobber_increase + incr; @@ -1864,7 +2030,7 @@ mark_insn_hard_regno_birth (rtx insn, int regno, int nregs, gcc_assert (regno < FIRST_PSEUDO_REGISTER); if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)) { - cl = sched_regno_cover_class[regno]; + cl = sched_regno_pressure_class[regno]; if (cl != NO_REGS) { if (clobber_p) @@ -1925,10 +2091,10 @@ mark_pseudo_death (int regno) enum reg_class cl; gcc_assert (regno >= FIRST_PSEUDO_REGISTER); - cl = sched_regno_cover_class[regno]; + cl = sched_regno_pressure_class[regno]; if (cl != NO_REGS) { - incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)]; + incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)]; reg_pressure_info[cl].change -= incr; } } @@ -1946,7 +2112,7 @@ mark_hard_regno_death (int regno, int nregs) gcc_assert (regno < FIRST_PSEUDO_REGISTER); if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)) { - cl = sched_regno_cover_class[regno]; + cl = sched_regno_pressure_class[regno]; if (cl != NO_REGS) reg_pressure_info[cl].change -= 1; } @@ -1994,8 +2160,8 @@ mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data) } /* Set up reg pressure info related to INSN. 
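   For each pressure class this records the pressure changes caused by
   INSN's sets, clobbers and REG_DEAD notes, storing the increments in
   INSN_REG_PRESSURE and allocating INSN_MAX_REG_PRESSURE.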
*/ -static void -setup_insn_reg_pressure_info (rtx insn) +void +init_insn_reg_pressure_info (rtx insn) { int i, len; enum reg_class cl; @@ -2007,9 +2173,9 @@ setup_insn_reg_pressure_info (rtx insn) if (! INSN_P (insn)) return; - for (i = 0; i < ira_reg_class_cover_size; i++) + for (i = 0; i < ira_pressure_classes_num; i++) { - cl = ira_reg_class_cover[i]; + cl = ira_pressure_classes[i]; reg_pressure_info[cl].clobber_increase = 0; reg_pressure_info[cl].set_increase = 0; reg_pressure_info[cl].unused_set_increase = 0; @@ -2030,14 +2196,14 @@ setup_insn_reg_pressure_info (rtx insn) if (REG_NOTE_KIND (link) == REG_DEAD) mark_reg_death (XEXP (link, 0)); - len = sizeof (struct reg_pressure_data) * ira_reg_class_cover_size; + len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num; pressure_info = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len); - INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_reg_class_cover_size + INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num * sizeof (int), 1); - for (i = 0; i < ira_reg_class_cover_size; i++) + for (i = 0; i < ira_pressure_classes_num; i++) { - cl = ira_reg_class_cover[i]; + cl = ira_pressure_classes[i]; pressure_info[i].clobber_increase = reg_pressure_info[cl].clobber_increase; pressure_info[i].set_increase = reg_pressure_info[cl].set_increase; @@ -2262,16 +2428,12 @@ sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn) /* Treat all writes to a stack register as modifying the TOS. */ if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG) { - int nregs; - /* Avoid analyzing the same register twice. */ if (regno != FIRST_STACK_REG) sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn); - nregs = hard_regno_nregs[FIRST_STACK_REG][mode]; - while (--nregs >= 0) - SET_HARD_REG_BIT (implicit_reg_pending_uses, - FIRST_STACK_REG + nregs); + add_to_hard_reg_set (&implicit_reg_pending_uses, mode, + FIRST_STACK_REG); } #endif } @@ -2286,8 +2448,11 @@ sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn) = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest)); t = shallow_copy_rtx (dest); - cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn); - XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0)); + cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, + GET_MODE (t), insn); + XEXP (t, 0) + = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t), + insn); } t = canon_rtx (t); @@ -2335,6 +2500,8 @@ sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn) add_dependence_list (insn, deps->last_pending_memory_flush, 1, REG_DEP_ANTI); + add_dependence_list (insn, deps->pending_jump_insns, 1, + REG_DEP_CONTROL); if (!deps->readonly) add_insn_mem_dependence (deps, false, insn, dest); @@ -2443,8 +2610,11 @@ sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn) = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t)); t = shallow_copy_rtx (t); - cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn); - XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0)); + cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, + GET_MODE (t), insn); + XEXP (t, 0) + = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t), + insn); } if (!DEBUG_INSN_P (insn)) @@ -2468,8 +2638,7 @@ sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn) pending_mem = deps->pending_write_mems; while (pending) { - if (true_dependence (XEXP (pending_mem, 0), VOIDmode, - t, rtx_varies_p) + if (true_dependence (XEXP (pending_mem, 0), VOIDmode, t) && ! 
sched_insns_conditions_mutex_p (insn, XEXP (pending, 0))) note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0), @@ -2481,23 +2650,22 @@ sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn) } for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1)) - { - if (! JUMP_P (XEXP (u, 0))) - add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); - else if (deps_may_trap_p (x)) - { - if ((sched_deps_info->generate_spec_deps) - && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL)) - { - ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL, - MAX_DEP_WEAK); - - note_dep (XEXP (u, 0), ds); - } - else - add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); - } - } + add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); + + for (u = deps->pending_jump_insns; u; u = XEXP (u, 1)) + if (deps_may_trap_p (x)) + { + if ((sched_deps_info->generate_spec_deps) + && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL)) + { + ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL, + MAX_DEP_WEAK); + + note_dep (XEXP (u, 0), ds); + } + else + add_dependence (insn, XEXP (u, 0), REG_DEP_CONTROL); + } } /* Always add these dependencies to pending_reads, since @@ -2636,6 +2804,24 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) add_dependence_list (insn, deps->last_function_call_may_noreturn, 1, REG_DEP_ANTI); + /* We must avoid creating a situation in which two successors of the + current block have different unwind info after scheduling. If at any + point the two paths re-join this leads to incorrect unwind info. */ + /* ??? There are certain situations involving a forced frame pointer in + which, with extra effort, we could fix up the unwind info at a later + CFG join. However, it seems better to notice these cases earlier + during prologue generation and avoid marking the frame pointer setup + as frame-related at all. */ + if (RTX_FRAME_RELATED_P (insn)) + { + /* Make sure prologue insn is scheduled before next jump. */ + deps->sched_before_next_jump + = alloc_INSN_LIST (insn, deps->sched_before_next_jump); + + /* Make sure epilogue insn is scheduled after preceding jumps. */ + add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI); + } + if (code == COND_EXEC) { sched_analyze_2 (deps, COND_EXEC_TEST (x), insn); @@ -2688,16 +2874,18 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) else sched_analyze_2 (deps, XEXP (link, 0), insn); } - if (find_reg_note (insn, REG_SETJMP, NULL)) + /* Don't schedule anything after a tail call, tail call needs + to use at least all call-saved registers. */ + if (SIBLING_CALL_P (insn)) + reg_pending_barrier = TRUE_BARRIER; + else if (find_reg_note (insn, REG_SETJMP, NULL)) reg_pending_barrier = MOVE_BARRIER; } if (JUMP_P (insn)) { rtx next; - next = next_nonnote_insn (insn); - while (next && DEBUG_INSN_P (next)) - next = next_nonnote_insn (next); + next = next_nonnote_nondebug_insn (insn); if (next && BARRIER_P (next)) reg_pending_barrier = MOVE_BARRIER; else @@ -2706,14 +2894,11 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) if (sched_deps_info->compute_jump_reg_dependencies) { - regset_head tmp_uses, tmp_sets; - INIT_REG_SET (&tmp_uses); - INIT_REG_SET (&tmp_sets); - (*sched_deps_info->compute_jump_reg_dependencies) - (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets); + (insn, reg_pending_control_uses); + /* Make latency of jump equal to 0 by using anti-dependence. 
*/ - EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi) + EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi) { struct deps_reg *reg_last = &deps->reg_last[i]; add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI); @@ -2721,17 +2906,7 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) 0, REG_DEP_ANTI); add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI); - - if (!deps->readonly) - { - reg_last->uses_length++; - reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses); - } } - IOR_REG_SET (reg_pending_sets, &tmp_sets); - - CLEAR_REG_SET (&tmp_uses); - CLEAR_REG_SET (&tmp_sets); } /* All memory writes and volatile reads must happen before the @@ -2761,6 +2936,8 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) add_dependence_list (insn, deps->last_pending_memory_flush, 1, REG_DEP_ANTI); + add_dependence_list (insn, deps->pending_jump_insns, 1, + REG_DEP_ANTI); } } @@ -2777,7 +2954,7 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) if (sched_pressure_p) { setup_insn_reg_uses (deps, insn); - setup_insn_reg_pressure_info (insn); + init_insn_reg_pressure_info (insn); } /* Add register dependencies for insn. */ @@ -2796,14 +2973,15 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) REG_DEP_ANTI); for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1)) - if (! JUMP_P (XEXP (u, 0)) - || !sel_sched_p ()) + if (!sel_sched_p ()) add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi) { struct deps_reg *reg_last = &deps->reg_last[i]; add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI); + /* There's no point in making REG_DEP_CONTROL dependencies for + debug insns. */ add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI); if (!deps->readonly) @@ -2825,6 +3003,8 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) } else { + regset_head set_or_clobbered; + EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi) { struct deps_reg *reg_last = &deps->reg_last[i]; @@ -2855,6 +3035,25 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) } } + if (targetm.sched.exposed_pipeline) + { + INIT_REG_SET (&set_or_clobbered); + bitmap_ior (&set_or_clobbered, reg_pending_clobbers, + reg_pending_sets); + EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi) + { + struct deps_reg *reg_last = &deps->reg_last[i]; + rtx list; + for (list = reg_last->uses; list; list = XEXP (list, 1)) + { + rtx other = XEXP (list, 0); + if (INSN_CACHED_COND (other) != const_true_rtx + && refers_to_regno_p (i, i + 1, INSN_CACHED_COND (other), NULL)) + INSN_CACHED_COND (other) = const_true_rtx; + } + } + } + /* If the current insn is conditional, we can't free any of the lists. 
*/ if (sched_has_condition_p (insn)) @@ -2866,6 +3065,8 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI); add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI); + add_dependence_list (insn, reg_last->control_uses, 0, + REG_DEP_CONTROL); if (!deps->readonly) { @@ -2882,12 +3083,11 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) REG_DEP_ANTI); add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT); add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI); + add_dependence_list (insn, reg_last->control_uses, 0, + REG_DEP_CONTROL); if (!deps->readonly) - { - reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); - SET_REGNO_REG_SET (&deps->reg_conditional_sets, i); - } + reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); } } else @@ -2905,6 +3105,9 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) REG_DEP_ANTI); add_dependence_list_and_free (deps, insn, ®_last->uses, 0, REG_DEP_ANTI); + add_dependence_list_and_free (deps, insn, + ®_last->control_uses, 0, + REG_DEP_ANTI); add_dependence_list_and_free (deps, insn, ®_last->clobbers, 0, REG_DEP_OUTPUT); @@ -2921,6 +3124,8 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI); add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI); + add_dependence_list (insn, reg_last->control_uses, 0, + REG_DEP_CONTROL); } if (!deps->readonly) @@ -2943,16 +3148,26 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) REG_DEP_OUTPUT); add_dependence_list_and_free (deps, insn, ®_last->uses, 0, REG_DEP_ANTI); + add_dependence_list (insn, reg_last->control_uses, 0, + REG_DEP_CONTROL); if (!deps->readonly) { reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); reg_last->uses_length = 0; reg_last->clobbers_length = 0; - CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i); } } } + if (!deps->readonly) + { + EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi) + { + struct deps_reg *reg_last = &deps->reg_last[i]; + reg_last->control_uses + = alloc_INSN_LIST (insn, reg_last->control_uses); + } + } } for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) @@ -2962,6 +3177,7 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI); add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI); add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI); + add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI); if (!deps->readonly) reg_last->implicit_sets @@ -2985,6 +3201,7 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) CLEAR_REG_SET (reg_pending_uses); CLEAR_REG_SET (reg_pending_clobbers); CLEAR_REG_SET (reg_pending_sets); + CLEAR_REG_SET (reg_pending_control_uses); CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers); CLEAR_HARD_REG_SET (implicit_reg_pending_uses); @@ -3016,6 +3233,9 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) struct deps_reg *reg_last = &deps->reg_last[i]; add_dependence_list_and_free (deps, insn, ®_last->uses, 0, REG_DEP_ANTI); + add_dependence_list_and_free (deps, insn, + ®_last->control_uses, 0, + REG_DEP_CONTROL); add_dependence_list_and_free (deps, insn, ®_last->sets, 0, reg_pending_barrier == TRUE_BARRIER ? 
REG_DEP_TRUE : REG_DEP_ANTI); @@ -3047,8 +3267,6 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn) && sel_insn_is_speculation_check (insn))) flush_pending_lists (deps, insn, true, true); - if (!deps->readonly) - CLEAR_REG_SET (&deps->reg_conditional_sets); reg_pending_barrier = NOT_A_BARRIER; } @@ -3229,12 +3447,41 @@ deps_analyze_insn (struct deps_desc *deps, rtx insn) if (sched_deps_info->start_insn) sched_deps_info->start_insn (insn); - if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn)) + /* Record the condition for this insn. */ + if (NONDEBUG_INSN_P (insn)) + { + rtx t; + sched_get_condition_with_rev (insn, NULL); + t = INSN_CACHED_COND (insn); + INSN_COND_DEPS (insn) = NULL_RTX; + if (reload_completed + && (current_sched_info->flags & DO_PREDICATION) + && COMPARISON_P (t) + && REG_P (XEXP (t, 0)) + && CONSTANT_P (XEXP (t, 1))) + { + unsigned int regno; + int nregs; + t = XEXP (t, 0); + regno = REGNO (t); + nregs = hard_regno_nregs[regno][GET_MODE (t)]; + t = NULL_RTX; + while (nregs-- > 0) + { + struct deps_reg *reg_last = &deps->reg_last[regno + nregs]; + t = concat_INSN_LIST (reg_last->sets, t); + t = concat_INSN_LIST (reg_last->clobbers, t); + t = concat_INSN_LIST (reg_last->implicit_sets, t); + } + INSN_COND_DEPS (insn) = t; + } + } + + if (JUMP_P (insn)) { /* Make each JUMP_INSN (but not a speculative check) a scheduling barrier for memory references. */ if (!deps->readonly - && JUMP_P (insn) && !(sel_sched_p () && sel_insn_is_speculation_check (insn))) { @@ -3242,10 +3489,19 @@ deps_analyze_insn (struct deps_desc *deps, rtx insn) if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH) flush_pending_lists (deps, insn, true, true); else - deps->last_pending_memory_flush - = alloc_INSN_LIST (insn, deps->last_pending_memory_flush); + deps->pending_jump_insns + = alloc_INSN_LIST (insn, deps->pending_jump_insns); } + /* For each insn which shouldn't cross a jump, add a dependence. */ + add_dependence_list_and_free (deps, insn, + &deps->sched_before_next_jump, 1, + REG_DEP_ANTI); + + sched_analyze_insn (deps, PATTERN (insn), insn); + } + else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn)) + { sched_analyze_insn (deps, PATTERN (insn), insn); } else if (CALL_P (insn)) @@ -3366,10 +3622,8 @@ deps_start_bb (struct deps_desc *deps, rtx head) hard registers correct. */ if (! reload_completed && !LABEL_P (head)) { - rtx insn = prev_nonnote_insn (head); + rtx insn = prev_nonnote_nondebug_insn (head); - while (insn && DEBUG_INSN_P (insn)) - insn = prev_nonnote_insn (insn); if (insn && CALL_P (insn)) deps->in_post_call_group_p = post_call_initial; } @@ -3444,18 +3698,23 @@ sched_free_deps (rtx head, rtx tail, bool resolved_p) rtx insn; rtx next_tail = NEXT_INSN (tail); + /* We make two passes since some insns may be scheduled before their + dependencies are resolved. */ for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) if (INSN_P (insn) && INSN_LUID (insn) > 0) { - /* Clear resolved back deps together with its dep_nodes. */ - delete_dep_nodes_in_back_deps (insn, resolved_p); - /* Clear forward deps and leave the dep_nodes to the corresponding back_deps list. */ if (resolved_p) clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn)); else clear_deps_list (INSN_FORW_DEPS (insn)); + } + for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) + if (INSN_P (insn) && INSN_LUID (insn) > 0) + { + /* Clear resolved back deps together with its dep_nodes. 
*/ + delete_dep_nodes_in_back_deps (insn, resolved_p); sd_finish_insn (insn); } @@ -3476,12 +3735,12 @@ init_deps (struct deps_desc *deps, bool lazy_reg_last) else deps->reg_last = XCNEWVEC (struct deps_reg, max_reg); INIT_REG_SET (&deps->reg_last_in_use); - INIT_REG_SET (&deps->reg_conditional_sets); deps->pending_read_insns = 0; deps->pending_read_mems = 0; deps->pending_write_insns = 0; deps->pending_write_mems = 0; + deps->pending_jump_insns = 0; deps->pending_read_list_length = 0; deps->pending_write_list_length = 0; deps->pending_flush_length = 0; @@ -3489,6 +3748,7 @@ init_deps (struct deps_desc *deps, bool lazy_reg_last) deps->last_function_call = 0; deps->last_function_call_may_noreturn = 0; deps->sched_before_next_call = 0; + deps->sched_before_next_jump = 0; deps->in_post_call_group_p = not_post_call; deps->last_debug_insn = 0; deps->last_reg_pending_barrier = NOT_A_BARRIER; @@ -3541,23 +3801,22 @@ free_deps (struct deps_desc *deps) free_INSN_LIST_list (®_last->sets); if (reg_last->implicit_sets) free_INSN_LIST_list (®_last->implicit_sets); + if (reg_last->control_uses) + free_INSN_LIST_list (®_last->control_uses); if (reg_last->clobbers) free_INSN_LIST_list (®_last->clobbers); } CLEAR_REG_SET (&deps->reg_last_in_use); - CLEAR_REG_SET (&deps->reg_conditional_sets); /* As we initialize reg_last lazily, it is possible that we didn't allocate it at all. */ - if (deps->reg_last) - free (deps->reg_last); + free (deps->reg_last); deps->reg_last = NULL; deps = NULL; } -/* Remove INSN from dependence contexts DEPS. Caution: reg_conditional_sets - is not handled. */ +/* Remove INSN from dependence contexts DEPS. */ void remove_from_deps (struct deps_desc *deps, rtx insn) { @@ -3572,6 +3831,9 @@ remove_from_deps (struct deps_desc *deps, rtx insn) removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns, &deps->pending_write_mems); deps->pending_write_list_length -= removed; + + removed = remove_from_dependence_list (insn, &deps->pending_jump_insns); + deps->pending_flush_length -= removed; removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush); deps->pending_flush_length -= removed; @@ -3666,6 +3928,8 @@ extend_dependency_caches (int n, bool create_p) output_dependency_cache, luid); anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache, luid); + control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache, + luid); if (current_sched_info->flags & DO_SPECULATION) spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache, @@ -3676,6 +3940,7 @@ extend_dependency_caches (int n, bool create_p) bitmap_initialize (&true_dependency_cache[i], 0); bitmap_initialize (&output_dependency_cache[i], 0); bitmap_initialize (&anti_dependency_cache[i], 0); + bitmap_initialize (&control_dependency_cache[i], 0); if (current_sched_info->flags & DO_SPECULATION) bitmap_initialize (&spec_dependency_cache[i], 0); @@ -3705,6 +3970,7 @@ sched_deps_finish (void) bitmap_clear (&true_dependency_cache[i]); bitmap_clear (&output_dependency_cache[i]); bitmap_clear (&anti_dependency_cache[i]); + bitmap_clear (&control_dependency_cache[i]); if (sched_deps_info->generate_spec_deps) bitmap_clear (&spec_dependency_cache[i]); @@ -3715,6 +3981,8 @@ sched_deps_finish (void) output_dependency_cache = NULL; free (anti_dependency_cache); anti_dependency_cache = NULL; + free (control_dependency_cache); + control_dependency_cache = NULL; if (sched_deps_info->generate_spec_deps) { @@ -3736,6 +4004,7 @@ init_deps_global (void) reg_pending_sets = 
ALLOC_REG_SET (®_obstack); reg_pending_clobbers = ALLOC_REG_SET (®_obstack); reg_pending_uses = ALLOC_REG_SET (®_obstack); + reg_pending_control_uses = ALLOC_REG_SET (®_obstack); reg_pending_barrier = NOT_A_BARRIER; if (!sel_sched_p () || sched_emulate_haifa_p) @@ -3760,6 +4029,7 @@ finish_deps_global (void) FREE_REG_SET (reg_pending_sets); FREE_REG_SET (reg_pending_clobbers); FREE_REG_SET (reg_pending_uses); + FREE_REG_SET (reg_pending_control_uses); } /* Estimate the weakness of dependence between MEM1 and MEM2. */ @@ -3793,8 +4063,8 @@ estimate_dep_weak (rtx mem1, rtx mem2) /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE. This function can handle same INSN and ELEM (INSN == ELEM). It is a convenience wrapper. */ -void -add_dependence (rtx insn, rtx elem, enum reg_note dep_type) +static void +add_dependence_1 (rtx insn, rtx elem, enum reg_note dep_type) { ds_t ds; bool internal; @@ -3803,6 +4073,8 @@ add_dependence (rtx insn, rtx elem, enum reg_note dep_type) ds = DEP_TRUE; else if (dep_type == REG_DEP_OUTPUT) ds = DEP_OUTPUT; + else if (dep_type == REG_DEP_CONTROL) + ds = DEP_CONTROL; else { gcc_assert (dep_type == REG_DEP_ANTI); @@ -4069,10 +4341,12 @@ dump_ds (FILE *f, ds_t s) if (s & DEP_TRUE) fprintf (f, "DEP_TRUE; "); - if (s & DEP_ANTI) - fprintf (f, "DEP_ANTI; "); if (s & DEP_OUTPUT) fprintf (f, "DEP_OUTPUT; "); + if (s & DEP_ANTI) + fprintf (f, "DEP_ANTI; "); + if (s & DEP_CONTROL) + fprintf (f, "DEP_CONTROL; "); fprintf (f, "}"); } @@ -4097,7 +4371,7 @@ check_dep (dep_t dep, bool relaxed_p) if (!(current_sched_info->flags & USE_DEPS_LIST)) { - gcc_assert (ds == -1); + gcc_assert (ds == 0); return; } @@ -4107,10 +4381,13 @@ check_dep (dep_t dep, bool relaxed_p) else if (dt == REG_DEP_OUTPUT) gcc_assert ((ds & DEP_OUTPUT) && !(ds & DEP_TRUE)); - else - gcc_assert ((dt == REG_DEP_ANTI) - && (ds & DEP_ANTI) + else if (dt == REG_DEP_ANTI) + gcc_assert ((ds & DEP_ANTI) && !(ds & (DEP_OUTPUT | DEP_TRUE))); + else + gcc_assert (dt == REG_DEP_CONTROL + && (ds & DEP_CONTROL) + && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE))); /* HARD_DEP can not appear in dep_status of a link. */ gcc_assert (!(ds & HARD_DEP));