{
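+ /* Count deps consumed by debug insns separately; a dep produced
+ by a debug insn is not a real dep either. */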
if (DEBUG_INSN_P (DEP_CON (dep)))
dbgcount++;
- else
+ else if (!DEBUG_INSN_P (DEP_PRO (dep)))
nodbgcount++;
}
should have been removed from the ready list. */
- gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));
+ gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
+ /* Reset debug insns invalidated by moving this insn. */
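+ /* The loop below has no increment expression: sd_delete_dep ()
+ at the bottom of the loop body advances SD_IT to the next dep. */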
+ if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
+ for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
+ sd_iterator_cond (&sd_it, &dep);)
+ {
+ rtx dbg = DEP_PRO (dep);
+
+ gcc_assert (DEBUG_INSN_P (dbg));
+
+ if (sched_verbose >= 6)
+ fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
+ INSN_UID (dbg));
+
+ /* ??? Rather than resetting the debug insn, we might be able
+ to emit a debug temp before the just-scheduled insn, but
+ this would involve checking that the expression at the
+ point of the debug insn is equivalent to the expression
+ before the just-scheduled insn. They might not be: the
+ expression in the debug insn may depend on other insns not
+ yet scheduled that set MEMs, REGs or even other debug
+ insns. It's not clear that attempting to preserve debug
+ information in these cases is worth the effort, given how
+ uncommon these resets are and the likelihood that the debug
+ temps introduced won't survive the schedule change. */
+ INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
+ df_insn_rescan (dbg);
+
+ /* We delete rather than resolve these deps, otherwise we
+ crash in sched_free_deps(), because forward deps are
+ expected to be released before backward deps. */
+ sd_delete_dep (sd_it);
+ }
+
gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
advancing the iterator. */
sd_resolve_dep (sd_it);
+ /* Don't bother trying to mark next as ready if insn is a debug
+ insn. If insn is the last hard dependency, it will have
+ already been discounted. */
+ if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
+ continue;
+
if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
{
int effective_cost;
fprintf (stderr, "\n");
}
+/* Determine whether DEP is a dependency link of a non-debug insn on a
+ debug insn. */
+
+static inline bool
+depl_on_debug_p (dep_link_t dep)
+{
+ return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
+ && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
+}
+
/* Functions to operate with a single link from the dependencies lists -
dep_link_t. */
{
attach_dep_link (link, &DEPS_LIST_FIRST (l));
- ++DEPS_LIST_N_LINKS (l);
+ /* Don't count debug deps. */
+ if (!depl_on_debug_p (link))
+ ++DEPS_LIST_N_LINKS (l);
}
/* Detach dep_link L from the list. */
{
detach_dep_link (link);
- --DEPS_LIST_N_LINKS (list);
+ /* Don't count debug deps. */
+ if (!depl_on_debug_p (link))
+ --DEPS_LIST_N_LINKS (list);
}
/* Move link LINK from list FROM to list TO. */
}
/* Return true if INSN's lists defined by LIST_TYPES are all empty. */
+
bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
- return sd_lists_size (insn, list_types) == 0;
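+ /* We can no longer rely on sd_lists_size () being zero here:
+ list sizes do not include debug deps, so a list may report
+ size zero while still containing links. Walk the lists
+ instead. */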
+ while (list_types != SD_LIST_NONE)
+ {
+ deps_list_t list;
+ bool resolved_p;
+
+ sd_next_list (insn, &list_types, &list, &resolved_p);
+ if (!deps_list_empty_p (list))
+ return false;
+ }
+
+ return true;
}
/* Initialize data for INSN. */
rtx insn = DEP_CON (dep);
gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
- gcc_assert (!DEBUG_INSN_P (elem) || DEBUG_INSN_P (insn));
if ((current_sched_info->flags & DO_SPECULATION)
&& !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
{
insn_list = &deps->pending_read_insns;
mem_list = &deps->pending_read_mems;
- deps->pending_read_list_length++;
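+ /* Debug insn reads are recorded but must not count toward the
+ pending-list flush limit, so that debug insns cannot affect
+ the schedule of real insns. */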
+ if (!DEBUG_INSN_P (insn))
+ deps->pending_read_list_length++;
}
else
{
rtx pending, pending_mem;
rtx t = x;
- if (DEBUG_INSN_P (insn))
- {
- sched_analyze_2 (deps, XEXP (x, 0), insn);
- return;
- }
-
if (sched_deps_info->use_cselib)
{
t = shallow_copy_rtx (t);
cselib_lookup (XEXP (t, 0), Pmode, 1);
XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
}
- t = canon_rtx (t);
- pending = deps->pending_read_insns;
- pending_mem = deps->pending_read_mems;
- while (pending)
- {
- if (read_dependence (XEXP (pending_mem, 0), t)
- && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
- note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
- DEP_ANTI);
-
- pending = XEXP (pending, 1);
- pending_mem = XEXP (pending_mem, 1);
- }
- pending = deps->pending_write_insns;
- pending_mem = deps->pending_write_mems;
- while (pending)
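+ /* A debug insn takes no dependencies from the pending lists;
+ it is merely recorded on the pending-read list (below) so
+ that a later write can find it. */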
+ if (!DEBUG_INSN_P (insn))
{
- if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
- t, rtx_varies_p)
- && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
- note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
- sched_deps_info->generate_spec_deps
- ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
-
- pending = XEXP (pending, 1);
- pending_mem = XEXP (pending_mem, 1);
- }
+ t = canon_rtx (t);
+ pending = deps->pending_read_insns;
+ pending_mem = deps->pending_read_mems;
+ while (pending)
+ {
+ if (read_dependence (XEXP (pending_mem, 0), t)
+ && ! sched_insns_conditions_mutex_p (insn,
+ XEXP (pending, 0)))
+ note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
+ DEP_ANTI);
+
+ pending = XEXP (pending, 1);
+ pending_mem = XEXP (pending_mem, 1);
+ }
- for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- {
- if (! JUMP_P (XEXP (u, 0)))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- else if (deps_may_trap_p (x))
+ pending = deps->pending_write_insns;
+ pending_mem = deps->pending_write_mems;
+ while (pending)
{
- if ((sched_deps_info->generate_spec_deps)
- && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
- {
- ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
- MAX_DEP_WEAK);
+ if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
+ t, rtx_varies_p)
+ && ! sched_insns_conditions_mutex_p (insn,
+ XEXP (pending, 0)))
+ note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
+ sched_deps_info->generate_spec_deps
+ ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
+
+ pending = XEXP (pending, 1);
+ pending_mem = XEXP (pending_mem, 1);
+ }
- note_dep (XEXP (u, 0), ds);
- }
- else
+ for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
+ {
+ if (! JUMP_P (XEXP (u, 0)))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ else if (deps_may_trap_p (x))
+ {
+ if ((sched_deps_info->generate_spec_deps)
+ && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
+ {
+ ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
+ MAX_DEP_WEAK);
+
+ note_dep (XEXP (u, 0), ds);
+ }
+ else
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ }
}
}
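+ /* Record the read even for debug insns, so that a later write
+ to this MEM will depend on the debug insn and can reset it. */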
if (!deps->readonly)
add_insn_mem_dependence (deps, true, insn, x);
- /* Take advantage of tail recursion here. */
sched_analyze_2 (deps, XEXP (x, 0), insn);
if (cslr_p && sched_deps_info->finish_rhs)
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
+
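+ /* Record the use, so that a later setter of this reg will
+ depend on the debug insn and can reset it if the setter is
+ scheduled ahead of it. */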
+ if (!deps->readonly)
+ reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
}
CLEAR_REG_SET (reg_pending_uses);
removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
&deps->pending_read_mems);
- deps->pending_read_list_length -= removed;
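+ /* Debug insn reads were never counted (see
+ add_insn_mem_dependence), so don't subtract them here. */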
+ if (!DEBUG_INSN_P (insn))
+ deps->pending_read_list_length -= removed;
removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
&deps->pending_write_mems);
deps->pending_write_list_length -= removed;