#include "params.h"
#include "cselib.h"
+#ifdef INSN_SCHEDULING
+
#ifdef ENABLE_CHECKING
#define CHECK (true)
#else
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
static dw_t estimate_dep_weak (rtx, rtx);
-#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
#endif
-#endif
\f
/* Return nonzero if a load of the memory reference MEM can cause a trap. */
/* Don't depend an insn on itself. */
if (insn == elem)
{
-#ifdef INSN_SCHEDULING
if (current_sched_info->flags & DO_SPECULATION)
/* INSN has an internal dependence, which we can't overcome. */
HAS_INTERNAL_DEP (insn) = 1;
-#endif
return DEP_NODEP;
}
return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}
-#ifdef INSN_SCHEDULING
/* Ask dependency caches what needs to be done for dependence DEP.
Return DEP_CREATED if a new dependence should be created and there is no
need to search the dependency lists for an existing one.
bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
}
-#endif
/* Update DEP to incorporate information from NEW_DEP.
SD_IT points to DEP in case it should be moved to another list.
res = DEP_CHANGED;
}
-#ifdef INSN_SCHEDULING
if (current_sched_info->flags & USE_DEPS_LIST)
/* Update DEP_STATUS. */
{
if (true_dependency_cache != NULL
&& res == DEP_CHANGED)
update_dependency_caches (dep, old_type);
-#endif
return res;
}
gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
&& DEP_PRO (new_dep) != DEP_CON (new_dep));
-#ifdef INSN_SCHEDULING
-
#ifdef ENABLE_CHECKING
check_dep (new_dep, mem1 != NULL);
#endif
break;
}
}
-#endif
/* Check that we don't already have this dependence. */
if (maybe_present_p)
add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
-#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
check_dep (dep, false);
#endif
in the bitmap caches of dependency information. */
if (true_dependency_cache != NULL)
set_dependency_caches (dep);
-#endif
}
/* Add or update backward dependence between INSN and ELEM
rtx next;
next = next_nonnote_insn (insn);
if (next && BARRIER_P (next))
- reg_pending_barrier = TRUE_BARRIER;
+ reg_pending_barrier = MOVE_BARRIER;
else
{
rtx pending, pending_mem;
|| (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
reg_pending_barrier = MOVE_BARRIER;
+ /* Add register dependencies for INSN.
+ If INSN is conditional, we can't free any of the lists, since the
+ writes may not actually happen.  */
+ if (sched_get_condition (insn))
+ {
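+ /* A pending use depends on the last sets and clobbers of the
+ register (true dependence).  */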
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
+ add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
+ reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
+ reg_last->uses_length++;
+ }
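+ /* A clobber writes the register: output dependence on earlier
+ sets, anti dependence on earlier uses.  */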
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
+ add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+ reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
+ reg_last->clobbers_length++;
+ }
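+ /* A conditional set can't kill earlier writers, so just add
+ dependences and note that the register was set conditionally.  */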
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
+ add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
+ add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+ reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
+ SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
+ }
+ }
+ else
+ {
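+ /* INSN executes unconditionally.  Uses are handled as above;
+ reads never let us free the lists.  */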
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
+ add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
+ reg_last->uses_length++;
+ reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
+ }
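+ /* If the pending lists have grown too long, flush them: make
+ INSN depend on every entry, free the lists, and let INSN
+ (recorded as a set) stand in for all of them.  */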
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
+ || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
+ {
+ add_dependence_list_and_free (insn, &reg_last->sets, 0,
+ REG_DEP_OUTPUT);
+ add_dependence_list_and_free (insn, &reg_last->uses, 0,
+ REG_DEP_ANTI);
+ add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
+ REG_DEP_OUTPUT);
+ reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
+ reg_last->clobbers_length = 0;
+ reg_last->uses_length = 0;
+ }
+ else
+ {
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
+ add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+ }
+ reg_last->clobbers_length++;
+ reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
+ }
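+ /* An unconditional set kills everything before it: add
+ dependences on all three lists, free them, and start a new
+ sets list holding just INSN.  */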
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ add_dependence_list_and_free (insn, &reg_last->sets, 0,
+ REG_DEP_OUTPUT);
+ add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
+ REG_DEP_OUTPUT);
+ add_dependence_list_and_free (insn, &reg_last->uses, 0,
+ REG_DEP_ANTI);
+ reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
+ reg_last->uses_length = 0;
+ reg_last->clobbers_length = 0;
+ CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
+ }
+ }
+
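+ /* Record that these registers now have entries in the reg_last
+ lists.  */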
+ IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
+ IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
+ IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
+
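+ /* The reg_pending_* sets have been folded into the reg_last
+ lists above; clear them for the next insn.  */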
+ CLEAR_REG_SET (reg_pending_uses);
+ CLEAR_REG_SET (reg_pending_clobbers);
+ CLEAR_REG_SET (reg_pending_sets);
+
/* Add dependencies if a scheduling barrier was found. */
if (reg_pending_barrier)
{
CLEAR_REG_SET (&deps->reg_conditional_sets);
reg_pending_barrier = NOT_A_BARRIER;
}
- else
- {
- /* If the current insn is conditional, we can't free any
- of the lists. */
- if (sched_get_condition (insn))
- {
- EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
- {
- struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
- add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
- reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
- reg_last->uses_length++;
- }
- EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
- {
- struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
- add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
- reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
- reg_last->clobbers_length++;
- }
- EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
- {
- struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
- add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
- add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
- reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
- SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
- }
- }
- else
- {
- EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
- {
- struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
- add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
- reg_last->uses_length++;
- reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
- }
- EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
- {
- struct deps_reg *reg_last = &deps->reg_last[i];
- if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
- || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
- {
- add_dependence_list_and_free (insn, &reg_last->sets, 0,
- REG_DEP_OUTPUT);
- add_dependence_list_and_free (insn, &reg_last->uses, 0,
- REG_DEP_ANTI);
- add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
- REG_DEP_OUTPUT);
- reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
- reg_last->clobbers_length = 0;
- reg_last->uses_length = 0;
- }
- else
- {
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
- add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
- }
- reg_last->clobbers_length++;
- reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
- }
- EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
- {
- struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list_and_free (insn, &reg_last->sets, 0,
- REG_DEP_OUTPUT);
- add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
- REG_DEP_OUTPUT);
- add_dependence_list_and_free (insn, &reg_last->uses, 0,
- REG_DEP_ANTI);
- reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
- reg_last->uses_length = 0;
- reg_last->clobbers_length = 0;
- CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
- }
- }
-
- IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
- IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
- IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
- }
- CLEAR_REG_SET (reg_pending_uses);
- CLEAR_REG_SET (reg_pending_clobbers);
- CLEAR_REG_SET (reg_pending_sets);
/* If we are currently in a libcall scheduling group, then mark the
current insn as being in a scheduling group and that it can not
if (SCHED_GROUP_P (insn))
fixup_sched_groups (insn);
-#ifdef INSN_SCHEDULING
if ((current_sched_info->flags & DO_SPECULATION)
&& !sched_insn_is_legitimate_for_speculation_p (insn, 0))
/* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
sd_iterator_cond (&sd_it, &dep);)
change_spec_dep_to_hard (sd_it);
}
-#endif
}
/* Analyze every insn between HEAD and TAIL inclusive, creating backward
fprintf (stderr, "\n");
}
-#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
/* Verify that dependence type and status are consistent.
If RELAXED_P is true, then skip dep_weakness checks. */
gcc_assert (ds & BEGIN_CONTROL);
}
}
-#endif
-#endif
+#endif /* ENABLE_CHECKING */
+
+#endif /* INSN_SCHEDULING */