/* Instruction scheduling pass. This file computes dependencies between
instructions.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
\f
#include "config.h"
#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
-#include "basic-block.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
+#include "df.h"
-extern char *reg_known_equiv_p;
-extern rtx *reg_known_value;
+#ifdef ENABLE_CHECKING
+#define CHECK (true)
+#else
+#define CHECK (false)
+#endif
+
+/* Return the major type present in the DS. */
+enum reg_note
+ds_to_dk (ds_t ds)
+{
+ if (ds & DEP_TRUE)
+ return REG_DEP_TRUE;
+
+ if (ds & DEP_OUTPUT)
+ return REG_DEP_OUTPUT;
+
+ gcc_assert (ds & DEP_ANTI);
+
+ return REG_DEP_ANTI;
+}
+
+/* Return equivalent dep_status. */
+ds_t
+dk_to_ds (enum reg_note dk)
+{
+ switch (dk)
+ {
+ case REG_DEP_TRUE:
+ return DEP_TRUE;
+
+ case REG_DEP_OUTPUT:
+ return DEP_OUTPUT;
+
+ default:
+ gcc_assert (dk == REG_DEP_ANTI);
+ return DEP_ANTI;
+ }
+}
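+
+/* Illustrative sketch: a dependence whose status has both DEP_OUTPUT and
+   DEP_ANTI set reports REG_DEP_OUTPUT as its major type, and the reverse
+   mapping keeps only that bit:
+
+     ds_t ds = DEP_OUTPUT | DEP_ANTI;
+     gcc_assert (ds_to_dk (ds) == REG_DEP_OUTPUT);
+     gcc_assert (dk_to_ds (REG_DEP_OUTPUT) == DEP_OUTPUT);
+
+   The round trip loses DEP_ANTI, which is why code that needs the full
+   status keeps the ds_t around.  */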
+
+/* Functions to operate with dependence information container - dep_t. */
+
+/* Init DEP with the arguments. */
+static void
+init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note kind, ds_t ds)
+{
+ DEP_PRO (dep) = pro;
+ DEP_CON (dep) = con;
+ DEP_KIND (dep) = kind;
+ DEP_STATUS (dep) = ds;
+}
+
+/* Init DEP with the arguments.
+   While most of the scheduler (including targets) only needs the major
+   type of the dependency, it is convenient to hide the full dep_status
+   from them.  */
+void
+init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
+{
+ ds_t ds;
+
+ if ((current_sched_info->flags & USE_DEPS_LIST) != 0)
+ ds = dk_to_ds (kind);
+ else
+ ds = -1;
+
+ init_dep_1 (dep, pro, con, kind, ds);
+}
+
+/* Make a copy of FROM in TO. */
+static void
+copy_dep (dep_t to, dep_t from)
+{
+ memcpy (to, from, sizeof (*to));
+}
+
+/* Functions to operate with a single link from the dependencies lists -
+ dep_link_t. */
+
+/* Return true if dep_link L is consistent. */
+static bool
+dep_link_consistent_p (dep_link_t l)
+{
+ dep_link_t next = DEP_LINK_NEXT (l);
+
+ return (next == NULL
+ || &DEP_LINK_NEXT (l) == DEP_LINK_PREV_NEXTP (next));
+}
+
+/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
+   PREV_NEXTP.  */
+static void
+attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
+{
+ dep_link_t next = *prev_nextp;
+
+ gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
+ && DEP_LINK_NEXT (l) == NULL);
+
+ /* Init node being inserted. */
+ DEP_LINK_PREV_NEXTP (l) = prev_nextp;
+ DEP_LINK_NEXT (l) = next;
+
+ /* Fix next node. */
+ if (next != NULL)
+ {
+ gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
+
+ DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
+ }
+
+ /* Fix prev node. */
+ *prev_nextp = l;
+}
+
+/* Add dep_link LINK to deps_list L. */
+static void
+add_to_deps_list (dep_link_t link, deps_list_t l)
+{
+ attach_dep_link (link, &DEPS_LIST_FIRST (l));
+}
+
+/* Detach dep_link L from the list. */
+static void
+detach_dep_link (dep_link_t l)
+{
+ dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
+ dep_link_t next = DEP_LINK_NEXT (l);
+
+ *prev_nextp = next;
+
+ if (next != NULL)
+ DEP_LINK_PREV_NEXTP (next) = prev_nextp;
+
+  /* Though this property is not used anywhere except in the assert in
+     attach_dep_link (), clearing the fields can prevent latent errors.  */
+ DEP_LINK_PREV_NEXTP (l) = NULL;
+ DEP_LINK_NEXT (l) = NULL;
+}
+
+/* Move LINK from whatever list it is now to L. */
+void
+move_dep_link (dep_link_t link, deps_list_t l)
+{
+ detach_dep_link (link);
+ add_to_deps_list (link, l);
+}
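+
+/* Because every link stores the address of its predecessor's NEXT field
+   (DEP_LINK_PREV_NEXTP), a link can be detached in O(1) without knowing
+   which deps_list currently owns it.  A minimal sketch, with FROM_LIST
+   and TO_LIST standing for two arbitrary lists:
+
+     dep_link_t link = DEPS_LIST_FIRST (from_list);
+     move_dep_link (link, to_list);
+
+   This is detach_dep_link () followed by add_to_deps_list (), and leaves
+   FROM_LIST starting at what used to be its second link.  */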
+
+/* Check L's and its successors' consistency.
+ This is, potentially, an expensive check, hence it should be guarded by
+ ENABLE_CHECKING at all times. */
+static bool
+dep_links_consistent_p (dep_link_t l)
+{
+ while (l != NULL)
+ {
+ if (dep_link_consistent_p (l))
+ l = DEP_LINK_NEXT (l);
+ else
+ return false;
+ }
+
+ return true;
+}
+
+/* Dump dep_links starting from L.  */
+static void
+dump_dep_links (FILE *dump, dep_link_t l)
+{
+ while (l != NULL)
+ {
+ dep_t d = DEP_LINK_DEP (l);
+
+ fprintf (dump, "%d%c>%d ", INSN_UID (DEP_PRO (d)),
+ dep_link_consistent_p (l) ? '-' : '!', INSN_UID (DEP_CON (d)));
+
+ l = DEP_LINK_NEXT (l);
+ }
+
+ fprintf (dump, "\n");
+}
-static regset_head reg_pending_sets_head;
-static regset_head reg_pending_clobbers_head;
-static regset_head reg_pending_uses_head;
+/* Dump dep_links starting from L to stderr.  */
+void
+debug_dep_links (dep_link_t l)
+{
+ dump_dep_links (stderr, l);
+}
+
+/* Obstack to allocate dep_nodes and deps_lists on. */
+static struct obstack deps_obstack;
+
+/* Obstack to hold forward dependencies lists (deps_list_t). */
+static struct obstack *dl_obstack = &deps_obstack;
+
+/* Obstack to hold all dependency nodes (dep_node_t). */
+static struct obstack *dn_obstack = &deps_obstack;
+
+/* Functions to operate with dependences lists - deps_list_t. */
+
+/* Allocate deps_list.
+
+ If ON_OBSTACK_P is true, allocate the list on the obstack. This is done for
+ INSN_FORW_DEPS lists because they should live till the end of scheduling.
+
+ INSN_BACK_DEPS and INSN_RESOLVED_BACK_DEPS lists are allocated on the free
+   store and are freed in haifa-sched.c: schedule_insn ().  */
+static deps_list_t
+alloc_deps_list (bool on_obstack_p)
+{
+ if (on_obstack_p)
+ return obstack_alloc (dl_obstack, sizeof (struct _deps_list));
+ else
+ return xmalloc (sizeof (struct _deps_list));
+}
+
+/* Initialize deps_list L. */
+static void
+init_deps_list (deps_list_t l)
+{
+ DEPS_LIST_FIRST (l) = NULL;
+}
+
+/* Create (allocate and init) deps_list.
+ The meaning of ON_OBSTACK_P is the same as in alloc_deps_list (). */
+deps_list_t
+create_deps_list (bool on_obstack_p)
+{
+ deps_list_t l = alloc_deps_list (on_obstack_p);
+
+ init_deps_list (l);
+ return l;
+}
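+
+/* A sketch of the two allocation disciplines, with FW and BK being
+   hypothetical lists:
+
+     deps_list_t fw = create_deps_list (true);   (obstack; reclaimed only
+                                                  when deps_obstack dies)
+     deps_list_t bk = create_deps_list (false);  (free store)
+     ...
+     delete_deps_list (bk);                      (clear, then free ())
+
+   An obstack-allocated list must never reach free_deps_list ().  */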
+
+/* Free the dep_nodes that are present in L.  */
+static void
+clear_deps_list (deps_list_t l)
+{
+ /* All dep_nodes are allocated on the dn_obstack. They'll be freed with
+ the obstack. */
+
+ DEPS_LIST_FIRST (l) = NULL;
+}
+
+/* Free deps_list L. */
+void
+free_deps_list (deps_list_t l)
+{
+ gcc_assert (deps_list_empty_p (l));
+ free (l);
+}
+
+/* Delete (clear and free) deps_list L. */
+void
+delete_deps_list (deps_list_t l)
+{
+ clear_deps_list (l);
+ free_deps_list (l);
+}
+
+/* Return true if L is empty. */
+bool
+deps_list_empty_p (deps_list_t l)
+{
+ return DEPS_LIST_FIRST (l) == NULL;
+}
+
+/* Check L's consistency.
+ This is, potentially, an expensive check, hence it should be guarded by
+ ENABLE_CHECKING at all times. */
+static bool
+deps_list_consistent_p (deps_list_t l)
+{
+ dep_link_t first = DEPS_LIST_FIRST (l);
+
+ return (first == NULL
+ || (&DEPS_LIST_FIRST (l) == DEP_LINK_PREV_NEXTP (first)
+ && dep_links_consistent_p (first)));
+}
+
+/* Dump L to F. */
+static void
+dump_deps_list (FILE *f, deps_list_t l)
+{
+ dump_dep_links (f, DEPS_LIST_FIRST (l));
+}
+
+/* Dump L to STDERR. */
+void
+debug_deps_list (deps_list_t l)
+{
+ dump_deps_list (stderr, l);
+}
+
+/* Add a dependency described by DEP to the list L.
+ L should be either INSN_BACK_DEPS or INSN_RESOLVED_BACK_DEPS. */
+void
+add_back_dep_to_deps_list (deps_list_t l, dep_t dep_from)
+{
+ dep_node_t n = (dep_node_t) obstack_alloc (dn_obstack,
+ sizeof (*n));
+ dep_t dep_to = DEP_NODE_DEP (n);
+ dep_link_t back = DEP_NODE_BACK (n);
+ dep_link_t forw = DEP_NODE_FORW (n);
+
+ copy_dep (dep_to, dep_from);
+
+ DEP_LINK_NODE (back) = n;
+ DEP_LINK_NODE (forw) = n;
+
+  /* There is no particular need to initialize these four fields except
+     to make the assert in attach_dep_link () happy.  */
+ DEP_LINK_NEXT (back) = NULL;
+ DEP_LINK_PREV_NEXTP (back) = NULL;
+ DEP_LINK_NEXT (forw) = NULL;
+ DEP_LINK_PREV_NEXTP (forw) = NULL;
+
+ add_to_deps_list (back, l);
+}
+
+/* Find the dep_link with producer PRO in deps_list L. */
+dep_link_t
+find_link_by_pro_in_deps_list (deps_list_t l, rtx pro)
+{
+ dep_link_t link;
+
+ FOR_EACH_DEP_LINK (link, l)
+ if (DEP_LINK_PRO (link) == pro)
+ return link;
+
+ return NULL;
+}
+
+/* Find the dep_link with consumer CON in deps_list L. */
+dep_link_t
+find_link_by_con_in_deps_list (deps_list_t l, rtx con)
+{
+ dep_link_t link;
+
+ FOR_EACH_DEP_LINK (link, l)
+ if (DEP_LINK_CON (link) == con)
+ return link;
+
+ return NULL;
+}
+
+/* Make a copy of FROM in TO, substituting the consumer with CON.
+ TO and FROM should be RESOLVED_BACK_DEPS lists. */
+void
+copy_deps_list_change_con (deps_list_t to, deps_list_t from, rtx con)
+{
+ dep_link_t l;
+
+ gcc_assert (deps_list_empty_p (to));
+
+ FOR_EACH_DEP_LINK (l, from)
+ {
+ add_back_dep_to_deps_list (to, DEP_LINK_DEP (l));
+ DEP_LINK_CON (DEPS_LIST_FIRST (to)) = con;
+ }
+}
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
-static bool reg_pending_barrier;
+
+/* The following enumeration values tell us what dependencies we
+ should use to implement the barrier. We use true-dependencies for
+ TRUE_BARRIER and anti-dependencies for MOVE_BARRIER. */
+enum reg_pending_barrier_mode
+{
+ NOT_A_BARRIER = 0,
+ MOVE_BARRIER,
+ TRUE_BARRIER
+};
+
+static enum reg_pending_barrier_mode reg_pending_barrier;
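+
+/* For example, a volatile asm makes reg_pending_barrier a TRUE_BARRIER,
+   while a REG_SETJMP call makes it a MOVE_BARRIER; when the barrier is
+   applied in sched_analyze_insn the dependence kind is chosen roughly as
+
+     reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI  */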
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence.  Each insn has a
   bitmap with enough entries to represent a dependency on any other
   insn in the insn chain.  Once the bitmap for the true-dependency
   cache is allocated, the remaining caches are allocated as well. */
-static sbitmap *true_dependency_cache;
-static sbitmap *anti_dependency_cache;
-static sbitmap *output_dependency_cache;
+static bitmap_head *true_dependency_cache;
+static bitmap_head *output_dependency_cache;
+static bitmap_head *anti_dependency_cache;
+static bitmap_head *spec_dependency_cache;
+static int cache_size;
/* To speed up checking the consistency of formed forward insn
   dependencies we use the following cache.  Another possible solution
   would be to switch off checking for duplicate insns in forward
   dependencies. */
#ifdef ENABLE_CHECKING
-static sbitmap *forward_dependency_cache;
+static bitmap_head *forward_dependency_cache;
#endif
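+
+/* The caches are indexed by pairs of luids: a cached dependence of
+   consumer INSN on producer ELEM is tested roughly as
+
+     bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
+                   INSN_LUID (elem))
+
+   which lets add_or_update_back_dep_1 skip the INSN_BACK_DEPS walk
+   entirely when no dependence can be present.  */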
-static int deps_may_trap_p PARAMS ((rtx));
-static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note));
-static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note));
-static void remove_dependence PARAMS ((rtx, rtx));
-static void set_sched_group_p PARAMS ((rtx));
-
-static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int));
-static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
-static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
-static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
-static rtx group_leader PARAMS ((rtx));
-
-static rtx get_condition PARAMS ((rtx));
-static int conditions_mutex_p PARAMS ((rtx, rtx));
+static int deps_may_trap_p (rtx);
+static void add_dependence_list (rtx, rtx, int, enum reg_note);
+static void add_dependence_list_and_free (rtx, rtx *, int, enum reg_note);
+static void delete_all_dependences (rtx);
+static void fixup_sched_groups (rtx);
+
+static void flush_pending_lists (struct deps *, rtx, int, int);
+static void sched_analyze_1 (struct deps *, rtx, rtx);
+static void sched_analyze_2 (struct deps *, rtx, rtx);
+static void sched_analyze_insn (struct deps *, rtx, rtx);
+
+static rtx sched_get_condition (rtx);
+static int conditions_mutex_p (rtx, rtx);
+
+static enum DEPS_ADJUST_RESULT maybe_add_or_update_back_dep_1 (rtx, rtx,
+ enum reg_note, ds_t, rtx, rtx, dep_link_t **);
+static enum DEPS_ADJUST_RESULT add_or_update_back_dep_1 (rtx, rtx,
+ enum reg_note, ds_t, rtx, rtx, dep_link_t **);
+static void add_back_dep (rtx, rtx, enum reg_note, ds_t);
+
+static void adjust_add_sorted_back_dep (rtx, dep_link_t, dep_link_t *);
+static void adjust_back_add_forw_dep (rtx, dep_link_t *);
+static void delete_forw_dep (dep_link_t);
+static dw_t estimate_dep_weak (rtx, rtx);
+#ifdef INSN_SCHEDULING
+#ifdef ENABLE_CHECKING
+static void check_dep_status (enum reg_note, ds_t, bool);
+#endif
+#endif
\f
/* Return nonzero if a load of the memory reference MEM can cause a trap. */
static int
-deps_may_trap_p (mem)
- rtx mem;
+deps_may_trap_p (rtx mem)
{
rtx addr = XEXP (mem, 0);
- if (REG_P (addr)
- && REGNO (addr) >= FIRST_PSEUDO_REGISTER
- && reg_known_value[REGNO (addr)])
- addr = reg_known_value[REGNO (addr)];
- return rtx_addr_can_trap_p (addr);
-}
-\f
-/* Return the INSN_LIST containing INSN in LIST, or NULL
- if LIST does not contain INSN. */
-
-rtx
-find_insn_list (insn, list)
- rtx insn;
- rtx list;
-{
- while (list)
+ if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
{
- if (XEXP (list, 0) == insn)
- return list;
- list = XEXP (list, 1);
+ rtx t = get_reg_known_value (REGNO (addr));
+ if (t)
+ addr = t;
}
- return 0;
+ return rtx_addr_can_trap_p (addr);
}
\f
/* Find the condition under which INSN is executed. */
static rtx
-get_condition (insn)
- rtx insn;
+sched_get_condition (rtx insn)
{
rtx pat = PATTERN (insn);
- rtx cond;
+ rtx src;
if (pat == 0)
return 0;
+
if (GET_CODE (pat) == COND_EXEC)
return COND_EXEC_TEST (pat);
- if (GET_CODE (insn) != JUMP_INSN)
- return 0;
- if (GET_CODE (pat) != SET || SET_SRC (pat) != pc_rtx)
- return 0;
- if (GET_CODE (SET_DEST (pat)) != IF_THEN_ELSE)
- return 0;
- pat = SET_DEST (pat);
- cond = XEXP (pat, 0);
- if (GET_CODE (XEXP (cond, 1)) == LABEL_REF
- && XEXP (cond, 2) == pc_rtx)
- return cond;
- else if (GET_CODE (XEXP (cond, 2)) == LABEL_REF
- && XEXP (cond, 1) == pc_rtx)
- return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
- XEXP (cond, 0), XEXP (cond, 1));
- else
+
+ if (!any_condjump_p (insn) || !onlyjump_p (insn))
return 0;
+
+ src = SET_SRC (pc_set (insn));
+
+ if (XEXP (src, 2) == pc_rtx)
+ return XEXP (src, 0);
+ else if (XEXP (src, 1) == pc_rtx)
+ {
+ rtx cond = XEXP (src, 0);
+ enum rtx_code revcode = reversed_comparison_code (cond, insn);
+
+ if (revcode == UNKNOWN)
+ return 0;
+ return gen_rtx_fmt_ee (revcode, GET_MODE (cond), XEXP (cond, 0),
+ XEXP (cond, 1));
+ }
+
+ return 0;
}
+\f
/* Return nonzero if conditions COND1 and COND2 can never be both true. */
static int
-conditions_mutex_p (cond1, cond2)
- rtx cond1, cond2;
+conditions_mutex_p (rtx cond1, rtx cond2)
{
- if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
- && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
- && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
+ if (COMPARISON_P (cond1)
+ && COMPARISON_P (cond2)
+ && GET_CODE (cond1) == reversed_comparison_code (cond2, NULL)
&& XEXP (cond1, 0) == XEXP (cond2, 0)
&& XEXP (cond1, 1) == XEXP (cond2, 1))
return 1;
return 0;
}
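+
+/* E.g. (eq (reg 100) (const_int 0)) and (ne (reg 100) (const_int 0))
+   are mutually exclusive: NE is the reversed code of EQ and both pairs
+   of operands match.  */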
-\f
-/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
- LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the type
- of dependence that this link represents. */
-void
-add_dependence (insn, elem, dep_type)
- rtx insn;
- rtx elem;
- enum reg_note dep_type;
+/* Return true if insn1 and insn2 can never depend on one another because
+ the conditions under which they are executed are mutually exclusive. */
+bool
+sched_insns_conditions_mutex_p (rtx insn1, rtx insn2)
{
- rtx link, next;
- int present_p;
rtx cond1, cond2;
- /* Don't depend an insn on itself. */
- if (insn == elem)
- return;
-
- /* We can get a dependency on deleted insns due to optimizations in
- the register allocation and reloading or due to splitting. Any
- such dependency is useless and can be ignored. */
- if (GET_CODE (elem) == NOTE)
- return;
-
/* flow.c doesn't handle conditional lifetimes entirely correctly;
calls mess up the conditional lifetimes. */
- /* ??? add_dependence is the wrong place to be eliding dependencies,
- as that forgets that the condition expressions themselves may
- be dependent. */
- if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
+ if (!CALL_P (insn1) && !CALL_P (insn2))
{
- cond1 = get_condition (insn);
- cond2 = get_condition (elem);
+ cond1 = sched_get_condition (insn1);
+ cond2 = sched_get_condition (insn2);
if (cond1 && cond2
&& conditions_mutex_p (cond1, cond2)
/* Make sure first instruction doesn't affect condition of second
instruction if switched. */
- && !modified_in_p (cond1, elem)
+ && !modified_in_p (cond1, insn2)
/* Make sure second instruction doesn't affect condition of first
instruction if switched. */
- && !modified_in_p (cond2, insn))
- return;
+ && !modified_in_p (cond2, insn1))
+ return true;
}
+ return false;
+}
+\f
+/* Add ELEM wrapped in a dep_link with reg note kind DEP_TYPE to
+   INSN_BACK_DEPS (INSN), if it is not already there.  DEP_TYPE indicates
+   the type of dependence that this link represents.  DS, if nonzero,
+   indicates the speculations through which this dependence can be
+   overcome.  MEM1 and MEM2, if non-null, correspond to the memory
+   locations in the case of data speculation.  The function returns a
+   value indicating whether an old entry has been changed or a new entry
+   has been added to INSN's backward deps.  In the case of a changed
+   entry, *CHANGED_LINKPP is set to its address.
+   See also the definition of enum DEPS_ADJUST_RESULT in sched-int.h.
+   Actual manipulation of dependence data structures is performed in
+   add_or_update_back_dep_1.  */
+
+static enum DEPS_ADJUST_RESULT
+maybe_add_or_update_back_dep_1 (rtx insn, rtx elem, enum reg_note dep_type,
+ ds_t ds, rtx mem1, rtx mem2,
+ dep_link_t **changed_linkpp)
+{
+ gcc_assert (INSN_P (insn) && INSN_P (elem));
- /* If elem is part of a sequence that must be scheduled together, then
- make the dependence point to the last insn of the sequence.
- When HAVE_cc0, it is possible for NOTEs to exist between users and
- setters of the condition codes, so we must skip past notes here.
- Otherwise, NOTEs are impossible here. */
- next = next_nonnote_insn (elem);
- if (next && SCHED_GROUP_P (next)
- && GET_CODE (next) != CODE_LABEL)
+ /* Don't depend an insn on itself. */
+ if (insn == elem)
{
- /* Notes will never intervene here though, so don't bother checking
- for them. */
- /* Hah! Wrong. */
- /* We must reject CODE_LABELs, so that we don't get confused by one
- that has LABEL_PRESERVE_P set, which is represented by the same
- bit in the rtl as SCHED_GROUP_P. A CODE_LABEL can never be
- SCHED_GROUP_P. */
-
- rtx nnext;
- while ((nnext = next_nonnote_insn (next)) != NULL
- && SCHED_GROUP_P (nnext)
- && GET_CODE (nnext) != CODE_LABEL)
- next = nnext;
-
- /* Again, don't depend an insn on itself. */
- if (insn == next)
- return;
-
- /* Make the dependence to NEXT, the last insn of the group, instead
- of the original ELEM. */
- elem = next;
+#ifdef INSN_SCHEDULING
+ if (current_sched_info->flags & DO_SPECULATION)
+ /* INSN has an internal dependence, which we can't overcome. */
+ HAS_INTERNAL_DEP (insn) = 1;
+#endif
+ return 0;
}
- present_p = 1;
+ return add_or_update_back_dep_1 (insn, elem, dep_type,
+ ds, mem1, mem2, changed_linkpp);
+}
+
+/* The parameters and return value of this function are the same as
+   those of maybe_add_or_update_back_dep_1.  The only difference between
+   the two functions is that INSN and ELEM are guaranteed not to be the
+   same in this one.  */
+static enum DEPS_ADJUST_RESULT
+add_or_update_back_dep_1 (rtx insn, rtx elem, enum reg_note dep_type,
+ ds_t ds ATTRIBUTE_UNUSED,
+ rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED,
+ dep_link_t **changed_linkpp ATTRIBUTE_UNUSED)
+{
+ bool maybe_present_p = true, present_p = false;
+
+ gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
+
#ifdef INSN_SCHEDULING
- /* ??? No good way to tell from here whether we're doing interblock
- scheduling. Possibly add another callback. */
-#if 0
- /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
- No need for interblock dependences with calls, since
- calls are not moved between blocks. Note: the edge where
- elem is a CALL is still required. */
- if (GET_CODE (insn) == CALL_INSN
- && (INSN_BB (elem) != INSN_BB (insn)))
- return;
+
+#ifdef ENABLE_CHECKING
+ check_dep_status (dep_type, ds, mem1 != NULL);
#endif
  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code. */
if (true_dependency_cache != NULL)
{
- enum reg_note present_dep_type = 0;
-
- if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
- abort ();
- if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
- /* Do nothing (present_set_type is already 0). */
- ;
- else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem)))
- present_dep_type = REG_DEP_ANTI;
- else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem)))
- present_dep_type = REG_DEP_OUTPUT;
+ enum reg_note present_dep_type;
+
+ gcc_assert (output_dependency_cache);
+ gcc_assert (anti_dependency_cache);
+ if (!(current_sched_info->flags & USE_DEPS_LIST))
+ {
+ if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem)))
+ present_dep_type = REG_DEP_TRUE;
+ else if (bitmap_bit_p (&output_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem)))
+ present_dep_type = REG_DEP_OUTPUT;
+ else if (bitmap_bit_p (&anti_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem)))
+ present_dep_type = REG_DEP_ANTI;
+ else
+ maybe_present_p = false;
+
+ if (maybe_present_p)
+ {
+ if ((int) dep_type >= (int) present_dep_type)
+ return DEP_PRESENT;
+
+ present_p = true;
+ }
+ }
else
- present_p = 0;
- if (present_p && (int) dep_type >= (int) present_dep_type)
- return;
+ {
+ ds_t present_dep_types = 0;
+
+ if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem)))
+ present_dep_types |= DEP_TRUE;
+ if (bitmap_bit_p (&output_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem)))
+ present_dep_types |= DEP_OUTPUT;
+ if (bitmap_bit_p (&anti_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem)))
+ present_dep_types |= DEP_ANTI;
+
+ if (present_dep_types)
+ {
+ if (!(current_sched_info->flags & DO_SPECULATION)
+ || !bitmap_bit_p (&spec_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem)))
+ {
+ if ((present_dep_types | (ds & DEP_TYPES))
+ == present_dep_types)
+ /* We already have all these bits. */
+ return DEP_PRESENT;
+ }
+ else
+ {
+ /* Only true dependencies can be data speculative and
+ only anti dependencies can be control speculative. */
+ gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
+ == present_dep_types);
+
+ /* if (additional dep is SPECULATIVE) then
+ we should update DEP_STATUS
+ else
+ we should reset existing dep to non-speculative. */
+ }
+
+ present_p = true;
+ }
+ else
+ maybe_present_p = false;
+ }
}
#endif
/* Check that we don't already have this dependence. */
- if (present_p)
- for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
- if (XEXP (link, 0) == elem)
- {
+ if (maybe_present_p)
+ {
+ dep_link_t *linkp;
+
+ for (linkp = &DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
+ *linkp != NULL;
+ linkp = &DEP_LINK_NEXT (*linkp))
+ {
+ dep_t link = DEP_LINK_DEP (*linkp);
+
+ gcc_assert (true_dependency_cache == 0 || present_p);
+
+ if (DEP_PRO (link) == elem)
+ {
+ enum DEPS_ADJUST_RESULT changed_p = DEP_PRESENT;
+
#ifdef INSN_SCHEDULING
- /* Clear corresponding cache entry because type of the link
- may be changed. */
- if (true_dependency_cache != NULL)
- {
- if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
- RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
- else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
- && output_dependency_cache)
- RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
- else
- abort ();
- }
+ if (current_sched_info->flags & USE_DEPS_LIST)
+ {
+ ds_t new_status = ds | DEP_STATUS (link);
+
+ if (new_status & SPECULATIVE)
+ {
+ if (!(ds & SPECULATIVE)
+ || !(DEP_STATUS (link) & SPECULATIVE))
+ /* Then this dep can't be speculative. */
+ {
+ new_status &= ~SPECULATIVE;
+ if (true_dependency_cache
+ && (DEP_STATUS (link) & SPECULATIVE))
+ bitmap_clear_bit (&spec_dependency_cache
+ [INSN_LUID (insn)],
+ INSN_LUID (elem));
+ }
+ else
+ {
+ /* Both are speculative. Merging probabilities. */
+ if (mem1)
+ {
+ dw_t dw;
+
+ dw = estimate_dep_weak (mem1, mem2);
+ ds = set_dep_weak (ds, BEGIN_DATA, dw);
+ }
+
+ new_status = ds_merge (DEP_STATUS (link), ds);
+ }
+ }
+
+ ds = new_status;
+ }
+
+	  /* Clear the corresponding cache entry because the type of the
+	     link may have changed.  Keep the entries if USE_DEPS_LIST
+	     is set.  */
+ if (true_dependency_cache != NULL
+ && !(current_sched_info->flags & USE_DEPS_LIST))
+ {
+ enum reg_note kind = DEP_KIND (link);
+
+ switch (kind)
+ {
+ case REG_DEP_OUTPUT:
+ bitmap_clear_bit (&output_dependency_cache
+ [INSN_LUID (insn)], INSN_LUID (elem));
+ break;
+ case REG_DEP_ANTI:
+ bitmap_clear_bit (&anti_dependency_cache
+ [INSN_LUID (insn)], INSN_LUID (elem));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ if ((current_sched_info->flags & USE_DEPS_LIST)
+ && DEP_STATUS (link) != ds)
+ {
+ DEP_STATUS (link) = ds;
+ changed_p = DEP_CHANGED;
+ }
#endif
- /* If this is a more restrictive type of dependence than the existing
- one, then change the existing dependence to this type. */
- if ((int) dep_type < (int) REG_NOTE_KIND (link))
- PUT_REG_NOTE_KIND (link, dep_type);
+ /* If this is a more restrictive type of dependence than the
+ existing one, then change the existing dependence to this
+ type. */
+ if ((int) dep_type < (int) DEP_KIND (link))
+ {
+ DEP_KIND (link) = dep_type;
+ changed_p = DEP_CHANGED;
+ }
#ifdef INSN_SCHEDULING
- /* If we are adding a dependency to INSN's LOG_LINKs, then
- note that in the bitmap caches of dependency information. */
- if (true_dependency_cache != NULL)
- {
- if ((int) REG_NOTE_KIND (link) == 0)
- SET_BIT (true_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
- else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
- SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
- else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
- SET_BIT (output_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
- }
+	  /* If we are adding a dependency to INSN's back deps, then
+	     note that in the bitmap caches of dependency information.  */
+ if (true_dependency_cache != NULL)
+ {
+ if (!(current_sched_info->flags & USE_DEPS_LIST))
+ {
+ if (DEP_KIND (link) == REG_DEP_TRUE)
+ bitmap_set_bit (&true_dependency_cache
+ [INSN_LUID (insn)], INSN_LUID (elem));
+ else if (DEP_KIND (link) == REG_DEP_OUTPUT)
+ bitmap_set_bit (&output_dependency_cache
+ [INSN_LUID (insn)], INSN_LUID (elem));
+ else if (DEP_KIND (link) == REG_DEP_ANTI)
+ bitmap_set_bit (&anti_dependency_cache
+ [INSN_LUID (insn)], INSN_LUID (elem));
+ }
+ else
+ {
+ if (ds & DEP_TRUE)
+ bitmap_set_bit (&true_dependency_cache
+ [INSN_LUID (insn)], INSN_LUID (elem));
+ if (ds & DEP_OUTPUT)
+ bitmap_set_bit (&output_dependency_cache
+ [INSN_LUID (insn)], INSN_LUID (elem));
+ if (ds & DEP_ANTI)
+ bitmap_set_bit (&anti_dependency_cache
+ [INSN_LUID (insn)], INSN_LUID (elem));
+	      /* Note that a dep can become speculative only at the
+		 moment of creation.  Thus, we don't need to check for
+		 it here.  */
+ }
+ }
+
+ if (changed_linkpp && changed_p == DEP_CHANGED)
+ *changed_linkpp = linkp;
#endif
- return;
- }
- /* Might want to check one level of transitivity to save conses. */
+ return changed_p;
+ }
+ }
+ /* We didn't find a dep. It shouldn't be present in the cache. */
+ gcc_assert (!present_p);
+ }
+
+ /* Might want to check one level of transitivity to save conses.
+ This check should be done in maybe_add_or_update_back_dep_1.
+ Since we made it to add_or_update_back_dep_1, we must create
+ (or update) a link. */
- link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
- LOG_LINKS (insn) = link;
+ if (mem1)
+ {
+ gcc_assert (current_sched_info->flags & DO_SPECULATION);
+ ds = set_dep_weak (ds, BEGIN_DATA, estimate_dep_weak (mem1, mem2));
+ }
+
+ add_back_dep (insn, elem, dep_type, ds);
+
+ return DEP_CREATED;
+}
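+
+/* To summarize the protocol above: DEP_PRESENT means an equal or stronger
+   dependence already existed, DEP_CHANGED means an existing dependence was
+   strengthened (with *CHANGED_LINKPP pointing at it), and DEP_CREATED
+   means a new back dependence was added via add_back_dep.  */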
+
+/* This function unconditionally creates a link between INSN and ELEM.
+   DS describes the speculative status of the link.  */
+static void
+add_back_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
+{
+ struct _dep _dep, *dep = &_dep;
+
+ gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
- /* Insn dependency, not data dependency. */
- PUT_REG_NOTE_KIND (link, dep_type);
+ if (current_sched_info->flags & USE_DEPS_LIST)
+ init_dep_1 (dep, elem, insn, dep_type, ds);
+ else
+ init_dep_1 (dep, elem, insn, dep_type, -1);
+
+ add_back_dep_to_deps_list (INSN_BACK_DEPS (insn), dep);
#ifdef INSN_SCHEDULING
+#ifdef ENABLE_CHECKING
+ check_dep_status (dep_type, ds, false);
+#endif
+
  /* If we are adding a dependency to INSN's back deps, then note that
     in the bitmap caches of dependency information. */
if (true_dependency_cache != NULL)
{
- if ((int) dep_type == 0)
- SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
- else if (dep_type == REG_DEP_ANTI)
- SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
- else if (dep_type == REG_DEP_OUTPUT)
- SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
+ if (!(current_sched_info->flags & USE_DEPS_LIST))
+ {
+ if (dep_type == REG_DEP_TRUE)
+ bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
+ else if (dep_type == REG_DEP_OUTPUT)
+ bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
+ else if (dep_type == REG_DEP_ANTI)
+ bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
+ }
+ else
+ {
+ if (ds & DEP_TRUE)
+ bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
+ if (ds & DEP_OUTPUT)
+ bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
+ if (ds & DEP_ANTI)
+ bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
+ if (ds & SPECULATIVE)
+ {
+ gcc_assert (current_sched_info->flags & DO_SPECULATION);
+ bitmap_set_bit (&spec_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
+ }
+ }
}
#endif
}
/* A convenience wrapper to operate on an entire list. */
static void
-add_dependence_list (insn, list, dep_type)
- rtx insn, list;
- enum reg_note dep_type;
+add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
{
for (; list; list = XEXP (list, 1))
- add_dependence (insn, XEXP (list, 0), dep_type);
+ {
+ if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
+ add_dependence (insn, XEXP (list, 0), dep_type);
+ }
}
/* Similar, but free *LISTP at the same time. */
static void
-add_dependence_list_and_free (insn, listp, dep_type)
- rtx insn;
- rtx *listp;
- enum reg_note dep_type;
+add_dependence_list_and_free (rtx insn, rtx *listp, int uncond,
+ enum reg_note dep_type)
{
rtx list, next;
for (list = *listp, *listp = NULL; list ; list = next)
{
next = XEXP (list, 1);
- add_dependence (insn, XEXP (list, 0), dep_type);
+ if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
+ add_dependence (insn, XEXP (list, 0), dep_type);
free_INSN_LIST_node (list);
}
}
-/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
- of INSN. Abort if not found. */
+/* Clear all dependencies for an insn. */
static void
-remove_dependence (insn, elem)
- rtx insn;
- rtx elem;
+delete_all_dependences (rtx insn)
{
- rtx prev, link, next;
- int found = 0;
-
- for (prev = 0, link = LOG_LINKS (insn); link; link = next)
- {
- next = XEXP (link, 1);
- if (XEXP (link, 0) == elem)
- {
- if (prev)
- XEXP (prev, 1) = next;
- else
- LOG_LINKS (insn) = next;
+ /* Clear caches, if they exist, as well as free the dependence. */
#ifdef INSN_SCHEDULING
- /* If we are removing a dependency from the LOG_LINKS list,
- make sure to remove it from the cache too. */
- if (true_dependency_cache != NULL)
- {
- if (REG_NOTE_KIND (link) == 0)
- RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
- else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
- RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
- else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
- RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
- }
-#endif
-
- free_INSN_LIST_node (link);
-
- found = 1;
- }
- else
- prev = link;
- }
-
- if (!found)
- abort ();
- return;
-}
-
-/* Return an insn which represents a SCHED_GROUP, which is
- the last insn in the group. */
-
-static rtx
-group_leader (insn)
- rtx insn;
-{
- rtx prev;
-
- do
+ if (true_dependency_cache != NULL)
{
- prev = insn;
- insn = next_nonnote_insn (insn);
+ bitmap_clear (&true_dependency_cache[INSN_LUID (insn)]);
+ bitmap_clear (&output_dependency_cache[INSN_LUID (insn)]);
+ bitmap_clear (&anti_dependency_cache[INSN_LUID (insn)]);
+ /* We don't have to clear forward_dependency_cache here,
+ because it is formed later. */
+ if (current_sched_info->flags & DO_SPECULATION)
+ bitmap_clear (&spec_dependency_cache[INSN_LUID (insn)]);
}
- while (insn && SCHED_GROUP_P (insn) && (GET_CODE (insn) != CODE_LABEL));
+#endif
- return prev;
+ clear_deps_list (INSN_BACK_DEPS (insn));
}
-/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
- goes along with that. */
+/* All insns in a scheduling group except the first should only have
+ dependencies on the previous insn in the group. So we find the
+ first instruction in the scheduling group by walking the dependence
+ chains backwards. Then we add the dependencies for the group to
+ the previous nonnote insn. */
static void
-set_sched_group_p (insn)
- rtx insn;
+fixup_sched_groups (rtx insn)
{
- rtx link, prev;
+ dep_link_t link;
+ rtx prev_nonnote;
- SCHED_GROUP_P (insn) = 1;
+ FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (insn))
+ {
+ rtx i = insn;
+ dep_t dep = DEP_LINK_DEP (link);
+ rtx pro = DEP_PRO (dep);
- /* There may be a note before this insn now, but all notes will
- be removed before we actually try to schedule the insns, so
- it won't cause a problem later. We must avoid it here though. */
- prev = prev_nonnote_insn (insn);
+ do
+ {
+ i = prev_nonnote_insn (i);
+
+ if (pro == i)
+ goto next_link;
+ } while (SCHED_GROUP_P (i));
- /* Make a copy of all dependencies on the immediately previous insn,
- and add to this insn. This is so that all the dependencies will
- apply to the group. Remove an explicit dependence on this insn
- as SCHED_GROUP_P now represents it. */
+ if (! sched_insns_conditions_mutex_p (i, pro))
+ add_dependence (i, pro, DEP_KIND (dep));
+ next_link:;
+ }
- if (find_insn_list (prev, LOG_LINKS (insn)))
- remove_dependence (insn, prev);
+ delete_all_dependences (insn);
- for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
- add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
+ prev_nonnote = prev_nonnote_insn (insn);
+ if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
+ && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
+ add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
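+
+/* A rough sketch: given a group i1, i2, i3 with SCHED_GROUP_P set on i2
+   and i3, a back dependence of i3 on a producer PRO outside the group is
+   re-added as a dependence of i1, the first insn of the group, while i3
+   itself ends up with only the anti dependence on i2 added at the end of
+   fixup_sched_groups.  */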
\f
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:
(0) read dependence: read follows read
(1) true dependence: read follows write
- (2) anti dependence: write follows read
- (3) output dependence: write follows write
+ (2) output dependence: write follows write
+ (3) anti dependence: write follows read
We are careful to build only dependencies which actually exist, and
use transitivity to avoid building too many links. */
/* Add an INSN and MEM reference pair to the pending lists of DEPS.
   The MEM is a memory reference contained within INSN, which we are
   saving so that we can do memory aliasing on it. */
-void
-add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
- struct deps *deps;
- rtx *insn_list, *mem_list, insn, mem;
+static void
+add_insn_mem_dependence (struct deps *deps, bool read_p,
+ rtx insn, rtx mem)
{
+ rtx *insn_list;
+ rtx *mem_list;
rtx link;
+ if (read_p)
+ {
+ insn_list = &deps->pending_read_insns;
+ mem_list = &deps->pending_read_mems;
+ deps->pending_read_list_length++;
+ }
+ else
+ {
+ insn_list = &deps->pending_write_insns;
+ mem_list = &deps->pending_write_mems;
+ deps->pending_write_list_length++;
+ }
+
link = alloc_INSN_LIST (insn, *insn_list);
*insn_list = link;
mem = shallow_copy_rtx (mem);
XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
}
- link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
+ link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
*mem_list = link;
-
- deps->pending_lists_length++;
}
/* Make a dependency between every memory reference on the pending lists
and INSN, thus flushing the pending lists. FOR_READ is true if emitting
dependencies for a read operation, similarly with FOR_WRITE. */
-static void
-flush_pending_lists (deps, insn, for_read, for_write)
- struct deps *deps;
- rtx insn;
- int for_read, for_write;
-{
- if (for_write)
+static void
+flush_pending_lists (struct deps *deps, rtx insn, int for_read,
+ int for_write)
+{
+ if (for_write)
+ {
+ add_dependence_list_and_free (insn, &deps->pending_read_insns, 1,
+ REG_DEP_ANTI);
+ free_EXPR_LIST_list (&deps->pending_read_mems);
+ deps->pending_read_list_length = 0;
+ }
+
+ add_dependence_list_and_free (insn, &deps->pending_write_insns, 1,
+ for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
+ free_EXPR_LIST_list (&deps->pending_write_mems);
+ deps->pending_write_list_length = 0;
+
+ add_dependence_list_and_free (insn, &deps->last_pending_memory_flush, 1,
+ for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
+ deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
+ deps->pending_flush_length = 1;
+}
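+
+/* E.g. flushing around a write (FOR_READ == 0, FOR_WRITE == 1):
+
+     flush_pending_lists (deps, insn, 0, 1);
+
+   empties both pending lists (reads flushed with anti, writes with output
+   dependencies) and leaves INSN as the sole entry of
+   last_pending_memory_flush, so every later memory reference gains at
+   least an anti dependence on INSN.  */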
+\f
+/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
+ The type of the reference is specified by REF and can be SET,
+ CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
+
+static void
+sched_analyze_reg (struct deps *deps, int regno, enum machine_mode mode,
+ enum rtx_code ref, rtx insn)
+{
+ /* A hard reg in a wide mode may really be multiple registers.
+ If so, mark all of them just like the first. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int i = hard_regno_nregs[regno][mode];
+ if (ref == SET)
+ {
+ while (--i >= 0)
+ SET_REGNO_REG_SET (reg_pending_sets, regno + i);
+ }
+ else if (ref == USE)
+ {
+ while (--i >= 0)
+ SET_REGNO_REG_SET (reg_pending_uses, regno + i);
+ }
+ else
+ {
+ while (--i >= 0)
+ SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
+ }
+ }
+
+ /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
+ it does not reload. Ignore these as they have served their
+ purpose already. */
+ else if (regno >= deps->max_reg)
{
- add_dependence_list_and_free (insn, &deps->pending_read_insns,
- REG_DEP_ANTI);
- free_EXPR_LIST_list (&deps->pending_read_mems);
+ enum rtx_code code = GET_CODE (PATTERN (insn));
+ gcc_assert (code == USE || code == CLOBBER);
}
- add_dependence_list_and_free (insn, &deps->pending_write_insns,
- for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
- free_EXPR_LIST_list (&deps->pending_write_mems);
- deps->pending_lists_length = 0;
+ else
+ {
+ if (ref == SET)
+ SET_REGNO_REG_SET (reg_pending_sets, regno);
+ else if (ref == USE)
+ SET_REGNO_REG_SET (reg_pending_uses, regno);
+ else
+ SET_REGNO_REG_SET (reg_pending_clobbers, regno);
- add_dependence_list_and_free (insn, &deps->last_pending_memory_flush,
- for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
- deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
- deps->pending_flush_length = 1;
+ /* Pseudos that are REG_EQUIV to something may be replaced
+ by that during reloading. We need only add dependencies for
+ the address in the REG_EQUIV note. */
+ if (!reload_completed && get_reg_known_equiv_p (regno))
+ {
+ rtx t = get_reg_known_value (regno);
+ if (MEM_P (t))
+ sched_analyze_2 (deps, XEXP (t, 0), insn);
+ }
+
+ /* Don't let it cross a call after scheduling if it doesn't
+ already cross one. */
+ if (REG_N_CALLS_CROSSED (regno) == 0)
+ {
+ if (ref == USE)
+ deps->sched_before_next_call
+ = alloc_INSN_LIST (insn, deps->sched_before_next_call);
+ else
+ add_dependence_list (insn, deps->last_function_call, 1,
+ REG_DEP_ANTI);
+ }
+ }
}
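+
+/* E.g. if a pseudo, say (reg 200), is known equivalent to
+   (mem (plus (reg fp) (const_int -4))), then before reload a reference
+   to it also analyzes the address (plus (reg fp) (const_int -4)), since
+   reload may substitute the equivalent memory for the pseudo.  */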
-\f
+
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
rtx, X, creating all dependencies generated by the write to the
destination of X, and reads of everything mentioned. */
static void
-sched_analyze_1 (deps, x, insn)
- struct deps *deps;
- rtx x;
- rtx insn;
+sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
{
- int regno;
rtx dest = XEXP (x, 0);
enum rtx_code code = GET_CODE (x);
}
while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
- || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
+ || GET_CODE (dest) == ZERO_EXTRACT)
{
- if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
+ if (GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || df_read_modify_subreg_p (dest))
+ {
+ /* These both read and modify the result. We must handle
+ them as writes to get proper dependencies for following
+ instructions. We must handle them as reads to get proper
+ dependencies from this to previous instructions.
+ Thus we need to call sched_analyze_2. */
+
+ sched_analyze_2 (deps, XEXP (dest, 0), insn);
+ }
+ if (GET_CODE (dest) == ZERO_EXTRACT)
{
/* The second and third arguments are values read by this insn. */
sched_analyze_2 (deps, XEXP (dest, 1), insn);
dest = XEXP (dest, 0);
}
- if (GET_CODE (dest) == REG)
+ if (REG_P (dest))
{
- regno = REGNO (dest);
+ int regno = REGNO (dest);
+ enum machine_mode mode = GET_MODE (dest);
- /* A hard reg in a wide mode may really be multiple registers.
- If so, mark all of them just like the first. */
- if (regno < FIRST_PSEUDO_REGISTER)
- {
- int i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
- if (code == SET)
- {
- while (--i >= 0)
- SET_REGNO_REG_SET (reg_pending_sets, regno + i);
- }
- else
- {
- while (--i >= 0)
- SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
- }
- }
- /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
- it does not reload. Ignore these as they have served their
- purpose already. */
- else if (regno >= deps->max_reg)
- {
- if (GET_CODE (PATTERN (insn)) != USE
- && GET_CODE (PATTERN (insn)) != CLOBBER)
- abort ();
- }
- else
+ sched_analyze_reg (deps, regno, mode, code, insn);
+
+#ifdef STACK_REGS
+ /* Treat all writes to a stack register as modifying the TOS. */
+ if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
{
- if (code == SET)
- SET_REGNO_REG_SET (reg_pending_sets, regno);
- else
- SET_REGNO_REG_SET (reg_pending_clobbers, regno);
-
- /* Pseudos that are REG_EQUIV to something may be replaced
- by that during reloading. We need only add dependencies for
- the address in the REG_EQUIV note. */
- if (!reload_completed
- && reg_known_equiv_p[regno]
- && GET_CODE (reg_known_value[regno]) == MEM)
- sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
-
- /* Don't let it cross a call after scheduling if it doesn't
- already cross one. */
- if (REG_N_CALLS_CROSSED (regno) == 0)
- add_dependence_list (insn, deps->last_function_call, REG_DEP_ANTI);
+ /* Avoid analyzing the same register twice. */
+ if (regno != FIRST_STACK_REG)
+ sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
+ sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
}
+#endif
}
- else if (GET_CODE (dest) == MEM)
+ else if (MEM_P (dest))
{
/* Writing memory. */
rtx t = dest;
cselib_lookup (XEXP (t, 0), Pmode, 1);
XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
}
+ t = canon_rtx (t);
- if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
+ if ((deps->pending_read_list_length + deps->pending_write_list_length)
+ > MAX_PENDING_LIST_LENGTH)
{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long. */
pending_mem = deps->pending_read_mems;
while (pending)
{
- if (anti_dependence (XEXP (pending_mem, 0), t))
+ if (anti_dependence (XEXP (pending_mem, 0), t)
+ && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
pending = XEXP (pending, 1);
pending_mem = deps->pending_write_mems;
while (pending)
{
- if (output_dependence (XEXP (pending_mem, 0), t))
+ if (output_dependence (XEXP (pending_mem, 0), t)
+ && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
pending = XEXP (pending, 1);
pending_mem = XEXP (pending_mem, 1);
}
- add_dependence_list (insn, deps->last_pending_memory_flush,
+ add_dependence_list (insn, deps->last_pending_memory_flush, 1,
REG_DEP_ANTI);
- add_insn_mem_dependence (deps, &deps->pending_write_insns,
- &deps->pending_write_mems, insn, dest);
+ add_insn_mem_dependence (deps, false, insn, dest);
}
sched_analyze_2 (deps, XEXP (dest, 0), insn);
}
/* Analyze the uses of memory and registers in rtx X in INSN. */
static void
-sched_analyze_2 (deps, x, insn)
- struct deps *deps;
- rtx x;
- rtx insn;
+sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
{
int i;
int j;
#ifdef HAVE_cc0
case CC0:
/* User of CC0 depends on immediately preceding insn. */
- set_sched_group_p (insn);
+ SCHED_GROUP_P (insn) = 1;
+ /* Don't move CC0 setter to another block (it can set up the
+ same flag for previous CC0 users which is safe). */
+ CANT_MOVE (prev_nonnote_insn (insn)) = 1;
return;
#endif
case REG:
{
int regno = REGNO (x);
- if (regno < FIRST_PSEUDO_REGISTER)
- {
- int i = HARD_REGNO_NREGS (regno, GET_MODE (x));
- while (--i >= 0)
- SET_REGNO_REG_SET (reg_pending_uses, regno + i);
- }
- /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
- it does not reload. Ignore these as they have served their
- purpose already. */
- else if (regno >= deps->max_reg)
- {
- if (GET_CODE (PATTERN (insn)) != USE
- && GET_CODE (PATTERN (insn)) != CLOBBER)
- abort ();
- }
- else
- {
- SET_REGNO_REG_SET (reg_pending_uses, regno);
-
- /* Pseudos that are REG_EQUIV to something may be replaced
- by that during reloading. We need only add dependencies for
- the address in the REG_EQUIV note. */
- if (!reload_completed
- && reg_known_equiv_p[regno]
- && GET_CODE (reg_known_value[regno]) == MEM)
- sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
-
- /* If the register does not already cross any calls, then add this
- insn to the sched_before_next_call list so that it will still
- not cross calls after scheduling. */
- if (REG_N_CALLS_CROSSED (regno) == 0)
- deps->sched_before_next_call
- = alloc_INSN_LIST (insn, deps->sched_before_next_call);
- }
+ enum machine_mode mode = GET_MODE (x);
+
+ sched_analyze_reg (deps, regno, mode, USE, insn);
+
+#ifdef STACK_REGS
+ /* Treat all reads of a stack register as modifying the TOS. */
+ if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
+ {
+ /* Avoid analyzing the same register twice. */
+ if (regno != FIRST_STACK_REG)
+ sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
+ sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
+ }
+#endif
return;
}
cselib_lookup (XEXP (t, 0), Pmode, 1);
XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
}
+ t = canon_rtx (t);
pending = deps->pending_read_insns;
pending_mem = deps->pending_read_mems;
while (pending)
{
- if (read_dependence (XEXP (pending_mem, 0), t))
+ if (read_dependence (XEXP (pending_mem, 0), t)
+ && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
pending = XEXP (pending, 1);
while (pending)
{
if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
- t, rtx_varies_p))
- add_dependence (insn, XEXP (pending, 0), 0);
+ t, rtx_varies_p)
+ && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
+ {
+ if (current_sched_info->flags & DO_SPECULATION)
+ maybe_add_or_update_back_dep_1 (insn, XEXP (pending, 0),
+ REG_DEP_TRUE,
+ BEGIN_DATA | DEP_TRUE,
+ XEXP (pending_mem, 0), t, 0);
+ else
+ add_dependence (insn, XEXP (pending, 0), REG_DEP_TRUE);
+ }
pending = XEXP (pending, 1);
pending_mem = XEXP (pending_mem, 1);
}
for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
- || deps_may_trap_p (x))
+ if (! JUMP_P (XEXP (u, 0)) || deps_may_trap_p (x))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
/* Always add these dependencies to pending_reads, since
this insn may be followed by a write. */
- add_insn_mem_dependence (deps, &deps->pending_read_insns,
- &deps->pending_read_mems, insn, x);
+ add_insn_mem_dependence (deps, true, insn, x);
/* Take advantage of tail recursion here. */
sched_analyze_2 (deps, XEXP (x, 0), insn);
     Consider for instance a volatile asm that changes the fpu rounding
     mode.  An insn should not be moved across this even if it only uses
     pseudo-regs because it might give an incorrectly rounded result. */
if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
- reg_pending_barrier = true;
+ reg_pending_barrier = TRUE_BARRIER;
  /* For all ASM_OPERANDS, we must traverse the vector of input operands.
     We can not just fall through here since then we would be confused
     by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
     traditional asms unlike their normal usage. */
/* Analyze an INSN with pattern X to find all dependencies. */
static void
-sched_analyze_insn (deps, x, insn, loop_notes)
- struct deps *deps;
- rtx x, insn;
- rtx loop_notes;
+sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
{
RTX_CODE code = GET_CODE (x);
rtx link;
- int i;
+ unsigned i;
+ reg_set_iterator rsi;
if (code == COND_EXEC)
{
code = GET_CODE (x);
}
if (code == SET || code == CLOBBER)
- sched_analyze_1 (deps, x, insn);
+ {
+ sched_analyze_1 (deps, x, insn);
+
+ /* Bare clobber insns are used for letting life analysis, reg-stack
+ and others know that a value is dead. Depend on the last call
+ instruction so that reg-stack won't get confused. */
+ if (code == CLOBBER)
+ add_dependence_list (insn, deps->last_function_call, 1, REG_DEP_OUTPUT);
+ }
else if (code == PARALLEL)
{
- int i;
- for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ for (i = XVECLEN (x, 0); i--;)
{
rtx sub = XVECEXP (x, 0, i);
code = GET_CODE (sub);
sched_analyze_2 (deps, x, insn);
/* Mark registers CLOBBERED or used by called function. */
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
{
for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
{
sched_analyze_2 (deps, XEXP (link, 0), insn);
}
if (find_reg_note (insn, REG_SETJMP, NULL))
- reg_pending_barrier = true;
+ reg_pending_barrier = MOVE_BARRIER;
}
- if (GET_CODE (insn) == JUMP_INSN)
+ if (JUMP_P (insn))
{
rtx next;
next = next_nonnote_insn (insn);
- if (next && GET_CODE (next) == BARRIER)
- reg_pending_barrier = true;
+ if (next && BARRIER_P (next))
+ reg_pending_barrier = TRUE_BARRIER;
else
{
rtx pending, pending_mem;
- regset_head tmp;
- INIT_REG_SET (&tmp);
+ regset_head tmp_uses, tmp_sets;
+ INIT_REG_SET (&tmp_uses);
+ INIT_REG_SET (&tmp_sets);
+
+ (*current_sched_info->compute_jump_reg_dependencies)
+ (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
+ /* Make latency of jump equal to 0 by using anti-dependence. */
+ EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
+ add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
+ reg_last->uses_length++;
+ reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
+ }
+ IOR_REG_SET (reg_pending_sets, &tmp_sets);
- (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
- IOR_REG_SET (reg_pending_uses, &tmp);
- CLEAR_REG_SET (&tmp);
+ CLEAR_REG_SET (&tmp_uses);
+ CLEAR_REG_SET (&tmp_sets);
	  /* All memory writes and volatile reads must happen before the
	     jump.  Non-volatile reads must happen before the jump iff
	     the result is needed by the above register used mask. */
pending_mem = deps->pending_write_mems;
while (pending)
{
- add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
+ if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
+ add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
pending = XEXP (pending, 1);
pending_mem = XEXP (pending_mem, 1);
}
pending_mem = deps->pending_read_mems;
while (pending)
{
- if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
+ if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
+ && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
pending = XEXP (pending, 1);
pending_mem = XEXP (pending_mem, 1);
}
- add_dependence_list (insn, deps->last_pending_memory_flush,
+ add_dependence_list (insn, deps->last_pending_memory_flush, 1,
REG_DEP_ANTI);
}
}
- /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
- block, then we must be sure that no instructions are scheduled across it.
- Otherwise, the reg_n_refs info (which depends on loop_depth) would
- become incorrect. */
- if (loop_notes)
- {
- rtx link;
-
- /* Update loop_notes with any notes from this insn. Also determine
- if any of the notes on the list correspond to instruction scheduling
- barriers (loop, eh & setjmp notes, but not range notes). */
- link = loop_notes;
- while (XEXP (link, 1))
- {
- if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
- || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
- || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
- || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
- reg_pending_barrier = true;
-
- link = XEXP (link, 1);
- }
- XEXP (link, 1) = REG_NOTES (insn);
- REG_NOTES (insn) = loop_notes;
- }
-
/* If this instruction can throw an exception, then moving it changes
where block boundaries fall. This is mighty confusing elsewhere.
Therefore, prevent such an instruction from being moved. */
if (can_throw_internal (insn))
- reg_pending_barrier = true;
+ reg_pending_barrier = MOVE_BARRIER;
/* Add dependencies if a scheduling barrier was found. */
if (reg_pending_barrier)
{
- if (GET_CODE (PATTERN (insn)) == COND_EXEC)
+      /* In the case of a barrier, most of the added dependencies are not
+	 real, so we use anti-dependences here.  */
+ if (sched_get_condition (insn))
{
- EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
+ EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->sets, 0);
- add_dependence_list (insn, reg_last->clobbers, 0);
- });
+ add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+ add_dependence_list
+ (insn, reg_last->sets, 0,
+ reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
+ add_dependence_list
+ (insn, reg_last->clobbers, 0,
+ reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
+ }
}
else
{
- EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
+ EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list_and_free (insn, ®_last->uses,
+ add_dependence_list_and_free (insn, ®_last->uses, 0,
REG_DEP_ANTI);
- add_dependence_list_and_free (insn, ®_last->sets, 0);
- add_dependence_list_and_free (insn, ®_last->clobbers, 0);
+ add_dependence_list_and_free
+ (insn, ®_last->sets, 0,
+ reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
+ add_dependence_list_and_free
+ (insn, ®_last->clobbers, 0,
+ reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
reg_last->uses_length = 0;
reg_last->clobbers_length = 0;
- });
+ }
}
- for (i = 0; i < deps->max_reg; i++)
+ for (i = 0; i < (unsigned)deps->max_reg; i++)
{
struct deps_reg *reg_last = &deps->reg_last[i];
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
}
flush_pending_lists (deps, insn, true, true);
- reg_pending_barrier = false;
+ CLEAR_REG_SET (&deps->reg_conditional_sets);
+ reg_pending_barrier = NOT_A_BARRIER;
}
else
{
/* If the current insn is conditional, we can't free any
of the lists. */
- if (GET_CODE (PATTERN (insn)) == COND_EXEC)
+ if (sched_get_condition (insn))
{
- EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0);
- add_dependence_list (insn, reg_last->clobbers, 0);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
+ add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
reg_last->uses_length++;
- });
- EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
+ }
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
- add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
+ add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
reg_last->clobbers_length++;
- });
- EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
+ }
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
- add_dependence_list (insn, reg_last->clobbers, REG_DEP_OUTPUT);
- add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
+ add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
+ add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
- });
+ SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
+ }
}
else
{
- EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0);
- add_dependence_list (insn, reg_last->clobbers, 0);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
+ add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
reg_last->uses_length++;
reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
- });
- EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
+ }
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
- add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
|| reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
{
- add_dependence_list_and_free (insn, &reg_last->sets,
+ add_dependence_list_and_free (insn, &reg_last->sets, 0,
 REG_DEP_OUTPUT);
- add_dependence_list_and_free (insn, &reg_last->uses,
+ add_dependence_list_and_free (insn, &reg_last->uses, 0,
 REG_DEP_ANTI);
- add_dependence_list_and_free (insn, &reg_last->clobbers,
+ add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
 REG_DEP_OUTPUT);
+ reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
reg_last->clobbers_length = 0;
reg_last->uses_length = 0;
}
else
{
- add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
- add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
+ add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
}
reg_last->clobbers_length++;
reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
- });
- EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
+ }
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list_and_free (insn, &reg_last->sets,
+ add_dependence_list_and_free (insn, &reg_last->sets, 0,
 REG_DEP_OUTPUT);
- add_dependence_list_and_free (insn, &reg_last->clobbers,
+ add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
 REG_DEP_OUTPUT);
- add_dependence_list_and_free (insn, &reg_last->uses,
+ add_dependence_list_and_free (insn, &reg_last->uses, 0,
 REG_DEP_ANTI);
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
reg_last->uses_length = 0;
reg_last->clobbers_length = 0;
- });
+ CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
+ }
}
IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
CLEAR_REG_SET (reg_pending_clobbers);
CLEAR_REG_SET (reg_pending_sets);
+ /* If we are currently in a libcall scheduling group, then mark the
+ current insn as being in a scheduling group and as one that cannot
+ be moved into a different basic block. */
+
+ if (deps->libcall_block_tail_insn)
+ {
+ SCHED_GROUP_P (insn) = 1;
+ CANT_MOVE (insn) = 1;
+ }
+
/* If a post-call group is still open, see if it should remain so.
This insn must be a simple move of a hard reg to a pseudo or
vice-versa.
tmp = SET_DEST (set);
if (GET_CODE (tmp) == SUBREG)
tmp = SUBREG_REG (tmp);
- if (GET_CODE (tmp) == REG)
+ if (REG_P (tmp))
dest_regno = REGNO (tmp);
else
goto end_call_group;
tmp = SET_SRC (set);
if (GET_CODE (tmp) == SUBREG)
tmp = SUBREG_REG (tmp);
- if (GET_CODE (tmp) == REG)
+ if ((GET_CODE (tmp) == PLUS
+ || GET_CODE (tmp) == MINUS)
+ && REG_P (XEXP (tmp, 0))
+ && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
+ && dest_regno == STACK_POINTER_REGNUM)
+ src_regno = STACK_POINTER_REGNUM;
+ else if (REG_P (tmp))
src_regno = REGNO (tmp);
else
goto end_call_group;
if (src_regno < FIRST_PSEUDO_REGISTER
|| dest_regno < FIRST_PSEUDO_REGISTER)
{
- set_sched_group_p (insn);
+ if (deps->in_post_call_group_p == post_call_initial)
+ deps->in_post_call_group_p = post_call;
+
+ SCHED_GROUP_P (insn) = 1;
CANT_MOVE (insn) = 1;
}
else
{
end_call_group:
- deps->in_post_call_group_p = false;
+ deps->in_post_call_group_p = not_post_call;
}
}
+
+ /* Fixup the dependencies in the sched group. */
+ if (SCHED_GROUP_P (insn))
+ fixup_sched_groups (insn);
}
-/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
- for every dependency. */
+/* Analyze every insn between HEAD and TAIL inclusive, creating backward
+ dependencies for each insn. */
void
-sched_analyze (deps, head, tail)
- struct deps *deps;
- rtx head, tail;
+sched_analyze (struct deps *deps, rtx head, rtx tail)
{
rtx insn;
- rtx loop_notes = 0;
if (current_sched_info->use_cselib)
- cselib_init ();
+ cselib_init (true);
+ /* Before reload, if the previous block ended in a call, show that
+ we are inside a post-call group, so as to keep the lifetimes of
+ hard registers correct. */
+ if (! reload_completed && !LABEL_P (head))
+ {
+ insn = prev_nonnote_insn (head);
+ if (insn && CALL_P (insn))
+ deps->in_post_call_group_p = post_call_initial;
+ }
for (insn = head;; insn = NEXT_INSN (insn))
{
- if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ rtx link, end_seq, r0, set;
+
+ if (INSN_P (insn))
{
/* Clear out the stale LOG_LINKS from flow. */
free_INSN_LIST_list (&LOG_LINKS (insn));
+ /* These two lists will be freed in schedule_insn (). */
+ INSN_BACK_DEPS (insn) = create_deps_list (false);
+ INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list (false);
+
+ /* This one should be allocated on the obstack because it should live
+ until scheduling ends. */
+ INSN_FORW_DEPS (insn) = create_deps_list (true);
+ }
+
+ if (NONJUMP_INSN_P (insn) || JUMP_P (insn))
+ {
/* Make each JUMP_INSN a scheduling barrier for memory
references. */
- if (GET_CODE (insn) == JUMP_INSN)
+ if (JUMP_P (insn))
{
/* Keep the list a reasonable size. */
if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
deps->last_pending_memory_flush
= alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
}
- sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
- loop_notes = 0;
+ sched_analyze_insn (deps, PATTERN (insn), insn);
}
- else if (GET_CODE (insn) == CALL_INSN)
+ else if (CALL_P (insn))
{
int i;
CANT_MOVE (insn) = 1;
- /* Clear out the stale LOG_LINKS from flow. */
- free_INSN_LIST_list (&LOG_LINKS (insn));
-
if (find_reg_note (insn, REG_SETJMP, NULL))
{
/* This is setjmp. Assume that all registers, not just
hard registers, may be clobbered by this call. */
- reg_pending_barrier = true;
+ reg_pending_barrier = MOVE_BARRIER;
}
else
{
SET_REGNO_REG_SET (reg_pending_sets, i);
SET_REGNO_REG_SET (reg_pending_uses, i);
}
- /* Other call-clobbered hard regs may be clobbered. */
- else if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
+ /* Other call-clobbered hard regs may be clobbered.
+ Since we only have a choice between 'might be clobbered'
+ and 'definitely not clobbered', we must include all
+ partly call-clobbered registers here. */
+ else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
+ || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
SET_REGNO_REG_SET (reg_pending_clobbers, i);
/* We don't know what set of fixed registers might be used
by the function, but it is certain that the stack pointer
/* For each insn which shouldn't cross a call, add a dependence
between that insn and this call insn. */
- add_dependence_list_and_free (insn, &deps->sched_before_next_call,
+ add_dependence_list_and_free (insn, &deps->sched_before_next_call, 1,
REG_DEP_ANTI);
- sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
- loop_notes = 0;
+ sched_analyze_insn (deps, PATTERN (insn), insn);
/* In the absence of interprocedural alias analysis, we must flush
all pending reads and writes, and start new dependencies starting
/* Before reload, begin a post-call group, so as to keep the
lifetimes of hard registers correct. */
if (! reload_completed)
- deps->in_post_call_group_p = true;
+ deps->in_post_call_group_p = post_call;
}
- /* See comments on reemit_notes as to why we do this.
- ??? Actually, the reemit_notes just say what is done, not why. */
-
- if (GET_CODE (insn) == NOTE
- && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
- || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
- || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
- || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END))
- {
- rtx rtx_region;
-
- if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
- || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
- rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
- else
- rtx_region = GEN_INT (0);
-
- loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
- rtx_region,
- loop_notes);
- loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
- GEN_INT (NOTE_LINE_NUMBER (insn)),
- loop_notes);
- CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn);
- }
+ /* EH_REGION insn notes cannot appear until well after we complete
+ scheduling. */
+ if (NOTE_P (insn))
+ gcc_assert (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END);
if (current_sched_info->use_cselib)
cselib_process_insn (insn);
+
+ /* Now that we have completed handling INSN, check and see if it is
+ a CLOBBER beginning a libcall block. If it is, record the
+ end of the libcall sequence.
+
+ We want to schedule libcall blocks as a unit before reload. While
+ this restricts scheduling, it preserves the meaning of a libcall
+ block.
+
+ As a side effect, we may get better code due to decreased register
+ pressure as well as less chance of a foreign insn appearing in
+ a libcall block. */
+ if (!reload_completed
+ /* Note we may have nested libcall sequences. We only care about
+ the outermost libcall sequence. */
+ && deps->libcall_block_tail_insn == 0
+ /* The sequence must start with a clobber of a register. */
+ && NONJUMP_INSN_P (insn)
+ && GET_CODE (PATTERN (insn)) == CLOBBER
+ && (r0 = XEXP (PATTERN (insn), 0), REG_P (r0))
+ /* The CLOBBER must also have a REG_LIBCALL note attached. */
+ && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
+ && (end_seq = XEXP (link, 0)) != 0
+ /* The insn referenced by the REG_LIBCALL note must be a
+ simple nop copy with the same destination as the register
+ mentioned in the clobber. */
+ && (set = single_set (end_seq)) != 0
+ && SET_DEST (set) == r0 && SET_SRC (set) == r0
+ /* And finally the insn referenced by the REG_LIBCALL must
+ also contain a REG_EQUAL note and a REG_RETVAL note. */
+ && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
+ && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
+ deps->libcall_block_tail_insn = XEXP (link, 0);
+
+ /* If we have reached the end of a libcall block, then close the
+ block. */
+ if (deps->libcall_block_tail_insn == insn)
+ deps->libcall_block_tail_insn = 0;
+
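+ /* To illustrate, the sequence recognized above presumably has the
+ RTL shape
+
+ (insn (clobber (reg R))) ;; carries the REG_LIBCALL note
+ ... libcall body ...
+ (insn (set (reg R) (reg R))) ;; REG_EQUAL and REG_RETVAL notes
+
+ with the same register R throughout; while
+ deps->libcall_block_tail_insn is nonzero, each analyzed insn is glued
+ to the group via SCHED_GROUP_P in sched_analyze_insn above. */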
if (insn == tail)
{
 if (current_sched_info->use_cselib)
   cselib_finish ();
 return;
}
}
- abort ();
+ gcc_unreachable ();
}
\f
+
+/* The following function adds the forward dependence described by LINK.
+ The forward dependence must not already exist. */
+
+void
+add_forw_dep (dep_link_t link)
+{
+ dep_t dep = DEP_LINK_DEP (link);
+ rtx to = DEP_CON (dep);
+ rtx from = DEP_PRO (dep);
+
+#ifdef ENABLE_CHECKING
+ /* If add_dependence is working properly there should never
+ be notes, deleted insns or duplicates in the backward
+ links. Thus we need not check for them here.
+
+ However, if we have enabled checking we might as well go
+ ahead and verify that add_dependence worked properly. */
+ gcc_assert (INSN_P (from));
+ gcc_assert (!INSN_DELETED_P (from));
+ if (true_dependency_cache)
+ {
+ gcc_assert (!bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],
+ INSN_LUID (to)));
+ bitmap_set_bit (&forward_dependency_cache[INSN_LUID (from)],
+ INSN_LUID (to));
+ }
+
+ gcc_assert (find_link_by_con_in_deps_list (INSN_FORW_DEPS (from), to)
+ == NULL);
+#endif
+
+ add_to_deps_list (DEP_NODE_FORW (DEP_LINK_NODE (link)),
+ INSN_FORW_DEPS (from));
+
+ INSN_DEP_COUNT (to) += 1;
+}
+
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
- dependences from LOG_LINKS to build forward dependences in
- INSN_DEPEND. */
+ dependences from the INSN_BACK_DEPS lists to build forward dependences
+ in INSN_FORW_DEPS. */
void
-compute_forward_dependences (head, tail)
- rtx head, tail;
+compute_forward_dependences (rtx head, rtx tail)
{
- rtx insn, link;
+ rtx insn;
rtx next_tail;
- enum reg_note dep_type;
next_tail = NEXT_INSN (tail);
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
{
+ dep_link_t link;
+
if (! INSN_P (insn))
continue;
+
+ if (current_sched_info->flags & DO_SPECULATION)
+ {
+ /* We will add links, preserving order, from INSN_BACK_DEPS to
+ NEW. */
+ dep_link_t new = NULL;
- insn = group_leader (insn);
+ link = DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
- for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
- {
- rtx x = group_leader (XEXP (link, 0));
- rtx new_link;
+ while (link != NULL)
+ {
+ dep_link_t next = DEP_LINK_NEXT (link);
- if (x != XEXP (link, 0))
- continue;
+ detach_dep_link (link);
+ adjust_add_sorted_back_dep (insn, link, &new);
-#ifdef ENABLE_CHECKING
- /* If add_dependence is working properly there should never
- be notes, deleted insns or duplicates in the backward
- links. Thus we need not check for them here.
-
- However, if we have enabled checking we might as well go
- ahead and verify that add_dependence worked properly. */
- if (GET_CODE (x) == NOTE
- || INSN_DELETED_P (x)
- || (forward_dependency_cache != NULL
- && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
- INSN_LUID (insn)))
- || (forward_dependency_cache == NULL
- && find_insn_list (insn, INSN_DEPEND (x))))
- abort ();
- if (forward_dependency_cache != NULL)
- SET_BIT (forward_dependency_cache[INSN_LUID (x)],
- INSN_LUID (insn));
-#endif
+ link = next;
+ }
- new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));
+ /* Install NEW as the list of backward dependencies. */
+ if (new != NULL)
+ {
+ DEP_LINK_PREV_NEXTP (new)
+ = &DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
- dep_type = REG_NOTE_KIND (link);
- PUT_REG_NOTE_KIND (new_link, dep_type);
+ DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)) = new;
+ }
+ }
- INSN_DEPEND (x) = new_link;
- INSN_DEP_COUNT (insn) += 1;
- }
+ FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (insn))
+ add_forw_dep (link);
}
}
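+
+/* After compute_forward_dependences, every backward dependence recorded
+ in INSN_BACK_DEPS (CON) with producer PRO should be mirrored by a link
+ in INSN_FORW_DEPS (PRO), and INSN_DEP_COUNT (CON) should equal the
+ number of such links (see add_forw_dep above). */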
\f
n_bbs is the number of region blocks. */
void
-init_deps (deps)
- struct deps *deps;
+init_deps (struct deps *deps)
{
int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
deps->max_reg = max_reg;
- deps->reg_last = (struct deps_reg *)
- xcalloc (max_reg, sizeof (struct deps_reg));
+ deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
INIT_REG_SET (&deps->reg_last_in_use);
+ INIT_REG_SET (&deps->reg_conditional_sets);
deps->pending_read_insns = 0;
deps->pending_read_mems = 0;
deps->pending_write_insns = 0;
deps->pending_write_mems = 0;
- deps->pending_lists_length = 0;
+ deps->pending_read_list_length = 0;
+ deps->pending_write_list_length = 0;
deps->pending_flush_length = 0;
deps->last_pending_memory_flush = 0;
deps->last_function_call = 0;
deps->sched_before_next_call = 0;
- deps->in_post_call_group_p = false;
+ deps->in_post_call_group_p = not_post_call;
+ deps->libcall_block_tail_insn = 0;
}
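+
+/* A typical usage sketch (illustrative; cf. the callers in sched-rgn.c):
+
+ struct deps tmp_deps;
+
+ init_deps (&tmp_deps);
+ sched_analyze (&tmp_deps, head, tail);
+ free_deps (&tmp_deps);  */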
/* Free insn lists found in DEPS. */
void
-free_deps (deps)
- struct deps *deps;
+free_deps (struct deps *deps)
{
- int i;
+ unsigned i;
+ reg_set_iterator rsi;
free_INSN_LIST_list (&deps->pending_read_insns);
free_EXPR_LIST_list (&deps->pending_read_mems);
free_INSN_LIST_list (&deps->last_pending_memory_flush);
/* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
- times. For a test case with 42000 regs and 8000 small basic blocks,
+ times. For a testcase with 42000 regs and 8000 small basic blocks,
this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
- EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
+ EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- free_INSN_LIST_list (®_last->uses);
- free_INSN_LIST_list (®_last->sets);
- free_INSN_LIST_list (®_last->clobbers);
- });
+ if (reg_last->uses)
+ free_INSN_LIST_list (®_last->uses);
+ if (reg_last->sets)
+ free_INSN_LIST_list (®_last->sets);
+ if (reg_last->clobbers)
+ free_INSN_LIST_list (®_last->clobbers);
+ }
CLEAR_REG_SET (&deps->reg_last_in_use);
+ CLEAR_REG_SET (&deps->reg_conditional_sets);
free (deps->reg_last);
}
/* If it is profitable to use them, initialize caches for tracking
- dependency informatino. LUID is the number of insns to be scheduled,
+ dependency information. LUID is the number of insns to be scheduled,
it is used in the estimate of profitability. */
void
-init_dependency_caches (luid)
- int luid;
+init_dependency_caches (int luid)
{
/* ?!? We could save some memory by computing a per-region luid mapping
which could reduce both the number of vectors in the cache and the size
what we consider "very high". */
if (luid / n_basic_blocks > 100 * 5)
{
- true_dependency_cache = sbitmap_vector_alloc (luid, luid);
- sbitmap_vector_zero (true_dependency_cache, luid);
- anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
- sbitmap_vector_zero (anti_dependency_cache, luid);
- output_dependency_cache = sbitmap_vector_alloc (luid, luid);
- sbitmap_vector_zero (output_dependency_cache, luid);
+ cache_size = 0;
+ extend_dependency_caches (luid, true);
+ }
+
+ /* The lifetime of this obstack is the whole of function scheduling (not
+ single-region scheduling), because some dependencies can be manually
+ generated for regions other than the current one. See dont_calc_deps
+ in sched-{rgn, ebb}.c .
+
+ A possible solution would be to have two obstacks:
+ * a big one for regular dependencies with region-scheduling lifetime,
+ * and a small one for manually generated dependencies with
+ function-scheduling lifetime. */
+ gcc_obstack_init (&deps_obstack);
+}
+
+/* Create (if CREATE_P) or, if they already exist, extend the dependency
+ caches by N entries. */
+void
+extend_dependency_caches (int n, bool create_p)
+{
+ if (create_p || true_dependency_cache)
+ {
+ int i, luid = cache_size + n;
+
+ true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
+ luid);
+ output_dependency_cache = XRESIZEVEC (bitmap_head,
+ output_dependency_cache, luid);
+ anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
+ luid);
+#ifdef ENABLE_CHECKING
+ forward_dependency_cache = XRESIZEVEC (bitmap_head,
+ forward_dependency_cache, luid);
+#endif
+ if (current_sched_info->flags & DO_SPECULATION)
+ spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
+ luid);
+
+ for (i = cache_size; i < luid; i++)
+ {
+ bitmap_initialize (&true_dependency_cache[i], 0);
+ bitmap_initialize (&output_dependency_cache[i], 0);
+ bitmap_initialize (&anti_dependency_cache[i], 0);
#ifdef ENABLE_CHECKING
- forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
- sbitmap_vector_zero (forward_dependency_cache, luid);
+ bitmap_initialize (&forward_dependency_cache[i], 0);
#endif
+ if (current_sched_info->flags & DO_SPECULATION)
+ bitmap_initialize (&spec_dependency_cache[i], 0);
+ }
+ cache_size = luid;
}
}
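+
+/* Indexing sketch (illustrative): each cache is an array of bitmap_head
+ indexed by the luid of the consumer insn, so bit ELEM_LUID in
+ true_dependency_cache[INSN_LUID (insn)] records a true dependence of
+ INSN on the insn with luid ELEM_LUID; compare delete_back_forw_dep
+ below, which clears exactly these bits. */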
/* Free the caches allocated in init_dependency_caches. */
void
-free_dependency_caches ()
+free_dependency_caches (void)
{
+ obstack_free (&deps_obstack, NULL);
+
if (true_dependency_cache)
{
- sbitmap_vector_free (true_dependency_cache);
+ int i;
+
+ for (i = 0; i < cache_size; i++)
+ {
+ bitmap_clear (&true_dependency_cache[i]);
+ bitmap_clear (&output_dependency_cache[i]);
+ bitmap_clear (&anti_dependency_cache[i]);
+#ifdef ENABLE_CHECKING
+ bitmap_clear (&forward_dependency_cache[i]);
+#endif
+ if (current_sched_info->flags & DO_SPECULATION)
+ bitmap_clear (&spec_dependency_cache[i]);
+ }
+ free (true_dependency_cache);
true_dependency_cache = NULL;
- sbitmap_vector_free (anti_dependency_cache);
- anti_dependency_cache = NULL;
- sbitmap_vector_free (output_dependency_cache);
+ free (output_dependency_cache);
output_dependency_cache = NULL;
+ free (anti_dependency_cache);
+ anti_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
- sbitmap_vector_free (forward_dependency_cache);
+ free (forward_dependency_cache);
forward_dependency_cache = NULL;
#endif
+ if (current_sched_info->flags & DO_SPECULATION)
+ {
+ free (spec_dependency_cache);
+ spec_dependency_cache = NULL;
+ }
}
}
code. */
void
-init_deps_global ()
+init_deps_global (void)
{
- reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
- reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
- reg_pending_uses = INITIALIZE_REG_SET (reg_pending_uses_head);
- reg_pending_barrier = false;
+ reg_pending_sets = ALLOC_REG_SET (®_obstack);
+ reg_pending_clobbers = ALLOC_REG_SET (®_obstack);
+ reg_pending_uses = ALLOC_REG_SET (®_obstack);
+ reg_pending_barrier = NOT_A_BARRIER;
}
/* Free everything used by the dependency analysis code. */
void
-finish_deps_global ()
+finish_deps_global (void)
{
FREE_REG_SET (reg_pending_sets);
FREE_REG_SET (reg_pending_clobbers);
FREE_REG_SET (reg_pending_uses);
}
+
+/* Insert LINK into the dependence chain pointed to by LINKP and
+ maintain the sort order. */
+static void
+adjust_add_sorted_back_dep (rtx insn, dep_link_t link, dep_link_t *linkp)
+{
+ gcc_assert (current_sched_info->flags & DO_SPECULATION);
+
+ /* If the insn cannot move speculatively, but the link is speculative,
+ make it a hard dependence. */
+ if (HAS_INTERNAL_DEP (insn)
+ && (DEP_LINK_STATUS (link) & SPECULATIVE))
+ {
+ DEP_LINK_STATUS (link) &= ~SPECULATIVE;
+
+ if (true_dependency_cache)
+ bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (DEP_LINK_PRO (link)));
+ }
+
+ /* Non-speculative links go at the head of deps_list, followed by
+ speculative links. */
+ if (DEP_LINK_STATUS (link) & SPECULATIVE)
+ while (*linkp && !(DEP_LINK_STATUS (*linkp) & SPECULATIVE))
+ linkp = &DEP_LINK_NEXT (*linkp);
+
+ attach_dep_link (link, linkp);
+
+ if (CHECK)
+ gcc_assert (deps_list_consistent_p (INSN_BACK_DEPS (insn)));
+}
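+
+/* Hence a backward dependencies list maintained by the function above
+ has the shape
+
+ [ hard deps ... ] [ speculative deps ... ]
+
+ with every non-speculative link preceding the first speculative one. */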
+
+/* Move the dependence pointed to by LINKP to the back dependencies
+ of INSN, and also add this dependence to the forward ones. All dep_links,
+ except the one pointed to by LINKP, must be sorted. */
+static void
+adjust_back_add_forw_dep (rtx insn, dep_link_t *linkp)
+{
+ dep_link_t link;
+
+ gcc_assert (current_sched_info->flags & DO_SPECULATION);
+
+ link = *linkp;
+ detach_dep_link (link);
+
+ adjust_add_sorted_back_dep (insn, link,
+ &DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)));
+ add_forw_dep (link);
+}
+
+/* Remove forward dependence described by L. */
+static void
+delete_forw_dep (dep_link_t l)
+{
+ gcc_assert (current_sched_info->flags & DO_SPECULATION);
+
+#ifdef ENABLE_CHECKING
+ if (true_dependency_cache)
+ bitmap_clear_bit (&forward_dependency_cache[INSN_LUID (DEP_LINK_PRO (l))],
+ INSN_LUID (DEP_LINK_CON (l)));
+#endif
+
+ detach_dep_link (l);
+
+ INSN_DEP_COUNT (DEP_LINK_CON (l))--;
+}
+
+/* Estimate the weakness of dependence between MEM1 and MEM2. */
+static dw_t
+estimate_dep_weak (rtx mem1, rtx mem2)
+{
+ rtx r1, r2;
+
+ if (mem1 == mem2)
+ /* MEMs are the same - don't speculate. */
+ return MIN_DEP_WEAK;
+
+ r1 = XEXP (mem1, 0);
+ r2 = XEXP (mem2, 0);
+
+ if (r1 == r2
+ || (REG_P (r1) && REG_P (r2)
+ && REGNO (r1) == REGNO (r2)))
+ /* Again, the MEMs address the same memory - don't speculate. */
+ return MIN_DEP_WEAK;
+ else if ((REG_P (r1) && !REG_P (r2))
+ || (!REG_P (r1) && REG_P (r2)))
+ /* Different addressing modes - a reason to be more speculative
+ than usual. */
+ return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
+ else
+ /* We can't say anything about the dependence. */
+ return UNCERTAIN_DEP_WEAK;
+}
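+
+/* For example (illustrative): for mem1 = (mem (reg 1)) and
+ mem2 = (mem (plus (reg 2) (const_int 4))) the addressing modes differ,
+ so the function returns the midpoint between UNCERTAIN_DEP_WEAK and
+ NO_DEP_WEAK, while for mem1 == mem2 it returns MIN_DEP_WEAK - don't
+ speculate. */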
+
+/* Add or update a backward dependence between INSN and ELEM with type
+ DEP_TYPE. This function can handle INSN and ELEM being the same insn
+ (INSN == ELEM). It is a convenience wrapper. */
+void
+add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
+{
+ ds_t ds;
+
+ if (dep_type == REG_DEP_TRUE)
+ ds = DEP_TRUE;
+ else if (dep_type == REG_DEP_OUTPUT)
+ ds = DEP_OUTPUT;
+ else if (dep_type == REG_DEP_ANTI)
+ ds = DEP_ANTI;
+ else
+ gcc_unreachable ();
+
+ maybe_add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, 0);
+}
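+
+/* Usage sketch (illustrative): to record that INSN must not be scheduled
+ before ELEM when no value flows between them, a caller writes
+
+ add_dependence (insn, elem, REG_DEP_ANTI);
+
+ which maps REG_DEP_ANTI to the DEP_ANTI status bit as above. */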
+
+/* Add or update backward dependence between INSN and ELEM
+ with given type DEP_TYPE and dep_status DS.
+ This function is a convenience wrapper. */
+enum DEPS_ADJUST_RESULT
+add_or_update_back_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
+{
+ return add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, 0);
+}
+
+/* Add or update both backward and forward dependencies between INSN and ELEM
+ with given type DEP_TYPE and dep_status DS. */
+void
+add_or_update_back_forw_dep (rtx insn, rtx elem, enum reg_note dep_type,
+ ds_t ds)
+{
+ enum DEPS_ADJUST_RESULT res;
+ dep_link_t *linkp;
+
+ res = add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, &linkp);
+
+ if (res == DEP_CHANGED || res == DEP_CREATED)
+ {
+ if (res == DEP_CHANGED)
+ delete_forw_dep (DEP_NODE_FORW (DEP_LINK_NODE (*linkp)));
+ else if (res == DEP_CREATED)
+ linkp = &DEPS_LIST_FIRST (INSN_BACK_DEPS (insn));
+
+ adjust_back_add_forw_dep (insn, linkp);
+ }
+}
+
+/* Add both backward and forward dependencies between INSN and ELEM
+ with given type DEP_TYPE and dep_status DS. */
+void
+add_back_forw_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
+{
+ add_back_dep (insn, elem, dep_type, ds);
+ adjust_back_add_forw_dep (insn, &DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)));
+
+ if (CHECK)
+ gcc_assert (deps_list_consistent_p (INSN_BACK_DEPS (insn)));
+}
+
+/* Remove a dependency referred to by L. */
+void
+delete_back_forw_dep (dep_link_t l)
+{
+ dep_node_t n = DEP_LINK_NODE (l);
+
+ gcc_assert (current_sched_info->flags & DO_SPECULATION);
+
+ if (true_dependency_cache != NULL)
+ {
+ dep_t dep = DEP_NODE_DEP (n);
+ int elem_luid = INSN_LUID (DEP_PRO (dep));
+ int insn_luid = INSN_LUID (DEP_CON (dep));
+
+ bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
+ bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
+ bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
+ bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
+ }
+
+ delete_forw_dep (DEP_NODE_FORW (n));
+ detach_dep_link (DEP_NODE_BACK (n));
+}
+
+/* Return the weakness of speculative type TYPE in the dep_status DS. */
+dw_t
+get_dep_weak (ds_t ds, ds_t type)
+{
+ ds = ds & type;
+ switch (type)
+ {
+ case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
+ case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
+ case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
+ case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
+ default: gcc_unreachable ();
+ }
+
+ gcc_assert (MIN_DEP_WEAK <= ds && ds <= MAX_DEP_WEAK);
+ return (dw_t) ds;
+}
+
+/* Return a dep_status that has the same parameters as DS, except that
+ speculative type TYPE will have weakness DW. */
+ds_t
+set_dep_weak (ds_t ds, ds_t type, dw_t dw)
+{
+ gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
+
+ ds &= ~type;
+ switch (type)
+ {
+ case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
+ case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
+ case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
+ case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
+ default: gcc_unreachable ();
+ }
+ return ds;
+}
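+
+/* The two functions above are inverses on the weakness field, e.g.
+ (illustrative):
+
+ ds_t ds = set_dep_weak (DEP_TRUE, BEGIN_DATA, w);
+ gcc_assert (get_dep_weak (ds, BEGIN_DATA) == w);
+
+ holds for any MIN_DEP_WEAK <= w <= MAX_DEP_WEAK. */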
+
+/* Return the join of two dep_statuses DS1 and DS2. */
+ds_t
+ds_merge (ds_t ds1, ds_t ds2)
+{
+ ds_t ds, t;
+
+ gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
+
+ ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
+
+ t = FIRST_SPEC_TYPE;
+ do
+ {
+ if ((ds1 & t) && !(ds2 & t))
+ ds |= ds1 & t;
+ else if (!(ds1 & t) && (ds2 & t))
+ ds |= ds2 & t;
+ else if ((ds1 & t) && (ds2 & t))
+ {
+ ds_t dw;
+
+ dw = ((ds_t) get_dep_weak (ds1, t)) * ((ds_t) get_dep_weak (ds2, t));
+ dw /= MAX_DEP_WEAK;
+ if (dw < MIN_DEP_WEAK)
+ dw = MIN_DEP_WEAK;
+
+ ds = set_dep_weak (ds, t, (dw_t) dw);
+ }
+
+ if (t == LAST_SPEC_TYPE)
+ break;
+ t <<= SPEC_TYPE_SHIFT;
+ }
+ while (1);
+
+ return ds;
+}
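+
+/* Merging sketch (illustrative): if DS1 and DS2 both carry BEGIN_DATA
+ with weaknesses W1 and W2, the merged status is data-speculative with
+ weakness
+
+ MAX (MIN_DEP_WEAK, W1 * W2 / MAX_DEP_WEAK)
+
+ i.e. weaknesses combine like probabilities scaled by MAX_DEP_WEAK,
+ while the dependence type bits are simply unioned. */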
+
+#ifdef INSN_SCHEDULING
+#ifdef ENABLE_CHECKING
+/* Verify that dependence type and status are consistent.
+ If RELAXED_P is true, then skip dep_weakness checks. */
+static void
+check_dep_status (enum reg_note dt, ds_t ds, bool relaxed_p)
+{
+ /* Check that dependence type contains the same bits as the status. */
+ if (dt == REG_DEP_TRUE)
+ gcc_assert (ds & DEP_TRUE);
+ else if (dt == REG_DEP_OUTPUT)
+ gcc_assert ((ds & DEP_OUTPUT)
+ && !(ds & DEP_TRUE));
+ else
+ gcc_assert ((dt == REG_DEP_ANTI)
+ && (ds & DEP_ANTI)
+ && !(ds & (DEP_OUTPUT | DEP_TRUE)));
+
+ /* HARD_DEP cannot appear in the dep_status of a link. */
+ gcc_assert (!(ds & HARD_DEP));
+
+ /* Check that dependence status is set correctly when speculation is not
+ supported. */
+ if (!(current_sched_info->flags & DO_SPECULATION))
+ gcc_assert (!(ds & SPECULATIVE));
+ else if (ds & SPECULATIVE)
+ {
+ if (!relaxed_p)
+ {
+ ds_t type = FIRST_SPEC_TYPE;
+
+ /* Check that dependence weakness is in proper range. */
+ do
+ {
+ if (ds & type)
+ get_dep_weak (ds, type);
+
+ if (type == LAST_SPEC_TYPE)
+ break;
+ type <<= SPEC_TYPE_SHIFT;
+ }
+ while (1);
+ }
+
+ if (ds & BEGIN_SPEC)
+ {
+ /* Only true dependence can be data speculative. */
+ if (ds & BEGIN_DATA)
+ gcc_assert (ds & DEP_TRUE);
+
+ /* Control dependencies in the insn scheduler are represented by
+ anti-dependencies, therefore only anti dependence can be
+ control speculative. */
+ if (ds & BEGIN_CONTROL)
+ gcc_assert (ds & DEP_ANTI);
+ }
+ else
+ {
+ /* Subsequent speculations should resolve true dependencies. */
+ gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
+ }
+
+ /* Check that true and anti dependencies can't have other speculative
+ statuses. */
+ if (ds & DEP_TRUE)
+ gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
+ /* An output dependence can't be speculative at all. */
+ gcc_assert (!(ds & DEP_OUTPUT));
+ if (ds & DEP_ANTI)
+ gcc_assert (ds & BEGIN_CONTROL);
+ }
+}
+#endif
+#endif
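+
+/* For instance (illustrative), a data-speculative true dependence has
+ DT == REG_DEP_TRUE and a DS containing DEP_TRUE plus a BEGIN_DATA
+ weakness, which passes the checks above, whereas DEP_OUTPUT combined
+ with any speculative bit does not. */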