/* Instruction scheduling pass. This file computes dependencies between
instructions.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000 Free Software Foundation, Inc.
+ 1999, 2000, 2001 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
-This file is part of GNU CC.
+This file is part of GCC.
-GNU CC is free software; you can redistribute it and/or modify it
-under the terms of the GNU General Public License as published by the
-Free Software Foundation; either version 2, or (at your option) any
-later version.
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
-GNU CC is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING. If not, write to the Free
-the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+along with GCC; see the file COPYING.  If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA. */
\f
#include "config.h"
#include "toplev.h"
#include "recog.h"
#include "sched-int.h"
+#include "params.h"
+#include "cselib.h"
extern char *reg_known_equiv_p;
extern rtx *reg_known_value;
Each insn has associated bitmaps for its dependencies. Each bitmap
has enough entries to represent a dependency on any other insn in
- the insn chain. All bitmap for true dependencies cache is
- allocated then the rest two ones are also allocated. */
+ the insn chain. The caches are allocated together: whenever the
+ cache for true dependencies is allocated, the anti- and
+ output-dependency caches are allocated as well. */
static sbitmap *true_dependency_cache;
static sbitmap *anti_dependency_cache;
static sbitmap *output_dependency_cache;
/* To speed up checking consistency of formed forward insn
- dependencies we use the following cache. Another possible solution
- could be switching off checking duplication of insns in forward
- dependencies. */
+ dependencies we use the following cache. An alternative would be to
+ switch off the duplicate-insn check for forward dependencies
+ entirely. */
#ifdef ENABLE_CHECKING
static sbitmap *forward_dependency_cache;
#endif
+static int deps_may_trap_p PARAMS ((rtx));
static void remove_dependence PARAMS ((rtx, rtx));
static void set_sched_group_p PARAMS ((rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
static rtx group_leader PARAMS ((rtx));
+
+static rtx get_condition PARAMS ((rtx));
+static int conditions_mutex_p PARAMS ((rtx, rtx));
+\f
+/* Return nonzero if a load of the memory reference MEM can cause a trap. */
+
+static int
+deps_may_trap_p (mem)
+ rtx mem;
+{
+ rtx addr = XEXP (mem, 0);
+
+ if (REG_P (addr)
+ && REGNO (addr) >= FIRST_PSEUDO_REGISTER
+ && reg_known_value[REGNO (addr)])
+ addr = reg_known_value[REGNO (addr)];
+ return rtx_addr_can_trap_p (addr);
+}
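
The reg_known_value substitution is the interesting part: a load through a
bare pseudo proves nothing, but the pseudo's recorded equivalent value (a
fixed stack slot, say) often can be shown not to trap. Below is a minimal
standalone sketch of that substitute-then-test shape; the toy_* types and
table are invented stand-ins for rtx and reg_known_value, not GCC's API.

    #include <stdio.h>

    /* Toy stand-in for an address rtx: either an opaque pseudo or a
       base that is provably safe, such as frame pointer plus constant.  */
    struct toy_addr
    {
      int is_pseudo;    /* nonzero: contents unknown */
      int regno;        /* valid when is_pseudo */
      int known_safe;   /* nonzero: provably cannot trap */
    };

    #define TOY_NREGS 8
    static struct toy_addr *toy_known_value[TOY_NREGS];

    static int
    toy_deps_may_trap_p (struct toy_addr *addr)
    {
      /* As in deps_may_trap_p: substitute the recorded equivalent
         value for a pseudo before judging trapability.  */
      if (addr->is_pseudo && toy_known_value[addr->regno])
        addr = toy_known_value[addr->regno];
      return !addr->known_safe;
    }

    int
    main (void)
    {
      struct toy_addr frame_slot = { 0, -1, 1 };  /* fp + const: safe */
      struct toy_addr pseudo = { 1, 3, 0 };       /* (reg 3): unknown */

      printf ("%d\n", toy_deps_may_trap_p (&pseudo));  /* 1: may trap */
      toy_known_value[3] = &frame_slot;                /* REG_EQUIV info */
      printf ("%d\n", toy_deps_may_trap_p (&pseudo));  /* 0: proven safe */
      return 0;
    }
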
\f
/* Return the INSN_LIST containing INSN in LIST, or NULL
if LIST does not contain INSN. */
return 0;
}
\f
+/* Find the condition under which INSN is executed. */
+
+static rtx
+get_condition (insn)
+ rtx insn;
+{
+ rtx pat = PATTERN (insn);
+ rtx cond;
+
+ if (pat == 0)
+ return 0;
+ if (GET_CODE (pat) == COND_EXEC)
+ return COND_EXEC_TEST (pat);
+ if (GET_CODE (insn) != JUMP_INSN)
+ return 0;
+ if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
+ return 0;
+ if (GET_CODE (SET_SRC (pat)) != IF_THEN_ELSE)
+ return 0;
+ pat = SET_SRC (pat);
+ cond = XEXP (pat, 0);
+ /* The condition holds as written when the label is on the THEN arm;
+ with the label on the ELSE arm, the condition must be reversed. */
+ if (GET_CODE (XEXP (pat, 1)) == LABEL_REF
+ && XEXP (pat, 2) == pc_rtx)
+ return cond;
+ else if (GET_CODE (XEXP (pat, 2)) == LABEL_REF
+ && XEXP (pat, 1) == pc_rtx)
+ return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)),
+ GET_MODE (cond), XEXP (cond, 0), XEXP (cond, 1));
+ else
+ return 0;
+}
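
For a conditional branch the pattern is (set (pc) (if_then_else COND
(label_ref L) (pc))), whose condition is COND itself; with the arms
swapped, the branch is taken when the reverse of COND holds. A toy model
of just that arm/reversal logic, with stand-in types rather than GCC's rtx:

    #include <stdio.h>

    enum toy_code { TOY_EQ, TOY_NE, TOY_LT, TOY_GE };

    static enum toy_code
    toy_reverse (enum toy_code c)
    {
      switch (c)
        {
        case TOY_EQ: return TOY_NE;
        case TOY_NE: return TOY_EQ;
        case TOY_LT: return TOY_GE;
        default:     return TOY_LT;
        }
    }

    /* A conditional jump reduced to its essentials: the comparison
       code and which arm of the if_then_else holds the label.  */
    struct toy_jump
    {
      enum toy_code cond;
      int label_on_then_arm;
    };

    /* Condition under which the branch is taken.  */
    static enum toy_code
    toy_get_condition (const struct toy_jump *j)
    {
      return j->label_on_then_arm ? j->cond : toy_reverse (j->cond);
    }

    int
    main (void)
    {
      /* (set (pc) (if_then_else (lt ...) (pc) (label_ref L))):
         taken when the reversed condition, ge, holds.  */
      struct toy_jump j = { TOY_LT, 0 };
      printf ("%d\n", toy_get_condition (&j) == TOY_GE);  /* prints 1 */
      return 0;
    }
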
+
+/* Return nonzero if conditions COND1 and COND2 can never both be true. */
+
+static int
+conditions_mutex_p (cond1, cond2)
+ rtx cond1, cond2;
+{
+ if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
+ && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
+ && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
+ && XEXP (cond1, 0) == XEXP (cond2, 0)
+ && XEXP (cond1, 1) == XEXP (cond2, 1))
+ return 1;
+ return 0;
+}
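
The operand tests in conditions_mutex_p are pointer comparisons, which
suffices because the register rtxes involved are shared. The sketch below
mirrors that with plain pointers, so two toy conditions count as mutually
exclusive only when they literally share operands; all types and names are
stand-ins for illustration, not GCC's:

    #include <stdio.h>

    enum toy_code { TOY_EQ, TOY_NE };

    struct toy_cond
    {
      enum toy_code code;
      const void *op0, *op1;  /* shared identity, as rtxes are */
    };

    static enum toy_code
    toy_reverse (enum toy_code c)
    {
      return c == TOY_EQ ? TOY_NE : TOY_EQ;
    }

    static int
    toy_conditions_mutex_p (const struct toy_cond *a,
                            const struct toy_cond *b)
    {
      return a->code == toy_reverse (b->code)
             && a->op0 == b->op0
             && a->op1 == b->op1;
    }

    int
    main (void)
    {
      int r1, r2, r3;
      struct toy_cond c1 = { TOY_EQ, &r1, &r2 };
      struct toy_cond c2 = { TOY_NE, &r1, &r2 };
      struct toy_cond c3 = { TOY_NE, &r1, &r3 };  /* different operand */

      printf ("%d %d\n",
              toy_conditions_mutex_p (&c1, &c2),   /* 1: never both true */
              toy_conditions_mutex_p (&c1, &c3));  /* 0: unrelated */
      return 0;
    }
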
+\f
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the type
of dependence that this link represents. */
{
rtx link, next;
int present_p;
- enum reg_note present_dep_type;
+ rtx cond1, cond2;
/* Don't depend an insn on itself. */
if (insn == elem)
if (GET_CODE (elem) == NOTE)
return;
+ /* flow.c doesn't handle conditional lifetimes entirely correctly;
+ calls mess up the conditional lifetimes. */
+ /* ??? add_dependence is the wrong place to be eliding dependencies,
+ as that forgets that the condition expressions themselves may
+ be dependent. */
+ if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
+ {
+ cond1 = get_condition (insn);
+ cond2 = get_condition (elem);
+ if (cond1 && cond2
+ && conditions_mutex_p (cond1, cond2)
+ && !modified_in_p (cond1, elem))
+ return;
+ }
+
/* If elem is part of a sequence that must be scheduled together, then
make the dependence point to the last insn of the sequence.
When HAVE_cc0, it is possible for NOTEs to exist between users and
dramatically for some code. */
if (true_dependency_cache != NULL)
{
+ enum reg_note present_dep_type = 0;
+
if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
abort ();
if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
- present_dep_type = 0;
+ /* Do nothing (present_dep_type is already 0). */
+ ;
else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem)))
present_dep_type = REG_DEP_ANTI;
{
#ifdef INSN_SCHEDULING
/* Clear corresponding cache entry because type of the link
- may be changed. */
+ may be changed. */
if (true_dependency_cache != NULL)
{
if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
#ifdef INSN_SCHEDULING
/* If we are adding a dependency to INSN's LOG_LINKs, then
- note that in the bitmap caches of dependency information. */
+ note that in the bitmap caches of dependency information. */
if (true_dependency_cache != NULL)
{
if ((int)REG_NOTE_KIND (link) == 0)
#ifdef INSN_SCHEDULING
/* If we are adding a dependency to INSN's LOG_LINKs, then note that
- in the bitmap caches of dependency information. */
+ in the bitmap caches of dependency information. */
if (true_dependency_cache != NULL)
{
if ((int)dep_type == 0)
link = alloc_INSN_LIST (insn, *insn_list);
*insn_list = link;
+ if (current_sched_info->use_cselib)
+ {
+ mem = shallow_copy_rtx (mem);
+ XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
+ }
link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
*mem_list = link;
free_INSN_LIST_list (&deps->last_pending_memory_flush);
deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
+ deps->pending_flush_length = 1;
}
\f
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
if (dest == 0)
return;
- if (GET_CODE (dest) == PARALLEL
- && GET_MODE (dest) == BLKmode)
+ if (GET_CODE (dest) == PARALLEL)
{
register int i;
+
for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
- sched_analyze_1 (deps, XVECEXP (dest, 0, i), insn);
+ if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
+ sched_analyze_1 (deps,
+ gen_rtx_CLOBBER (VOIDmode,
+ XEXP (XVECEXP (dest, 0, i), 0)),
+ insn);
+
if (GET_CODE (x) == SET)
sched_analyze_2 (deps, SET_SRC (x), insn);
return;
int r = regno + i;
rtx u;
- for (u = deps->reg_last_uses[r]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[r].uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- for (u = deps->reg_last_sets[r]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
/* Clobbers need not be ordered with respect to one another, but
a set must be ordered with respect to a pending clobber. */
if (code == SET)
{
- free_INSN_LIST_list (&deps->reg_last_uses[r]);
- for (u = deps->reg_last_clobbers[r]; u; u = XEXP (u, 1))
+ if (GET_CODE (PATTERN (insn)) != COND_EXEC)
+ free_INSN_LIST_list (&deps->reg_last[r].uses);
+ for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
SET_REGNO_REG_SET (reg_pending_sets, r);
}
SET_REGNO_REG_SET (reg_pending_clobbers, r);
/* Function calls clobber all call_used regs. */
- if (global_regs[r] || (code == SET && call_used_regs[r]))
+ if (global_regs[r]
+ || (code == SET
+ && TEST_HARD_REG_BIT (regs_invalidated_by_call, r)))
for (u = deps->last_function_call; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
}
}
+ /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
+ it does not reload. Ignore these as they have served their
+ purpose already. */
+ else if (regno >= deps->max_reg)
+ {
+ if (GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER)
+ abort ();
+ }
else
{
rtx u;
- for (u = deps->reg_last_uses[regno]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[regno].uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- for (u = deps->reg_last_sets[regno]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
if (code == SET)
{
- free_INSN_LIST_list (&deps->reg_last_uses[regno]);
- for (u = deps->reg_last_clobbers[regno]; u; u = XEXP (u, 1))
+ if (GET_CODE (PATTERN (insn)) != COND_EXEC)
+ free_INSN_LIST_list (&deps->reg_last[regno].uses);
+ for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
SET_REGNO_REG_SET (reg_pending_sets, regno);
}
else if (GET_CODE (dest) == MEM)
{
/* Writing memory. */
+ rtx t = dest;
- if (deps->pending_lists_length > 32)
+ if (current_sched_info->use_cselib)
+ {
+ t = shallow_copy_rtx (dest);
+ cselib_lookup (XEXP (t, 0), Pmode, 1);
+ XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
+ }
+
+ if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
{
/* Flush all pending reads and writes to prevent the pending lists
from getting any larger. Insn scheduling runs too slowly when
- these lists get long. The number 32 was chosen because it
- seems like a reasonable number. When compiling GCC with itself,
+ these lists get long. When compiling GCC with itself,
this flush occurs 8 times for sparc, and 10 times for m88k using
- the number 32. */
+ the default value of 32. */
flush_pending_lists (deps, insn, 0);
}
else
pending_mem = deps->pending_read_mems;
while (pending)
{
- if (anti_dependence (XEXP (pending_mem, 0), dest))
+ if (anti_dependence (XEXP (pending_mem, 0), t))
add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
pending = XEXP (pending, 1);
pending_mem = deps->pending_write_mems;
while (pending)
{
- if (output_dependence (XEXP (pending_mem, 0), dest))
+ if (output_dependence (XEXP (pending_mem, 0), t))
add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
pending = XEXP (pending, 1);
while (--i >= 0)
{
int r = regno + i;
- deps->reg_last_uses[r]
- = alloc_INSN_LIST (insn, deps->reg_last_uses[r]);
+ deps->reg_last[r].uses
+ = alloc_INSN_LIST (insn, deps->reg_last[r].uses);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, r);
- for (u = deps->reg_last_sets[r]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
/* ??? This should never happen. */
- for (u = deps->reg_last_clobbers[r]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
if (call_used_regs[r] || global_regs[r])
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
}
}
+ /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
+ it does not reload. Ignore these as they have served their
+ purpose already. */
+ else if (regno >= deps->max_reg)
+ {
+ if (GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER)
+ abort ();
+ }
else
{
- deps->reg_last_uses[regno]
- = alloc_INSN_LIST (insn, deps->reg_last_uses[regno]);
+ deps->reg_last[regno].uses
+ = alloc_INSN_LIST (insn, deps->reg_last[regno].uses);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, regno);
- for (u = deps->reg_last_sets[regno]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
/* ??? This should never happen. */
- for (u = deps->reg_last_clobbers[regno]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
/* Pseudos that are REG_EQUIV to something may be replaced
/* Reading memory. */
rtx u;
rtx pending, pending_mem;
+ rtx t = x;
+ if (current_sched_info->use_cselib)
+ {
+ t = shallow_copy_rtx (t);
+ cselib_lookup (XEXP (t, 0), Pmode, 1);
+ XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
+ }
pending = deps->pending_read_insns;
pending_mem = deps->pending_read_mems;
while (pending)
{
- if (read_dependence (XEXP (pending_mem, 0), x))
+ if (read_dependence (XEXP (pending_mem, 0), t))
add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
pending = XEXP (pending, 1);
while (pending)
{
if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
- x, rtx_varies_p))
+ t, rtx_varies_p))
add_dependence (insn, XEXP (pending, 0), 0);
pending = XEXP (pending, 1);
}
for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
+ || deps_may_trap_p (x))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
/* Always add these dependencies to pending_reads, since
this insn may be followed by a write. */
pseudo-regs because it might give an incorrectly rounded result. */
if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
{
- int max_reg = max_reg_num ();
- for (i = 0; i < max_reg; i++)
+ for (i = 0; i < deps->max_reg; i++)
{
- for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- free_INSN_LIST_list (&deps->reg_last_uses[i]);
+ struct deps_reg *reg_last = &deps->reg_last[i];
- for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->uses; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
-
- for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
+
+ if (GET_CODE (PATTERN (insn)) != COND_EXEC)
+ free_INSN_LIST_list (&reg_last->uses);
}
reg_pending_sets_all = 1;
rtx loop_notes;
{
register RTX_CODE code = GET_CODE (x);
+ int schedule_barrier_found = 0;
rtx link;
- int maxreg = max_reg_num ();
int i;
if (code == COND_EXEC)
/* Mark registers CLOBBERED or used by called function. */
if (GET_CODE (insn) == CALL_INSN)
- for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
- {
- if (GET_CODE (XEXP (link, 0)) == CLOBBER)
- sched_analyze_1 (deps, XEXP (link, 0), insn);
- else
- sched_analyze_2 (deps, XEXP (link, 0), insn);
- }
+ {
+ for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
+ {
+ if (GET_CODE (XEXP (link, 0)) == CLOBBER)
+ sched_analyze_1 (deps, XEXP (link, 0), insn);
+ else
+ sched_analyze_2 (deps, XEXP (link, 0), insn);
+ }
+ if (find_reg_note (insn, REG_SETJMP, NULL))
+ schedule_barrier_found = 1;
+ }
+
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ rtx next;
+ next = next_nonnote_insn (insn);
+ if (next && GET_CODE (next) == BARRIER)
+ schedule_barrier_found = 1;
+ else
+ {
+ rtx pending, pending_mem, u;
+ regset_head tmp;
+ INIT_REG_SET (&tmp);
+
+ (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
+ EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ for (u = reg_last->sets; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
+ });
+
+ CLEAR_REG_SET (&tmp);
+
+ /* All memory writes and volatile reads must happen before the
+ jump. Non-volatile reads need no explicit ordering here: they
+ matter only when their results are live across the jump, and the
+ register dependencies added above already enforce that. */
+
+ pending = deps->pending_write_insns;
+ pending_mem = deps->pending_write_mems;
+ while (pending)
+ {
+ add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
+ pending = XEXP (pending, 1);
+ pending_mem = XEXP (pending_mem, 1);
+ }
+
+ pending = deps->pending_read_insns;
+ pending_mem = deps->pending_read_mems;
+ while (pending)
+ {
+ if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
+ add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
+ pending = XEXP (pending, 1);
+ pending_mem = XEXP (pending_mem, 1);
+ }
+
+ for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ }
+ }
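
The unconditional ordering of pending writes ahead of the jump is the
conservative case of a simple hazard: a store that sinks below a
block-exiting branch never executes on the taken path. A short plain-C
illustration of that hazard, nothing scheduler-specific:

    #include <stdio.h>

    static int mem;

    static int
    f (int take_branch)
    {
      mem = 42;         /* pending store: must stay above the branch */
      if (take_branch)
        return -1;      /* block exit: a sunken store would be lost */
      return 0;
    }

    int
    main (void)
    {
      f (1);
      printf ("%d\n", mem);  /* prints 42 only because the store
                                preceded the branch */
      return 0;
    }
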
/* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
block, then we must be sure that no instructions are scheduled across it.
Otherwise, the reg_n_refs info (which depends on loop_depth) would
become incorrect. */
-
if (loop_notes)
{
- int max_reg = max_reg_num ();
- int schedule_barrier_found = 0;
rtx link;
/* Update loop_notes with any notes from this insn. Also determine
if any of the notes on the list correspond to instruction scheduling
- barriers (loop, eh & setjmp notes, but not range notes. */
+ barriers (loop, eh & setjmp notes, but not range notes). */
link = loop_notes;
while (XEXP (link, 1))
{
if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
|| INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
|| INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
- || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END
- || INTVAL (XEXP (link, 0)) == NOTE_INSN_SETJMP)
+ || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
schedule_barrier_found = 1;
link = XEXP (link, 1);
}
XEXP (link, 1) = REG_NOTES (insn);
REG_NOTES (insn) = loop_notes;
+ }
- /* Add dependencies if a scheduling barrier was found. */
- if (schedule_barrier_found)
- {
- for (i = 0; i < max_reg; i++)
- {
- rtx u;
- for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- free_INSN_LIST_list (&deps->reg_last_uses[i]);
+ /* If this instruction can throw an exception, then moving it changes
+ where block boundaries fall. This is mighty confusing elsewhere.
+ Therefore, prevent such an instruction from being moved. */
+ if (flag_non_call_exceptions && can_throw_internal (insn))
+ schedule_barrier_found = 1;
- for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), 0);
+ /* Add dependencies if a scheduling barrier was found. */
+ if (schedule_barrier_found)
+ {
+ rtx u;
- for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), 0);
- }
- reg_pending_sets_all = 1;
+ for (i = 0; i < deps->max_reg; i++)
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
- flush_pending_lists (deps, insn, 0);
+ for (u = reg_last->uses; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ for (u = reg_last->sets; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), 0);
+ for (u = reg_last->clobbers; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), 0);
+
+ if (GET_CODE (PATTERN (insn)) != COND_EXEC)
+ free_INSN_LIST_list (&reg_last->uses);
}
+ flush_pending_lists (deps, insn, 0);
+ reg_pending_sets_all = 1;
}
- /* Accumulate clobbers until the next set so that it will be output dependent
- on all of them. At the next set we can clear the clobber list, since
- subsequent sets will be output dependent on it. */
- EXECUTE_IF_SET_IN_REG_SET
- (reg_pending_sets, 0, i,
- {
- free_INSN_LIST_list (&deps->reg_last_sets[i]);
- free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
- deps->reg_last_sets[i] = alloc_INSN_LIST (insn, NULL_RTX);
- });
- EXECUTE_IF_SET_IN_REG_SET
- (reg_pending_clobbers, 0, i,
- {
- deps->reg_last_clobbers[i]
- = alloc_INSN_LIST (insn, deps->reg_last_clobbers[i]);
- });
- CLEAR_REG_SET (reg_pending_sets);
- CLEAR_REG_SET (reg_pending_clobbers);
-
+ /* Accumulate clobbers until the next set so that it will be output
+ dependent on all of them. At the next set we can clear the clobber
+ list, since subsequent sets will be output dependent on it. */
if (reg_pending_sets_all)
{
- for (i = 0; i < maxreg; i++)
+ reg_pending_sets_all = 0;
+ for (i = 0; i < deps->max_reg; i++)
{
- free_INSN_LIST_list (&deps->reg_last_sets[i]);
- free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
- deps->reg_last_sets[i] = alloc_INSN_LIST (insn, NULL_RTX);
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ if (GET_CODE (PATTERN (insn)) != COND_EXEC)
+ {
+ free_INSN_LIST_list (&reg_last->sets);
+ free_INSN_LIST_list (&reg_last->clobbers);
+ }
+ reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
}
-
- reg_pending_sets_all = 0;
}
+ else
+ {
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ if (GET_CODE (PATTERN (insn)) != COND_EXEC)
+ {
+ free_INSN_LIST_list (&reg_last->sets);
+ free_INSN_LIST_list (&reg_last->clobbers);
+ }
+ reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
+ });
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
+ });
+ }
+ CLEAR_REG_SET (reg_pending_sets);
+ CLEAR_REG_SET (reg_pending_clobbers);
/* If a post-call group is still open, see if it should remain so.
This insn must be a simple move of a hard reg to a pseudo or
register rtx u;
rtx loop_notes = 0;
+ if (current_sched_info->use_cselib)
+ cselib_init ();
+
for (insn = head;; insn = NEXT_INSN (insn))
{
if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
/* Make each JUMP_INSN a scheduling barrier for memory
references. */
if (GET_CODE (insn) == JUMP_INSN)
- deps->last_pending_memory_flush
- = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
+ {
+ /* Keep the list a reasonable size. */
+ if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
+ flush_pending_lists (deps, insn, 0);
+ else
+ deps->last_pending_memory_flush
+ = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
+ }
sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
loop_notes = 0;
}
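
This cap, like the one in sched_analyze_1, bounds what is otherwise
quadratic work: each new memory reference is tested against every entry
still pending. A back-of-the-envelope cost model, assuming the default
limit of 32 (the real bound is MAX_PENDING_LIST_LENGTH, supplied via
params.h):

    #include <stdio.h>

    int
    main (void)
    {
      long limit = 32, n_mems = 10000;
      long pending = 0, checks = 0;

      for (long i = 0; i < n_mems; i++)
        {
          checks += pending;  /* compare against each pending entry */
          if (++pending > limit)
            pending = 0;      /* flush_pending_lists */
        }
      /* Roughly n_mems * limit / 2 comparisons; without the flush it
         would approach n_mems * n_mems / 2.  */
      printf ("%ld\n", checks);
      return 0;
    }
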
past a void call (i.e. it does not explicitly set the hard
return reg). */
- /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
+ /* If this call has a REG_SETJMP note, then assume that
all registers, not just hard registers, may be clobbered by this
call. */
/* Insn, being a CALL_INSN, magically depends on
`last_function_call' already. */
- if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
- && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
+ if (find_reg_note (insn, REG_SETJMP, NULL))
{
- int max_reg = max_reg_num ();
- for (i = 0; i < max_reg; i++)
+ for (i = 0; i < deps->max_reg; i++)
{
- for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
+ struct deps_reg *reg_last = &deps->reg_last[i];
+
+ for (u = reg_last->uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- free_INSN_LIST_list (&deps->reg_last_uses[i]);
-
- for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
-
- for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
+
+ free_INSN_LIST_list (®_last->uses);
}
reg_pending_sets_all = 1;
-
- /* Add a pair of REG_SAVE_NOTEs which we will later
- convert back into a NOTE_INSN_SETJMP note. See
- reemit_notes for why we use a pair of NOTEs. */
- REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
- GEN_INT (0),
- REG_NOTES (insn));
- REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
- GEN_INT (NOTE_INSN_SETJMP),
- REG_NOTES (insn));
}
else
{
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (call_used_regs[i] || global_regs[i])
{
- for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[i].uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
-
- for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[i].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
SET_REGNO_REG_SET (reg_pending_clobbers, i);
all pending reads and writes, and start new dependencies starting
from here. But only flush writes for constant calls (which may
be passed a pointer to something we haven't written yet). */
- flush_pending_lists (deps, insn, CONST_CALL_P (insn));
+ flush_pending_lists (deps, insn, CONST_OR_PURE_CALL_P (insn));
/* Depend this function call (actually, the user of this
function call) on all hard register clobberage. */
&& (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
|| NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
|| NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
- || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
- || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
- && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END))
{
rtx rtx_region;
loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
GEN_INT (NOTE_LINE_NUMBER (insn)),
loop_notes);
- CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
+ CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn);
}
+ if (current_sched_info->use_cselib)
+ cselib_process_insn (insn);
if (insn == tail)
- return;
+ {
+ if (current_sched_info->use_cselib)
+ cselib_finish ();
+ return;
+ }
}
abort ();
}
init_deps (deps)
struct deps *deps;
{
- int maxreg = max_reg_num ();
- deps->reg_last_uses = (rtx *) xcalloc (maxreg, sizeof (rtx));
- deps->reg_last_sets = (rtx *) xcalloc (maxreg, sizeof (rtx));
- deps->reg_last_clobbers = (rtx *) xcalloc (maxreg, sizeof (rtx));
+ int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
+
+ deps->max_reg = max_reg;
+ deps->reg_last = (struct deps_reg *)
+ xcalloc (max_reg, sizeof (struct deps_reg));
+ INIT_REG_SET (&deps->reg_last_in_use);
deps->pending_read_insns = 0;
deps->pending_read_mems = 0;
deps->pending_write_insns = 0;
deps->pending_write_mems = 0;
deps->pending_lists_length = 0;
+ deps->pending_flush_length = 0;
deps->last_pending_memory_flush = 0;
deps->last_function_call = 0;
deps->in_post_call_group_p = 0;
free_deps (deps)
struct deps *deps;
{
- int max_reg = max_reg_num ();
int i;
- /* Note this loop is executed max_reg * nr_regions times. It's first
- implementation accounted for over 90% of the calls to free_INSN_LIST_list.
- The list was empty for the vast majority of those calls. On the PA, not
- calling free_INSN_LIST_list in those cases improves -O2 compile times by
- 3-5% on average. */
- for (i = 0; i < max_reg; ++i)
+ /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
+ times. For a test case with 42000 regs and 8000 small basic blocks,
+ this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
+ EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
{
- if (deps->reg_last_clobbers[i])
- free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
- if (deps->reg_last_sets[i])
- free_INSN_LIST_list (&deps->reg_last_sets[i]);
- if (deps->reg_last_uses[i])
- free_INSN_LIST_list (&deps->reg_last_uses[i]);
- }
- free (deps->reg_last_clobbers);
- free (deps->reg_last_sets);
- free (deps->reg_last_uses);
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ free_INSN_LIST_list (&reg_last->uses);
+ free_INSN_LIST_list (&reg_last->sets);
+ free_INSN_LIST_list (&reg_last->clobbers);
+ });
+ CLEAR_REG_SET (&deps->reg_last_in_use);
+
+ free (deps->reg_last);
+ deps->reg_last = NULL;
}
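
The regset-driven loop is the fix the comment describes: cleanup now
visits only the registers recorded in reg_last_in_use rather than all
max_reg entries. A standalone sketch of why bitmap-guided iteration is
cheap, with a byte-granular bitset standing in for the regset and its
word-skipping iterator:

    #include <stdio.h>

    #define TOY_MAX_REG 42000
    static unsigned char toy_in_use[TOY_MAX_REG / 8 + 1];

    static void
    toy_mark (int r)
    {
      toy_in_use[r / 8] |= (unsigned char) (1 << (r % 8));
    }

    int
    main (void)
    {
      int visits = 0;
      toy_mark (7);
      toy_mark (129);
      toy_mark (41999);

      /* Like EXECUTE_IF_SET_IN_REG_SET: whole empty words are skipped,
         so the per-register work runs only for the three live entries
         instead of all 42000.  */
      for (unsigned w = 0; w < sizeof toy_in_use; w++)
        {
          if (toy_in_use[w] == 0)
            continue;
          for (int b = 0; b < 8; b++)
            if (toy_in_use[w] & (1 << b))
              visits++;
        }
      printf ("%d\n", visits);  /* prints 3 */
      return 0;
    }
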
/* If it is profitable to use them, initialize caches for tracking
{
if (true_dependency_cache)
{
- free (true_dependency_cache);
+ sbitmap_vector_free (true_dependency_cache);
true_dependency_cache = NULL;
- free (anti_dependency_cache);
+ sbitmap_vector_free (anti_dependency_cache);
anti_dependency_cache = NULL;
- free (output_dependency_cache);
+ sbitmap_vector_free (output_dependency_cache);
output_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
- free (forward_dependency_cache);
+ sbitmap_vector_free (forward_dependency_cache);
forward_dependency_cache = NULL;
#endif
}