/* Instruction scheduling pass. This file computes dependencies between
instructions.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+ 2011
Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
#include "system.h"
#include "coretypes.h"
#include "tm.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
-#include "toplev.h"
#include "recog.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
#include "ira.h"
+#include "target.h"
#ifdef INSN_SCHEDULING
#define CHECK (false)
#endif
+/* In deps->last_pending_memory_flush, marks a JUMP_INSN that was added
+   to the list on its own behalf rather than by flush_pending_lists, and
+   so stands just for itself and not for any other pending memory
+   reads/writes.  */
+#define NON_FLUSH_JUMP_KIND REG_DEP_ANTI
+#define NON_FLUSH_JUMP_P(x) (REG_NOTE_KIND (x) == NON_FLUSH_JUMP_KIND)
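+
+/* For example, when deps_analyze_insn appends a jump on its own behalf
+   (i.e. without flushing the pending lists), it does
+
+     deps->last_pending_memory_flush
+       = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
+     PUT_REG_NOTE_KIND (deps->last_pending_memory_flush,
+			NON_FLUSH_JUMP_KIND);
+
+   and list walkers in sched_analyze_2 and sched_analyze_insn test
+   NON_FLUSH_JUMP_P on each element.  */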
+
/* Holds current parameters for the dependency analyzer. */
struct sched_deps_info_def *sched_deps_info;
fprintf (stderr, "\n");
}
+/* Determine whether DEP is a dependency link of a non-debug insn on a
+ debug insn. */
+
+static inline bool
+depl_on_debug_p (dep_link_t dep)
+{
+ return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
+ && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
+}
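+
+/* Links for which this predicate holds are not counted in
+   DEPS_LIST_N_LINKS; see the attach/detach helpers below.  */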
+
/* Functions to operate with a single link from the dependencies lists -
dep_link_t. */
{
attach_dep_link (link, &DEPS_LIST_FIRST (l));
- ++DEPS_LIST_N_LINKS (l);
+ /* Don't count debug deps. */
+ if (!depl_on_debug_p (link))
+ ++DEPS_LIST_N_LINKS (l);
}
/* Detach dep_link L from the list. */
{
detach_dep_link (link);
- --DEPS_LIST_N_LINKS (list);
+ /* Don't count debug deps. */
+ if (!depl_on_debug_p (link))
+ --DEPS_LIST_N_LINKS (list);
}
/* Move link LINK from list FROM to list TO. */
static int deps_may_trap_p (const_rtx);
static void add_dependence_list (rtx, rtx, int, enum reg_note);
-static void add_dependence_list_and_free (struct deps *, rtx,
+static void add_dependence_list_and_free (struct deps_desc *, rtx,
rtx *, int, enum reg_note);
static void delete_all_dependences (rtx);
static void fixup_sched_groups (rtx);
-static void flush_pending_lists (struct deps *, rtx, int, int);
-static void sched_analyze_1 (struct deps *, rtx, rtx);
-static void sched_analyze_2 (struct deps *, rtx, rtx);
-static void sched_analyze_insn (struct deps *, rtx, rtx);
+static void flush_pending_lists (struct deps_desc *, rtx, int, int);
+static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
+static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
+static void sched_analyze_insn (struct deps_desc *, rtx, rtx);
static bool sched_has_condition_p (const_rtx);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
/* The following instructions, which depend on a speculatively scheduled
instruction, cannot be speculatively scheduled along. */
{
- if (may_trap_p (PATTERN (insn)))
- /* If instruction might trap, it cannot be speculatively scheduled.
+ if (may_trap_or_fault_p (PATTERN (insn)))
+ /* If instruction might fault, it cannot be speculatively scheduled.
For control speculation it's obvious why and for data speculation
it's because the insn might get wrong input if speculation
wasn't successful. */
}
/* Return true if INSN's lists defined by LIST_TYPES are all empty. */
+
bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
- return sd_lists_size (insn, list_types) == 0;
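+  /* sd_lists_size cannot be used here: debug deps are not counted in
+     DEPS_LIST_N_LINKS, so a total size of zero no longer implies that
+     the lists are empty.  Walk the lists instead.  */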
+ while (list_types != SD_LIST_NONE)
+ {
+ deps_list_t list;
+ bool resolved_p;
+
+ sd_next_list (insn, &list_types, &list, &resolved_p);
+ if (!deps_list_empty_p (list))
+ return false;
+ }
+
+ return true;
}
/* Initialize data for INSN. */
INSN_FORW_DEPS (insn) = create_deps_list ();
INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
- if (DEBUG_INSN_P (insn))
- DEBUG_INSN_SCHED_P (insn) = TRUE;
-
/* ??? It would be nice to allocate dependency caches here. */
}
{
/* ??? It would be nice to deallocate dependency caches here. */
- if (DEBUG_INSN_P (insn))
- {
- gcc_assert (DEBUG_INSN_SCHED_P (insn));
- DEBUG_INSN_SCHED_P (insn) = FALSE;
- }
-
free_deps_list (INSN_HARD_BACK_DEPS (insn));
INSN_HARD_BACK_DEPS (insn) = NULL;
&& anti_dependency_cache != NULL);
if (!(current_sched_info->flags & USE_DEPS_LIST))
- {
+ {
enum reg_note present_dep_type;
if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
return DEP_PRESENT;
}
else
- {
+ {
ds_t present_dep_types = 0;
-
+
if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
present_dep_types |= DEP_TRUE;
if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
break;
default:
- gcc_unreachable ();
+ gcc_unreachable ();
}
}
dw = estimate_dep_weak (mem1, mem2);
ds = set_dep_weak (ds, BEGIN_DATA, dw);
}
-
+
new_status = ds_merge (dep_status, ds);
}
}
gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
&& DEP_PRO (new_dep) != DEP_CON (new_dep));
-
+
#ifdef ENABLE_CHECKING
check_dep (new_dep, mem1 != NULL);
#endif
}
sd_add_dep (new_dep, resolved_p);
-
+
return DEP_CREATED;
}
rtx insn = DEP_CON (dep);
gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
- gcc_assert (!DEBUG_INSN_P (elem) || DEBUG_INSN_P (insn));
if ((current_sched_info->flags & DO_SPECULATION)
&& !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
}
}
-/* Similar, but free *LISTP at the same time, when the context
+/* Similar, but free *LISTP at the same time, when the context
is not readonly. */
static void
-add_dependence_list_and_free (struct deps *deps, rtx insn, rtx *listp,
+add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
int uncond, enum reg_note dep_type)
{
rtx list, next;
- if (deps->readonly)
+ /* We don't want to short-circuit dependencies involving debug
+ insns, because they may cause actual dependencies to be
+ disregarded. */
+ if (deps->readonly || DEBUG_INSN_P (insn))
{
add_dependence_list (insn, *listp, uncond, dep_type);
return;
}
}
-/* Remove all occurences of INSN from LIST. Return the number of
+/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */
static int
remove_from_dependence_list (rtx insn, rtx* listp)
{
int removed = 0;
-
+
while (*listp)
{
if (XEXP (*listp, 0) == insn)
removed++;
continue;
}
-
+
listp = &XEXP (*listp, 1);
}
-
+
return removed;
}
/* Same as above, but process two lists at once. */
-static int
+static int
remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
{
int removed = 0;
-
+
while (*listp)
{
if (XEXP (*listp, 0) == insn)
removed++;
continue;
}
-
+
listp = &XEXP (*listp, 1);
exprp = &XEXP (*exprp, 1);
}
-
+
return removed;
}
delete_all_dependences (insn);
- prev_nonnote = prev_nonnote_insn (insn);
- while (DEBUG_INSN_P (prev_nonnote))
- prev_nonnote = prev_nonnote_insn (prev_nonnote);
+ prev_nonnote = prev_nonnote_nondebug_insn (insn);
if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
&& ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
so that we can do memory aliasing on it. */
static void
-add_insn_mem_dependence (struct deps *deps, bool read_p,
+add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
rtx insn, rtx mem)
{
rtx *insn_list;
{
insn_list = &deps->pending_read_insns;
mem_list = &deps->pending_read_mems;
- deps->pending_read_list_length++;
+ if (!DEBUG_INSN_P (insn))
+ deps->pending_read_list_length++;
}
else
{
if (sched_deps_info->use_cselib)
{
mem = shallow_copy_rtx (mem);
- XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
+ XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0), GET_MODE (mem));
}
link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
*mem_list = link;
dependencies for a read operation, similarly with FOR_WRITE. */
static void
-flush_pending_lists (struct deps *deps, rtx insn, int for_read,
+flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
int for_write)
{
if (for_write)
{
- add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
+ add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1, REG_DEP_ANTI);
if (!deps->readonly)
{
add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
- add_dependence_list_and_free (deps, insn,
+ add_dependence_list_and_free (deps, insn,
&deps->last_pending_memory_flush, 1,
for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
if (!deps->readonly)
{
dep_def _dep, *dep = &_dep;
-
+
init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
current_sched_info->flags & USE_DEPS_LIST ? ds : -1);
maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
/* Set up insn register uses for INSN and dependency context DEPS. */
static void
-setup_insn_reg_uses (struct deps *deps, rtx insn)
+setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
{
unsigned i;
reg_set_iterator rsi;
use = create_insn_reg_use (i, insn);
use->next_regno_use = use;
reg_last = &deps->reg_last[i];
-
+
/* Create the cycle list of uses. */
for (list = reg_last->uses; list; list = XEXP (list, 1))
{
enum reg_class cl;
gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
- cl = sched_regno_cover_class[regno];
+ cl = sched_regno_pressure_class[regno];
if (cl != NO_REGS)
{
- incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
+ incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
if (clobber_p)
{
new_incr = reg_pressure_info[cl].clobber_increase + incr;
{
enum reg_class cl;
int new_incr, last = regno + nregs;
-
+
while (regno < last)
{
gcc_assert (regno < FIRST_PSEUDO_REGISTER);
if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
{
- cl = sched_regno_cover_class[regno];
+ cl = sched_regno_pressure_class[regno];
if (cl != NO_REGS)
{
if (clobber_p)
enum reg_class cl;
gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
- cl = sched_regno_cover_class[regno];
+ cl = sched_regno_pressure_class[regno];
if (cl != NO_REGS)
{
- incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
+ incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
reg_pressure_info[cl].change -= incr;
}
}
{
enum reg_class cl;
int last = regno + nregs;
-
+
while (regno < last)
{
gcc_assert (regno < FIRST_PSEUDO_REGISTER);
if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
{
- cl = sched_regno_cover_class[regno];
+ cl = sched_regno_pressure_class[regno];
if (cl != NO_REGS)
reg_pressure_info[cl].change -= 1;
}
}
/* Set up reg pressure info related to INSN. */
-static void
-setup_insn_reg_pressure_info (rtx insn)
+void
+init_insn_reg_pressure_info (rtx insn)
{
int i, len;
enum reg_class cl;
if (! INSN_P (insn))
return;
- for (i = 0; i < ira_reg_class_cover_size; i++)
+ for (i = 0; i < ira_pressure_classes_num; i++)
{
- cl = ira_reg_class_cover[i];
+ cl = ira_pressure_classes[i];
reg_pressure_info[cl].clobber_increase = 0;
reg_pressure_info[cl].set_increase = 0;
reg_pressure_info[cl].unused_set_increase = 0;
reg_pressure_info[cl].change = 0;
}
-
+
note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
-
+
note_stores (PATTERN (insn), mark_insn_reg_store, insn);
-
+
#ifdef AUTO_INC_DEC
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_INC)
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_DEAD)
mark_reg_death (XEXP (link, 0));
-
- len = sizeof (struct reg_pressure_data) * ira_reg_class_cover_size;
+
+ len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
pressure_info
= INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
- INSN_MAX_REG_PRESSURE (insn) = (int *) xmalloc (ira_reg_class_cover_size
- * sizeof (int));
- for (i = 0; i < ira_reg_class_cover_size; i++)
+ INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
+ * sizeof (int), 1);
+ for (i = 0; i < ira_pressure_classes_num; i++)
{
- cl = ira_reg_class_cover[i];
+ cl = ira_pressure_classes[i];
pressure_info[i].clobber_increase
= reg_pressure_info[cl].clobber_increase;
pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
at the most toplevel SET. */
static bool can_start_lhs_rhs_p;
-/* Extend reg info for the deps context DEPS given that
+/* Extend reg info for the deps context DEPS given that
we have just generated a register numbered REGNO. */
static void
-extend_deps_reg_info (struct deps *deps, int regno)
+extend_deps_reg_info (struct deps_desc *deps, int regno)
{
int max_regno = regno + 1;
if (max_regno > deps->max_reg)
{
- deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
+ deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
max_regno);
memset (&deps->reg_last[deps->max_reg],
- 0, (max_regno - deps->max_reg)
+ 0, (max_regno - deps->max_reg)
* sizeof (struct deps_reg));
deps->max_reg = max_regno;
}
CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
static void
-sched_analyze_reg (struct deps *deps, int regno, enum machine_mode mode,
+sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
enum rtx_code ref, rtx insn)
{
/* We could emit new pseudos in renaming. Extend the reg structures. */
destination of X, and reads of everything mentioned. */
static void
-sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
+sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
{
rtx dest = XEXP (x, 0);
enum rtx_code code = GET_CODE (x);
/* Treat all writes to a stack register as modifying the TOS. */
if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
{
- int nregs;
-
/* Avoid analyzing the same register twice. */
if (regno != FIRST_STACK_REG)
sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
- nregs = hard_regno_nregs[FIRST_STACK_REG][mode];
- while (--nregs >= 0)
- SET_HARD_REG_BIT (implicit_reg_pending_uses,
- FIRST_STACK_REG + nregs);
+ add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
+ FIRST_STACK_REG);
}
#endif
}
if (sched_deps_info->use_cselib)
{
+ enum machine_mode address_mode
+ = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));
+
t = shallow_copy_rtx (dest);
- cselib_lookup (XEXP (t, 0), Pmode, 1);
- XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
+ cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
+ GET_MODE (t), insn);
+ XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0), GET_MODE (t));
}
t = canon_rtx (t);
/* Analyze the uses of memory and registers in rtx X in INSN. */
static void
-sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
+sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
{
int i;
int j;
rtx pending, pending_mem;
rtx t = x;
- if (DEBUG_INSN_P (insn))
- {
- sched_analyze_2 (deps, XEXP (x, 0), insn);
- return;
- }
-
if (sched_deps_info->use_cselib)
{
- t = shallow_copy_rtx (t);
- cselib_lookup (XEXP (t, 0), Pmode, 1);
- XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
- }
- t = canon_rtx (t);
- pending = deps->pending_read_insns;
- pending_mem = deps->pending_read_mems;
- while (pending)
- {
- if (read_dependence (XEXP (pending_mem, 0), t)
- && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
- note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
- DEP_ANTI);
+ enum machine_mode address_mode
+ = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));
- pending = XEXP (pending, 1);
- pending_mem = XEXP (pending_mem, 1);
+ t = shallow_copy_rtx (t);
+ cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
+ GET_MODE (t), insn);
+ XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0), GET_MODE (t));
}
- pending = deps->pending_write_insns;
- pending_mem = deps->pending_write_mems;
- while (pending)
+ if (!DEBUG_INSN_P (insn))
{
- if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
- t, rtx_varies_p)
- && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
- note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
- sched_deps_info->generate_spec_deps
- ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
-
- pending = XEXP (pending, 1);
- pending_mem = XEXP (pending_mem, 1);
- }
+ t = canon_rtx (t);
+ pending = deps->pending_read_insns;
+ pending_mem = deps->pending_read_mems;
+ while (pending)
+ {
+ if (read_dependence (XEXP (pending_mem, 0), t)
+ && ! sched_insns_conditions_mutex_p (insn,
+ XEXP (pending, 0)))
+ note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
+ DEP_ANTI);
+
+ pending = XEXP (pending, 1);
+ pending_mem = XEXP (pending_mem, 1);
+ }
- for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- {
- if (! JUMP_P (XEXP (u, 0)))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- else if (deps_may_trap_p (x))
+ pending = deps->pending_write_insns;
+ pending_mem = deps->pending_write_mems;
+ while (pending)
{
- if ((sched_deps_info->generate_spec_deps)
- && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
- {
- ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
- MAX_DEP_WEAK);
+ if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
+ t, rtx_varies_p)
+ && ! sched_insns_conditions_mutex_p (insn,
+ XEXP (pending, 0)))
+ note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
+ sched_deps_info->generate_spec_deps
+ ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
+
+ pending = XEXP (pending, 1);
+ pending_mem = XEXP (pending_mem, 1);
+ }
- note_dep (XEXP (u, 0), ds);
- }
- else
+ for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
+ {
+ if (! NON_FLUSH_JUMP_P (u))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ else if (deps_may_trap_p (x))
+ {
+ if ((sched_deps_info->generate_spec_deps)
+ && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
+ {
+ ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
+ MAX_DEP_WEAK);
+
+ note_dep (XEXP (u, 0), ds);
+ }
+ else
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ }
}
}
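+
+      /* For a debug insn the pending-list walks above are skipped; the
+	 read is still recorded by add_insn_mem_dependence below, so
+	 writes analyzed later can still find it in the pending lists.  */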
if (!deps->readonly)
add_insn_mem_dependence (deps, true, insn, x);
- /* Take advantage of tail recursion here. */
sched_analyze_2 (deps, XEXP (x, 0), insn);
if (cslr_p && sched_deps_info->finish_rhs)
/* Analyze an INSN with pattern X to find all dependencies. */
static void
-sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
+sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
{
RTX_CODE code = GET_CODE (x);
rtx link;
extract_insn (insn);
preprocess_constraints ();
ira_implicitly_set_insn_hard_regs (&temp);
+ AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
}
can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
&& code == SET);
+ if (may_trap_p (x))
+  /* Avoid moving trapping instructions across function calls that might
+ not always return. */
+ add_dependence_list (insn, deps->last_function_call_may_noreturn,
+ 1, REG_DEP_ANTI);
+
if (code == COND_EXEC)
{
sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
if (JUMP_P (insn))
{
rtx next;
- next = next_nonnote_insn (insn);
- while (next && DEBUG_INSN_P (next))
- next = next_nonnote_insn (next);
+ next = next_nonnote_nondebug_insn (insn);
if (next && BARRIER_P (next))
reg_pending_barrier = MOVE_BARRIER;
else
if (sched_pressure_p)
{
setup_insn_reg_uses (deps, insn);
- setup_insn_reg_pressure_info (insn);
+ init_insn_reg_pressure_info (insn);
}
/* Add register dependencies for insn. */
REG_DEP_ANTI);
for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- if (! JUMP_P (XEXP (u, 0))
- || !sel_sched_p ())
+ if (! NON_FLUSH_JUMP_P (u) || !sel_sched_p ())
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
+
+ if (!deps->readonly)
+ reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
}
CLEAR_REG_SET (reg_pending_uses);
add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
-
+
if (!deps->readonly)
{
reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
reg_last->uses_length++;
}
}
-
+
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
{
add_dependence_list (insn, reg_last->implicit_sets, 0,
REG_DEP_ANTI);
add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
-
+
if (!deps->readonly)
{
reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
add_dependence_list (insn, reg_last->implicit_sets, 0,
REG_DEP_ANTI);
add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
-
+
if (!deps->readonly)
{
reg_last->clobbers
REG_DEP_ANTI);
add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
-
+
if (!deps->readonly)
{
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
REG_DEP_ANTI);
add_dependence_list_and_free
	    (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
-
+
if (!deps->readonly)
{
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
REG_DEP_ANTI);
add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
}
-
+
if (!deps->readonly)
{
reg_last->clobbers_length++;
EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
-
+
	  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
REG_DEP_OUTPUT);
add_dependence_list_and_free (deps, insn,
REG_DEP_OUTPUT);
	  add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
REG_DEP_ANTI);
-
+
if (!deps->readonly)
{
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
-
+
if (!deps->readonly)
reg_last->implicit_sets
= alloc_INSN_LIST (insn, reg_last->implicit_sets);
}
/* Flush pending lists on jumps, but not on speculative checks. */
- if (JUMP_P (insn) && !(sel_sched_p ()
+ if (JUMP_P (insn) && !(sel_sched_p ()
&& sel_insn_is_speculation_check (insn)))
flush_pending_lists (deps, insn, true, true);
-
+
if (!deps->readonly)
CLEAR_REG_SET (&deps->reg_conditional_sets);
reg_pending_barrier = NOT_A_BARRIER;
This insn must be a simple move of a hard reg to a pseudo or
vice-versa.
- We must avoid moving these insns for correctness on
- SMALL_REGISTER_CLASS machines, and for special registers like
+ We must avoid moving these insns for correctness on targets
+ with small register classes, and for special registers like
PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
hard regs for all targets. */
&& deps->in_post_call_group_p == post_call_initial)
deps->in_post_call_group_p = post_call;
- if (!sel_sched_p () || sched_emulate_haifa_p)
+ if (!sel_sched_p () || sched_emulate_haifa_p)
{
SCHED_GROUP_P (insn) = 1;
CANT_MOVE (insn) = 1;
{
sd_iterator_def sd_it;
dep_t dep;
-
+
for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
sd_iterator_cond (&sd_it, &dep);)
change_spec_dep_to_hard (sd_it);
}
}
+/* Return TRUE if INSN might not always return normally (e.g. call exit,
+ longjmp, loop forever, ...). */
+static bool
+call_may_noreturn_p (rtx insn)
+{
+ rtx call;
+
+ /* const or pure calls that aren't looping will always return. */
+ if (RTL_CONST_OR_PURE_CALL_P (insn)
+ && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
+ return false;
+
+ call = PATTERN (insn);
+ if (GET_CODE (call) == PARALLEL)
+ call = XVECEXP (call, 0, 0);
+ if (GET_CODE (call) == SET)
+ call = SET_SRC (call);
+ if (GET_CODE (call) == CALL
+ && MEM_P (XEXP (call, 0))
+ && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
+ {
+ rtx symbol = XEXP (XEXP (call, 0), 0);
+ if (SYMBOL_REF_DECL (symbol)
+ && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
+ {
+ if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
+ == BUILT_IN_NORMAL)
+ switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
+ {
+ case BUILT_IN_BCMP:
+ case BUILT_IN_BCOPY:
+ case BUILT_IN_BZERO:
+ case BUILT_IN_INDEX:
+ case BUILT_IN_MEMCHR:
+ case BUILT_IN_MEMCMP:
+ case BUILT_IN_MEMCPY:
+ case BUILT_IN_MEMMOVE:
+ case BUILT_IN_MEMPCPY:
+ case BUILT_IN_MEMSET:
+ case BUILT_IN_RINDEX:
+ case BUILT_IN_STPCPY:
+ case BUILT_IN_STPNCPY:
+ case BUILT_IN_STRCAT:
+ case BUILT_IN_STRCHR:
+ case BUILT_IN_STRCMP:
+ case BUILT_IN_STRCPY:
+ case BUILT_IN_STRCSPN:
+ case BUILT_IN_STRLEN:
+ case BUILT_IN_STRNCAT:
+ case BUILT_IN_STRNCMP:
+ case BUILT_IN_STRNCPY:
+ case BUILT_IN_STRPBRK:
+ case BUILT_IN_STRRCHR:
+ case BUILT_IN_STRSPN:
+ case BUILT_IN_STRSTR:
+ /* Assume certain string/memory builtins always return. */
+ return false;
+ default:
+ break;
+ }
+ }
+ }
+
+ /* For all other calls assume that they might not always return. */
+ return true;
+}
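+
+/* deps_analyze_insn (below) records the most recent call for which this
+   returns true in deps->last_function_call_may_noreturn, and
+   sched_analyze_insn (above) adds an anti-dependence on that call for
+   every trapping insn, so that trapping insns are never moved across a
+   call that might not return.  */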
+
/* Analyze INSN with DEPS as a context. */
void
-deps_analyze_insn (struct deps *deps, rtx insn)
+deps_analyze_insn (struct deps_desc *deps, rtx insn)
{
if (sched_deps_info->start_insn)
sched_deps_info->start_insn (insn);
if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
{
- /* Make each JUMP_INSN (but not a speculative check)
+ /* Make each JUMP_INSN (but not a speculative check)
a scheduling barrier for memory references. */
if (!deps->readonly
- && JUMP_P (insn)
- && !(sel_sched_p ()
+ && JUMP_P (insn)
+ && !(sel_sched_p ()
&& sel_insn_is_speculation_check (insn)))
{
/* Keep the list a reasonable size. */
if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
flush_pending_lists (deps, insn, true, true);
else
- deps->last_pending_memory_flush
- = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
+ {
+ deps->last_pending_memory_flush
+ = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
+	    /* Signal to sched_analyze_insn that this jump stands
+	       just for itself, not for any other pending memory
+	       reads/writes that flush_pending_lists had to flush.  */
+ PUT_REG_NOTE_KIND (deps->last_pending_memory_flush,
+ NON_FLUSH_JUMP_KIND);
+ }
}
sched_analyze_insn (deps, PATTERN (insn), insn);
/* For each insn which shouldn't cross a call, add a dependence
between that insn and this call insn. */
- add_dependence_list_and_free (deps, insn,
+ add_dependence_list_and_free (deps, insn,
&deps->sched_before_next_call, 1,
REG_DEP_ANTI);
/* Remember the last function call for limiting lifetimes. */
free_INSN_LIST_list (&deps->last_function_call);
deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
-
+
+ if (call_may_noreturn_p (insn))
+ {
+ /* Remember the last function call that might not always return
+ normally for limiting moves of trapping insns. */
+ free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
+ deps->last_function_call_may_noreturn
+ = alloc_INSN_LIST (insn, NULL_RTX);
+ }
+
/* Before reload, begin a post-call group, so as to keep the
lifetimes of hard registers correct. */
if (! reload_completed)
sched_deps_info->finish_insn ();
/* Fixup the dependencies in the sched group. */
- if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
+ if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
&& SCHED_GROUP_P (insn) && !sel_sched_p ())
fixup_sched_groups (insn);
}
/* Initialize DEPS for the new block beginning with HEAD. */
void
-deps_start_bb (struct deps *deps, rtx head)
+deps_start_bb (struct deps_desc *deps, rtx head)
{
gcc_assert (!deps->readonly);
hard registers correct. */
if (! reload_completed && !LABEL_P (head))
{
- rtx insn = prev_nonnote_insn (head);
+ rtx insn = prev_nonnote_nondebug_insn (head);
- while (insn && DEBUG_INSN_P (insn))
- insn = prev_nonnote_insn (insn);
if (insn && CALL_P (insn))
deps->in_post_call_group_p = post_call_initial;
}
/* Analyze every insn between HEAD and TAIL inclusive, creating backward
dependencies for each insn. */
void
-sched_analyze (struct deps *deps, rtx head, rtx tail)
+sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
{
rtx insn;
if (sched_deps_info->use_cselib)
- cselib_init (true);
+ cselib_init (CSELIB_RECORD_MEMORY);
deps_start_bb (deps, head);
}
\f
/* Initialize variables for region data dependence analysis.
- n_bbs is the number of region blocks. */
+ When LAZY_REG_LAST is true, do not allocate reg_last array
+ of struct deps_desc immediately. */
void
-init_deps (struct deps *deps)
+init_deps (struct deps_desc *deps, bool lazy_reg_last)
{
int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
deps->max_reg = max_reg;
- deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
+ if (lazy_reg_last)
+ deps->reg_last = NULL;
+ else
+ deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
INIT_REG_SET (&deps->reg_last_in_use);
INIT_REG_SET (&deps->reg_conditional_sets);
deps->pending_flush_length = 0;
deps->last_pending_memory_flush = 0;
deps->last_function_call = 0;
+ deps->last_function_call_may_noreturn = 0;
deps->sched_before_next_call = 0;
deps->in_post_call_group_p = not_post_call;
deps->last_debug_insn = 0;
deps->readonly = 0;
}
+/* Initialize only the reg_last field of DEPS, which was not allocated
+   before because DEPS was initialized lazily.  */
+void
+init_deps_reg_last (struct deps_desc *deps)
+{
+ gcc_assert (deps && deps->max_reg > 0);
+ gcc_assert (deps->reg_last == NULL);
+
+ deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
+}
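+
+/* A caller that creates many contexts can thus defer this allocation
+   until a context is actually used, e.g. (illustrative):
+
+     init_deps (deps, true);
+     ...
+     if (deps->reg_last == NULL)
+       init_deps_reg_last (deps);
+
+   avoiding the XCNEWVEC of max_reg entries for contexts that never
+   record register dependencies.  */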
+
+
/* Free insn lists found in DEPS. */
void
-free_deps (struct deps *deps)
+free_deps (struct deps_desc *deps)
{
unsigned i;
reg_set_iterator rsi;
+ /* We set max_reg to 0 when this context was already freed. */
+ if (deps->max_reg == 0)
+ {
+ gcc_assert (deps->reg_last == NULL);
+ return;
+ }
+ deps->max_reg = 0;
+
free_INSN_LIST_list (&deps->pending_read_insns);
free_EXPR_LIST_list (&deps->pending_read_mems);
free_INSN_LIST_list (&deps->pending_write_insns);
CLEAR_REG_SET (&deps->reg_last_in_use);
CLEAR_REG_SET (&deps->reg_conditional_sets);
- free (deps->reg_last);
+ /* As we initialize reg_last lazily, it is possible that we didn't allocate
+ it at all. */
+ if (deps->reg_last)
+ free (deps->reg_last);
deps->reg_last = NULL;
deps = NULL;
/* Remove INSN from dependence contexts DEPS. Caution: reg_conditional_sets
is not handled. */
void
-remove_from_deps (struct deps *deps, rtx insn)
+remove_from_deps (struct deps_desc *deps, rtx insn)
{
int removed;
unsigned i;
reg_set_iterator rsi;
-
+
removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
&deps->pending_read_mems);
- deps->pending_read_list_length -= removed;
+ if (!DEBUG_INSN_P (insn))
+ deps->pending_read_list_length -= removed;
removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
&deps->pending_write_mems);
deps->pending_write_list_length -= removed;
}
if (CALL_P (insn))
- remove_from_dependence_list (insn, &deps->last_function_call);
+ {
+ remove_from_dependence_list (insn, &deps->last_function_call);
+ remove_from_dependence_list (insn,
+ &deps->last_function_call_may_noreturn);
+ }
remove_from_dependence_list (insn, &deps->sched_before_next_call);
}
static void
init_deps_data_vector (void)
{
- int reserve = (sched_max_luid + 1
+ int reserve = (sched_max_luid + 1
- VEC_length (haifa_deps_insn_data_def, h_d_i_d));
- if (reserve > 0
+ if (reserve > 0
&& ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
3 * sched_max_luid / 2);
int insns_in_block = sched_max_luid / n_basic_blocks + 1;
init_deps_data_vector ();
-
- /* We use another caching mechanism for selective scheduling, so
+
+ /* We use another caching mechanism for selective scheduling, so
we don't use this one. */
if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
{
extend_dependency_caches (sched_max_luid, true);
}
- if (global_p)
+ if (global_p)
{
dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
/* Allocate lists for one block at a time. */
VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
cache_size = 0;
-
+
if (true_dependency_cache)
{
int i;
gcc_assert (insn == cur_insn);
else
cur_insn = insn;
-
+
note_dep (elem, ds);
if (!internal)
cur_insn = NULL;
fprintf (f, "}");
}
-void
+DEBUG_FUNCTION void
debug_ds (ds_t s)
{
dump_ds (stderr, s);
gcc_assert (ds & DEP_TRUE);
else if (dt == REG_DEP_OUTPUT)
gcc_assert ((ds & DEP_OUTPUT)
- && !(ds & DEP_TRUE));
- else
+ && !(ds & DEP_TRUE));
+ else
gcc_assert ((dt == REG_DEP_ANTI)
&& (ds & DEP_ANTI)
&& !(ds & (DEP_OUTPUT | DEP_TRUE)));
/* HARD_DEP can not appear in dep_status of a link. */
- gcc_assert (!(ds & HARD_DEP));
+ gcc_assert (!(ds & HARD_DEP));
/* Check that dependence status is set correctly when speculation is not
supported. */
/* Subsequent speculations should resolve true dependencies. */
gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
}
-
- /* Check that true and anti dependencies can't have other speculative
+
+ /* Check that true and anti dependencies can't have other speculative
statuses. */
if (ds & DEP_TRUE)
gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));