/* Instruction scheduling pass. This file computes dependencies between
instructions.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
+#include "df.h"
extern char *reg_known_equiv_p;
extern rtx *reg_known_value;
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
-static bool reg_pending_barrier;
+
+/* The following enumeration values tell us what dependencies we
+ should use to implement the barrier. We use true-dependencies for
+ TRUE_BARRIER and anti-dependencies for MOVE_BARRIER. */
+enum reg_pending_barrier_mode
+{
+ NOT_A_BARRIER = 0,
+ MOVE_BARRIER,
+ TRUE_BARRIER
+};
+
+static enum reg_pending_barrier_mode reg_pending_barrier;
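For illustration (editorial sketch, not part of the patch): the mode
selects the reg-note kind used when flushing the last-register lists.
In this era of the tree a true dependence is represented by reg-note
kind 0, so a hypothetical helper (barrier_dep_kind is an invented
name) would read:

static enum reg_note
barrier_dep_kind (enum reg_pending_barrier_mode mode)
{
  /* TRUE_BARRIER wants true dependencies (kind 0); MOVE_BARRIER
     wants anti-dependencies.  */
  return mode == TRUE_BARRIER ? (enum reg_note) 0 : REG_DEP_ANTI;
}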
/* To speed up the test for duplicate dependency links we keep a
record of dependencies created by add_dependence when the average
basic block is very large. Each insn has an associated bitmap per
dependence kind, with enough entries to represent a dependency on
any other insn in the insn chain. If the bitmap for the
true-dependency cache is allocated, the other two caches are
allocated as well. */
-static sbitmap *true_dependency_cache;
-static sbitmap *anti_dependency_cache;
-static sbitmap *output_dependency_cache;
+static bitmap_head *true_dependency_cache;
+static bitmap_head *anti_dependency_cache;
+static bitmap_head *output_dependency_cache;
+/* Number of luids covered by the dependency caches above. */
+static int cache_size;
/* To speed up consistency checking of the forward insn dependencies
we build, we use the following cache. An alternative would be to
switch off the check for duplicate insns in forward
dependencies. */
#ifdef ENABLE_CHECKING
-static sbitmap *forward_dependency_cache;
+static bitmap_head *forward_dependency_cache;
#endif
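A minimal sketch of the cache discipline the conversion enables
(editorial; it assumes GCC's bitmap.h API, and check_and_record_dep is
an invented name): each cache is an array of bitmap_head indexed by
the LUID of the depending insn, and each bit position is the LUID of
the insn depended on.

static int
check_and_record_dep (bitmap_head *cache, rtx insn, rtx elem)
{
  /* Test-then-set, mirroring what add_dependence does below; return
     nonzero only when the dependence was not already cached.  */
  if (bitmap_bit_p (&cache[INSN_LUID (insn)], INSN_LUID (elem)))
    return 0;
  bitmap_set_bit (&cache[INSN_LUID (insn)], INSN_LUID (elem));
  return 1;
}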
-static int deps_may_trap_p PARAMS ((rtx));
-static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note));
-static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note));
-static void set_sched_group_p PARAMS ((rtx));
+static int deps_may_trap_p (rtx);
+static void add_dependence_list (rtx, rtx, enum reg_note);
+static void add_dependence_list_and_free (rtx, rtx *, enum reg_note);
+static void set_sched_group_p (rtx);
-static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int));
-static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
-static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
-static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
+static void flush_pending_lists (struct deps *, rtx, int, int);
+static void sched_analyze_1 (struct deps *, rtx, rtx);
+static void sched_analyze_2 (struct deps *, rtx, rtx);
+static void sched_analyze_insn (struct deps *, rtx, rtx, rtx);
-static rtx get_condition PARAMS ((rtx));
-static int conditions_mutex_p PARAMS ((rtx, rtx));
+static rtx get_condition (rtx);
+static int conditions_mutex_p (rtx, rtx);
\f
/* Return nonzero if a load of the memory reference MEM can cause a trap. */
static int
-deps_may_trap_p (mem)
- rtx mem;
+deps_may_trap_p (rtx mem)
{
rtx addr = XEXP (mem, 0);
if LIST does not contain INSN. */
rtx
-find_insn_list (insn, list)
- rtx insn;
- rtx list;
+find_insn_list (rtx insn, rtx list)
{
while (list)
{
/* Find the condition under which INSN is executed. */
static rtx
-get_condition (insn)
- rtx insn;
+get_condition (rtx insn)
{
rtx pat = PATTERN (insn);
rtx cond;
/* Return nonzero if conditions COND1 and COND2 can never be both true. */
static int
-conditions_mutex_p (cond1, cond2)
- rtx cond1, cond2;
+conditions_mutex_p (rtx cond1, rtx cond2)
{
if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
&& GET_RTX_CLASS (GET_CODE (cond2)) == '<'
}
\f
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
- LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the type
- of dependence that this link represents. */
+ LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the
+ type of dependence that this link represents. The function returns
+ nonzero if a new entry has been added to insn's LOG_LINK. */
-void
-add_dependence (insn, elem, dep_type)
- rtx insn;
- rtx elem;
- enum reg_note dep_type;
+int
+add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
{
rtx link;
int present_p;
/* Don't depend an insn on itself. */
if (insn == elem)
- return;
+ return 0;
/* We can get a dependency on deleted insns due to optimizations in
the register allocation and reloading or due to splitting. Any
such dependency is useless and can be ignored. */
if (GET_CODE (elem) == NOTE)
- return;
+ return 0;
/* flow.c doesn't handle conditional lifetimes entirely correctly;
calls mess up the conditional lifetimes. */
/* Make sure second instruction doesn't affect condition of first
instruction if switched. */
&& !modified_in_p (cond2, insn))
- return;
+ return 0;
}
present_p = 1;
elem is a CALL is still required. */
if (GET_CODE (insn) == CALL_INSN
&& (INSN_BB (elem) != INSN_BB (insn)))
- return;
+ return 0;
#endif
/* If we already have a dependency for ELEM, then we do not need to
if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
abort ();
- if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
+ if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem)))
/* Do nothing (present_set_type is already 0). */
;
- else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
+ else if (bitmap_bit_p (&anti_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem)))
present_dep_type = REG_DEP_ANTI;
- else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
+ else if (bitmap_bit_p (&output_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem)))
present_dep_type = REG_DEP_OUTPUT;
else
present_p = 0;
if (present_p && (int) dep_type >= (int) present_dep_type)
- return;
+ return 0;
}
#endif
if (true_dependency_cache != NULL)
{
if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
- RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
+ bitmap_clear_bit (&anti_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
&& output_dependency_cache)
- RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
+ bitmap_clear_bit (&output_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
else
abort ();
}
if (true_dependency_cache != NULL)
{
if ((int) REG_NOTE_KIND (link) == 0)
- SET_BIT (true_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
+ bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
- SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
+ bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
- SET_BIT (output_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
+ bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
}
#endif
- return;
- }
+ return 0;
+ }
/* Might want to check one level of transitivity to save conses. */
link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
if (true_dependency_cache != NULL)
{
if ((int) dep_type == 0)
- SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
+ bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
else if (dep_type == REG_DEP_ANTI)
- SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
+ bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
else if (dep_type == REG_DEP_OUTPUT)
- SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
+ bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
}
#endif
+ return 1;
}
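The new nonzero return value lets callers mirror freshly created
backward links as forward links. A sketch of such a caller
(hypothetical; it pairs add_dependence with the add_forward_dependence
routine introduced later in this patch):

static void
add_back_forw_dep_sketch (rtx insn, rtx elem, enum reg_note dep_type)
{
  /* Mirror the link only if add_dependence actually created it.  */
  if (add_dependence (insn, elem, dep_type))
    add_forward_dependence (elem, insn, dep_type);
}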
/* A convenience wrapper to operate on an entire list. */
static void
-add_dependence_list (insn, list, dep_type)
- rtx insn, list;
- enum reg_note dep_type;
+add_dependence_list (rtx insn, rtx list, enum reg_note dep_type)
{
for (; list; list = XEXP (list, 1))
add_dependence (insn, XEXP (list, 0), dep_type);
/* Similar, but free *LISTP at the same time. */
static void
-add_dependence_list_and_free (insn, listp, dep_type)
- rtx insn;
- rtx *listp;
- enum reg_note dep_type;
+add_dependence_list_and_free (rtx insn, rtx *listp, enum reg_note dep_type)
{
rtx list, next;
for (list = *listp, *listp = NULL; list ; list = next)
goes along with that. */
static void
-set_sched_group_p (insn)
- rtx insn;
+set_sched_group_p (rtx insn)
{
rtx prev;
so that we can do memory aliasing on it. */
void
-add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
- struct deps *deps;
- rtx *insn_list, *mem_list, insn, mem;
+add_insn_mem_dependence (struct deps *deps, rtx *insn_list, rtx *mem_list,
+ rtx insn, rtx mem)
{
rtx link;
mem = shallow_copy_rtx (mem);
XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
}
- link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
+ link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
*mem_list = link;
deps->pending_lists_length++;
dependencies for a read operation, similarly with FOR_WRITE. */
static void
-flush_pending_lists (deps, insn, for_read, for_write)
- struct deps *deps;
- rtx insn;
- int for_read, for_write;
+flush_pending_lists (struct deps *deps, rtx insn, int for_read,
+ int for_write)
{
if (for_write)
{
destination of X, and reads of everything mentioned. */
static void
-sched_analyze_1 (deps, x, insn)
- struct deps *deps;
- rtx x;
- rtx insn;
+sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
{
int regno;
rtx dest = XEXP (x, 0);
while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
|| GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
{
+ if (GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || read_modify_subreg_p (dest))
+ {
+ /* These both read and modify the result. We must handle
+ them as writes to get proper dependencies for following
+ instructions. We must handle them as reads to get proper
+ dependencies from this to previous instructions.
+ Thus we need to call sched_analyze_2. */
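+ /* For instance (illustration only):
+ (set (strict_low_part (subreg:QI (reg:SI 1) 0)) (reg:QI 2))
+ writes the low byte of reg 1 while preserving its upper bits,
+ so reg 1 is both read and written. */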
+
+ sched_analyze_2 (deps, XEXP (dest, 0), insn);
+ }
if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
{
/* The second and third arguments are values read by this insn. */
If so, mark all of them just like the first. */
if (regno < FIRST_PSEUDO_REGISTER)
{
- int i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
+ int i = hard_regno_nregs[regno][GET_MODE (dest)];
if (code == SET)
{
while (--i >= 0)
cselib_lookup (XEXP (t, 0), Pmode, 1);
XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
}
+ t = canon_rtx (t);
if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
{
/* Analyze the uses of memory and registers in rtx X in INSN. */
static void
-sched_analyze_2 (deps, x, insn)
- struct deps *deps;
- rtx x;
- rtx insn;
+sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
{
int i;
int j;
case CC0:
/* User of CC0 depends on immediately preceding insn. */
set_sched_group_p (insn);
+ /* Don't move the CC0 setter to another block (it can set up the
+ same flag for previous CC0 users, which is safe). */
+ CANT_MOVE (prev_nonnote_insn (insn)) = 1;
return;
#endif
int regno = REGNO (x);
if (regno < FIRST_PSEUDO_REGISTER)
{
- int i = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ int i = hard_regno_nregs[regno][GET_MODE (x)];
while (--i >= 0)
SET_REGNO_REG_SET (reg_pending_uses, regno + i);
}
cselib_lookup (XEXP (t, 0), Pmode, 1);
XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
}
+ t = canon_rtx (t);
pending = deps->pending_read_insns;
pending_mem = deps->pending_read_mems;
while (pending)
mode. An insn should not be moved across this even if it only uses
pseudo-regs because it might give an incorrectly rounded result. */
if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
- reg_pending_barrier = true;
+ reg_pending_barrier = TRUE_BARRIER;
/* For all ASM_OPERANDS, we must traverse the vector of input operands.
We can not just fall through here since then we would be confused
/* Analyze an INSN with pattern X to find all dependencies. */
static void
-sched_analyze_insn (deps, x, insn, loop_notes)
- struct deps *deps;
- rtx x, insn;
- rtx loop_notes;
+sched_analyze_insn (struct deps *deps, rtx x, rtx insn, rtx loop_notes)
{
RTX_CODE code = GET_CODE (x);
rtx link;
sched_analyze_2 (deps, XEXP (link, 0), insn);
}
if (find_reg_note (insn, REG_SETJMP, NULL))
- reg_pending_barrier = true;
+ reg_pending_barrier = MOVE_BARRIER;
}
if (GET_CODE (insn) == JUMP_INSN)
rtx next;
next = next_nonnote_insn (insn);
if (next && GET_CODE (next) == BARRIER)
- reg_pending_barrier = true;
+ reg_pending_barrier = TRUE_BARRIER;
else
{
rtx pending, pending_mem;
- regset_head tmp;
- INIT_REG_SET (&tmp);
+ regset_head tmp_uses, tmp_sets;
+ INIT_REG_SET (&tmp_uses);
+ INIT_REG_SET (&tmp_sets);
- (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
+ (*current_sched_info->compute_jump_reg_dependencies)
+ (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
/* Make latency of jump equal to 0 by using anti-dependence. */
- EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
+ EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i,
{
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list (insn, reg_last->sets, REG_DEP_ANTI);
reg_last->uses_length++;
reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
});
- CLEAR_REG_SET (&tmp);
+ IOR_REG_SET (reg_pending_sets, &tmp_sets);
+
+ CLEAR_REG_SET (&tmp_uses);
+ CLEAR_REG_SET (&tmp_sets);
/* All memory writes and volatile reads must happen before the
jump. Non-volatile reads must happen before the jump iff
|| INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
|| INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
|| INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
- reg_pending_barrier = true;
+ reg_pending_barrier = MOVE_BARRIER;
link = XEXP (link, 1);
}
where block boundaries fall. This is mighty confusing elsewhere.
Therefore, prevent such an instruction from being moved. */
if (can_throw_internal (insn))
- reg_pending_barrier = true;
+ reg_pending_barrier = MOVE_BARRIER;
/* Add dependencies if a scheduling barrier was found. */
if (reg_pending_barrier)
{
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->sets, REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->clobbers, REG_DEP_ANTI);
+ add_dependence_list
+ (insn, reg_last->sets,
+ reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
+ add_dependence_list
+ (insn, reg_last->clobbers,
+ reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
});
}
else
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list_and_free (insn, &reg_last->uses,
REG_DEP_ANTI);
- add_dependence_list_and_free (insn, &reg_last->sets,
- REG_DEP_ANTI);
- add_dependence_list_and_free (insn, &reg_last->clobbers,
- REG_DEP_ANTI);
+ add_dependence_list_and_free
+ (insn, &reg_last->sets,
+ reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
+ add_dependence_list_and_free
+ (insn, &reg_last->clobbers,
+ reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI);
reg_last->uses_length = 0;
reg_last->clobbers_length = 0;
});
}
flush_pending_lists (deps, insn, true, true);
- reg_pending_barrier = false;
+ CLEAR_REG_SET (&deps->reg_conditional_sets);
+ reg_pending_barrier = NOT_A_BARRIER;
}
else
{
add_dependence_list (insn, reg_last->clobbers, REG_DEP_OUTPUT);
add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
+ SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
});
}
else
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
reg_last->uses_length = 0;
reg_last->clobbers_length = 0;
+ CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
});
}
for every dependency. */
void
-sched_analyze (deps, head, tail)
- struct deps *deps;
- rtx head, tail;
+sched_analyze (struct deps *deps, rtx head, rtx tail)
{
rtx insn;
rtx loop_notes = 0;
{
/* This is setjmp. Assume that all registers, not just
hard registers, may be clobbered by this call. */
- reg_pending_barrier = true;
+ reg_pending_barrier = MOVE_BARRIER;
}
else
{
|| NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
else
- rtx_region = GEN_INT (0);
+ rtx_region = const0_rtx;
loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
rtx_region,
/* Now that we have completed handling INSN, check and see if it is
a CLOBBER beginning a libcall block. If it is, record the
- end of the libcall sequence.
+ end of the libcall sequence.
We want to schedule libcall blocks as a unit before reload. While
this restricts scheduling, it preserves the meaning of a libcall
a libcall block. */
if (!reload_completed
/* Note we may have nested libcall sequences. We only care about
- the outermost libcall sequence. */
+ the outermost libcall sequence. */
&& deps->libcall_block_tail_insn == 0
/* The sequence must start with a clobber of a register. */
&& GET_CODE (insn) == INSN
abort ();
}
\f
+
+/* The following function adds a forward dependence (FROM, TO) with
+ the given DEP_TYPE. The forward dependence must not already exist. */
+
+void
+add_forward_dependence (rtx from, rtx to, enum reg_note dep_type)
+{
+ rtx new_link;
+
+#ifdef ENABLE_CHECKING
+ /* If add_dependence is working properly there should never
+ be notes, deleted insns or duplicates in the backward
+ links. Thus we need not check for them here.
+
+ However, if we have enabled checking we might as well go
+ ahead and verify that add_dependence worked properly. */
+ if (GET_CODE (from) == NOTE
+ || INSN_DELETED_P (from)
+ || (forward_dependency_cache != NULL
+ && bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],
+ INSN_LUID (to)))
+ || (forward_dependency_cache == NULL
+ && find_insn_list (to, INSN_DEPEND (from))))
+ abort ();
+ if (forward_dependency_cache != NULL)
+ bitmap_set_bit (&forward_dependency_cache[INSN_LUID (from)],
+ INSN_LUID (to));
+#endif
+
+ new_link = alloc_INSN_LIST (to, INSN_DEPEND (from));
+
+ PUT_REG_NOTE_KIND (new_link, dep_type);
+
+ INSN_DEPEND (from) = new_link;
+ INSN_DEP_COUNT (to) += 1;
+}
+
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
dependences from LOG_LINKS to build forward dependences in
INSN_DEPEND. */
void
-compute_forward_dependences (head, tail)
- rtx head, tail;
+compute_forward_dependences (rtx head, rtx tail)
{
rtx insn, link;
rtx next_tail;
- enum reg_note dep_type;
next_tail = NEXT_INSN (tail);
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
continue;
for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
- {
- rtx x = XEXP (link, 0);
- rtx new_link;
-
- if (x != XEXP (link, 0))
- continue;
-
-#ifdef ENABLE_CHECKING
- /* If add_dependence is working properly there should never
- be notes, deleted insns or duplicates in the backward
- links. Thus we need not check for them here.
-
- However, if we have enabled checking we might as well go
- ahead and verify that add_dependence worked properly. */
- if (GET_CODE (x) == NOTE
- || INSN_DELETED_P (x)
- || (forward_dependency_cache != NULL
- && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
- INSN_LUID (insn)))
- || (forward_dependency_cache == NULL
- && find_insn_list (insn, INSN_DEPEND (x))))
- abort ();
- if (forward_dependency_cache != NULL)
- SET_BIT (forward_dependency_cache[INSN_LUID (x)],
- INSN_LUID (insn));
-#endif
-
- new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));
-
- dep_type = REG_NOTE_KIND (link);
- PUT_REG_NOTE_KIND (new_link, dep_type);
-
- INSN_DEPEND (x) = new_link;
- INSN_DEP_COUNT (insn) += 1;
- }
+ add_forward_dependence (XEXP (link, 0), insn, REG_NOTE_KIND (link));
}
}
\f
n_bbs is the number of region blocks. */
void
-init_deps (deps)
- struct deps *deps;
+init_deps (struct deps *deps)
{
int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
deps->max_reg = max_reg;
- deps->reg_last = (struct deps_reg *)
- xcalloc (max_reg, sizeof (struct deps_reg));
+ deps->reg_last = xcalloc (max_reg, sizeof (struct deps_reg));
INIT_REG_SET (&deps->reg_last_in_use);
+ INIT_REG_SET (&deps->reg_conditional_sets);
deps->pending_read_insns = 0;
deps->pending_read_mems = 0;
/* Free insn lists found in DEPS. */
void
-free_deps (deps)
- struct deps *deps;
+free_deps (struct deps *deps)
{
int i;
free_INSN_LIST_list (&deps->last_pending_memory_flush);
/* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
- times. For a test case with 42000 regs and 8000 small basic blocks,
+ times. For a testcase with 42000 regs and 8000 small basic blocks,
this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
{
free_INSN_LIST_list (&reg_last->clobbers);
});
CLEAR_REG_SET (&deps->reg_last_in_use);
+ CLEAR_REG_SET (&deps->reg_conditional_sets);
free (deps->reg_last);
}
it is used in the estimate of profitability. */
void
-init_dependency_caches (luid)
- int luid;
+init_dependency_caches (int luid)
{
/* ?!? We could save some memory by computing a per-region luid mapping
which could reduce both the number of vectors in the cache and the size
what we consider "very high". */
if (luid / n_basic_blocks > 100 * 5)
{
- true_dependency_cache = sbitmap_vector_alloc (luid, luid);
- sbitmap_vector_zero (true_dependency_cache, luid);
- anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
- sbitmap_vector_zero (anti_dependency_cache, luid);
- output_dependency_cache = sbitmap_vector_alloc (luid, luid);
- sbitmap_vector_zero (output_dependency_cache, luid);
+ int i;
+ true_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
+ anti_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
+ output_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
+#ifdef ENABLE_CHECKING
+ forward_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
+#endif
+ for (i = 0; i < luid; i++)
+ {
+ bitmap_initialize (&true_dependency_cache[i], 0);
+ bitmap_initialize (&anti_dependency_cache[i], 0);
+ bitmap_initialize (&output_dependency_cache[i], 0);
#ifdef ENABLE_CHECKING
- forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
- sbitmap_vector_zero (forward_dependency_cache, luid);
+ bitmap_initialize (&forward_dependency_cache[i], 0);
#endif
+ }
+ cache_size = luid;
}
}
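To make the allocation threshold above concrete, a worked instance
(editorial; the figures are illustrative, not from the patch):

/* luid / n_basic_blocks > 100 * 5 asks whether the average block
   exceeds 500 insns, i.e. 100 times a presumed ~5-insn average
   block.  With, say, 8000 basic blocks the caches are allocated
   only once luid exceeds 8000 * 500 = 4,000,000.  */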
/* Free the caches allocated in init_dependency_caches. */
void
-free_dependency_caches ()
+free_dependency_caches (void)
{
if (true_dependency_cache)
{
- sbitmap_vector_free (true_dependency_cache);
+ int i;
+
+ for (i = 0; i < cache_size; i++)
+ {
+ bitmap_clear (&true_dependency_cache[i]);
+ bitmap_clear (&anti_dependency_cache[i]);
+ bitmap_clear (&output_dependency_cache[i]);
+#ifdef ENABLE_CHECKING
+ bitmap_clear (&forward_dependency_cache[i]);
+#endif
+ }
+ free (true_dependency_cache);
true_dependency_cache = NULL;
- sbitmap_vector_free (anti_dependency_cache);
+ free (anti_dependency_cache);
anti_dependency_cache = NULL;
- sbitmap_vector_free (output_dependency_cache);
+ free (output_dependency_cache);
output_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
- sbitmap_vector_free (forward_dependency_cache);
+ free (forward_dependency_cache);
forward_dependency_cache = NULL;
#endif
}
code. */
void
-init_deps_global ()
+init_deps_global (void)
{
reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
reg_pending_uses = INITIALIZE_REG_SET (reg_pending_uses_head);
- reg_pending_barrier = false;
+ reg_pending_barrier = NOT_A_BARRIER;
}
/* Free everything used by the dependency analysis code. */
void
-finish_deps_global ()
+finish_deps_global (void)
{
FREE_REG_SET (reg_pending_sets);
FREE_REG_SET (reg_pending_clobbers);