/* Do not allow clobbering the address register of speculative
insns. */
if ((EXPR_SPEC_DONE_DS (expr) & SPECULATIVE)
- && bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
- expr_dest_regno (expr)))
+ && register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
+ expr_dest_reg (expr)))
EXPR_TARGET_AVAILABLE (expr) = false;
return true;
{
rtx_search_arg_p p = (rtx_search_arg_p) arg;
- /* The last param FOR_GCSE is true, because otherwise it performs excessive
- substitutions like
- r8 = r33
- r16 = r33
- for the last insn it presumes r33 equivalent to r8, so it changes it to
- r33. Actually, there's no change, but it spoils debugging. */
- if (exp_equiv_p (*cur_rtx, p->x, 0, true))
- {
- /* Bail out if we occupy more than one register. */
- if (REG_P (*cur_rtx)
- && HARD_REGISTER_P (*cur_rtx)
- && hard_regno_nregs[REGNO(*cur_rtx)][GET_MODE (*cur_rtx)] > 1)
+ if (REG_P (*cur_rtx) && REGNO (*cur_rtx) == REGNO (p->x))
+ {
+ /* Bail out if mode is different or more than one register is used. */
+ if (GET_MODE (*cur_rtx) != GET_MODE (p->x)
+ || (HARD_REGISTER_P (*cur_rtx)
+ && hard_regno_nregs[REGNO(*cur_rtx)][GET_MODE (*cur_rtx)] > 1))
{
p->n = 0;
return 1;
}
if (GET_CODE (*cur_rtx) == SUBREG
- && REG_P (p->x)
&& (!REG_P (SUBREG_REG (*cur_rtx))
|| REGNO (SUBREG_REG (*cur_rtx)) == REGNO (p->x)))
{
{
struct rtx_search_arg arg;
+ gcc_assert (REG_P (what));
arg.x = what;
arg.n = 0;
FIXME: it is enough to do this once per all original defs. */
if (frame_pointer_needed)
{
- int i;
-
- for (i = hard_regno_nregs[FRAME_POINTER_REGNUM][Pmode]; i--;)
- SET_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs,
- FRAME_POINTER_REGNUM + i);
+ add_to_hard_reg_set (&reg_rename_p->unavailable_hard_regs,
+ Pmode, FRAME_POINTER_REGNUM);
-#if !HARD_FRAME_POINTER_IS_FRAME_POINTER
- for (i = hard_regno_nregs[HARD_FRAME_POINTER_REGNUM][Pmode]; i--;)
- SET_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs,
- HARD_FRAME_POINTER_REGNUM + i);
-#endif
+ if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
+ add_to_hard_reg_set (&reg_rename_p->unavailable_hard_regs,
+ Pmode, HARD_FRAME_POINTER_REGNUM);
}
#ifdef STACK_REGS
regno = expr_dest_regno (expr);
mode = GET_MODE (EXPR_LHS (expr));
target_available = EXPR_TARGET_AVAILABLE (expr) == 1;
- n = reload_completed ? hard_regno_nregs[regno][mode] : 1;
+ n = HARD_REGISTER_NUM_P (regno) ? hard_regno_nregs[regno][mode] : 1;
live_available = hard_available = true;
for (i = 0; i < n; i++)
ds_t *has_dep_p;
ds_t full_ds;
+ /* ??? We use dependencies of non-debug insns on debug insns to
+ indicate that the debug insns need to be reset if the non-debug
+ insn is pulled ahead of it. It's hard to figure out how to
+ introduce such a notion in sel-sched, but it already fails to
+ support debug insns in other ways, so we just go ahead and
+ let the debug insns go corrupt for now. */
+ if (DEBUG_INSN_P (through_insn) && !DEBUG_INSN_P (insn))
+ return MOVEUP_EXPR_SAME;
+
/* When inside_insn_group, delegate to the helper. */
if (inside_insn_group)
return moveup_expr_inside_insn_group (expr, through_insn);
sel_print ("real successors num: %d\n", sinfo->all_succs_n);
}
- /* Add insn to to the tail of current path. */
+ /* Add insn to the tail of current path. */
ilist_add (&p, insn);
FOR_EACH_VEC_ELT (rtx, sinfo->succs_ok, is, succ)
renaming. Check with the right register instead. */
if (sparams->dest && REG_P (sparams->dest))
{
- unsigned regno = REGNO (sparams->dest);
+ rtx reg = sparams->dest;
vinsn_t failed_vinsn = INSN_VINSN (sparams->failed_insn);
- if (bitmap_bit_p (VINSN_REG_SETS (failed_vinsn), regno)
- || bitmap_bit_p (VINSN_REG_USES (failed_vinsn), regno)
- || bitmap_bit_p (VINSN_REG_CLOBBERS (failed_vinsn), regno))
+ if (register_unavailable_p (VINSN_REG_SETS (failed_vinsn), reg)
+ || register_unavailable_p (VINSN_REG_USES (failed_vinsn), reg)
+ || register_unavailable_p (VINSN_REG_CLOBBERS (failed_vinsn), reg))
return true;
}
}
/* Return insn after which we must insert bookkeeping code for path(s) incoming
- into E2->dest, except from E1->src. */
+ into E2->dest, except from E1->src. If the returned insn immediately
+ precedes a fence, assign that fence to *FENCE_TO_REWIND. */
static insn_t
-find_place_for_bookkeeping (edge e1, edge e2)
+find_place_for_bookkeeping (edge e1, edge e2, fence_t *fence_to_rewind)
{
insn_t place_to_insert;
/* Find a basic block that can hold bookkeeping. If it can be found, do not
sel_print ("Pre-existing bookkeeping block is %i\n", book_block->index);
}
- /* If basic block ends with a jump, insert bookkeeping code right before it. */
+ *fence_to_rewind = NULL;
+ /* If basic block ends with a jump, insert bookkeeping code right before it.
+ Notice if we are crossing a fence when taking PREV_INSN. */
if (INSN_P (place_to_insert) && control_flow_insn_p (place_to_insert))
- place_to_insert = PREV_INSN (place_to_insert);
+ {
+ *fence_to_rewind = flist_lookup (fences, place_to_insert);
+ place_to_insert = PREV_INSN (place_to_insert);
+ }
return place_to_insert;
}
insn_t join_point, place_to_insert, new_insn;
int new_seqno;
bool need_to_exchange_data_sets;
+ fence_t fence_to_rewind;
if (sched_verbose >= 4)
sel_print ("Generating bookkeeping insn (%d->%d)\n", e1->src->index,
e2->dest->index);
join_point = sel_bb_head (e2->dest);
- place_to_insert = find_place_for_bookkeeping (e1, e2);
- if (!place_to_insert)
- return NULL;
+ place_to_insert = find_place_for_bookkeeping (e1, e2, &fence_to_rewind);
new_seqno = find_seqno_for_bookkeeping (place_to_insert, join_point);
need_to_exchange_data_sets
= sel_bb_empty_p (BLOCK_FOR_INSN (place_to_insert));
new_insn = emit_bookkeeping_insn (place_to_insert, c_expr, new_seqno);
+ if (fence_to_rewind)
+ FENCE_INSN (fence_to_rewind) = new_insn;
+
/* When inserting bookkeeping insn in new block, av sets should be
following: old basic block (that now holds bookkeeping) data sets are
the same as was before generation of bookkeeping, and new basic block
{
blist_t *bnds_tailp1, *bndsp;
expr_t expr_vliw;
- int need_stall;
+ int need_stall = false;
int was_stall = 0, scheduled_insns = 0;
int max_insns = pipelining_p ? issue_rate : 2 * issue_rate;
int max_stall = pipelining_p ? 1 : 3;
the iterator becomes invalid. We need to try again. */
if (BLOCK_FOR_INSN (insn)->index != old_index
|| EDGE_COUNT (bb->succs) != old_succs)
- goto rescan;
+ {
+ insn = sel_bb_end (BLOCK_FOR_INSN (insn));
+ goto rescan;
+ }
}
#ifdef ENABLE_CHECKING
/* Filter the orig_ops set. */
if (AV_SET_VALID_P (insn))
- av_set_intersect (&orig_ops, AV_SET (insn));
+ av_set_code_motion_filter (&orig_ops, AV_SET (insn));
/* If no more original ops, return immediately. */
if (!orig_ops)
if (!expr)
{
int res;
+ rtx last_insn = PREV_INSN (insn);
+ bool added_to_path;
gcc_assert (insn == sel_bb_end (bb));
/* Add bb tail to PATH (but it doesn't make any sense if it's a bb_head -
it's already in PATH then). */
if (insn != first_insn)
- ilist_add (&path, insn);
+ {
+ ilist_add (&path, insn);
+ added_to_path = true;
+ }
+ else
+ added_to_path = false;
/* Process_successors should be able to find at least one
successor for which code_motion_path_driver returns TRUE. */
res = code_motion_process_successors (insn, orig_ops,
path, static_params);
+ /* Jump in the end of basic block could have been removed or replaced
+ during code_motion_process_successors, so recompute insn as the
+ last insn in bb. */
+ if (NEXT_INSN (last_insn) != insn)
+ {
+ insn = sel_bb_end (bb);
+ first_insn = sel_bb_head (bb);
+ }
+
/* Remove bb tail from path. */
- if (insn != first_insn)
+ if (added_to_path)
ilist_remove (&path);
if (res != 1)
{
struct moveop_static_params sparams;
struct cmpd_local_params lparams;
- bool res;
+ int res;
/* Init params for code_motion_path_driver. */
sparams.dest = dest;
code_motion_path_driver_info = &move_op_hooks;
res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
+ gcc_assert (res != -1);
+
if (sparams.was_renamed)
EXPR_WAS_RENAMED (expr_vliw) = true;
INSN_SEQNO (insn) = cur_seqno--;
}
-/* Initialize seqnos for the current region. NUMBER_OF_INSNS is the number
- of instructions in the region, BLOCKS_TO_RESCHEDULE contains blocks on
- which we're rescheduling when pipelining, FROM is the block where
+/* Initialize seqnos for the current region. BLOCKS_TO_RESCHEDULE contains
+ blocks on which we're rescheduling when pipelining, FROM is the block where
traversing region begins (it may not be the head of the region when
pipelining, but the head of the loop instead).
Returns the maximal seqno found. */
static int
-init_seqno (int number_of_insns, bitmap blocks_to_reschedule, basic_block from)
+init_seqno (bitmap blocks_to_reschedule, basic_block from)
{
sbitmap visited_bbs;
bitmap_iterator bi;
from = EBB_FIRST_BB (0);
}
- cur_seqno = number_of_insns > 0 ? number_of_insns : sched_max_luid - 1;
+ cur_seqno = sched_max_luid - 1;
init_seqno_1 (from, visited_bbs, blocks_to_reschedule);
- gcc_assert (cur_seqno == 0 || number_of_insns == 0);
+
+ /* cur_seqno may be positive if the number of instructions is less than
+ sched_max_luid - 1 (when rescheduling or if some instructions have been
+ removed by the call to purge_empty_blocks in sel_sched_region_1). */
+ gcc_assert (cur_seqno >= 0);
sbitmap_free (visited_bbs);
return sched_max_luid - 1;
bookkeeping_p = 1;
pipelining_p = (bookkeeping_p
&& (flag_sel_sched_pipelining != 0)
- && current_loop_nest != NULL);
+ && current_loop_nest != NULL
+ && loop_has_exit_edges (current_loop_nest));
max_insns_to_rename = PARAM_VALUE (PARAM_SELSCHED_INSNS_TO_RENAME);
max_ws = MAX_WS;
}
/* Prepare and verify loop nest for pipelining. */
static void
-setup_current_loop_nest (int rgn)
+setup_current_loop_nest (int rgn, bb_vec_t *bbs)
{
current_loop_nest = get_loop_nest_for_rgn (rgn);
/* If this loop has any saved loop preheaders from nested loops,
add these basic blocks to the current region. */
- sel_add_loop_preheaders ();
+ sel_add_loop_preheaders (bbs);
/* Check that we're starting with a valid information. */
gcc_assert (loop_latch_edge (current_loop_nest));
if (current_region_empty_p ())
return true;
- if (flag_sel_sched_pipelining)
- setup_current_loop_nest (rgn);
-
- sel_setup_region_sched_flags ();
-
bbs = VEC_alloc (basic_block, heap, current_nr_blocks);
for (i = 0; i < current_nr_blocks; i++)
VEC_quick_push (basic_block, bbs, BASIC_BLOCK (BB_TO_BLOCK (i)));
- sel_init_bbs (bbs, NULL);
+ sel_init_bbs (bbs);
+
+ if (flag_sel_sched_pipelining)
+ setup_current_loop_nest (rgn, &bbs);
+
+ sel_setup_region_sched_flags ();
/* Initialize luids and dependence analysis which both sel-sched and haifa
need. */
- sched_init_luids (bbs, NULL, NULL, NULL);
+ sched_init_luids (bbs);
sched_deps_init (false);
/* Initialize haifa data. */
rgn_setup_sched_infos ();
sel_set_sched_flags ();
- haifa_init_h_i_d (bbs, NULL, NULL, NULL);
+ haifa_init_h_i_d (bbs);
sel_compute_priorities (rgn);
init_deps_global ();
/* Extend luids so that insns generated by the target will
get zero luid. */
- sched_init_luids (NULL, NULL, NULL, NULL);
+ sched_extend_luids ();
}
}
finish_deps_global ();
sched_finish_luids ();
+ VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
sel_finish_bbs ();
BITMAP_FREE (blocks_to_reschedule);
static void
sel_sched_region_1 (void)
{
- int number_of_insns;
int orig_max_seqno;
- /* Remove empty blocks that might be in the region from the beginning.
- We need to do save sched_max_luid before that, as it actually shows
- the number of insns in the region, and purge_empty_blocks can
- alter it. */
- number_of_insns = sched_max_luid - 1;
+ /* Remove empty blocks that might be in the region from the beginning. */
purge_empty_blocks ();
- orig_max_seqno = init_seqno (number_of_insns, NULL, NULL);
+ orig_max_seqno = init_seqno (NULL, NULL);
gcc_assert (orig_max_seqno >= 1);
/* When pipelining outer loops, create fences on the loop header,
{
flist_tail_init (new_fences);
- orig_max_seqno = init_seqno (0, blocks_to_reschedule, bb);
+ orig_max_seqno = init_seqno (blocks_to_reschedule, bb);
/* Mark BB as head of the new ebb. */
bitmap_set_bit (forced_ebb_heads, bb->index);