/* Instruction scheduling pass.
- Copyright (C) 1992, 93-98, 1999, 2000 Free Software Foundation, Inc.
+ Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
+ 1999, 2000 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
- This file is part of GNU CC.
+This file is part of GNU CC.
- GNU CC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
+GNU CC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
- GNU CC is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
+GNU CC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
- You should have received a copy of the GNU General Public License
- along with GNU CC; see the file COPYING. If not, write to the Free
- the Free Software Foundation, 59 Temple Place - Suite 330,
- Boston, MA 02111-1307, USA. */
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
/* Instruction scheduling pass.
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
+#include "hard-reg-set.h"
#include "basic-block.h"
#include "regs.h"
#include "function.h"
-#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#endif
/* sched-verbose controls the amount of debugging output the
- scheduler prints. It is controlled by -fsched-verbose-N:
+ scheduler prints. It is controlled by -fsched-verbose=N:
N>0 and no -DSR : the output is directed to stderr.
N>=10 will direct the printouts to stderr (regardless of -dSR).
N=1: same as -dSR.
static FILE *dump = 0;
/* fix_sched_param() is called from toplev.c upon detection
- of the -fsched-***-N options. */
+ of the -fsched-verbose=N option. */
void
fix_sched_param (param, val)
the last function call, must depend on this. */
rtx last_function_call;
- /* The LOG_LINKS field of this is a list of insns which use a pseudo register
- that does not already cross a call. We create dependencies between each
- of those insn and the next call insn, to ensure that they won't cross a call
- after scheduling is done. */
+ /* Used to keep post-call pseudo/hard reg movements together with
+ the call. */
+ int in_post_call_group_p;
+
+ /* The LOG_LINKS field of this is a list of insns which use a pseudo
+ register that does not already cross a call. We create
+ dependencies between each of those insns and the next call insn,
+ to ensure that they won't cross a call after scheduling is done. */
rtx sched_before_next_call;
/* Element N is the next insn that sets (hard or pseudo) register
#define NEXT_Q_AFTER(X, C) (((X)+C) & (INSN_QUEUE_SIZE-1))
/* Forward declarations. */
-static void add_dependence PROTO ((rtx, rtx, enum reg_note));
-#ifdef HAVE_cc0
-static void remove_dependence PROTO ((rtx, rtx));
-#endif
-static rtx find_insn_list PROTO ((rtx, rtx));
-static int insn_unit PROTO ((rtx));
-static unsigned int blockage_range PROTO ((int, rtx));
-static void clear_units PROTO ((void));
-static int actual_hazard_this_instance PROTO ((int, int, rtx, int, int));
-static void schedule_unit PROTO ((int, rtx, int));
-static int actual_hazard PROTO ((int, rtx, int, int));
-static int potential_hazard PROTO ((int, rtx, int));
-static int insn_cost PROTO ((rtx, rtx, rtx));
-static int priority PROTO ((rtx));
-static void free_pending_lists PROTO ((void));
-static void add_insn_mem_dependence PROTO ((struct deps *, rtx *, rtx *, rtx,
+static void add_dependence PARAMS ((rtx, rtx, enum reg_note));
+static void remove_dependence PARAMS ((rtx, rtx));
+static rtx find_insn_list PARAMS ((rtx, rtx));
+static void set_sched_group_p PARAMS ((rtx));
+static int insn_unit PARAMS ((rtx));
+static unsigned int blockage_range PARAMS ((int, rtx));
+static void clear_units PARAMS ((void));
+static int actual_hazard_this_instance PARAMS ((int, int, rtx, int, int));
+static void schedule_unit PARAMS ((int, rtx, int));
+static int actual_hazard PARAMS ((int, rtx, int, int));
+static int potential_hazard PARAMS ((int, rtx, int));
+static int insn_cost PARAMS ((rtx, rtx, rtx));
+static int priority PARAMS ((rtx));
+static void free_pending_lists PARAMS ((void));
+static void add_insn_mem_dependence PARAMS ((struct deps *, rtx *, rtx *, rtx,
rtx));
-static void flush_pending_lists PROTO ((struct deps *, rtx, int));
-static void sched_analyze_1 PROTO ((struct deps *, rtx, rtx));
-static void sched_analyze_2 PROTO ((struct deps *, rtx, rtx));
-static void sched_analyze_insn PROTO ((struct deps *, rtx, rtx, rtx));
-static void sched_analyze PROTO ((struct deps *, rtx, rtx));
-static int rank_for_schedule PROTO ((const PTR, const PTR));
-static void swap_sort PROTO ((rtx *, int));
-static void queue_insn PROTO ((rtx, int));
-static int schedule_insn PROTO ((rtx, rtx *, int, int));
-static void find_insn_reg_weight PROTO ((int));
-static int schedule_block PROTO ((int, int));
-static char *safe_concat PROTO ((char *, char *, const char *));
-static int insn_issue_delay PROTO ((rtx));
-static void adjust_priority PROTO ((rtx));
+static void flush_pending_lists PARAMS ((struct deps *, rtx, int));
+static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
+static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
+static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
+static void sched_analyze PARAMS ((struct deps *, rtx, rtx));
+static int rank_for_schedule PARAMS ((const PTR, const PTR));
+static void swap_sort PARAMS ((rtx *, int));
+static void queue_insn PARAMS ((rtx, int));
+static int schedule_insn PARAMS ((rtx, rtx *, int, int));
+static void find_insn_reg_weight PARAMS ((int));
+static int schedule_block PARAMS ((int, int));
+static char *safe_concat PARAMS ((char *, char *, const char *));
+static int insn_issue_delay PARAMS ((rtx));
+static void adjust_priority PARAMS ((rtx));
/* Control flow graph edges are kept in circular lists. */
typedef struct
-static int is_cfg_nonregular PROTO ((void));
-static int build_control_flow PROTO ((struct edge_list *));
-static void new_edge PROTO ((int, int));
+static int is_cfg_nonregular PARAMS ((void));
+static int build_control_flow PARAMS ((struct edge_list *));
+static void new_edge PARAMS ((int, int));
/* A region is the main entity for interblock scheduling: insns
#define BLOCK_TO_BB(block) (block_to_bb[block])
#define CONTAINING_RGN(block) (containing_rgn[block])
-void debug_regions PROTO ((void));
-static void find_single_block_region PROTO ((void));
-static void find_rgns PROTO ((struct edge_list *, sbitmap *));
-static int too_large PROTO ((int, int *, int *));
+void debug_regions PARAMS ((void));
+static void find_single_block_region PARAMS ((void));
+static void find_rgns PARAMS ((struct edge_list *, sbitmap *));
+static int too_large PARAMS ((int, int *, int *));
-extern void debug_live PROTO ((int, int));
+extern void debug_live PARAMS ((int, int));
/* Blocks of the current region being scheduled. */
static int current_nr_blocks;
static int bitlst_table_size;
static int *bitlst_table;
-static char bitset_member PROTO ((bitset, int, int));
-static void extract_bitlst PROTO ((bitset, int, int, bitlst *));
+static char bitset_member PARAMS ((bitset, int, int));
+static void extract_bitlst PARAMS ((bitset, int, int, bitlst *));
/* Target info declarations.
typedef bitlst edgelst;
/* Target info functions. */
-static void split_edges PROTO ((int, int, edgelst *));
-static void compute_trg_info PROTO ((int));
-void debug_candidate PROTO ((int));
-void debug_candidates PROTO ((int));
+static void split_edges PARAMS ((int, int, edgelst *));
+static void compute_trg_info PARAMS ((int));
+void debug_candidate PARAMS ((int));
+void debug_candidates PARAMS ((int));
/* Bit-set of bbs, where bit 'i' stands for bb 'i'. */
/* For every bb, a set of its ancestor edges. */
static edgeset *ancestor_edges;
-static void compute_dom_prob_ps PROTO ((int));
+static void compute_dom_prob_ps PARAMS ((int));
#define ABS_VALUE(x) (((x)<0)?(-(x)):(x))
#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define MIN_PROB_DIFF 10
/* Speculative scheduling functions. */
-static int check_live_1 PROTO ((int, rtx));
-static void update_live_1 PROTO ((int, rtx));
-static int check_live PROTO ((rtx, int));
-static void update_live PROTO ((rtx, int));
-static void set_spec_fed PROTO ((rtx));
-static int is_pfree PROTO ((rtx, int, int));
-static int find_conditional_protection PROTO ((rtx, int));
-static int is_conditionally_protected PROTO ((rtx, int, int));
-static int may_trap_exp PROTO ((rtx, int));
-static int haifa_classify_insn PROTO ((rtx));
-static int is_prisky PROTO ((rtx, int, int));
-static int is_exception_free PROTO ((rtx, int, int));
-
-static char find_insn_mem_list PROTO ((rtx, rtx, rtx, rtx));
-static void compute_block_forward_dependences PROTO ((int));
-static void add_branch_dependences PROTO ((rtx, rtx));
-static void compute_block_backward_dependences PROTO ((int));
-void debug_dependencies PROTO ((void));
+static int check_live_1 PARAMS ((int, rtx));
+static void update_live_1 PARAMS ((int, rtx));
+static int check_live PARAMS ((rtx, int));
+static void update_live PARAMS ((rtx, int));
+static void set_spec_fed PARAMS ((rtx));
+static int is_pfree PARAMS ((rtx, int, int));
+static int find_conditional_protection PARAMS ((rtx, int));
+static int is_conditionally_protected PARAMS ((rtx, int, int));
+static int may_trap_exp PARAMS ((rtx, int));
+static int haifa_classify_insn PARAMS ((rtx));
+static int is_prisky PARAMS ((rtx, int, int));
+static int is_exception_free PARAMS ((rtx, int, int));
+
+static char find_insn_mem_list PARAMS ((rtx, rtx, rtx, rtx));
+static void compute_block_forward_dependences PARAMS ((int));
+static void add_branch_dependences PARAMS ((rtx, rtx));
+static void compute_block_backward_dependences PARAMS ((int));
+void debug_dependencies PARAMS ((void));
/* Notes handling mechanism:
=========================
unlink_other_notes ()). After scheduling the block, these notes are
inserted at the beginning of the block (in schedule_block()). */
-static rtx unlink_other_notes PROTO ((rtx, rtx));
-static rtx unlink_line_notes PROTO ((rtx, rtx));
-static void rm_line_notes PROTO ((int));
-static void save_line_notes PROTO ((int));
-static void restore_line_notes PROTO ((int));
-static void rm_redundant_line_notes PROTO ((void));
-static void rm_other_notes PROTO ((rtx, rtx));
-static rtx reemit_notes PROTO ((rtx, rtx));
-
-static void get_block_head_tail PROTO ((int, rtx *, rtx *));
-static void get_bb_head_tail PROTO ((int, rtx *, rtx *));
-
-static int queue_to_ready PROTO ((rtx [], int));
-
-static void debug_ready_list PROTO ((rtx[], int));
-static void init_target_units PROTO ((void));
-static void insn_print_units PROTO ((rtx));
-static int get_visual_tbl_length PROTO ((void));
-static void init_block_visualization PROTO ((void));
-static void print_block_visualization PROTO ((int, const char *));
-static void visualize_scheduled_insns PROTO ((int, int));
-static void visualize_no_unit PROTO ((rtx));
-static void visualize_stall_cycles PROTO ((int, int));
-static void print_exp PROTO ((char *, rtx, int));
-static void print_value PROTO ((char *, rtx, int));
-static void print_pattern PROTO ((char *, rtx, int));
-static void print_insn PROTO ((char *, rtx, int));
-void debug_reg_vector PROTO ((regset));
-
-static rtx move_insn1 PROTO ((rtx, rtx));
-static rtx move_insn PROTO ((rtx, rtx));
-static rtx group_leader PROTO ((rtx));
-static int set_priorities PROTO ((int));
-static void init_deps PROTO ((struct deps *));
-static void schedule_region PROTO ((int));
+static rtx unlink_other_notes PARAMS ((rtx, rtx));
+static rtx unlink_line_notes PARAMS ((rtx, rtx));
+static void rm_line_notes PARAMS ((int));
+static void save_line_notes PARAMS ((int));
+static void restore_line_notes PARAMS ((int));
+static void rm_redundant_line_notes PARAMS ((void));
+static void rm_other_notes PARAMS ((rtx, rtx));
+static rtx reemit_notes PARAMS ((rtx, rtx));
+
+static void get_block_head_tail PARAMS ((int, rtx *, rtx *));
+static void get_bb_head_tail PARAMS ((int, rtx *, rtx *));
+
+static int queue_to_ready PARAMS ((rtx [], int));
+
+static void debug_ready_list PARAMS ((rtx[], int));
+static void init_target_units PARAMS ((void));
+static void insn_print_units PARAMS ((rtx));
+static int get_visual_tbl_length PARAMS ((void));
+static void init_block_visualization PARAMS ((void));
+static void print_block_visualization PARAMS ((int, const char *));
+static void visualize_scheduled_insns PARAMS ((int, int));
+static void visualize_no_unit PARAMS ((rtx));
+static void visualize_stall_cycles PARAMS ((int, int));
+static void print_exp PARAMS ((char *, rtx, int));
+static void print_value PARAMS ((char *, rtx, int));
+static void print_pattern PARAMS ((char *, rtx, int));
+static void print_insn PARAMS ((char *, rtx, int));
+void debug_reg_vector PARAMS ((regset));
+
+static rtx move_insn1 PARAMS ((rtx, rtx));
+static rtx move_insn PARAMS ((rtx, rtx));
+static rtx group_leader PARAMS ((rtx));
+static int set_priorities PARAMS ((int));
+static void init_deps PARAMS ((struct deps *));
+static void schedule_region PARAMS ((int));
+static void propagate_deps PARAMS ((int, struct deps *, int));
#endif /* INSN_SCHEDULING */
\f
When HAVE_cc0, it is possible for NOTEs to exist between users and
setters of the condition codes, so we must skip past notes here.
Otherwise, NOTEs are impossible here. */
-
- next = NEXT_INSN (elem);
-
-#ifdef HAVE_cc0
- while (next && GET_CODE (next) == NOTE)
- next = NEXT_INSN (next);
-#endif
-
+ next = next_nonnote_insn (elem);
if (next && SCHED_GROUP_P (next)
&& GET_CODE (next) != CODE_LABEL)
{
/* Notes will never intervene here though, so don't bother checking
for them. */
+ /* Hah! Wrong. */
/* We must reject CODE_LABELs, so that we don't get confused by one
that has LABEL_PRESERVE_P set, which is represented by the same
bit in the rtl as SCHED_GROUP_P. A CODE_LABEL can never be
SCHED_GROUP_P. */
- while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next))
- && GET_CODE (NEXT_INSN (next)) != CODE_LABEL)
- next = NEXT_INSN (next);
+
+ rtx nnext;
+ while ((nnext = next_nonnote_insn (next)) != NULL
+ && SCHED_GROUP_P (nnext)
+ && GET_CODE (nnext) != CODE_LABEL)
+ next = nnext;
/* Again, don't depend an insn on itself. */
if (insn == next)
&& (INSN_BB (elem) != INSN_BB (insn)))
return;
-
/* If we already have a true dependency for ELEM, then we do not
need to do anything. Avoiding the list walk below can cut
compile times dramatically for some code. */
#endif
}
-#ifdef HAVE_cc0
/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
of INSN. Abort if not found. */
abort ();
return;
}
-#endif /* HAVE_cc0 */
+
+/* Return the INSN_LIST containing INSN in LIST, or NULL
+ if LIST does not contain INSN. */
+
+static inline rtx
+find_insn_list (insn, list)
+ rtx insn;
+ rtx list;
+{
+ while (list)
+ {
+ if (XEXP (list, 0) == insn)
+ return list;
+ list = XEXP (list, 1);
+ }
+ return 0;
+}
+
+/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
+ goes along with that. */
+
+static void
+set_sched_group_p (insn)
+ rtx insn;
+{
+ rtx link, prev;
+
+ SCHED_GROUP_P (insn) = 1;
+
+ /* There may be a note before this insn now, but all notes will
+ be removed before we actually try to schedule the insns, so
+ it won't cause a problem later. We must avoid it here though. */
+ prev = prev_nonnote_insn (insn);
+
+ /* Make a copy of all dependencies on the immediately previous insn,
+ and add to this insn. This is so that all the dependencies will
+ apply to the group. Remove an explicit dependence on this insn
+ as SCHED_GROUP_P now represents it. */
+
+ if (find_insn_list (prev, LOG_LINKS (insn)))
+ remove_dependence (insn, prev);
+
+ for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
+ add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
+}
\f
#ifndef INSN_SCHEDULING
void
WORST_CLASS (tmp_class,
may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)), 0));
break;
+ case COND_EXEC:
case TRAP_IF:
tmp_class = TRAP_RISKY;
break;
WORST_CLASS (tmp_class,
may_trap_exp (SET_SRC (pat), 0));
break;
+ case COND_EXEC:
case TRAP_IF:
tmp_class = TRAP_RISKY;
break;
We are careful to build only dependencies which actually exist, and
use transitivity to avoid building too many links. */
\f
-/* Return the INSN_LIST containing INSN in LIST, or NULL
- if LIST does not contain INSN. */
-
-HAIFA_INLINE static rtx
-find_insn_list (insn, list)
- rtx insn;
- rtx list;
-{
- while (list)
- {
- if (XEXP (list, 0) == insn)
- return list;
- list = XEXP (list, 1);
- }
- return 0;
-}
-
-
/* Return 1 if the pair (insn, x) is found in (LIST, LIST1), or 0
otherwise. */
int this_priority;
rtx link;
- if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ if (! INSN_P (insn))
return 0;
if ((this_priority = INSN_PRIORITY (insn)) == 0)
#ifdef HAVE_cc0
case CC0:
- {
- rtx link, prev;
-
- /* User of CC0 depends on immediately preceding insn. */
- SCHED_GROUP_P (insn) = 1;
-
- /* There may be a note before this insn now, but all notes will
- be removed before we actually try to schedule the insns, so
- it won't cause a problem later. We must avoid it here though. */
- prev = prev_nonnote_insn (insn);
-
- /* Make a copy of all dependencies on the immediately previous insn,
- and add to this insn. This is so that all the dependencies will
- apply to the group. Remove an explicit dependence on this insn
- as SCHED_GROUP_P now represents it. */
-
- if (find_insn_list (prev, LOG_LINKS (insn)))
- remove_dependence (insn, prev);
-
- for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
- add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
-
- return;
- }
+ /* User of CC0 depends on immediately preceding insn. */
+ set_sched_group_p (insn);
+ return;
#endif
case REG:
sched_analyze_1 (deps, x, insn);
return;
+ case POST_MODIFY:
+ case PRE_MODIFY:
+ /* op0 = op0 + op1 */
+ sched_analyze_2 (deps, XEXP (x, 0), insn);
+ sched_analyze_2 (deps, XEXP (x, 1), insn);
+ sched_analyze_1 (deps, x, insn);
+ return;
+
default:
break;
}
int maxreg = max_reg_num ();
int i;
+ if (code == COND_EXEC)
+ {
+ sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
+
+ /* ??? Should be recording conditions so we reduce the number of
+ false dependencies. */
+ x = COND_EXEC_CODE (x);
+ code = GET_CODE (x);
+ }
if (code == SET || code == CLOBBER)
sched_analyze_1 (deps, x, insn);
else if (code == PARALLEL)
register int i;
for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
{
- code = GET_CODE (XVECEXP (x, 0, i));
+ rtx sub = XVECEXP (x, 0, i);
+ code = GET_CODE (sub);
+
+ if (code == COND_EXEC)
+ {
+ sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
+ sub = COND_EXEC_CODE (sub);
+ code = GET_CODE (sub);
+ }
if (code == SET || code == CLOBBER)
- sched_analyze_1 (deps, XVECEXP (x, 0, i), insn);
+ sched_analyze_1 (deps, sub, insn);
else
- sched_analyze_2 (deps, XVECEXP (x, 0, i), insn);
+ sched_analyze_2 (deps, sub, insn);
}
}
else
reg_pending_sets_all = 0;
}
- /* Handle function calls and function returns created by the epilogue
- threading code. */
- if (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN)
+ /* If a post-call group is still open, see if it should remain so.
+ This insn must be a simple move of a hard reg to a pseudo or
+ vice-versa.
+
+ We must avoid moving these insns for correctness on
+ SMALL_REGISTER_CLASS machines, and for special registers like
+ PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
+ hard regs for all targets. */
+
+ if (deps->in_post_call_group_p)
{
- rtx dep_insn;
- rtx prev_dep_insn;
-
- /* When scheduling instructions, we make sure calls don't lose their
- accompanying USE insns by depending them one on another in order.
-
- Also, we must do the same thing for returns created by the epilogue
- threading code. Note this code works only in this special case,
- because other passes make no guarantee that they will never emit
- an instruction between a USE and a RETURN. There is such a guarantee
- for USE instructions immediately before a call. */
-
- prev_dep_insn = insn;
- dep_insn = PREV_INSN (insn);
- while (GET_CODE (dep_insn) == INSN
- && GET_CODE (PATTERN (dep_insn)) == USE
- && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == REG)
- {
- SCHED_GROUP_P (prev_dep_insn) = 1;
+ rtx tmp, set = single_set (insn);
+ int src_regno, dest_regno;
- /* Make a copy of all dependencies on dep_insn, and add to insn.
- This is so that all of the dependencies will apply to the
- group. */
+ if (set == NULL)
+ goto end_call_group;
- for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
- add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
+ tmp = SET_DEST (set);
+ if (GET_CODE (tmp) == SUBREG)
+ tmp = SUBREG_REG (tmp);
+ if (GET_CODE (tmp) == REG)
+ dest_regno = REGNO (tmp);
+ else
+ goto end_call_group;
- prev_dep_insn = dep_insn;
- dep_insn = PREV_INSN (dep_insn);
+ tmp = SET_SRC (set);
+ if (GET_CODE (tmp) == SUBREG)
+ tmp = SUBREG_REG (tmp);
+ if (GET_CODE (tmp) == REG)
+ src_regno = REGNO (tmp);
+ else
+ goto end_call_group;
+
+ if (src_regno < FIRST_PSEUDO_REGISTER
+ || dest_regno < FIRST_PSEUDO_REGISTER)
+ {
+ set_sched_group_p (insn);
+ CANT_MOVE (insn) = 1;
+ }
+ else
+ {
+ end_call_group:
+ deps->in_post_call_group_p = 0;
}
}
}
/* Clear out the stale LOG_LINKS from flow. */
free_INSN_LIST_list (&LOG_LINKS (insn));
+ /* Clear out stale SCHED_GROUP_P. */
+ SCHED_GROUP_P (insn) = 0;
+
/* Make each JUMP_INSN a scheduling barrier for memory
references. */
if (GET_CODE (insn) == JUMP_INSN)
rtx x;
register int i;
+ /* Clear out stale SCHED_GROUP_P. */
+ SCHED_GROUP_P (insn) = 0;
+
CANT_MOVE (insn) = 1;
/* Clear out the stale LOG_LINKS from flow. */
/* last_function_call is now a list of insns. */
free_INSN_LIST_list (&deps->last_function_call);
deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
+
+ /* Before reload, begin a post-call group, so as to keep the
+ lifetimes of hard registers correct. */
+ if (! reload_completed)
+ deps->in_post_call_group_p = 1;
}
/* See comments on reemit_notes as to why we do this.
??? Actually, the reemit_notes just say what is done, not why. */
else if (GET_CODE (insn) == NOTE
- && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_START
+ && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_BEG
|| NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END))
{
loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE, NOTE_RANGE_INFO (insn),
const PTR x;
const PTR y;
{
- rtx tmp = *(rtx *)y;
- rtx tmp2 = *(rtx *)x;
+ rtx tmp = *(const rtx *)y;
+ rtx tmp2 = *(const rtx *)x;
rtx link;
int tmp_class, tmp2_class, depend_count1, depend_count2;
int val, priority_val, spec_val, prob_val, weight_val;
if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_SETJMP
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_END
- && NOTE_LINE_NUMBER (insn) != NOTE_INSN_RANGE_START
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_RANGE_BEG
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_RANGE_END
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
get_bb_head_tail (bb, &head, &tail);
- if (head == tail
- && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
+ if (head == tail && (! INSN_P (head)))
return;
next_tail = NEXT_INSN (tail);
rtx next_tail;
rtx insn;
- if (head == tail
- && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
+ if (head == tail && (! INSN_P (head)))
return;
next_tail = NEXT_INSN (tail);
rtx x;
/* Handle register life information. */
- if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ if (! INSN_P (insn))
continue;
/* Increment weight for each register born here. */
for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
{
- if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ if (! INSN_P (insn))
continue;
unit = insn_unit (insn);
n_vis_no_unit = 0;
}
-#define BUF_LEN 256
+#define BUF_LEN 2048
static char *
safe_concat (buf, cur, str)
print_value (t1, XEXP (x, 0), verbose);
sprintf (buf, "use %s", t1);
break;
+ case COND_EXEC:
+ print_value (t1, COND_EXEC_CODE (x), verbose);
+ print_value (t2, COND_EXEC_TEST (x), verbose);
+ sprintf (buf, "cond_exec %s %s", t1, t2);
+ break;
case PARALLEL:
{
int i;
{
if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
{
- int note_type = INTVAL (XEXP (note, 0));
+ enum insn_note note_type = INTVAL (XEXP (note, 0));
+
if (note_type == NOTE_INSN_SETJMP)
{
retval = emit_note_after (NOTE_INSN_SETJMP, insn);
remove_note (insn, note);
note = XEXP (note, 1);
}
- else if (note_type == NOTE_INSN_RANGE_START
+ else if (note_type == NOTE_INSN_RANGE_BEG
|| note_type == NOTE_INSN_RANGE_END)
{
last = emit_note_before (note_type, last);
had different notions of what the "head" insn was. */
get_bb_head_tail (bb, &head, &tail);
- /* Interblock scheduling could have moved the original head insn from this
- block into a proceeding block. This may also cause schedule_block and
- compute_forward_dependences to have different notions of what the
- "head" insn was.
+ /* rm_other_notes only removes notes which are _inside_ the
+ block---that is, it won't remove notes before the first real insn
+ or after the last real insn of the block. So if the first insn
+ has a REG_SAVE_NOTE which would otherwise be emitted before the
+ insn, it is redundant with the note before the start of the
+ block, and so we have to take it out.
- If the interblock movement happened to make this block start with
- some notes (LOOP, EH or SETJMP) before the first real insn, then
- HEAD will have various special notes attached to it which must be
- removed so that we don't end up with extra copies of the notes. */
- if (GET_RTX_CLASS (GET_CODE (head)) == 'i')
+ FIXME: Probably the same thing should be done with REG_SAVE_NOTEs
+ referencing NOTE_INSN_SETJMP at the end of the block. */
+ if (INSN_P (head))
{
rtx note;
for (note = REG_NOTES (head); note; note = XEXP (note, 1))
if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
- remove_note (head, note);
+ {
+ if (INTVAL (XEXP (note, 0)) != NOTE_INSN_SETJMP)
+ {
+ remove_note (head, note);
+ note = XEXP (note, 1);
+ remove_note (head, note);
+ }
+ else
+ note = XEXP (note, 1);
+ }
}
next_tail = NEXT_INSN (tail);
/* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
to schedule this block. */
- if (head == tail
- && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
+ if (head == tail && (! INSN_P (head)))
return (sched_n_insns);
/* Debug info. */
{
rtx next;
- if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ if (! INSN_P (insn))
continue;
next = NEXT_INSN (insn);
if (INSN_DEP_COUNT (insn) == 0
- && (SCHED_GROUP_P (next) == 0 || GET_RTX_CLASS (GET_CODE (next)) != 'i'))
+ && (SCHED_GROUP_P (next) == 0 || ! INSN_P (next)))
ready[n_ready++] = insn;
if (!(SCHED_GROUP_P (insn)))
target_n_insns++;
src_next_tail = NEXT_INSN (tail);
src_head = head;
- if (head == tail
- && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
+ if (head == tail && (! INSN_P (head)))
continue;
for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
{
- if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ if (! INSN_P (insn))
continue;
if (!CANT_MOVE (insn)
if (INSN_DEP_COUNT (insn) == 0
&& (! next
|| SCHED_GROUP_P (next) == 0
- || GET_RTX_CLASS (GET_CODE (next)) != 'i'))
+ || ! INSN_P (next)))
ready[n_ready++] = insn;
}
}
next_tail = NEXT_INSN (tail);
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
{
- if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ if (! INSN_P (insn))
continue;
insn = group_leader (insn);
deps->pending_lists_length = 0;
deps->last_pending_memory_flush = 0;
deps->last_function_call = 0;
+ deps->in_post_call_group_p = 0;
deps->sched_before_next_call
= gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
rtx link;
int unit, range;
- if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ if (! INSN_P (insn))
{
int n;
fprintf (dump, ";; %6d ", INSN_UID (insn));
get_bb_head_tail (bb, &head, &tail);
prev_head = PREV_INSN (head);
- if (head == tail
- && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
+ if (head == tail && (! INSN_P (head)))
return 0;
n_insn = 0;
int bb;
int rgn_n_insns = 0;
int sched_rgn_n_insns = 0;
+ regset_head reg_pending_sets_head;
+ regset_head reg_pending_clobbers_head;
/* Set variables for the current region. */
current_nr_blocks = RGN_NR_BLOCKS (rgn);
current_blocks = RGN_BLOCKS (rgn);
- reg_pending_sets = ALLOCA_REG_SET ();
- reg_pending_clobbers = ALLOCA_REG_SET ();
+ reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
+ reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
reg_pending_sets_all = 0;
/* Initializations for region data dependence analyisis. */
return;
/* Set dump and sched_verbose for the desired debugging output. If no
- dump-file was specified, but -fsched-verbose-N (any N), print to stderr.
- For -fsched-verbose-N, N>=10, print everything to stderr. */
+ dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
+ For -fsched-verbose=N, N>=10, print everything to stderr. */
sched_verbose = sched_verbose_param;
if (sched_verbose_param == 0 && dump_file)
sched_verbose = 1;
if (sched_verbose >= 3)
debug_regions ();
+ /* We are done with flow's edge list. */
+ free_edge_list (edge_list);
+
/* For now. This will move as more and more of haifa is converted
to using the cfg code in flow.c. */
free (dom);
}
}
- deaths_in_region = (int *) xmalloc (sizeof(int) * nr_regions);
+ deaths_in_region = (int *) xmalloc (sizeof (int) * nr_regions);
init_alias_analysis ();
SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
RESET_BIT (large_region_blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
+ /* Don't update reg info after reload, since that affects
+ regs_ever_live, which should not change after reload. */
update_life_info (blocks, UPDATE_LIFE_LOCAL,
- PROP_DEATH_NOTES | PROP_REG_INFO);
+ (reload_completed ? PROP_DEATH_NOTES
+ : PROP_DEATH_NOTES | PROP_REG_INFO));
+#ifndef HAVE_conditional_execution
+ /* ??? REG_DEAD notes only exist for unconditional deaths. We need
+ a count of the conditional plus unconditional deaths for this to
+ work out. */
/* In the single block case, the count of registers that died should
not have changed during the schedule. */
if (count_or_remove_death_notes (blocks, 0) != deaths_in_region[rgn])
- abort ();
+ abort ();
+#endif
}
if (any_large_regions)