/* Optimize by combining instructions for GNU compiler.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
This file is part of GCC.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
/* This module is essentially the "combiner" phase of the U. of Arizona
Portable Optimizer, but redone to work on our list-structured
we install it, delete the earlier insns, and update the data flow
information (LOG_LINKS and REG_NOTES) for what we did.
- There are a few exceptions where the dataflow information created by
- flow.c aren't completely updated:
+ There are a few exceptions where the dataflow information isn't
+ completely updated (however, this is only a local issue, since it is
+ regenerated before the next pass that uses it):
- reg_live_length is not updated
+ - reg_n_refs is not adjusted in the rare case when a register is
+ no longer required in a computation
+ - there are extremely rare cases (see distribute_notes) when a
+ REG_DEAD note is lost
- a LOG_LINKS entry that refers to an insn with multiple SETs may be
removed because there is no way to know which register it was
linking
/* Include output.h for dump_file. */
#include "output.h"
#include "params.h"
+#include "timevar.h"
+#include "tree-pass.h"
+#include "df.h"
/* Number of attempts to combine instructions in this function. */
static int total_attempts, total_merges, total_extras, total_successes;
-\f
-/* Vector mapping INSN_UIDs to cuids.
- The cuids are like uids but increase monotonically always.
- Combine always uses cuids so that it can compare them.
- But actually renumbering the uids, which we used to do,
- proves to be a bad idea because it makes it hard to compare
- the dumps produced by earlier passes with those from later passes. */
-
-static int *uid_cuid;
-static int max_uid_cuid;
+/* combine_instructions may try to replace the right hand side of the
+ second instruction with the value of an associated REG_EQUAL note
+ before throwing it at try_combine. That is problematic when there
+ is a REG_DEAD note for a register used in the old right hand side
+ and can cause distribute_notes to do wrong things. This is the
+ second instruction if it has been so modified, null otherwise. */
-/* Get the cuid of an insn. */
+static rtx i2mod;
-#define INSN_CUID(INSN) \
-(INSN_UID (INSN) > max_uid_cuid ? insn_cuid (INSN) : uid_cuid[INSN_UID (INSN)])
+/* When I2MOD is nonnull, this is a copy of the old right hand side. */
-/* In case BITS_PER_WORD == HOST_BITS_PER_WIDE_INT, shifting by
- BITS_PER_WORD would invoke undefined behavior. Work around it. */
+static rtx i2mod_old_rhs;
-#define UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD(val) \
- (((unsigned HOST_WIDE_INT) (val) << (BITS_PER_WORD - 1)) << 1)
+/* When I2MOD is nonnull, this is a copy of the new right hand side. */
+static rtx i2mod_new_rhs;
+\f
/* Maximum register number, which is the size of the tables below. */
static unsigned int combine_max_regno;
(1) We do not want to reinitialize at each label.
(2) It is useful, but not critical, to know the actual value assigned
- to a register. Often just its form is helpful.
+ to a register. Often just its form is helpful.
Therefore, we maintain the following fields:
unsigned HOST_WIDE_INT last_set_nonzero_bits;
char last_set_sign_bit_copies;
- ENUM_BITFIELD(machine_mode) last_set_mode : 8;
+ ENUM_BITFIELD(machine_mode) last_set_mode : 8;
/* Set nonzero if references to register n in expressions should not be
used. last_set_invalid is set nonzero when this register is being
unsigned char sign_bit_copies;
unsigned HOST_WIDE_INT nonzero_bits;
+
+ /* Record the value of the label_tick when the last truncation
+ happened. The field truncated_to_mode is only valid if
+ truncation_label == label_tick. */
+
+ int truncation_label;
+
+ /* Record the last truncation seen for this register. If truncation
+ is not a nop to this mode we might be able to save an explicit
+ truncation if we know that value already contains a truncated
+ value. */
+
+ ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
};
static struct reg_stat *reg_stat;
-/* Record the cuid of the last insn that invalidated memory
+/* Record the luid of the last insn that invalidated memory
(anything that writes memory, and subroutine calls, but not pushes). */
static int mem_last_set;
-/* Record the cuid of the last CALL_INSN
+/* Record the luid of the last CALL_INSN
so we can tell whether a potential combination crosses any calls. */
-static int last_call_cuid;
+static int last_call_luid;
/* When `subst' is called, this is the insn that is being modified
(by combining in a previous insn). The PATTERN of this insn
static rtx subst_insn;
-/* This is the lowest CUID that `subst' is currently dealing with.
+/* This is the lowest LUID that `subst' is currently dealing with.
get_last_value will not return a value if the register was set at or
- after this CUID. If not for this mechanism, we could get confused if
+ after this LUID. If not for this mechanism, we could get confused if
I2 or I1 in try_combine were an insn that used the old value of a register
to obtain a new value. In that case, we might erroneously get the
new value of the register when we wanted the old one. */
-static int subst_low_cuid;
+static int subst_low_luid;
/* This contains any hard registers that are used in newpat; reg_dead_at_p
must consider all these registers to be always live. */
/* Basic block in which we are performing combines. */
static basic_block this_basic_block;
-/* A bitmap indicating which blocks had registers go dead at entry.
- After combine, we'll need to re-do global life analysis with
- those blocks as starting points. */
-static sbitmap refresh_blocks;
\f
+/* Length of the currently allocated uid_insn_cost array. */
+
+static int max_uid_known;
+
/* The following array records the insn_rtx_cost for every insn
in the instruction stream. */
static int *uid_insn_cost;
-/* Length of the currently allocated uid_insn_cost array. */
+/* The following array records the LOG_LINKS for every insn in the
+ instruction stream as an INSN_LIST rtx. */
+
+static rtx *uid_log_links;
-static int last_insn_cost;
+#define INSN_COST(INSN) (uid_insn_cost[INSN_UID (INSN)])
+#define LOG_LINKS(INSN) (uid_log_links[INSN_UID (INSN)])
-/* Incremented for each label. */
+/* Incremented for each basic block. */
static int label_tick;
+/* Reset to label_tick for each label. */
+
+static int label_tick_ebb_start;
+
/* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
\f
/* Record one modification to rtl structure
- to be undone by storing old_contents into *where.
- is_int is 1 if the contents are an int. */
+ to be undone by storing old_contents into *where. */
struct undo
{
struct undo *next;
- int is_int;
- union {rtx r; int i;} old_contents;
- union {rtx *r; int *i;} where;
+ enum { UNDO_RTX, UNDO_INT, UNDO_MODE } kind;
+ union { rtx r; int i; enum machine_mode m; } old_contents;
+ union { rtx *r; int *i; } where;
};
/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
-static void setup_incoming_promotions (void);
+static void setup_incoming_promotions (rtx);
static void set_nonzero_bits_and_sign_copies (rtx, rtx, void *);
static int cant_combine_insn_p (rtx);
static int can_combine_p (rtx, rtx, rtx, rtx, rtx *, rtx *);
static rtx make_compound_operation (rtx, enum rtx_code);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT *);
+static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_to_mode (rtx, enum machine_mode,
- unsigned HOST_WIDE_INT, rtx, int);
+ unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
+static rtx simplify_and_const_int_1 (enum machine_mode, rtx,
+ unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, enum machine_mode, rtx,
unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
HOST_WIDE_INT, enum machine_mode, int *);
-static rtx simplify_shift_const (rtx, enum rtx_code, enum machine_mode, rtx,
+static rtx simplify_shift_const_1 (enum rtx_code, enum machine_mode, rtx, int);
+static rtx simplify_shift_const (rtx, enum rtx_code, enum machine_mode, rtx,
int);
static int recog_for_combine (rtx *, rtx, rtx *);
static rtx gen_lowpart_for_combine (enum machine_mode, rtx);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx, rtx);
-static void check_promoted_subreg (rtx, rtx);
+static void check_conversions (rtx, rtx);
static void record_dead_and_set_regs_1 (rtx, rtx, void *);
static void record_dead_and_set_regs (rtx);
static int get_last_value_validate (rtx *, rtx, int, int);
static int reg_dead_at_p (rtx, rtx);
static void move_deaths (rtx, rtx, int, rtx, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
-static void distribute_notes (rtx, rtx, rtx, rtx);
+static void distribute_notes (rtx, rtx, rtx, rtx, rtx, rtx);
static void distribute_links (rtx);
static void mark_used_regs_combine (rtx);
-static int insn_cuid (rtx);
static void record_promoted_value (rtx, rtx);
static int unmentioned_reg_p_1 (rtx *, void *);
static bool unmentioned_reg_p (rtx, rtx);
+static void record_truncated_value (rtx);
+static bool reg_truncated_to_mode (enum machine_mode, rtx);
+static rtx gen_lowpart_or_truncate (enum machine_mode, rtx);
\f
/* It is not safe to use ordinary gen_lowpart in combine.
#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
+#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
+#define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
+
static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
\f
+/* This is used by find_single_use to locate an rtx in LOC that
+ contains exactly one use of DEST, which is typically either a REG
+ or CC0. It returns a pointer to the innermost rtx expression
+ containing DEST. Appearances of DEST that are being used to
+ totally replace it are not counted. */
+
+static rtx *
+find_single_use_1 (rtx dest, rtx *loc)
+{
+ rtx x = *loc;
+ enum rtx_code code = GET_CODE (x);
+ rtx *result = NULL;
+ rtx *this_result;
+ int i;
+ const char *fmt;
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_DOUBLE:
+ case CONST_VECTOR:
+ case CLOBBER:
+ return 0;
+
+ case SET:
+ /* If the destination is anything other than CC0, PC, a REG or a SUBREG
+ of a REG that occupies all of the REG, the insn uses DEST if
+ it is mentioned in the destination or the source. Otherwise, we
+ need just check the source. */
+ if (GET_CODE (SET_DEST (x)) != CC0
+ && GET_CODE (SET_DEST (x)) != PC
+ && !REG_P (SET_DEST (x))
+ && ! (GET_CODE (SET_DEST (x)) == SUBREG
+ && REG_P (SUBREG_REG (SET_DEST (x)))
+ && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
+ break;
+
+ return find_single_use_1 (dest, &SET_SRC (x));
+
+ case MEM:
+ case SUBREG:
+ return find_single_use_1 (dest, &XEXP (x, 0));
+
+ default:
+ break;
+ }
+
+ /* If it wasn't one of the common cases above, check each expression and
+ vector of this code. Look for a unique usage of DEST. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (dest == XEXP (x, i)
+ || (REG_P (dest) && REG_P (XEXP (x, i))
+ && REGNO (dest) == REGNO (XEXP (x, i))))
+ this_result = loc;
+ else
+ this_result = find_single_use_1 (dest, &XEXP (x, i));
+
+ if (result == NULL)
+ result = this_result;
+ else if (this_result)
+ /* Duplicate usage. */
+ return NULL;
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ {
+ if (XVECEXP (x, i, j) == dest
+ || (REG_P (dest)
+ && REG_P (XVECEXP (x, i, j))
+ && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
+ this_result = loc;
+ else
+ this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
+
+ if (result == NULL)
+ result = this_result;
+ else if (this_result)
+ return NULL;
+ }
+ }
+ }
+
+ return result;
+}
+
+
+/* See if DEST, produced in INSN, is used only a single time in the
+ sequel. If so, return a pointer to the innermost rtx expression in which
+ it is used.
+
+ If PLOC is nonzero, *PLOC is set to the insn containing the single use.
+
+ If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
+ care about REG_DEAD notes or LOG_LINKS.
+
+ Otherwise, we find the single use by finding an insn that has a
+ LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
+ only referenced once in that insn, we know that it must be the first
+ and last insn referencing DEST. */
+
+static rtx *
+find_single_use (rtx dest, rtx insn, rtx *ploc)
+{
+ rtx next;
+ rtx *result;
+ rtx link;
+
+#ifdef HAVE_cc0
+ if (dest == cc0_rtx)
+ {
+ next = NEXT_INSN (insn);
+ if (next == 0
+ || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
+ return 0;
+
+ result = find_single_use_1 (dest, &PATTERN (next));
+ if (result && ploc)
+ *ploc = next;
+ return result;
+ }
+#endif
+
+ if (!REG_P (dest))
+ return 0;
+
+ for (next = next_nonnote_insn (insn);
+ next != 0 && !LABEL_P (next);
+ next = next_nonnote_insn (next))
+ if (INSN_P (next) && dead_or_set_p (next, dest))
+ {
+ for (link = LOG_LINKS (next); link; link = XEXP (link, 1))
+ if (XEXP (link, 0) == insn)
+ break;
+
+ if (link)
+ {
+ result = find_single_use_1 (dest, &PATTERN (next));
+ if (ploc)
+ *ploc = next;
+ return result;
+ }
+ }
+
+ return 0;
+}
+\f
/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
insn. The substitution can be undone by undo_all. If INTO is already
set to NEWVAL, do not record this change. Because computing NEWVAL might
if (undobuf.frees)
buf = undobuf.frees, undobuf.frees = buf->next;
else
- buf = xmalloc (sizeof (struct undo));
+ buf = XNEW (struct undo);
- buf->is_int = 0;
+ buf->kind = UNDO_RTX;
buf->where.r = into;
buf->old_contents.r = oldval;
*into = newval;
if (undobuf.frees)
buf = undobuf.frees, undobuf.frees = buf->next;
else
- buf = xmalloc (sizeof (struct undo));
+ buf = XNEW (struct undo);
- buf->is_int = 1;
+ buf->kind = UNDO_INT;
buf->where.i = into;
buf->old_contents.i = oldval;
*into = newval;
}
#define SUBST_INT(INTO, NEWVAL) do_SUBST_INT(&(INTO), (NEWVAL))
+
+/* Similar to SUBST, but just substitute the mode. This is used when
+ changing the mode of a pseudo-register, so that any other
+ references to the entry in the regno_reg_rtx array will change as
+ well. */
+
+static void
+do_SUBST_MODE (rtx *into, enum machine_mode newval)
+{
+ struct undo *buf;
+ enum machine_mode oldval = GET_MODE (*into);
+
+ if (oldval == newval)
+ return;
+
+ if (undobuf.frees)
+ buf = undobuf.frees, undobuf.frees = buf->next;
+ else
+ buf = XNEW (struct undo);
+
+ buf->kind = UNDO_MODE;
+ buf->where.r = into;
+ buf->old_contents.m = oldval;
+ PUT_MODE (*into, newval);
+
+ buf->next = undobuf.undos, undobuf.undos = buf;
+}
+
+#define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE(&(INTO), (NEWVAL))
\f
/* Subroutine of try_combine. Determine whether the combine replacement
- patterns NEWPAT and NEWI2PAT are cheaper according to insn_rtx_cost
- that the original instruction sequence I1, I2 and I3. Note that I1
- and/or NEWI2PAT may be NULL_RTX. This function returns false, if the
- costs of all instructions can be estimated, and the replacements are
- more expensive than the original sequence. */
+ patterns NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to
+   insn_rtx_cost than the original instruction sequence I1, I2, I3 and
+ undobuf.other_insn. Note that I1 and/or NEWI2PAT may be NULL_RTX.
+ NEWOTHERPAT and undobuf.other_insn may also both be NULL_RTX. This
+   function returns false if the costs of all instructions can be
+   estimated and the replacements are more expensive than the original
+ sequence. */
static bool
-combine_validate_cost (rtx i1, rtx i2, rtx i3, rtx newpat, rtx newi2pat)
+combine_validate_cost (rtx i1, rtx i2, rtx i3, rtx newpat, rtx newi2pat,
+ rtx newotherpat)
{
int i1_cost, i2_cost, i3_cost;
int new_i2_cost, new_i3_cost;
int old_cost, new_cost;
/* Lookup the original insn_rtx_costs. */
- i2_cost = INSN_UID (i2) <= last_insn_cost
- ? uid_insn_cost[INSN_UID (i2)] : 0;
- i3_cost = INSN_UID (i3) <= last_insn_cost
- ? uid_insn_cost[INSN_UID (i3)] : 0;
+ i2_cost = INSN_COST (i2);
+ i3_cost = INSN_COST (i3);
if (i1)
{
- i1_cost = INSN_UID (i1) <= last_insn_cost
- ? uid_insn_cost[INSN_UID (i1)] : 0;
+ i1_cost = INSN_COST (i1);
old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0)
? i1_cost + i2_cost + i3_cost : 0;
}
{
int old_other_cost, new_other_cost;
- old_other_cost = (INSN_UID (undobuf.other_insn) <= last_insn_cost
- ? uid_insn_cost[INSN_UID (undobuf.other_insn)] : 0);
- new_other_cost = insn_rtx_cost (PATTERN (undobuf.other_insn));
+ old_other_cost = INSN_COST (undobuf.other_insn);
+ new_other_cost = insn_rtx_cost (newotherpat);
if (old_other_cost > 0 && new_other_cost > 0)
{
old_cost += old_other_cost;
}
/* Update the uid_insn_cost array with the replacement costs. */
- uid_insn_cost[INSN_UID (i2)] = new_i2_cost;
- uid_insn_cost[INSN_UID (i3)] = new_i3_cost;
+ INSN_COST (i2) = new_i2_cost;
+ INSN_COST (i3) = new_i3_cost;
if (i1)
- uid_insn_cost[INSN_UID (i1)] = 0;
+ INSN_COST (i1) = 0;
return true;
}
+
+
+/* Delete any insns that copy a register to itself. */
+
+static void
+delete_noop_moves (void)
+{
+ rtx insn, next;
+ basic_block bb;
+
+ FOR_EACH_BB (bb)
+ {
+ for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
+ {
+ next = NEXT_INSN (insn);
+ if (INSN_P (insn) && noop_move_p (insn))
+ {
+ rtx note;
+
+ /* If we're about to remove the first insn of a libcall
+ then move the libcall note to the next real insn and
+ update the retval note. */
+ if ((note = find_reg_note (insn, REG_LIBCALL, NULL_RTX))
+ && XEXP (note, 0) != insn)
+ {
+ rtx new_libcall_insn = next_real_insn (insn);
+ rtx retval_note = find_reg_note (XEXP (note, 0),
+ REG_RETVAL, NULL_RTX);
+ REG_NOTES (new_libcall_insn)
+ = gen_rtx_INSN_LIST (REG_LIBCALL, XEXP (note, 0),
+ REG_NOTES (new_libcall_insn));
+ XEXP (retval_note, 0) = new_libcall_insn;
+ }
+
+ if (dump_file)
+ fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
+
+ delete_insn_and_edges (insn);
+ }
+ }
+ }
+}
+
+\f
+/* Fill in log links field for all insns. */
+
+static void
+create_log_links (void)
+{
+ basic_block bb;
+ rtx *next_use, insn;
+ struct df_ref **def_vec, **use_vec;
+
+ next_use = XCNEWVEC (rtx, max_reg_num ());
+
+ /* Pass through each block from the end, recording the uses of each
+ register and establishing log links when def is encountered.
+ Note that we do not clear next_use array in order to save time,
+ so we have to test whether the use is in the same basic block as def.
+
+     There are a few cases below where we do not consider the definition or
+     usage -- these are taken from what the original flow.c did.  Don't ask
+     me why it is done this way; I don't know, and if it works, I don't want
+     to know.  */
+
+ FOR_EACH_BB (bb)
+ {
+ FOR_BB_INSNS_REVERSE (bb, insn)
+ {
+ if (!INSN_P (insn))
+ continue;
+
+ /* Log links are created only once. */
+ gcc_assert (!LOG_LINKS (insn));
+
+ for (def_vec = DF_INSN_DEFS (insn); *def_vec; def_vec++)
+ {
+ struct df_ref *def = *def_vec;
+ int regno = DF_REF_REGNO (def);
+ rtx use_insn;
+
+ if (!next_use[regno])
+ continue;
+
+ /* Do not consider if it is pre/post modification in MEM. */
+ if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
+ continue;
+
+ /* Do not make the log link for frame pointer. */
+ if ((regno == FRAME_POINTER_REGNUM
+ && (! reload_completed || frame_pointer_needed))
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ || (regno == HARD_FRAME_POINTER_REGNUM
+ && (! reload_completed || frame_pointer_needed))
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
+#endif
+ )
+ continue;
+
+ use_insn = next_use[regno];
+ if (BLOCK_FOR_INSN (use_insn) == bb)
+ {
+ /* flow.c claimed:
+
+ We don't build a LOG_LINK for hard registers contained
+ in ASM_OPERANDs. If these registers get replaced,
+ we might wind up changing the semantics of the insn,
+ even if reload can make what appear to be valid
+ assignments later. */
+ if (regno >= FIRST_PSEUDO_REGISTER
+ || asm_noperands (PATTERN (use_insn)) < 0)
+ LOG_LINKS (use_insn) =
+ alloc_INSN_LIST (insn, LOG_LINKS (use_insn));
+ }
+ next_use[regno] = NULL_RTX;
+ }
+
+ for (use_vec = DF_INSN_USES (insn); *use_vec; use_vec++)
+ {
+ struct df_ref *use = *use_vec;
+ int regno = DF_REF_REGNO (use);
+
+ /* Do not consider the usage of the stack pointer
+ by function call. */
+ if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
+ continue;
+
+ next_use[regno] = insn;
+ }
+ }
+ }
+
+ free (next_use);
+}
+
+/* Clear LOG_LINKS fields of insns. */
+
+static void
+clear_log_links (void)
+{
+ rtx insn;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (INSN_P (insn))
+ free_INSN_LIST_list (&LOG_LINKS (insn));
+}
+
+
+
\f
/* Main entry point for combiner. F is the first insn of the function.
NREGS is the first unused pseudo-reg number.
Return nonzero if the combiner has turned an indirect jump
instruction into a direct jump. */
-int
+static int
combine_instructions (rtx f, unsigned int nregs)
{
rtx insn, next;
#ifdef HAVE_cc0
rtx prev;
#endif
- int i;
rtx links, nextlinks;
+ rtx first;
int new_direct_jump_p = 0;
+ for (first = f; first && !INSN_P (first); )
+ first = NEXT_INSN (first);
+ if (!first)
+ return 0;
+
combine_attempts = 0;
combine_merges = 0;
combine_extras = 0;
rtl_hooks = combine_rtl_hooks;
- reg_stat = xcalloc (nregs, sizeof (struct reg_stat));
+ reg_stat = XCNEWVEC (struct reg_stat, nregs);
init_recog_no_volatile ();
- /* Compute maximum uid value so uid_cuid can be allocated. */
-
- for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
- if (INSN_UID (insn) > i)
- i = INSN_UID (insn);
-
- uid_cuid = xmalloc ((i + 1) * sizeof (int));
- max_uid_cuid = i;
+ /* Allocate array for insn info. */
+ max_uid_known = get_max_uid ();
+ uid_log_links = XCNEWVEC (rtx, max_uid_known + 1);
+ uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
nonzero_sign_valid = 0;
- /* Compute the mapping from uids to cuids.
- Cuids are numbers assigned to insns, like uids,
- except that cuids increase monotonically through the code.
-
- Scan all SETs and see if we can deduce anything about what
+ /* Scan all SETs and see if we can deduce anything about what
bits are known to be zero for some registers and how many copies
of the sign bit are known to exist for those registers.
Also set any known values so that we can use it while searching
for what bits are known to be set. */
- label_tick = 1;
-
- setup_incoming_promotions ();
+ label_tick = label_tick_ebb_start = 1;
- refresh_blocks = sbitmap_alloc (last_basic_block);
- sbitmap_zero (refresh_blocks);
+ setup_incoming_promotions (first);
- /* Allocate array of current insn_rtx_costs. */
- uid_insn_cost = xcalloc (max_uid_cuid + 1, sizeof (int));
- last_insn_cost = max_uid_cuid;
-
- for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
+ create_log_links ();
+ FOR_EACH_BB (this_basic_block)
{
- uid_cuid[INSN_UID (insn)] = ++i;
- subst_low_cuid = i;
- subst_insn = insn;
+ last_call_luid = 0;
+ mem_last_set = -1;
+ label_tick++;
+ FOR_BB_INSNS (this_basic_block, insn)
+ if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
+ {
+ subst_low_luid = DF_INSN_LUID (insn);
+ subst_insn = insn;
- if (INSN_P (insn))
- {
- note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
- NULL);
- record_dead_and_set_regs (insn);
+ note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
+ insn);
+ record_dead_and_set_regs (insn);
#ifdef AUTO_INC_DEC
- for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
- if (REG_NOTE_KIND (links) == REG_INC)
- set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
- NULL);
+ for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
+ if (REG_NOTE_KIND (links) == REG_INC)
+ set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
+ insn);
#endif
- /* Record the current insn_rtx_cost of this instruction. */
- if (NONJUMP_INSN_P (insn))
- uid_insn_cost[INSN_UID (insn)] = insn_rtx_cost (PATTERN (insn));
- if (dump_file)
- fprintf(dump_file, "insn_cost %d: %d\n",
- INSN_UID (insn), uid_insn_cost[INSN_UID (insn)]);
- }
-
- if (LABEL_P (insn))
- label_tick++;
+ /* Record the current insn_rtx_cost of this instruction. */
+ if (NONJUMP_INSN_P (insn))
+ INSN_COST (insn) = insn_rtx_cost (PATTERN (insn));
+ if (dump_file)
+ fprintf(dump_file, "insn_cost %d: %d\n",
+ INSN_UID (insn), INSN_COST (insn));
+ }
+ else if (LABEL_P (insn))
+ label_tick_ebb_start = label_tick;
}
nonzero_sign_valid = 1;
/* Now scan all the insns in forward order. */
- label_tick = 1;
- last_call_cuid = 0;
- mem_last_set = 0;
+ label_tick = label_tick_ebb_start = 1;
init_reg_last ();
- setup_incoming_promotions ();
+ setup_incoming_promotions (first);
FOR_EACH_BB (this_basic_block)
{
+ last_call_luid = 0;
+ mem_last_set = -1;
+ label_tick++;
for (insn = BB_HEAD (this_basic_block);
- insn != NEXT_INSN (BB_END (this_basic_block));
+ insn != NEXT_INSN (BB_END (this_basic_block));
insn = next ? next : NEXT_INSN (insn))
{
next = 0;
-
- if (LABEL_P (insn))
- label_tick++;
-
- else if (INSN_P (insn))
+ if (INSN_P (insn))
{
/* See if we know about function return values before this
insn based upon SUBREG flags. */
- check_promoted_subreg (insn, PATTERN (insn));
+ check_conversions (insn, PATTERN (insn));
/* Try this insn with each insn it links back to. */
rtx temp = XEXP (links, 0);
if ((set = single_set (temp)) != 0
&& (note = find_reg_equal_equiv_note (temp)) != 0
- && GET_CODE (XEXP (note, 0)) != EXPR_LIST
+ && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
		  /* Avoid using a register that may already have been marked
dead by an earlier instruction. */
- && ! unmentioned_reg_p (XEXP (note, 0), SET_SRC (set)))
+ && ! unmentioned_reg_p (note, SET_SRC (set))
+ && (GET_MODE (note) == VOIDmode
+ ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
+ : GET_MODE (SET_DEST (set)) == GET_MODE (note)))
{
/* Temporarily replace the set's source with the
contents of the REG_EQUAL note. The insn will
be deleted or recognized by try_combine. */
rtx orig = SET_SRC (set);
- SET_SRC (set) = XEXP (note, 0);
- next = try_combine (insn, temp, NULL_RTX,
+ SET_SRC (set) = note;
+ i2mod = temp;
+ i2mod_old_rhs = copy_rtx (orig);
+ i2mod_new_rhs = copy_rtx (note);
+ next = try_combine (insn, i2mod, NULL_RTX,
&new_direct_jump_p);
+ i2mod = NULL_RTX;
if (next)
goto retry;
SET_SRC (set) = orig;
retry:
;
}
+ else if (LABEL_P (insn))
+ label_tick_ebb_start = label_tick;
}
}
- clear_bb_flags ();
- EXECUTE_IF_SET_IN_SBITMAP (refresh_blocks, 0, i,
- BASIC_BLOCK (i)->flags |= BB_DIRTY);
- new_direct_jump_p |= purge_all_dead_edges (0);
+ clear_log_links ();
+ clear_bb_flags ();
+ new_direct_jump_p |= purge_all_dead_edges ();
delete_noop_moves ();
- update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
- PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE
- | PROP_KILL_DEAD_CODE);
-
/* Clean up. */
- sbitmap_free (refresh_blocks);
+ free (uid_log_links);
free (uid_insn_cost);
free (reg_stat);
- free (uid_cuid);
{
struct undo *undo, *next;
/* Set up any promoted values for incoming argument registers. */
static void
-setup_incoming_promotions (void)
+setup_incoming_promotions (rtx first)
{
- unsigned int regno;
- rtx reg;
- enum machine_mode mode;
- int unsignedp;
- rtx first = get_insns ();
+ tree arg;
+
+ if (!targetm.calls.promote_function_args (TREE_TYPE (cfun->decl)))
+ return;
- if (targetm.calls.promote_function_args (TREE_TYPE (cfun->decl)))
+ for (arg = DECL_ARGUMENTS (current_function_decl); arg;
+ arg = TREE_CHAIN (arg))
{
- for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
- /* Check whether this register can hold an incoming pointer
- argument. FUNCTION_ARG_REGNO_P tests outgoing register
- numbers, so translate if necessary due to register windows. */
- if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (regno))
- && (reg = promoted_input_arg (regno, &mode, &unsignedp)) != 0)
- {
- record_value_for_reg
- (reg, first, gen_rtx_fmt_e ((unsignedp ? ZERO_EXTEND
- : SIGN_EXTEND),
- GET_MODE (reg),
- gen_rtx_CLOBBER (mode, const0_rtx)));
- }
+ rtx reg = DECL_INCOMING_RTL (arg);
+
+ if (!REG_P (reg))
+ continue;
+
+ if (TYPE_MODE (DECL_ARG_TYPE (arg)) == TYPE_MODE (TREE_TYPE (arg)))
+ {
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (arg));
+ int uns = TYPE_UNSIGNED (TREE_TYPE (arg));
+
+ mode = promote_mode (TREE_TYPE (arg), mode, &uns, 1);
+ if (mode == GET_MODE (reg) && mode != DECL_MODE (arg))
+ {
+ rtx x;
+ x = gen_rtx_CLOBBER (DECL_MODE (arg), const0_rtx);
+ x = gen_rtx_fmt_e ((uns ? ZERO_EXTEND : SIGN_EXTEND), mode, x);
+ record_value_for_reg (reg, first, x);
+ }
+ }
}
}
\f
by any set of X. */
static void
-set_nonzero_bits_and_sign_copies (rtx x, rtx set,
- void *data ATTRIBUTE_UNUSED)
+set_nonzero_bits_and_sign_copies (rtx x, rtx set, void *data)
{
+ rtx insn = (rtx) data;
unsigned int num;
if (REG_P (x)
&& REGNO (x) >= FIRST_PSEUDO_REGISTER
/* If this register is undefined at the start of the file, we can't
say what its contents were. */
- && ! REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, REGNO (x))
+ && ! REGNO_REG_SET_P
+ (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x))
&& GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
{
if (set == 0 || GET_CODE (set) == CLOBBER)
return;
}
+ /* If this register is being initialized using itself, and the
+ register is uninitialized in this basic block, and there are
+ no LOG_LINKS which set the register, then part of the
+ register is uninitialized. In that case we can't assume
+ anything about the number of nonzero bits.
+
+ ??? We could do better if we checked this in
+ reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
+ could avoid making assumptions about the insn which initially
+ sets the register, while still using the information in other
+ insns. We would have to be careful to check every insn
+ involved in the combination. */
+
+ if (insn
+ && reg_referenced_p (x, PATTERN (insn))
+ && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
+ REGNO (x)))
+ {
+ rtx link;
+
+ for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
+ {
+ if (dead_or_set_p (XEXP (link, 0), x))
+ break;
+ }
+ if (!link)
+ {
+ reg_stat[REGNO (x)].nonzero_bits = GET_MODE_MASK (GET_MODE (x));
+ reg_stat[REGNO (x)].sign_bit_copies = 1;
+ return;
+ }
+ }
+
/* If this is a complex assignment, see if we can convert it into a
simple assignment. */
set = expand_field_assignment (set);
|| (! all_adjacent
&& (((!MEM_P (src)
|| ! find_reg_note (insn, REG_EQUIV, src))
- && use_crosses_set_p (src, INSN_CUID (insn)))
+ && use_crosses_set_p (src, DF_INSN_LUID (insn)))
|| (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
|| GET_CODE (src) == UNSPEC_VOLATILE))
/* If there is a REG_NO_CONFLICT note for DEST in I3 or SUCC, we get
and it is a pain to update that information.
Exception: if source is a constant, moving it later can't hurt.
Accept that special case, because it helps -fforce-addr a lot. */
- || (INSN_CUID (insn) < last_call_cuid && ! CONSTANT_P (src)))
+ || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
return 0;
/* DEST must either be a REG or CC0. */
for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
{
- /* Don't substitute for a register intended as a clobberable
+ /* Don't substitute for a register intended as a clobberable
operand. */
rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
if (rtx_equal_p (reg, dest))
{
/* Make sure succ doesn't contain a volatile reference. */
if (succ != 0 && volatile_refs_p (PATTERN (succ)))
- return 0;
+ return 0;
for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
- if (INSN_P (p) && p != succ && volatile_refs_p (PATTERN (p)))
+ if (INSN_P (p) && p != succ && volatile_refs_p (PATTERN (p)))
return 0;
}
Consider:
- (set (reg:DI 101) (reg:DI 100))
+ (set (reg:DI 101) (reg:DI 100))
(set (subreg:SI (reg:DI 101) 0) <foo>)
This is NOT equivalent to:
- (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
+ (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
(set (reg:DI 101) (reg:DI 100))])
Not only does this modify 100 (in which case it might still be valid
rtx dest = SET_DEST (set);
rtx src = SET_SRC (set);
rtx inner_dest = dest;
+ rtx subdest;
while (GET_CODE (inner_dest) == STRICT_LOW_PART
|| GET_CODE (inner_dest) == SUBREG
|| (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src)))
return 0;
- /* If DEST is used in I3, it is being killed in this insn,
- so record that for later.
+ /* If DEST is used in I3, it is being killed in this insn, so
+ record that for later. We have to consider paradoxical
+ subregs here, since they kill the whole register, but we
+ ignore partial subregs, STRICT_LOW_PART, etc.
Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
STACK_POINTER_REGNUM, since these are always considered to be
live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
- if (pi3dest_killed && REG_P (dest)
- && reg_referenced_p (dest, PATTERN (i3))
- && REGNO (dest) != FRAME_POINTER_REGNUM
+ subdest = dest;
+ if (GET_CODE (subdest) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (subdest))
+ >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
+ subdest = SUBREG_REG (subdest);
+ if (pi3dest_killed
+ && REG_P (subdest)
+ && reg_referenced_p (subdest, PATTERN (i3))
+ && REGNO (subdest) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
- && REGNO (dest) != HARD_FRAME_POINTER_REGNUM
+ && REGNO (subdest) != HARD_FRAME_POINTER_REGNUM
#endif
#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
- && (REGNO (dest) != ARG_POINTER_REGNUM
- || ! fixed_regs [REGNO (dest)])
+ && (REGNO (subdest) != ARG_POINTER_REGNUM
+ || ! fixed_regs [REGNO (subdest)])
#endif
- && REGNO (dest) != STACK_POINTER_REGNUM)
+ && REGNO (subdest) != STACK_POINTER_REGNUM)
{
if (*pi3dest_killed)
return 0;
- *pi3dest_killed = dest;
+ *pi3dest_killed = subdest;
}
}
/* Never combine loads and stores involving hard regs that are likely
to be spilled. The register allocator can usually handle such
reg-reg moves by tying. If we allow the combiner to make
- substitutions of likely-spilled regs, we may abort in reload.
+ substitutions of likely-spilled regs, reload might die.
As an exception, we allow combinations involving fixed regs; these are
not available to the register allocator so there's no risk involved. */
return 0;
}
+struct likely_spilled_retval_info
+{
+ unsigned regno, nregs;
+ unsigned mask;
+};
+
+/* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
+ hard registers that are known to be written to / clobbered in full. */
+static void
+likely_spilled_retval_1 (rtx x, rtx set, void *data)
+{
+ struct likely_spilled_retval_info *info = data;
+ unsigned regno, nregs;
+ unsigned new_mask;
+
+ if (!REG_P (XEXP (set, 0)))
+ return;
+ regno = REGNO (x);
+ if (regno >= info->regno + info->nregs)
+ return;
+ nregs = hard_regno_nregs[regno][GET_MODE (x)];
+ if (regno + nregs <= info->regno)
+ return;
+ new_mask = (2U << (nregs - 1)) - 1;
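+  /* new_mask now has the low NREGS bits set (e.g. nregs == 3 gives 0x7);
+     the shifts below line it up so that bit 0 corresponds to info->regno,
+     which lets us clear exactly the registers written here from
+     info->mask.  */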
+ if (regno < info->regno)
+ new_mask >>= info->regno - regno;
+ else
+ new_mask <<= regno - info->regno;
+ info->mask &= ~new_mask;
+}
+
+/* Return nonzero iff part of the return value is live during INSN, and
+ it is likely spilled. This can happen when more than one insn is needed
+   to copy the return value, e.g. when we consider combining into the
+ second copy insn for a complex value. */
+
+static int
+likely_spilled_retval_p (rtx insn)
+{
+ rtx use = BB_END (this_basic_block);
+ rtx reg, p;
+ unsigned regno, nregs;
+ /* We assume here that no machine mode needs more than
+ 32 hard registers when the value overlaps with a register
+ for which FUNCTION_VALUE_REGNO_P is true. */
+ unsigned mask;
+ struct likely_spilled_retval_info info;
+
+ if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
+ return 0;
+ reg = XEXP (PATTERN (use), 0);
+ if (!REG_P (reg) || !FUNCTION_VALUE_REGNO_P (REGNO (reg)))
+ return 0;
+ regno = REGNO (reg);
+ nregs = hard_regno_nregs[regno][GET_MODE (reg)];
+ if (nregs == 1)
+ return 0;
+ mask = (2U << (nregs - 1)) - 1;
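+  /* One bit per hard register of the return value, with bit 0 standing
+     for REGNO itself; e.g. nregs == 2 gives a mask of 0x3.  A bit that
+     is still set after the scan below marks a register that is not
+     fully overwritten before the final USE.  */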
+
+ /* Disregard parts of the return value that are set later. */
+ info.regno = regno;
+ info.nregs = nregs;
+ info.mask = mask;
+ for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
+ if (INSN_P (p))
+ note_stores (PATTERN (p), likely_spilled_retval_1, &info);
+ mask = info.mask;
+
+ /* Check if any of the (probably) live return value registers is
+ likely spilled. */
+ nregs --;
+ do
+ {
+ if ((mask & 1 << nregs)
+ && CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (regno + nregs)))
+ return 1;
+ } while (nregs--);
+ return 0;
+}
+
/* Adjust INSN after we made a change to its destination.
Changing the destination can invalidate notes that say something about
static void
adjust_for_new_dest (rtx insn)
{
- rtx *loc;
-
/* For notes, be conservative and simply remove them. */
- loc = &REG_NOTES (insn);
- while (*loc)
- {
- enum reg_note kind = REG_NOTE_KIND (*loc);
- if (kind == REG_EQUAL || kind == REG_EQUIV)
- *loc = XEXP (*loc, 1);
- else
- loc = &XEXP (*loc, 1);
- }
+ remove_reg_equal_equiv_notes (insn);
/* The new insn will have a destination that was previously the destination
of an insn just above it. Call distribute_links to make a LOG_LINK from
the next use of that destination. */
distribute_links (gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX));
+
+ df_insn_rescan (insn);
+}
+
+/* Return TRUE if combine can reuse reg X in mode MODE.
+ ADDED_SETS is nonzero if the original set is still required. */
+static bool
+can_change_dest_mode (rtx x, int added_sets, enum machine_mode mode)
+{
+ unsigned int regno;
+
+ if (!REG_P(x))
+ return false;
+
+ regno = REGNO (x);
+  /* Allow hard registers if the new mode is legal and occupies no more
+ registers than the old mode. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ return (HARD_REGNO_MODE_OK (regno, mode)
+ && (hard_regno_nregs[regno][GET_MODE (x)]
+ >= hard_regno_nregs[regno][mode]));
+
+ /* Or a pseudo that is only used once. */
+ return (REG_N_SETS (regno) == 1 && !added_sets
+ && !REG_USERVAR_P (x));
+}
+
+
+/* Check whether X, the destination of a set, refers to part of
+ the register specified by REG. */
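+/* For instance, with REG being (reg:DI 100), a SET_DEST of
+   (subreg:SI (reg:DI 100) 0), or of (strict_low_part (subreg:HI
+   (reg:DI 100) 0)), refers to part of it, whereas a plain
+   (reg:DI 100) does not.  */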
+
+static bool
+reg_subword_p (rtx x, rtx reg)
+{
+ /* Check that reg is an integer mode register. */
+ if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
+ return false;
+
+ if (GET_CODE (x) == STRICT_LOW_PART
+ || GET_CODE (x) == ZERO_EXTRACT)
+ x = XEXP (x, 0);
+
+ return GET_CODE (x) == SUBREG
+ && SUBREG_REG (x) == reg
+ && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
}
+
/* Try to combine the insns I1 and I2 into I3.
Here I1 and I2 appear earlier than I3.
I1 can be zero; then we combine just I2 into I3.
rtx i3dest_killed = 0;
/* SET_DEST and SET_SRC of I2 and I1. */
rtx i2dest, i2src, i1dest = 0, i1src = 0;
- /* PATTERN (I2), or a copy of it in certain cases. */
- rtx i2pat;
+ /* PATTERN (I1) and PATTERN (I2), or a copy of it in certain cases. */
+ rtx i1pat = 0, i2pat = 0;
/* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC. */
int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
+ int i2dest_killed = 0, i1dest_killed = 0;
int i1_feeds_i3 = 0;
/* Notes that must be added to REG_NOTES in I3 and I2. */
rtx new_i3_notes, new_i2_notes;
int maxreg;
rtx temp;
rtx link;
+ rtx other_pat = 0;
+ rtx new_other_notes;
int i;
/* Exit early if one of the insns involved can't be used for
if (cant_combine_insn_p (i3)
|| cant_combine_insn_p (i2)
|| (i1 && cant_combine_insn_p (i1))
+ || likely_spilled_retval_p (i3)
/* We also can't do anything if I3 has a
REG_LIBCALL note since we don't want to disrupt the contiguity of a
libcall. */
/* If I1 and I2 both feed I3, they can be in any order. To simplify the
code below, set I1 to be the earlier of the two insns. */
- if (i1 && INSN_CUID (i1) > INSN_CUID (i2))
+ if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
temp = i1, i1 = i2, i2 = temp;
added_links_insn = 0;
combine_merges++;
subst_insn = i3;
- subst_low_cuid = INSN_CUID (i2);
+ subst_low_luid = DF_INSN_LUID (i2);
added_sets_2 = added_sets_1 = 0;
i2dest = SET_SRC (PATTERN (i3));
+ i2dest_killed = dead_or_set_p (i2, i2dest);
/* Replace the dest in I2 with our dest and make the resulting
insn the new pattern for I3. Then skip to where we
}
}
- /* If I2 is setting a double-word pseudo to a constant and I3 is setting
- one of those words to another constant, merge them by making a new
+ /* If I2 is setting a pseudo to a constant and I3 is setting some
+ sub-part of it to another constant, merge them by making a new
constant. */
if (i1 == 0
&& (temp = single_set (i2)) != 0
&& (GET_CODE (SET_SRC (temp)) == CONST_INT
|| GET_CODE (SET_SRC (temp)) == CONST_DOUBLE)
- && REG_P (SET_DEST (temp))
- && GET_MODE_CLASS (GET_MODE (SET_DEST (temp))) == MODE_INT
- && GET_MODE_SIZE (GET_MODE (SET_DEST (temp))) == 2 * UNITS_PER_WORD
&& GET_CODE (PATTERN (i3)) == SET
- && GET_CODE (SET_DEST (PATTERN (i3))) == SUBREG
- && SUBREG_REG (SET_DEST (PATTERN (i3))) == SET_DEST (temp)
- && GET_MODE_CLASS (GET_MODE (SET_DEST (PATTERN (i3)))) == MODE_INT
- && GET_MODE_SIZE (GET_MODE (SET_DEST (PATTERN (i3)))) == UNITS_PER_WORD
- && GET_CODE (SET_SRC (PATTERN (i3))) == CONST_INT)
+ && (GET_CODE (SET_SRC (PATTERN (i3))) == CONST_INT
+ || GET_CODE (SET_SRC (PATTERN (i3))) == CONST_DOUBLE)
+ && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp)))
{
- HOST_WIDE_INT lo, hi;
+ rtx dest = SET_DEST (PATTERN (i3));
+ int offset = -1;
+ int width = 0;
- if (GET_CODE (SET_SRC (temp)) == CONST_INT)
- lo = INTVAL (SET_SRC (temp)), hi = lo < 0 ? -1 : 0;
+ if (GET_CODE (dest) == ZERO_EXTRACT)
+ {
+ if (GET_CODE (XEXP (dest, 1)) == CONST_INT
+ && GET_CODE (XEXP (dest, 2)) == CONST_INT)
+ {
+ width = INTVAL (XEXP (dest, 1));
+ offset = INTVAL (XEXP (dest, 2));
+ dest = XEXP (dest, 0);
+ if (BITS_BIG_ENDIAN)
+ offset = GET_MODE_BITSIZE (GET_MODE (dest)) - width - offset;
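+	      /* OFFSET is now counted from the least significant bit;
+		 e.g. an 8-bit field at big-endian bit position 0 in an
+		 SImode destination becomes offset 24.  */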
+ }
+ }
else
{
- lo = CONST_DOUBLE_LOW (SET_SRC (temp));
- hi = CONST_DOUBLE_HIGH (SET_SRC (temp));
+ if (GET_CODE (dest) == STRICT_LOW_PART)
+ dest = XEXP (dest, 0);
+ width = GET_MODE_BITSIZE (GET_MODE (dest));
+ offset = 0;
}
- if (subreg_lowpart_p (SET_DEST (PATTERN (i3))))
+ if (offset >= 0)
{
- /* We don't handle the case of the target word being wider
- than a host wide int. */
- gcc_assert (HOST_BITS_PER_WIDE_INT >= BITS_PER_WORD);
-
- lo &= ~(UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD (1) - 1);
- lo |= (INTVAL (SET_SRC (PATTERN (i3)))
- & (UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD (1) - 1));
+ /* If this is the low part, we're done. */
+ if (subreg_lowpart_p (dest))
+ ;
+ /* Handle the case where inner is twice the size of outer. */
+ else if (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp)))
+ == 2 * GET_MODE_BITSIZE (GET_MODE (dest)))
+ offset += GET_MODE_BITSIZE (GET_MODE (dest));
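+	    /* For instance, writing the non-lowpart SImode word of a
+	       DImode pseudo lands in the case above and yields offset 32,
+	       i.e. value bits [32,63] of the constant.  */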
+ /* Otherwise give up for now. */
+ else
+ offset = -1;
}
- else if (HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
- hi = INTVAL (SET_SRC (PATTERN (i3)));
- else if (HOST_BITS_PER_WIDE_INT >= 2 * BITS_PER_WORD)
+
+ if (offset >= 0
+ && (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp)))
+ <= HOST_BITS_PER_WIDE_INT * 2))
{
- int sign = -(int) ((unsigned HOST_WIDE_INT) lo
- >> (HOST_BITS_PER_WIDE_INT - 1));
+ HOST_WIDE_INT mhi, ohi, ihi;
+ HOST_WIDE_INT mlo, olo, ilo;
+ rtx inner = SET_SRC (PATTERN (i3));
+ rtx outer = SET_SRC (temp);
- lo &= ~ (UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD
- (UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD (1) - 1));
- lo |= (UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD
- (INTVAL (SET_SRC (PATTERN (i3)))));
- if (hi == sign)
- hi = lo < 0 ? -1 : 0;
- }
- else
- /* We don't handle the case of the higher word not fitting
- entirely in either hi or lo. */
- gcc_unreachable ();
+ if (GET_CODE (outer) == CONST_INT)
+ {
+ olo = INTVAL (outer);
+ ohi = olo < 0 ? -1 : 0;
+ }
+ else
+ {
+ olo = CONST_DOUBLE_LOW (outer);
+ ohi = CONST_DOUBLE_HIGH (outer);
+ }
+
+ if (GET_CODE (inner) == CONST_INT)
+ {
+ ilo = INTVAL (inner);
+ ihi = ilo < 0 ? -1 : 0;
+ }
+ else
+ {
+ ilo = CONST_DOUBLE_LOW (inner);
+ ihi = CONST_DOUBLE_HIGH (inner);
+ }
+
+ if (width < HOST_BITS_PER_WIDE_INT)
+ {
+ mlo = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
+ mhi = 0;
+ }
+ else if (width < HOST_BITS_PER_WIDE_INT * 2)
+ {
+ mhi = ((unsigned HOST_WIDE_INT) 1
+ << (width - HOST_BITS_PER_WIDE_INT)) - 1;
+ mlo = -1;
+ }
+ else
+ {
+ mlo = -1;
+ mhi = -1;
+ }
+
+ ilo &= mlo;
+ ihi &= mhi;
+
+ if (offset >= HOST_BITS_PER_WIDE_INT)
+ {
+ mhi = mlo << (offset - HOST_BITS_PER_WIDE_INT);
+ mlo = 0;
+ ihi = ilo << (offset - HOST_BITS_PER_WIDE_INT);
+ ilo = 0;
+ }
+ else if (offset > 0)
+ {
+ mhi = (mhi << offset) | ((unsigned HOST_WIDE_INT) mlo
+ >> (HOST_BITS_PER_WIDE_INT - offset));
+ mlo = mlo << offset;
+ ihi = (ihi << offset) | ((unsigned HOST_WIDE_INT) ilo
+ >> (HOST_BITS_PER_WIDE_INT - offset));
+ ilo = ilo << offset;
+ }
- combine_merges++;
- subst_insn = i3;
- subst_low_cuid = INSN_CUID (i2);
- added_sets_2 = added_sets_1 = 0;
- i2dest = SET_DEST (temp);
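+	  /* At this point (MHI:MLO) masks the WIDTH bits starting at
+	     OFFSET and (IHI:ILO) holds the new value already shifted into
+	     place, so the merge below replaces only those bits of the old
+	     constant.  For example, storing 0x12 into bits [8,15] of
+	     0xdeadbeef gives (0xdeadbeef & ~0xff00) | 0x1200
+	     == 0xdead12ef.  */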
+ olo = (olo & ~mlo) | ilo;
+ ohi = (ohi & ~mhi) | ihi;
- SUBST (SET_SRC (temp),
- immed_double_const (lo, hi, GET_MODE (SET_DEST (temp))));
+ combine_merges++;
+ subst_insn = i3;
+ subst_low_luid = DF_INSN_LUID (i2);
+ added_sets_2 = added_sets_1 = 0;
+ i2dest = SET_DEST (temp);
+ i2dest_killed = dead_or_set_p (i2, i2dest);
- newpat = PATTERN (i2);
- goto validate_replacement;
+ SUBST (SET_SRC (temp),
+ immed_double_const (olo, ohi, GET_MODE (SET_DEST (temp))));
+
+ newpat = PATTERN (i2);
+ goto validate_replacement;
+ }
}
#ifndef HAVE_cc0
make up a dummy I1 that is
(set Y OP)
and change I2 to be
- (set (reg:CC X) (compare:CC Y (const_int 0)))
+ (set (reg:CC X) (compare:CC Y (const_int 0)))
(We can ignore any trailing CLOBBERs.)
if (i == 1)
{
/* We make I1 with the same INSN_UID as I2. This gives it
- the same INSN_CUID for value tracking. Our fake I1 will
+ the same DF_INSN_LUID for value tracking. Our fake I1 will
never appear in the insn stream so giving it the same INSN_UID
as I2 will not cause a problem. */
i1 = gen_rtx_INSN (VOIDmode, INSN_UID (i2), NULL_RTX, i2,
BLOCK_FOR_INSN (i2), INSN_LOCATOR (i2),
- XVECEXP (PATTERN (i2), 0, 1), -1, NULL_RTX,
- NULL_RTX);
+ XVECEXP (PATTERN (i2), 0, 1), -1, NULL_RTX);
SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
+ i2dest_killed = dead_or_set_p (i2, i2dest);
+ i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
/* See if I1 directly feeds into I3. It does if I1DEST is not used
in I2SRC. */
rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
I2DEST. */
- i2pat = (GET_CODE (PATTERN (i2)) == PARALLEL
- ? gen_rtx_SET (VOIDmode, i2dest, i2src)
- : PATTERN (i2));
-
if (added_sets_2)
- i2pat = copy_rtx (i2pat);
+ {
+ if (GET_CODE (PATTERN (i2)) == PARALLEL)
+ i2pat = gen_rtx_SET (VOIDmode, i2dest, copy_rtx (i2src));
+ else
+ i2pat = copy_rtx (PATTERN (i2));
+ }
+
+ if (added_sets_1)
+ {
+ if (GET_CODE (PATTERN (i1)) == PARALLEL)
+ i1pat = gen_rtx_SET (VOIDmode, i1dest, copy_rtx (i1src));
+ else
+ i1pat = copy_rtx (PATTERN (i1));
+ }
combine_merges++;
subst_insn = i3;
- /* It is possible that the source of I2 or I1 may be performing an
- unneeded operation, such as a ZERO_EXTEND of something that is known
- to have the high part zero. Handle that case by letting subst look at
- the innermost one of them.
-
- Another way to do this would be to have a function that tries to
- simplify a single insn instead of merging two or more insns. We don't
- do this because of the potential of infinite loops and because
- of the potential extra memory required. However, doing it the way
- we are is a bit of a kludge and doesn't catch all cases.
-
- But only do this if -fexpensive-optimizations since it slows things down
- and doesn't usually win. */
-
- if (flag_expensive_optimizations)
- {
- /* Pass pc_rtx so no substitutions are done, just simplifications. */
- if (i1)
- {
- subst_low_cuid = INSN_CUID (i1);
- i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0);
- }
- else
- {
- subst_low_cuid = INSN_CUID (i2);
- i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0);
- }
- }
-
#ifndef HAVE_cc0
/* Many machines that don't use CC0 have insns that can both perform an
arithmetic operation and set the condition code. These operations will
i2src, const0_rtx))
!= GET_MODE (SET_DEST (newpat))))
{
- unsigned int regno = REGNO (SET_DEST (newpat));
- rtx new_dest = gen_rtx_REG (compare_mode, regno);
-
- if (regno < FIRST_PSEUDO_REGISTER
- || (REG_N_SETS (regno) == 1 && ! added_sets_2
- && ! REG_USERVAR_P (SET_DEST (newpat))))
+ if (can_change_dest_mode(SET_DEST (newpat), added_sets_2,
+ compare_mode))
{
- if (regno >= FIRST_PSEUDO_REGISTER)
- SUBST (regno_reg_rtx[regno], new_dest);
+ unsigned int regno = REGNO (SET_DEST (newpat));
+ rtx new_dest;
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ new_dest = gen_rtx_REG (compare_mode, regno);
+ else
+ {
+ SUBST_MODE (regno_reg_rtx[regno], compare_mode);
+ new_dest = regno_reg_rtx[regno];
+ }
SUBST (SET_DEST (newpat), new_dest);
SUBST (XEXP (*cc_use, 0), new_dest);
else
#endif
{
+ /* It is possible that the source of I2 or I1 may be performing
+ an unneeded operation, such as a ZERO_EXTEND of something
+ that is known to have the high part zero. Handle that case
+ by letting subst look at the innermost one of them.
+
+ Another way to do this would be to have a function that tries
+ to simplify a single insn instead of merging two or more
+ insns. We don't do this because of the potential of infinite
+ loops and because of the potential extra memory required.
+ However, doing it the way we are is a bit of a kludge and
+ doesn't catch all cases.
+
+ But only do this if -fexpensive-optimizations since it slows
+ things down and doesn't usually win.
+
+ This is not done in the COMPARE case above because the
+ unmodified I2PAT is used in the PARALLEL and so a pattern
+ with a modified I2SRC would not match. */
+
+ if (flag_expensive_optimizations)
+ {
+ /* Pass pc_rtx so no substitutions are done, just
+ simplifications. */
+ if (i1)
+ {
+ subst_low_luid = DF_INSN_LUID (i1);
+ i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0);
+ }
+ else
+ {
+ subst_low_luid = DF_INSN_LUID (i2);
+ i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0);
+ }
+ }
+
n_occurrences = 0; /* `subst' counts here */
/* If I1 feeds into I2 (not into I3) and I1DEST is in I1SRC, we
need to make a unique copy of I2SRC each time we substitute it
to avoid self-referential rtl. */
- subst_low_cuid = INSN_CUID (i2);
+ subst_low_luid = DF_INSN_LUID (i2);
newpat = subst (PATTERN (i3), i2dest, i2src, 0,
! i1_feeds_i3 && i1dest_in_i1src);
substed_i2 = 1;
}
n_occurrences = 0;
- subst_low_cuid = INSN_CUID (i1);
+ subst_low_luid = DF_INSN_LUID (i1);
newpat = subst (newpat, i1dest, i1src, 0, 0);
substed_i1 = 1;
}
|| (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
&& (n_occurrences + added_sets_1 + (added_sets_2 && ! i1_feeds_i3)
> 1))
- /* Fail if we tried to make a new register (we used to abort, but there's
- really no reason to). */
+ /* Fail if we tried to make a new register. */
|| max_reg_num () != maxreg
/* Fail if we couldn't do something and have a CLOBBER. */
|| GET_CODE (newpat) == CLOBBER
}
if (added_sets_1)
- XVECEXP (newpat, 0, --total_sets)
- = (GET_CODE (PATTERN (i1)) == PARALLEL
- ? gen_rtx_SET (VOIDmode, i1dest, i1src) : PATTERN (i1));
+ XVECEXP (newpat, 0, --total_sets) = i1pat;
if (added_sets_2)
{
&& asm_noperands (newpat) < 0)
{
rtx m_split, *split;
- rtx ni2dest = i2dest;
/* See if the MD file can split NEWPAT. If it can't, see if letting it
use I2DEST as a scratch register will help. In the latter case,
possible to try that as a scratch reg. This would require adding
more code to make it work though. */
- if (m_split == 0 && ! reg_overlap_mentioned_p (ni2dest, newpat))
+ if (m_split == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
{
- /* If I2DEST is a hard register or the only use of a pseudo,
- we can change its mode. */
- if (GET_MODE (SET_DEST (newpat)) != GET_MODE (i2dest)
- && GET_MODE (SET_DEST (newpat)) != VOIDmode
- && REG_P (i2dest)
- && (REGNO (i2dest) < FIRST_PSEUDO_REGISTER
- || (REG_N_SETS (REGNO (i2dest)) == 1 && ! added_sets_2
- && ! REG_USERVAR_P (i2dest))))
- ni2dest = gen_rtx_REG (GET_MODE (SET_DEST (newpat)),
- REGNO (i2dest));
+ enum machine_mode new_mode = GET_MODE (SET_DEST (newpat));
+ /* First try to split using the original register as a
+ scratch register. */
m_split = split_insns (gen_rtx_PARALLEL
(VOIDmode,
gen_rtvec (2, newpat,
gen_rtx_CLOBBER (VOIDmode,
- ni2dest))),
+ i2dest))),
i3);
- /* If the split with the mode-changed register didn't work, try
- the original register. */
- if (! m_split && ni2dest != i2dest)
+
+ /* If that didn't work, try changing the mode of I2DEST if
+ we can. */
+ if (m_split == 0
+ && new_mode != GET_MODE (i2dest)
+ && new_mode != VOIDmode
+ && can_change_dest_mode (i2dest, added_sets_2, new_mode))
{
- ni2dest = i2dest;
+ enum machine_mode old_mode = GET_MODE (i2dest);
+ rtx ni2dest;
+
+ if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
+ ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
+ else
+ {
+ SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
+ ni2dest = regno_reg_rtx[REGNO (i2dest)];
+ }
+
m_split = split_insns (gen_rtx_PARALLEL
(VOIDmode,
gen_rtvec (2, newpat,
gen_rtx_CLOBBER (VOIDmode,
- i2dest))),
+ ni2dest))),
i3);
+
+ if (m_split == 0
+ && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
+ {
+ struct undo *buf;
+
+ PUT_MODE (regno_reg_rtx[REGNO (i2dest)], old_mode);
+ buf = undobuf.undos;
+ undobuf.undos = buf->next;
+ buf->next = undobuf.frees;
+ undobuf.frees = buf;
+ }
}
}
}
else if (m_split && NEXT_INSN (NEXT_INSN (m_split)) == NULL_RTX
&& (next_real_insn (i2) == i3
- || ! use_crosses_set_p (PATTERN (m_split), INSN_CUID (i2))))
+ || ! use_crosses_set_p (PATTERN (m_split), DF_INSN_LUID (i2))))
{
rtx i2set, i3set;
rtx newi3pat = PATTERN (NEXT_INSN (m_split));
i3set = single_set (NEXT_INSN (m_split));
i2set = single_set (m_split);
- /* In case we changed the mode of I2DEST, replace it in the
- pseudo-register table here. We can't do it above in case this
- code doesn't get executed and we do a split the other way. */
-
- if (REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
- SUBST (regno_reg_rtx[REGNO (i2dest)], ni2dest);
-
i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
/* If I2 or I3 has multiple SETs, we won't know how to track
if (REG_P (new_i3_dest)
&& REG_P (new_i2_dest)
&& REGNO (new_i3_dest) == REGNO (new_i2_dest))
- REG_N_SETS (REGNO (new_i2_dest))++;
+ INC_REG_N_SETS (REGNO (new_i2_dest), 1);
}
}
isn't valid for it, or change the number of registers. */
&& (GET_MODE (*split) == GET_MODE (i2dest)
|| GET_MODE (*split) == VOIDmode
- || (REGNO (i2dest) < FIRST_PSEUDO_REGISTER
- && HARD_REGNO_MODE_OK (REGNO (i2dest), GET_MODE (*split))
- && (HARD_REGNO_NREGS (REGNO (i2dest), GET_MODE (i2dest))
- == HARD_REGNO_NREGS (REGNO (i2dest), GET_MODE (*split))))
- || (REGNO (i2dest) >= FIRST_PSEUDO_REGISTER
- && REG_N_SETS (REGNO (i2dest)) == 1 && ! added_sets_2
- && ! REG_USERVAR_P (i2dest)))
+ || can_change_dest_mode (i2dest, added_sets_2,
+ GET_MODE (*split)))
&& (next_real_insn (i2) == i3
- || ! use_crosses_set_p (*split, INSN_CUID (i2)))
+ || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
/* We can't overwrite I2DEST if its value is still used by
NEWPAT. */
&& ! reg_referenced_p (i2dest, newpat))
rtx newdest = i2dest;
enum rtx_code split_code = GET_CODE (*split);
enum machine_mode split_mode = GET_MODE (*split);
+ bool subst_done = false;
+ newi2pat = NULL_RTX;
/* Get NEWDEST as a register in the proper mode. We have already
validated that we can do this. */
if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
{
- newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
-
- if (REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
- SUBST (regno_reg_rtx[REGNO (i2dest)], newdest);
+ if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
+ newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
+ else
+ {
+ SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
+ newdest = regno_reg_rtx[REGNO (i2dest)];
+ }
}
/* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
}
#endif
- newi2pat = gen_rtx_SET (VOIDmode, newdest, *split);
- SUBST (*split, newdest);
+ /* Attempt to split binary operators using arithmetic identities. */
+ if (BINARY_P (SET_SRC (newpat))
+ && split_mode == GET_MODE (SET_SRC (newpat))
+ && ! side_effects_p (SET_SRC (newpat)))
+ {
+ rtx setsrc = SET_SRC (newpat);
+ enum machine_mode mode = GET_MODE (setsrc);
+ enum rtx_code code = GET_CODE (setsrc);
+ rtx src_op0 = XEXP (setsrc, 0);
+ rtx src_op1 = XEXP (setsrc, 1);
+
+ /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
+ if (rtx_equal_p (src_op0, src_op1))
+ {
+ newi2pat = gen_rtx_SET (VOIDmode, newdest, src_op0);
+ SUBST (XEXP (setsrc, 0), newdest);
+ SUBST (XEXP (setsrc, 1), newdest);
+ subst_done = true;
+ }
+ /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
+ else if ((code == PLUS || code == MULT)
+ && GET_CODE (src_op0) == code
+ && GET_CODE (XEXP (src_op0, 0)) == code
+ && (INTEGRAL_MODE_P (mode)
+ || (FLOAT_MODE_P (mode)
+ && flag_unsafe_math_optimizations)))
+ {
+ rtx p = XEXP (XEXP (src_op0, 0), 0);
+ rtx q = XEXP (XEXP (src_op0, 0), 1);
+ rtx r = XEXP (src_op0, 1);
+ rtx s = src_op1;
+
+ /* Split both "((X op Y) op X) op Y" and
+ "((X op Y) op Y) op X" as "T op T" where T is
+ "X op Y". */
+ if ((rtx_equal_p (p, r) && rtx_equal_p (q, s))
+ || (rtx_equal_p (p, s) && rtx_equal_p (q, r)))
+ {
+ newi2pat = gen_rtx_SET (VOIDmode, newdest,
+ XEXP (src_op0, 0));
+ SUBST (XEXP (setsrc, 0), newdest);
+ SUBST (XEXP (setsrc, 1), newdest);
+ subst_done = true;
+ }
+ /* Split "((X op X) op Y) op Y)" as "T op T" where
+ T is "X op Y". */
+ else if (rtx_equal_p (p, q) && rtx_equal_p (r, s))
+ {
+ rtx tmp = simplify_gen_binary (code, mode, p, r);
+ newi2pat = gen_rtx_SET (VOIDmode, newdest, tmp);
+ SUBST (XEXP (setsrc, 0), newdest);
+ SUBST (XEXP (setsrc, 1), newdest);
+ subst_done = true;
+ }
+ }
+ }
+
+ if (!subst_done)
+ {
+ newi2pat = gen_rtx_SET (VOIDmode, newdest, *split);
+ SUBST (*split, newdest);
+ }
+
i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
/* recog_for_combine might have added CLOBBERs to newi2pat.
&& rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
&& ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
- INSN_CUID (i2))
+ DF_INSN_LUID (i2))
&& GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
&& GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
&& ! (temp = SET_DEST (XVECEXP (newpat, 0, 1)),
&& GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
&& GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
&& ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
- INSN_CUID (i2))
- /* Don't pass sets with (USE (MEM ...)) dests to the following. */
- && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != USE
- && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != USE
+ DF_INSN_LUID (i2))
&& ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
XVECEXP (newpat, 0, 0))
&& ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
XVECEXP (newpat, 0, 1))
&& ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
- && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
+ && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1))))
+#ifdef HAVE_cc0
+ /* We cannot split the parallel into two sets if both sets
+ reference cc0. */
+ && ! (reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 0))
+ && reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 1)))
+#endif
+ )
{
/* Normally, it doesn't matter which of the two is done first,
but it does if one references cc0. In that case, it has to
/* If we had to change another insn, make sure it is valid also. */
if (undobuf.other_insn)
{
- rtx other_pat = PATTERN (undobuf.other_insn);
- rtx new_other_notes;
- rtx note, next;
-
CLEAR_HARD_REG_SET (newpat_used_regs);
+ other_pat = PATTERN (undobuf.other_insn);
other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
&new_other_notes);
undo_all ();
return 0;
}
-
- PATTERN (undobuf.other_insn) = other_pat;
-
- /* If any of the notes in OTHER_INSN were REG_UNUSED, ensure that they
- are still valid. Then add any non-duplicate notes added by
- recog_for_combine. */
- for (note = REG_NOTES (undobuf.other_insn); note; note = next)
- {
- next = XEXP (note, 1);
-
- if (REG_NOTE_KIND (note) == REG_UNUSED
- && ! reg_set_p (XEXP (note, 0), PATTERN (undobuf.other_insn)))
- {
- if (REG_P (XEXP (note, 0)))
- REG_N_DEATHS (REGNO (XEXP (note, 0)))--;
-
- remove_note (undobuf.other_insn, note);
- }
- }
-
- for (note = new_other_notes; note; note = XEXP (note, 1))
- if (REG_P (XEXP (note, 0)))
- REG_N_DEATHS (REGNO (XEXP (note, 0)))++;
-
- distribute_notes (new_other_notes, undobuf.other_insn,
- undobuf.other_insn, NULL_RTX);
}
+
#ifdef HAVE_cc0
/* If I2 is the CC0 setter and I3 is the CC0 user then check whether
they are adjacent to each other or not. */
/* Only allow this combination if insn_rtx_costs reports that the
replacement instructions are cheaper than the originals. */
- if (!combine_validate_cost (i1, i2, i3, newpat, newi2pat))
+ if (!combine_validate_cost (i1, i2, i3, newpat, newi2pat, other_pat))
{
undo_all ();
return 0;
/* We now know that we can do this combination. Merge the insns and
update the status of registers and LOG_LINKS. */
+ if (undobuf.other_insn)
+ {
+ rtx note, next;
+
+ PATTERN (undobuf.other_insn) = other_pat;
+
+ /* If any of the notes in OTHER_INSN were REG_UNUSED, ensure that they
+ are still valid. Then add any non-duplicate notes added by
+ recog_for_combine. */
+ for (note = REG_NOTES (undobuf.other_insn); note; note = next)
+ {
+ next = XEXP (note, 1);
+
+ if (REG_NOTE_KIND (note) == REG_UNUSED
+ && ! reg_set_p (XEXP (note, 0), PATTERN (undobuf.other_insn)))
+ remove_note (undobuf.other_insn, note);
+ }
+
+ distribute_notes (new_other_notes, undobuf.other_insn,
+ undobuf.other_insn, NULL_RTX, NULL_RTX, NULL_RTX);
+ }
+
if (swap_i2i3)
{
rtx insn;
rtx ni2dest;
/* I3 now uses what used to be its destination and which is now
- I2's destination. This requires us to do a few adjustments. */
+ I2's destination. This requires us to do a few adjustments. */
PATTERN (i3) = newpat;
adjust_for_new_dest (i3);
/* We need a LOG_LINK from I3 to I2. But we used to have one,
- so we still will.
+ so we still will.
However, some later insn might be using I2's dest and have
a LOG_LINK pointing at I3. We must remove this link.
rtx i3links, i2links, i1links = 0;
rtx midnotes = 0;
unsigned int regno;
+ /* Compute which registers we expect to eliminate. newi2pat may be setting
+ either i3dest or i2dest, so we must check it. Also, i1dest may be the
+ same as i3dest, in which case newi2pat may be setting i1dest. */
+ rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
+ || i2dest_in_i2src || i2dest_in_i1src
+ || !i2dest_killed
+ ? 0 : i2dest);
+ rtx elim_i1 = (i1 == 0 || i1dest_in_i1src
+ || (newi2pat && reg_set_p (i1dest, newi2pat))
+ || !i1dest_killed
+ ? 0 : i1dest);
/* Get the old REG_NOTES and LOG_LINKS from all our insns and
clear them. */
if (i3_subst_into_i2)
{
for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
- if (GET_CODE (XVECEXP (PATTERN (i2), 0, i)) != USE
+ if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
+ || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
&& REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
&& SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
&& ! find_reg_note (i2, REG_UNUSED,
if (newi2pat)
{
- move_deaths (newi2pat, NULL_RTX, INSN_CUID (i1), i2, &midnotes);
- move_deaths (newpat, newi2pat, INSN_CUID (i1), i3, &midnotes);
+ move_deaths (newi2pat, NULL_RTX, DF_INSN_LUID (i1), i2, &midnotes);
+ move_deaths (newpat, newi2pat, DF_INSN_LUID (i1), i3, &midnotes);
}
else
- move_deaths (newpat, NULL_RTX, i1 ? INSN_CUID (i1) : INSN_CUID (i2),
+ move_deaths (newpat, NULL_RTX, i1 ? DF_INSN_LUID (i1) : DF_INSN_LUID (i2),
i3, &midnotes);
/* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
if (i3notes)
- distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL_RTX);
+ distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
if (i2notes)
- distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL_RTX);
+ distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
if (i1notes)
- distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL_RTX);
+ distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
if (midnotes)
- distribute_notes (midnotes, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX);
+ distribute_notes (midnotes, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
/* Distribute any notes added to I2 or I3 by recog_for_combine. We
know these are REG_UNUSED and want them to go to the desired insn,
- so we always pass it as i3. We have not counted the notes in
- reg_n_deaths yet, so we need to do so now. */
+ so we always pass it as i3. */
if (newi2pat && new_i2_notes)
- {
- for (temp = new_i2_notes; temp; temp = XEXP (temp, 1))
- if (REG_P (XEXP (temp, 0)))
- REG_N_DEATHS (REGNO (XEXP (temp, 0)))++;
-
- distribute_notes (new_i2_notes, i2, i2, NULL_RTX);
- }
-
+ distribute_notes (new_i2_notes, i2, i2, NULL_RTX, NULL_RTX, NULL_RTX);
+
if (new_i3_notes)
- {
- for (temp = new_i3_notes; temp; temp = XEXP (temp, 1))
- if (REG_P (XEXP (temp, 0)))
- REG_N_DEATHS (REGNO (XEXP (temp, 0)))++;
-
- distribute_notes (new_i3_notes, i3, i3, NULL_RTX);
- }
+ distribute_notes (new_i3_notes, i3, i3, NULL_RTX, NULL_RTX, NULL_RTX);
/* If I3DEST was used in I3SRC, it really died in I3. We may need to
put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
if (i3dest_killed)
{
- if (REG_P (i3dest_killed))
- REG_N_DEATHS (REGNO (i3dest_killed))++;
-
if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i3dest_killed,
NULL_RTX),
- NULL_RTX, i2, NULL_RTX);
+ NULL_RTX, i2, NULL_RTX, elim_i2, elim_i1);
else
distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i3dest_killed,
NULL_RTX),
- NULL_RTX, i3, newi2pat ? i2 : NULL_RTX);
+ NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
}
if (i2dest_in_i2src)
{
- if (REG_P (i2dest))
- REG_N_DEATHS (REGNO (i2dest))++;
-
if (newi2pat && reg_set_p (i2dest, newi2pat))
distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i2dest, NULL_RTX),
- NULL_RTX, i2, NULL_RTX);
+ NULL_RTX, i2, NULL_RTX, NULL_RTX, NULL_RTX);
else
distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i2dest, NULL_RTX),
- NULL_RTX, i3, newi2pat ? i2 : NULL_RTX);
+ NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
+ NULL_RTX, NULL_RTX);
}
if (i1dest_in_i1src)
{
- if (REG_P (i1dest))
- REG_N_DEATHS (REGNO (i1dest))++;
-
if (newi2pat && reg_set_p (i1dest, newi2pat))
distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i1dest, NULL_RTX),
- NULL_RTX, i2, NULL_RTX);
+ NULL_RTX, i2, NULL_RTX, NULL_RTX, NULL_RTX);
else
distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i1dest, NULL_RTX),
- NULL_RTX, i3, newi2pat ? i2 : NULL_RTX);
+ NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
+ NULL_RTX, NULL_RTX);
}
distribute_links (i3links);
&& ! i2dest_in_i2src)
{
regno = REGNO (i2dest);
- REG_N_SETS (regno)--;
+ INC_REG_N_SETS (regno, -1);
}
}
regno = REGNO (i1dest);
if (! added_sets_1 && ! i1dest_in_i1src)
- REG_N_SETS (regno)--;
+ INC_REG_N_SETS (regno, -1);
}
/* Update reg_stat[].nonzero_bits et al for any changes that may have
&& SET_DEST (newpat) == pc_rtx)
*new_direct_jump_p = 1;
}
+
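+ /* Tell the dataflow machinery to rescan every insn that this
+ combination may have modified. */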
+ if (undobuf.other_insn != NULL_RTX)
+ {
+ if (dump_file)
+ {
+ fprintf (dump_file, "modifying other_insn ");
+ dump_insn_slim (dump_file, undobuf.other_insn);
+ }
+ df_insn_rescan (undobuf.other_insn);
+ }
+ if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
+ {
+ if (dump_file)
+ {
+ fprintf (dump_file, "modifying insn i1 ");
+ dump_insn_slim (dump_file, i1);
+ }
+ df_insn_rescan (i1);
+ }
+
+ if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
+ {
+ if (dump_file)
+ {
+ fprintf (dump_file, "modifying insn i2 ");
+ dump_insn_slim (dump_file, i2);
+ }
+ df_insn_rescan (i2);
+ }
+
+ if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
+ {
+ if (dump_file)
+ {
+ fprintf (dump_file, "modifying insn i3 ");
+ dump_insn_slim (dump_file, i3);
+ }
+ df_insn_rescan (i3);
+ }
+
combine_successes++;
undo_commit ();
if (added_links_insn
- && (newi2pat == 0 || INSN_CUID (added_links_insn) < INSN_CUID (i2))
- && INSN_CUID (added_links_insn) < INSN_CUID (i3))
+ && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
+ && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
return added_links_insn;
else
return newi2pat ? i2 : i3;
for (undo = undobuf.undos; undo; undo = next)
{
next = undo->next;
- if (undo->is_int)
- *undo->where.i = undo->old_contents.i;
- else
- *undo->where.r = undo->old_contents.r;
+ switch (undo->kind)
+ {
+ case UNDO_RTX:
+ *undo->where.r = undo->old_contents.r;
+ break;
+ case UNDO_INT:
+ *undo->where.i = undo->old_contents.i;
+ break;
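+ /* A mode change recorded by SUBST_MODE is undone by restoring the
+ register's original mode. */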
+ case UNDO_MODE:
+ PUT_MODE (*undo->where.r, undo->old_contents.m);
+ break;
+ default:
+ gcc_unreachable ();
+ }
undo->next = undobuf.frees;
undobuf.frees = undo;
}
undobuf.undos = 0;
}
-
\f
/* Find the innermost point within the rtx at LOC, possibly LOC itself,
where we have an arithmetic expression and return that point. LOC will
rtx dest = XEXP (SET_DEST (x), 0);
enum machine_mode mode = GET_MODE (dest);
unsigned HOST_WIDE_INT mask = ((HOST_WIDE_INT) 1 << len) - 1;
+ rtx or_mask;
if (BITS_BIG_ENDIAN)
pos = GET_MODE_BITSIZE (mode) - len - pos;
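+ /* SRC shifted into position, truncated to MODE. */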
+ or_mask = gen_int_mode (src << pos, mode);
if (src == mask)
SUBST (SET_SRC (x),
- simplify_gen_binary (IOR, mode, dest, GEN_INT (src << pos)));
+ simplify_gen_binary (IOR, mode, dest, or_mask));
else
{
rtx negmask = gen_int_mode (~(mask << pos), mode);
SUBST (SET_SRC (x),
simplify_gen_binary (IOR, mode,
- simplify_gen_binary (AND, mode,
+ simplify_gen_binary (AND, mode,
dest, negmask),
- GEN_INT (src << pos)));
+ or_mask));
}
SUBST (SET_DEST (x), dest);
return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
}
- /* If X and FROM are the same register but different modes, they will
- not have been seen as equal above. However, flow.c will make a
- LOG_LINKS entry for that case. If we do nothing, we will try to
- rerecognize our original insn and, when it succeeds, we will
- delete the feeding insn, which is incorrect.
+ /* If X and FROM are the same register but different modes, they
+ will not have been seen as equal above. However, the log links code
+ will make a LOG_LINKS entry for that case. If we do nothing, we
+ will try to rerecognize our original insn and, when it succeeds,
+ we will delete the feeding insn, which is incorrect.
So force this insn not to match in this (rare) case. */
if (! in_dest && code == REG && REG_P (from)
- && REGNO (x) == REGNO (from))
+ && reg_overlap_mentioned_p (x, from))
return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
/* If this is an object, we are done unless it is a MEM or LO_SUM, both
enum rtx_code code = GET_CODE (x);
enum machine_mode mode = GET_MODE (x);
rtx temp;
- rtx reversed;
int i;
/* If this is a commutative operation, put a constant last and a complex
SUBST (XEXP (x, 1), temp);
}
- /* If this is a PLUS, MINUS, or MULT, and the first operand is the
- sign extension of a PLUS with a constant, reverse the order of the sign
- extension and the addition. Note that this not the same as the original
- code, but overflow is undefined for signed values. Also note that the
- PLUS will have been partially moved "inside" the sign-extension, so that
- the first operand of X will really look like:
- (ashiftrt (plus (ashift A C4) C5) C4).
- We convert this to
- (plus (ashiftrt (ashift A C4) C2) C4)
- and replace the first operand of X with that expression. Later parts
- of this function may simplify the expression further.
-
- For example, if we start with (mult (sign_extend (plus A C1)) C2),
- we swap the SIGN_EXTEND and PLUS. Later code will apply the
- distributive law to produce (plus (mult (sign_extend X) C1) C3).
-
- We do this to simplify address expressions. */
-
- if ((code == PLUS || code == MINUS || code == MULT)
- && GET_CODE (XEXP (x, 0)) == ASHIFTRT
- && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
- && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ASHIFT
- && GET_CODE (XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 1)) == CONST_INT
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
- && XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 1) == XEXP (XEXP (x, 0), 1)
- && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
- && (temp = simplify_binary_operation (ASHIFTRT, mode,
- XEXP (XEXP (XEXP (x, 0), 0), 1),
- XEXP (XEXP (x, 0), 1))) != 0)
- {
- rtx new
- = simplify_shift_const (NULL_RTX, ASHIFT, mode,
- XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 0),
- INTVAL (XEXP (XEXP (x, 0), 1)));
-
- new = simplify_shift_const (NULL_RTX, ASHIFTRT, mode, new,
- INTVAL (XEXP (XEXP (x, 0), 1)));
-
- SUBST (XEXP (x, 0), simplify_gen_binary (PLUS, mode, new, temp));
- }
-
/* If this is a simple operation applied to an IF_THEN_ELSE, try
applying it to the arms of the IF_THEN_ELSE. This often simplifies
things. Check for cases where both arms are testing the same
&& ! (GET_CODE (XEXP (x, 1)) == SUBREG
&& OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
|| (UNARY_P (x)
- && (!OBJECT_P (XEXP (x, 0))
+ && (!OBJECT_P (XEXP (x, 0))
&& ! (GET_CODE (XEXP (x, 0)) == SUBREG
&& OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
{
else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
&& ((reversed = reversed_comparison_code_parts
(cond_code, cond, cop1, NULL))
- != UNKNOWN))
+ != UNKNOWN))
x = simplify_gen_relational (reversed, mode, VOIDmode,
cond, cop1);
&& true_rtx == const0_rtx
&& ((reversed = reversed_comparison_code_parts
(cond_code, cond, cop1, NULL))
- != UNKNOWN))
+ != UNKNOWN))
x = simplify_gen_unary (NEG, mode,
simplify_gen_relational (reversed,
mode, VOIDmode,
return gen_lowpart (mode, SUBREG_REG (x));
if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
- break;
+ break;
{
rtx temp;
temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
break;
- case NOT:
- if (GET_CODE (XEXP (x, 0)) == SUBREG
- && subreg_lowpart_p (XEXP (x, 0))
- && (GET_MODE_SIZE (GET_MODE (XEXP (x, 0)))
- < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (x, 0)))))
- && GET_CODE (SUBREG_REG (XEXP (x, 0))) == ASHIFT
- && XEXP (SUBREG_REG (XEXP (x, 0)), 0) == const1_rtx)
- {
- enum machine_mode inner_mode = GET_MODE (SUBREG_REG (XEXP (x, 0)));
-
- x = gen_rtx_ROTATE (inner_mode,
- simplify_gen_unary (NOT, inner_mode, const1_rtx,
- inner_mode),
- XEXP (SUBREG_REG (XEXP (x, 0)), 1));
- return gen_lowpart (mode, x);
- }
-
- /* Apply De Morgan's laws to reduce number of patterns for machines
- with negating logical insns (and-not, nand, etc.). If result has
- only one NOT, put it first, since that is how the patterns are
- coded. */
-
- if (GET_CODE (XEXP (x, 0)) == IOR || GET_CODE (XEXP (x, 0)) == AND)
- {
- rtx in1 = XEXP (XEXP (x, 0), 0), in2 = XEXP (XEXP (x, 0), 1);
- enum machine_mode op_mode;
-
- op_mode = GET_MODE (in1);
- in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
-
- op_mode = GET_MODE (in2);
- if (op_mode == VOIDmode)
- op_mode = mode;
- in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
-
- if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
- {
- rtx tem = in2;
- in2 = in1; in1 = tem;
- }
-
- return gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)) == IOR ? AND : IOR,
- mode, in1, in2);
- }
- break;
-
case NEG:
- /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
- if (GET_CODE (XEXP (x, 0)) == XOR
- && XEXP (XEXP (x, 0), 1) == const1_rtx
- && nonzero_bits (XEXP (XEXP (x, 0), 0), mode) == 1)
- return simplify_gen_binary (PLUS, mode, XEXP (XEXP (x, 0), 0),
- constm1_rtx);
-
temp = expand_compound_operation (XEXP (x, 0));
/* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
if (GET_CODE (temp) == ASHIFTRT
&& GET_CODE (XEXP (temp, 1)) == CONST_INT
&& INTVAL (XEXP (temp, 1)) == GET_MODE_BITSIZE (mode) - 1)
- return simplify_shift_const (temp, LSHIFTRT, mode, XEXP (temp, 0),
+ return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
INTVAL (XEXP (temp, 1)));
/* If X has only a single bit that might be nonzero, say, bit I, convert
/* We can't handle truncation to a partial integer mode here
because we don't know the real bitsize of the partial
integer mode. */
- if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
- break;
-
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
- GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))))
- SUBST (XEXP (x, 0),
- force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
- GET_MODE_MASK (mode), NULL_RTX, 0));
-
- /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
- if ((GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
- || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
- && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode)
- return XEXP (XEXP (x, 0), 0);
-
- /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
- (OP:SI foo:SI) if OP is NEG or ABS. */
- if ((GET_CODE (XEXP (x, 0)) == ABS
- || GET_CODE (XEXP (x, 0)) == NEG)
- && (GET_CODE (XEXP (XEXP (x, 0), 0)) == SIGN_EXTEND
- || GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND)
- && GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == mode)
- return simplify_gen_unary (GET_CODE (XEXP (x, 0)), mode,
- XEXP (XEXP (XEXP (x, 0), 0), 0), mode);
-
- /* (truncate:SI (subreg:DI (truncate:SI X) 0)) is
- (truncate:SI x). */
- if (GET_CODE (XEXP (x, 0)) == SUBREG
- && GET_CODE (SUBREG_REG (XEXP (x, 0))) == TRUNCATE
- && subreg_lowpart_p (XEXP (x, 0)))
- return SUBREG_REG (XEXP (x, 0));
-
- /* If we know that the value is already truncated, we can
- replace the TRUNCATE with a SUBREG if TRULY_NOOP_TRUNCATION
- is nonzero for the corresponding modes. But don't do this
- for an (LSHIFTRT (MULT ...)) since this will cause problems
- with the umulXi3_highpart patterns. */
- if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
- GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))))
- && num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
- >= (unsigned int) (GET_MODE_BITSIZE (mode) + 1)
- && ! (GET_CODE (XEXP (x, 0)) == LSHIFTRT
- && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT))
- return gen_lowpart (mode, XEXP (x, 0));
+ if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
+ break;
- /* A truncate of a comparison can be replaced with a subreg if
- STORE_FLAG_VALUE permits. This is like the previous test,
- but it works even if the comparison is done in a mode larger
- than HOST_BITS_PER_WIDE_INT. */
if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && COMPARISON_P (XEXP (x, 0))
- && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
- return gen_lowpart (mode, XEXP (x, 0));
+ && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
+ GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))))
+ SUBST (XEXP (x, 0),
+ force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
+ GET_MODE_MASK (mode), 0));
- /* Similarly, a truncate of a register whose value is a
- comparison can be replaced with a subreg if STORE_FLAG_VALUE
- permits. */
+ /* Similarly to what we do in simplify-rtx.c, a truncate of a register
+ whose value is a comparison can be replaced with a subreg if
+ STORE_FLAG_VALUE permits. */
if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
&& ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
&& (temp = get_last_value (XEXP (x, 0)))
&& COMPARISON_P (temp))
return gen_lowpart (mode, XEXP (x, 0));
-
break;
- case FLOAT_TRUNCATE:
- /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
- if (GET_CODE (XEXP (x, 0)) == FLOAT_EXTEND
- && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode)
- return XEXP (XEXP (x, 0), 0);
-
- /* (float_truncate:SF (float_truncate:DF foo:XF))
- = (float_truncate:SF foo:XF).
- This may eliminate double rounding, so it is unsafe.
-
- (float_truncate:SF (float_extend:XF foo:DF))
- = (float_truncate:SF foo:DF).
-
- (float_truncate:DF (float_extend:XF foo:SF))
- = (float_extend:SF foo:DF). */
- if ((GET_CODE (XEXP (x, 0)) == FLOAT_TRUNCATE
- && flag_unsafe_math_optimizations)
- || GET_CODE (XEXP (x, 0)) == FLOAT_EXTEND)
- return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (XEXP (x, 0),
- 0)))
- > GET_MODE_SIZE (mode)
- ? FLOAT_TRUNCATE : FLOAT_EXTEND,
- mode,
- XEXP (XEXP (x, 0), 0), mode);
-
- /* (float_truncate (float x)) is (float x) */
- if (GET_CODE (XEXP (x, 0)) == FLOAT
- && (flag_unsafe_math_optimizations
- || ((unsigned)significand_size (GET_MODE (XEXP (x, 0)))
- >= (GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (x, 0), 0)))
- - num_sign_bit_copies (XEXP (XEXP (x, 0), 0),
- GET_MODE (XEXP (XEXP (x, 0), 0)))))))
- return simplify_gen_unary (FLOAT, mode,
- XEXP (XEXP (x, 0), 0),
- GET_MODE (XEXP (XEXP (x, 0), 0)));
-
- /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
- (OP:SF foo:SF) if OP is NEG or ABS. */
- if ((GET_CODE (XEXP (x, 0)) == ABS
- || GET_CODE (XEXP (x, 0)) == NEG)
- && GET_CODE (XEXP (XEXP (x, 0), 0)) == FLOAT_EXTEND
- && GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == mode)
- return simplify_gen_unary (GET_CODE (XEXP (x, 0)), mode,
- XEXP (XEXP (XEXP (x, 0), 0), 0), mode);
-
- /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
- is (float_truncate:SF x). */
- if (GET_CODE (XEXP (x, 0)) == SUBREG
- && subreg_lowpart_p (XEXP (x, 0))
- && GET_CODE (SUBREG_REG (XEXP (x, 0))) == FLOAT_TRUNCATE)
- return SUBREG_REG (XEXP (x, 0));
- break;
- case FLOAT_EXTEND:
- /* (float_extend (float_extend x)) is (float_extend x)
-
- (float_extend (float x)) is (float x) assuming that double
- rounding can't happen.
- */
- if (GET_CODE (XEXP (x, 0)) == FLOAT_EXTEND
- || (GET_CODE (XEXP (x, 0)) == FLOAT
- && ((unsigned)significand_size (GET_MODE (XEXP (x, 0)))
- >= (GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (x, 0), 0)))
- - num_sign_bit_copies (XEXP (XEXP (x, 0), 0),
- GET_MODE (XEXP (XEXP (x, 0), 0)))))))
- return simplify_gen_unary (GET_CODE (XEXP (x, 0)), mode,
- XEXP (XEXP (x, 0), 0),
- GET_MODE (XEXP (XEXP (x, 0), 0)));
-
- break;
#ifdef HAVE_cc0
case COMPARE:
/* Convert (compare FOO (const_int 0)) to FOO unless we aren't
#endif
case PLUS:
- /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).
- */
- if (GET_CODE (XEXP (x, 0)) == MULT
- && GET_CODE (XEXP (XEXP (x, 0), 0)) == NEG)
- {
- rtx in1, in2;
-
- in1 = XEXP (XEXP (XEXP (x, 0), 0), 0);
- in2 = XEXP (XEXP (x, 0), 1);
- return simplify_gen_binary (MINUS, mode, XEXP (x, 1),
- simplify_gen_binary (MULT, mode,
- in1, in2));
- }
-
- /* If we have (plus (plus (A const) B)), associate it so that CONST is
- outermost. That's because that's the way indexed addresses are
- supposed to appear. This code used to check many more cases, but
- they are now checked elsewhere. */
- if (GET_CODE (XEXP (x, 0)) == PLUS
- && CONSTANT_ADDRESS_P (XEXP (XEXP (x, 0), 1)))
- return simplify_gen_binary (PLUS, mode,
- simplify_gen_binary (PLUS, mode,
- XEXP (XEXP (x, 0), 0),
- XEXP (x, 1)),
- XEXP (XEXP (x, 0), 1));
-
/* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
when c is (const_int (pow2 + 1) / 2) is a sign extension of a
bit-field and can be replaced by either a sign_extend or a
GET_MODE_BITSIZE (mode) - (i + 1)),
GET_MODE_BITSIZE (mode) - (i + 1));
- /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
- C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
- is 1. This produces better code than the alternative immediately
- below. */
- if (COMPARISON_P (XEXP (x, 0))
- && ((STORE_FLAG_VALUE == -1 && XEXP (x, 1) == const1_rtx)
- || (STORE_FLAG_VALUE == 1 && XEXP (x, 1) == constm1_rtx))
- && (reversed = reversed_comparison (XEXP (x, 0), mode)))
- return
- simplify_gen_unary (NEG, mode, reversed, mode);
-
/* If only the low-order bit of X is possibly nonzero, (plus x -1)
can become (ashiftrt (ashift (xor x 1) C) C) where C is
the bitsize of the mode - 1. This allows simplification of
break;
case MINUS:
- /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
- by reversing the comparison code if valid. */
- if (STORE_FLAG_VALUE == 1
- && XEXP (x, 0) == const1_rtx
- && COMPARISON_P (XEXP (x, 1))
- && (reversed = reversed_comparison (XEXP (x, 1), mode)))
- return reversed;
-
/* (minus <foo> (and <foo> (const_int -pow2))) becomes
(and <foo> (const_int pow2-1)) */
if (GET_CODE (XEXP (x, 1)) == AND
&& rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
-INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
-
- /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).
- */
- if (GET_CODE (XEXP (x, 1)) == MULT
- && GET_CODE (XEXP (XEXP (x, 1), 0)) == NEG)
- {
- rtx in1, in2;
-
- in1 = XEXP (XEXP (XEXP (x, 1), 0), 0);
- in2 = XEXP (XEXP (x, 1), 1);
- return simplify_gen_binary (PLUS, mode,
- simplify_gen_binary (MULT, mode,
- in1, in2),
- XEXP (x, 0));
- }
-
- /* Canonicalize (minus (neg A) (mult B C)) to
- (minus (mult (neg B) C) A). */
- if (GET_CODE (XEXP (x, 1)) == MULT
- && GET_CODE (XEXP (x, 0)) == NEG)
- {
- rtx in1, in2;
-
- in1 = simplify_gen_unary (NEG, mode, XEXP (XEXP (x, 1), 0), mode);
- in2 = XEXP (XEXP (x, 1), 1);
- return simplify_gen_binary (MINUS, mode,
- simplify_gen_binary (MULT, mode,
- in1, in2),
- XEXP (XEXP (x, 0), 0));
- }
-
- /* Canonicalize (minus A (plus B C)) to (minus (minus A B) C) for
- integers. */
- if (GET_CODE (XEXP (x, 1)) == PLUS && INTEGRAL_MODE_P (mode))
- return simplify_gen_binary (MINUS, mode,
- simplify_gen_binary (MINUS, mode,
- XEXP (x, 0),
- XEXP (XEXP (x, 1), 0)),
- XEXP (XEXP (x, 1), 1));
break;
case MULT:
case AND:
case IOR:
- case XOR:
return simplify_logical (x);
- case ABS:
- /* (abs (neg <foo>)) -> (abs <foo>) */
- if (GET_CODE (XEXP (x, 0)) == NEG)
- SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
-
- /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
- do nothing. */
- if (GET_MODE (XEXP (x, 0)) == VOIDmode)
- break;
-
- /* If operand is something known to be positive, ignore the ABS. */
- if (GET_CODE (XEXP (x, 0)) == FFS || GET_CODE (XEXP (x, 0)) == ABS
- || ((GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
- <= HOST_BITS_PER_WIDE_INT)
- && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
- & ((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1)))
- == 0)))
- return XEXP (x, 0);
-
- /* If operand is known to be only -1 or 0, convert ABS to NEG. */
- if (num_sign_bit_copies (XEXP (x, 0), mode) == GET_MODE_BITSIZE (mode))
- return gen_rtx_NEG (mode, XEXP (x, 0));
-
- break;
-
- case FFS:
- /* (ffs (*_extend <X>)) = (ffs <X>) */
- if (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
- || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
- SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
- break;
-
- case POPCOUNT:
- case PARITY:
- /* (pop* (zero_extend <X>)) = (pop* <X>) */
- if (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
- SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
- break;
-
- case FLOAT:
- /* (float (sign_extend <X>)) = (float <X>). */
- if (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
- SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
- break;
-
case ASHIFT:
case LSHIFTRT:
case ASHIFTRT:
((HOST_WIDE_INT) 1
<< exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
- 1,
- NULL_RTX, 0));
- break;
-
- case VEC_SELECT:
- {
- rtx op0 = XEXP (x, 0);
- rtx op1 = XEXP (x, 1);
- int len;
-
- gcc_assert (GET_CODE (op1) == PARALLEL);
- len = XVECLEN (op1, 0);
- if (len == 1
- && GET_CODE (XVECEXP (op1, 0, 0)) == CONST_INT
- && GET_CODE (op0) == VEC_CONCAT)
- {
- int offset = INTVAL (XVECEXP (op1, 0, 0)) * GET_MODE_SIZE (GET_MODE (x));
-
- /* Try to find the element in the VEC_CONCAT. */
- for (;;)
- {
- if (GET_MODE (op0) == GET_MODE (x))
- return op0;
- if (GET_CODE (op0) == VEC_CONCAT)
- {
- HOST_WIDE_INT op0_size = GET_MODE_SIZE (GET_MODE (XEXP (op0, 0)));
- if (op0_size < offset)
- op0 = XEXP (op0, 0);
- else
- {
- offset -= op0_size;
- op0 = XEXP (op0, 1);
- }
- }
- else
- break;
- }
- }
- }
-
+ 0));
break;
default:
if (true_code == EQ && true_val == const0_rtx
&& exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
- false_code = EQ, false_val = GEN_INT (nzb);
+ {
+ false_code = EQ;
+ false_val = GEN_INT (trunc_int_for_mode (nzb, GET_MODE (from)));
+ }
else if (true_code == EQ && true_val == const0_rtx
&& (num_sign_bit_copies (from, GET_MODE (from))
== GET_MODE_BITSIZE (GET_MODE (from))))
- false_code = EQ, false_val = constm1_rtx;
+ {
+ false_code = EQ;
+ false_val = constm1_rtx;
+ }
/* Now simplify an arm if we know the value of the register in the
branch and it is used in the arm. Be careful due to the potential
if (GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
{
- src = force_to_mode (src, mode, ~(HOST_WIDE_INT) 0, NULL_RTX, 0);
+ src = force_to_mode (src, mode, ~(HOST_WIDE_INT) 0, 0);
SUBST (SET_SRC (x), src);
}
tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
op0, op1);
if (!tmp)
- new_code = old_code;
+ new_code = old_code;
else if (!CONSTANT_P (tmp))
- {
- new_code = GET_CODE (tmp);
- op0 = XEXP (tmp, 0);
- op1 = XEXP (tmp, 1);
- }
+ {
+ new_code = GET_CODE (tmp);
+ op0 = XEXP (tmp, 0);
+ op1 = XEXP (tmp, 1);
+ }
else
{
rtx pat = PATTERN (other_insn);
which case we can safely change its mode. */
if (compare_mode != GET_MODE (dest))
{
- unsigned int regno = REGNO (dest);
- rtx new_dest = gen_rtx_REG (compare_mode, regno);
-
- if (regno < FIRST_PSEUDO_REGISTER
- || (REG_N_SETS (regno) == 1 && ! REG_USERVAR_P (dest)))
+ if (can_change_dest_mode (dest, 0, compare_mode))
{
- if (regno >= FIRST_PSEUDO_REGISTER)
- SUBST (regno_reg_rtx[regno], new_dest);
+ unsigned int regno = REGNO (dest);
+ rtx new_dest;
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ new_dest = gen_rtx_REG (compare_mode, regno);
+ else
+ {
+ SUBST_MODE (regno_reg_rtx[regno], compare_mode);
+ new_dest = regno_reg_rtx[regno];
+ }
SUBST (SET_DEST (x), new_dest);
SUBST (XEXP (*cc_use, 0), new_dest);
SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
src = SET_SRC (x);
}
- else
+ else if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
+ {
+ SUBST (SET_SRC (x), op0);
+ src = SET_SRC (x);
+ }
+ /* Otherwise, update the COMPARE if needed. */
+ else if (XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
{
- /* Otherwise, update the COMPARE if needed. */
- SUBST (XEXP (src, 0), op0);
- SUBST (XEXP (src, 1), op1);
+ SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
+ src = SET_SRC (x);
}
}
else
+ (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
#ifndef WORD_REGISTER_OPERATIONS
&& (GET_MODE_SIZE (GET_MODE (src))
- < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
#endif
#ifdef CANNOT_CHANGE_MODE_CLASS
&& ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
enum machine_mode mode = GET_MODE (x);
rtx op0 = XEXP (x, 0);
rtx op1 = XEXP (x, 1);
- rtx reversed;
switch (GET_CODE (x))
{
case AND:
- /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
- insn (and may simplify more). */
- if (GET_CODE (op0) == XOR
- && rtx_equal_p (XEXP (op0, 0), op1)
- && ! side_effects_p (op1))
- x = simplify_gen_binary (AND, mode,
- simplify_gen_unary (NOT, mode,
- XEXP (op0, 1), mode),
- op1);
-
- if (GET_CODE (op0) == XOR
- && rtx_equal_p (XEXP (op0, 1), op1)
- && ! side_effects_p (op1))
- x = simplify_gen_binary (AND, mode,
- simplify_gen_unary (NOT, mode,
- XEXP (op0, 0), mode),
- op1);
-
- /* Similarly for (~(A ^ B)) & A. */
- if (GET_CODE (op0) == NOT
- && GET_CODE (XEXP (op0, 0)) == XOR
- && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
- && ! side_effects_p (op1))
- x = simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
-
- if (GET_CODE (op0) == NOT
- && GET_CODE (XEXP (op0, 0)) == XOR
- && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
- && ! side_effects_p (op1))
- x = simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
-
/* We can call simplify_and_const_int only if we don't lose
any (sign) bits when converting INTVAL (op1) to
"unsigned HOST_WIDE_INT". */
|| INTVAL (op1) > 0))
{
x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
-
- /* If we have (ior (and (X C1) C2)) and the next restart would be
- the last, simplify this by making C1 as small as possible
- and then exit. Only do this if C1 actually changes: for now
- this only saves memory but, should this transformation be
- moved to simplify-rtx.c, we'd risk unbounded recursion there. */
- if (GET_CODE (x) == IOR && GET_CODE (op0) == AND
- && GET_CODE (XEXP (op0, 1)) == CONST_INT
- && GET_CODE (op1) == CONST_INT
- && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
- return simplify_gen_binary (IOR, mode,
- simplify_gen_binary
- (AND, mode, XEXP (op0, 0),
- GEN_INT (INTVAL (XEXP (op0, 1))
- & ~INTVAL (op1))), op1);
-
if (GET_CODE (x) != AND)
return x;
op1 = XEXP (x, 1);
}
- /* Convert (A | B) & A to A. */
- if (GET_CODE (op0) == IOR
- && (rtx_equal_p (XEXP (op0, 0), op1)
- || rtx_equal_p (XEXP (op0, 1), op1))
- && ! side_effects_p (XEXP (op0, 0))
- && ! side_effects_p (XEXP (op0, 1)))
- return op1;
-
/* If we have any of (and (ior A B) C) or (and (xor A B) C),
apply the distributive law and then the inverse distributive
law to see if things simplify. */
break;
case IOR:
- /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
- if (GET_CODE (op1) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
- return op1;
-
- /* Convert (A & B) | A to A. */
- if (GET_CODE (op0) == AND
- && (rtx_equal_p (XEXP (op0, 0), op1)
- || rtx_equal_p (XEXP (op0, 1), op1))
- && ! side_effects_p (XEXP (op0, 0))
- && ! side_effects_p (XEXP (op0, 1)))
- return op1;
-
/* If we have (ior (and A B) C), apply the distributive law and then
the inverse distributive law to see if things simplify. */
if (result)
return result;
}
-
- /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
- mode size to (rotate A CX). */
-
- if (((GET_CODE (op0) == ASHIFT && GET_CODE (op1) == LSHIFTRT)
- || (GET_CODE (op1) == ASHIFT && GET_CODE (op0) == LSHIFTRT))
- && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
- && GET_CODE (XEXP (op0, 1)) == CONST_INT
- && GET_CODE (XEXP (op1, 1)) == CONST_INT
- && (INTVAL (XEXP (op0, 1)) + INTVAL (XEXP (op1, 1))
- == GET_MODE_BITSIZE (mode)))
- return gen_rtx_ROTATE (mode, XEXP (op0, 0),
- (GET_CODE (op0) == ASHIFT
- ? XEXP (op0, 1) : XEXP (op1, 1)));
-
- /* If OP0 is (ashiftrt (plus ...) C), it might actually be
- a (sign_extend (plus ...)). If so, OP1 is a CONST_INT, and the PLUS
- does not affect any of the bits in OP1, it can really be done
- as a PLUS and we can associate. We do this by seeing if OP1
- can be safely shifted left C bits. */
- if (GET_CODE (op1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
- && GET_CODE (XEXP (op0, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
- && GET_CODE (XEXP (op0, 1)) == CONST_INT
- && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
- {
- int count = INTVAL (XEXP (op0, 1));
- HOST_WIDE_INT mask = INTVAL (op1) << count;
-
- if (mask >> count == INTVAL (op1)
- && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
- {
- SUBST (XEXP (XEXP (op0, 0), 1),
- GEN_INT (INTVAL (XEXP (XEXP (op0, 0), 1)) | mask));
- return op0;
- }
- }
- break;
-
- case XOR:
- /* If we are XORing two things that have no bits in common,
- convert them into an IOR. This helps to detect rotation encoded
- using those methods and possibly other simplifications. */
-
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && (nonzero_bits (op0, mode)
- & nonzero_bits (op1, mode)) == 0)
- return (simplify_gen_binary (IOR, mode, op0, op1));
-
- /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
- Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
- (NOT y). */
- {
- int num_negated = 0;
-
- if (GET_CODE (op0) == NOT)
- num_negated++, op0 = XEXP (op0, 0);
- if (GET_CODE (op1) == NOT)
- num_negated++, op1 = XEXP (op1, 0);
-
- if (num_negated == 2)
- {
- SUBST (XEXP (x, 0), op0);
- SUBST (XEXP (x, 1), op1);
- }
- else if (num_negated == 1)
- return
- simplify_gen_unary (NOT, mode,
- simplify_gen_binary (XOR, mode, op0, op1),
- mode);
- }
-
- /* Convert (xor (and A B) B) to (and (not A) B). The latter may
- correspond to a machine insn or result in further simplifications
- if B is a constant. */
-
- if (GET_CODE (op0) == AND
- && rtx_equal_p (XEXP (op0, 1), op1)
- && ! side_effects_p (op1))
- return simplify_gen_binary (AND, mode,
- simplify_gen_unary (NOT, mode,
- XEXP (op0, 0), mode),
- op1);
-
- else if (GET_CODE (op0) == AND
- && rtx_equal_p (XEXP (op0, 0), op1)
- && ! side_effects_p (op1))
- return simplify_gen_binary (AND, mode,
- simplify_gen_unary (NOT, mode,
- XEXP (op0, 1), mode),
- op1);
-
- /* (xor (comparison foo bar) (const_int 1)) can become the reversed
- comparison if STORE_FLAG_VALUE is 1. */
- if (STORE_FLAG_VALUE == 1
- && op1 == const1_rtx
- && COMPARISON_P (op0)
- && (reversed = reversed_comparison (op0, mode)))
- return reversed;
-
- /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
- is (lt foo (const_int 0)), so we can perform the above
- simplification if STORE_FLAG_VALUE is 1. */
-
- if (STORE_FLAG_VALUE == 1
- && op1 == const1_rtx
- && GET_CODE (op0) == LSHIFTRT
- && GET_CODE (XEXP (op0, 1)) == CONST_INT
- && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
- return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
-
- /* (xor (comparison foo bar) (const_int sign-bit))
- when STORE_FLAG_VALUE is the sign bit. */
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
- == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
- && op1 == const_true_rtx
- && COMPARISON_P (op0)
- && (reversed = reversed_comparison (op0, mode)))
- return reversed;
-
break;
default:
len = INTVAL (XEXP (x, 1));
pos = INTVAL (XEXP (x, 2));
- /* If this goes outside the object being extracted, replace the object
- with a (use (mem ...)) construct that only combine understands
- and is used only for this purpose. */
+ /* This should stay within the object being extracted; fail otherwise. */
if (len + pos > GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))))
- SUBST (XEXP (x, 0), gen_rtx_USE (GET_MODE (x), XEXP (x, 0)));
+ return x;
if (BITS_BIG_ENDIAN)
pos = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - len - pos;
if (GET_CODE (x) == ZERO_EXTEND)
{
/* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
- know that the last value didn't have any inappropriate bits
- set. */
+ know that the last value didn't have any inappropriate bits
+ set. */
if (GET_CODE (XEXP (x, 0)) == TRUNCATE
&& GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
&& GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
return SUBREG_REG (XEXP (x, 0));
/* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
- is a comparison and STORE_FLAG_VALUE permits. This is like
- the first case, but it works even when GET_MODE (x) is larger
- than HOST_WIDE_INT. */
+ is a comparison and STORE_FLAG_VALUE permits. This is like
+ the first case, but it works even when GET_MODE (x) is larger
+ than HOST_WIDE_INT. */
if (GET_CODE (XEXP (x, 0)) == TRUNCATE
&& GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
&& COMPARISON_P (XEXP (XEXP (x, 0), 0))
modewidth = GET_MODE_BITSIZE (GET_MODE (x));
if (modewidth + len >= pos)
- tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
- GET_MODE (x),
- simplify_shift_const (NULL_RTX, ASHIFT,
- GET_MODE (x),
- XEXP (x, 0),
- modewidth - pos - len),
- modewidth - len);
-
+ {
+ enum machine_mode mode = GET_MODE (x);
+ tem = gen_lowpart (mode, XEXP (x, 0));
+ if (!tem || GET_CODE (tem) == CLOBBER)
+ return x;
+ tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
+ tem, modewidth - pos - len);
+ tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
+ mode, tem, modewidth - len);
+ }
else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
simplify_shift_const (NULL_RTX, LSHIFTRT,
len = INTVAL (XEXP (SET_DEST (x), 1));
pos = XEXP (SET_DEST (x), 2);
- /* If the position is constant and spans the width of INNER,
- surround INNER with a USE to indicate this. */
+ /* A constant position should stay within the width of INNER. */
if (GET_CODE (pos) == CONST_INT
&& INTVAL (pos) + len > GET_MODE_BITSIZE (GET_MODE (inner)))
- inner = gen_rtx_USE (GET_MODE (SET_DEST (x)), inner);
+ break;
if (BITS_BIG_ENDIAN)
{
it is an RTX that represents a variable starting position; otherwise,
POS is the (constant) starting bit position (counted from the LSB).
- INNER may be a USE. This will occur when we started with a bitfield
- that went outside the boundary of the object in memory, which is
- allowed on most machines. To isolate this case, we produce a USE
- whose mode is wide enough and surround the MEM with it. The only
- code that understands the USE is this routine. If it is not removed,
- it will cause the resulting insn not to match.
-
UNSIGNEDP is nonzero for an unsigned reference and zero for a
signed reference.
ignore the POS lowest bits, etc. */
enum machine_mode is_mode = GET_MODE (inner);
enum machine_mode inner_mode;
- enum machine_mode wanted_inner_mode = byte_mode;
+ enum machine_mode wanted_inner_mode;
enum machine_mode wanted_inner_reg_mode = word_mode;
enum machine_mode pos_mode = word_mode;
enum machine_mode extraction_mode = word_mode;
enum machine_mode tmode = mode_for_size (len, MODE_INT, 1);
- int spans_byte = 0;
rtx new = 0;
rtx orig_pos_rtx = pos_rtx;
HOST_WIDE_INT orig_pos;
- /* Get some information about INNER and get the innermost object. */
- if (GET_CODE (inner) == USE)
- /* (use:SI (mem:QI foo)) stands for (mem:SI foo). */
- /* We don't need to adjust the position because we set up the USE
- to pretend that it was a full-word object. */
- spans_byte = 1, inner = XEXP (inner, 0);
- else if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
+ if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
{
/* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
consider just the QI as the memory to extract from.
appropriate STRICT_LOW_PART operation available.
For MEM, we can avoid an extract if the field starts on an appropriate
- boundary and we can change the mode of the memory reference. However,
- we cannot directly access the MEM if we have a USE and the underlying
- MEM is not TMODE. This combination means that MEM was being used in a
- context where bits outside its mode were being referenced; that is only
- valid in bit-field insns. */
+ boundary and we can change the mode of the memory reference. */
if (tmode != BLKmode
- && ! (spans_byte && inner_mode != tmode)
&& ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
&& !MEM_P (inner)
+ && (inner_mode == tmode
+ || !REG_P (inner)
+ || TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (tmode),
+ GET_MODE_BITSIZE (inner_mode))
+ || reg_truncated_to_mode (tmode, inner))
&& (! in_dest
|| (REG_P (inner)
&& have_insn_for (STRICT_LOW_PART, tmode))))
/* Avoid creating invalid subregs, for example when
simplifying (x>>32)&255. */
- if (final_word >= GET_MODE_SIZE (inner_mode))
+ if (!validate_subreg (tmode, inner_mode, inner, final_word))
return NULL_RTX;
new = gen_rtx_SUBREG (tmode, inner, final_word);
len >= HOST_BITS_PER_WIDE_INT
? ~(unsigned HOST_WIDE_INT) 0
: ((unsigned HOST_WIDE_INT) 1 << len) - 1,
- NULL_RTX, 0);
+ 0);
/* If this extraction is going into the destination of a SET,
make a STRICT_LOW_PART unless we made a MEM. */
don't do anything with zero-extending field extracts starting at
the low-order bit since they are simple AND operations. */
if (pos_rtx == 0 && pos == 0 && ! in_dest
- && ! in_compare && ! spans_byte && unsignedp)
+ && ! in_compare && unsignedp)
return 0;
- /* Unless we are allowed to span bytes or INNER is not MEM, reject this if
- we would be spanning bytes or if the position is not a constant and the
- length is not 1. In all other cases, we would only be going outside
- our object in cases when an original shift would have been
- undefined. */
- if (! spans_byte && MEM_P (inner)
+ /* If INNER is a MEM, reject this if we would be spanning bytes or
+ if the position is not a constant and the length is not 1. In all
+ other cases, we would only be going outside our object in cases when
+ an original shift would have been undefined. */
+ if (MEM_P (inner)
&& ((pos_rtx == 0 && pos + len > GET_MODE_BITSIZE (is_mode))
|| (pos_rtx != 0 && len != 1)))
return 0;
&& GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx)))
pos_mode = GET_MODE (pos_rtx);
- /* If this is not from memory, the desired mode is wanted_inner_reg_mode;
- if we have to change the mode of memory and cannot, the desired mode is
- EXTRACTION_MODE. */
+ /* If this is not from memory, the desired mode is the preferred mode
+ for an extraction pattern's first input operand, or word_mode if there
+ is none. */
if (!MEM_P (inner))
wanted_inner_mode = wanted_inner_reg_mode;
- else if (inner_mode != wanted_inner_mode
- && (mode_dependent_address_p (XEXP (inner, 0))
- || MEM_VOLATILE_P (inner)))
- wanted_inner_mode = extraction_mode;
+ else
+ {
+ /* Be careful not to go beyond the extracted object, and to maintain
+ the natural alignment of the memory. */
+ wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
+ while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
+ > GET_MODE_BITSIZE (wanted_inner_mode))
+ {
+ wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode);
+ gcc_assert (wanted_inner_mode != VOIDmode);
+ }
+
+ /* If we have to change the mode of memory and cannot, the desired mode
+ is EXTRACTION_MODE. */
+ if (inner_mode != wanted_inner_mode
+ && (mode_dependent_address_p (XEXP (inner, 0))
+ || MEM_VOLATILE_P (inner)
+ || pos_rtx))
+ wanted_inner_mode = extraction_mode;
+ }
orig_pos = pos;
Note that it can only be less than 0 if !MEM_P (inner). */
}
- /* If INNER has a wider mode, make it smaller. If this is a constant
- extract, try to adjust the byte to point to the byte containing
+ /* If INNER has a wider mode, and this is a constant extraction, try to
+ make it smaller and adjust the byte to point to the byte containing
the value. */
if (wanted_inner_mode != VOIDmode
+ && inner_mode != wanted_inner_mode
+ && ! pos_rtx
&& GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
- && ((MEM_P (inner)
- && (inner_mode == wanted_inner_mode
- || (! mode_dependent_address_p (XEXP (inner, 0))
- && ! MEM_VOLATILE_P (inner))))))
+ && MEM_P (inner)
+ && ! mode_dependent_address_p (XEXP (inner, 0))
+ && ! MEM_VOLATILE_P (inner))
{
int offset = 0;
/* If bytes are big endian and we had a paradoxical SUBREG, we must
adjust OFFSET to compensate. */
if (BYTES_BIG_ENDIAN
- && ! spans_byte
&& GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
- /* If this is a constant position, we can move to the desired byte. */
- if (pos_rtx == 0)
- {
- offset += pos / BITS_PER_UNIT;
- pos %= GET_MODE_BITSIZE (wanted_inner_mode);
- }
+ /* We can now move to the desired byte. */
+ offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
+ * GET_MODE_SIZE (wanted_inner_mode);
+ pos %= GET_MODE_BITSIZE (wanted_inner_mode);
if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
- && ! spans_byte
&& is_mode != wanted_inner_mode)
offset = (GET_MODE_SIZE (is_mode)
- GET_MODE_SIZE (wanted_inner_mode) - offset);
- if (offset != 0 || inner_mode != wanted_inner_mode)
- inner = adjust_address_nv (inner, wanted_inner_mode, offset);
+ inner = adjust_address_nv (inner, wanted_inner_mode, offset);
}
/* If INNER is not memory, we can always get it into the proper mode. If we
|| orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
return 0;
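+ /* Likewise reject an extraction that would start before the object. */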
+ if (orig_pos < 0)
+ return 0;
+
inner = force_to_mode (inner, wanted_inner_mode,
pos_rtx
|| len + orig_pos >= HOST_BITS_PER_WIDE_INT
? ~(unsigned HOST_WIDE_INT) 0
: ((((unsigned HOST_WIDE_INT) 1 << len) - 1)
<< orig_pos),
- NULL_RTX, 0);
+ 0);
}
/* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
what it originally did, do this SUBREG as a force_to_mode. */
tem = make_compound_operation (SUBREG_REG (x), in_code);
- if (GET_CODE (tem) != GET_CODE (SUBREG_REG (x))
- && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (tem))
- && subreg_lowpart_p (x))
- {
- rtx newer = force_to_mode (tem, mode, ~(HOST_WIDE_INT) 0,
- NULL_RTX, 0);
- /* If we have something other than a SUBREG, we might have
- done an expansion, so rerun ourselves. */
- if (GET_CODE (newer) != SUBREG)
- newer = make_compound_operation (newer, in_code);
+ {
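+ /* See whether the SUBREG of the simplified inner expression can be
+ folded directly; if so, the result replaces TEM below. */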
+ rtx simplified;
+ simplified = simplify_subreg (GET_MODE (x), tem, GET_MODE (tem),
+ SUBREG_BYTE (x));
- return newer;
- }
+ if (simplified)
+ tem = simplified;
- /* If this is a paradoxical subreg, and the new code is a sign or
- zero extension, omit the subreg and widen the extension. If it
- is a regular subreg, we can still get rid of the subreg by not
- widening so much, or in fact removing the extension entirely. */
- if ((GET_CODE (tem) == SIGN_EXTEND
- || GET_CODE (tem) == ZERO_EXTEND)
- && subreg_lowpart_p (x))
- {
- if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (tem))
- || (GET_MODE_SIZE (mode) >
- GET_MODE_SIZE (GET_MODE (XEXP (tem, 0)))))
- {
- if (! SCALAR_INT_MODE_P (mode))
- break;
- tem = gen_rtx_fmt_e (GET_CODE (tem), mode, XEXP (tem, 0));
- }
- else
- tem = gen_lowpart (mode, XEXP (tem, 0));
+ if (GET_CODE (tem) != GET_CODE (SUBREG_REG (x))
+ && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (tem))
+ && subreg_lowpart_p (x))
+ {
+ rtx newer = force_to_mode (tem, mode, ~(HOST_WIDE_INT) 0,
+ 0);
+
+ /* If we have something other than a SUBREG, we might have
+ done an expansion, so rerun ourselves. */
+ if (GET_CODE (newer) != SUBREG)
+ newer = make_compound_operation (newer, in_code);
+
+ return newer;
+ }
+
+ if (simplified)
return tem;
- }
+ }
break;
default:
break;
}
- if (new)
- {
- x = gen_lowpart (mode, new);
- code = GET_CODE (x);
+ if (new)
+ {
+ x = gen_lowpart (mode, new);
+ code = GET_CODE (x);
+ }
+
+ /* Now recursively process each operand of this operation. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ if (fmt[i] == 'e')
+ {
+ new = make_compound_operation (XEXP (x, i), next_code);
+ SUBST (XEXP (x, i), new);
+ }
+
+ /* If this is a commutative operation, the changes to the operands
+ may have made it noncanonical. */
+ if (COMMUTATIVE_ARITH_P (x)
+ && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
+ {
+ tem = XEXP (x, 0);
+ SUBST (XEXP (x, 0), XEXP (x, 1));
+ SUBST (XEXP (x, 1), tem);
+ }
+
+ return x;
+}
+\f
+/* Given M see if it is a value that would select a field of bits
+ within an item, but not the entire word. Return -1 if not.
+ Otherwise, return the starting position of the field, where 0 is the
+ low-order bit.
+
+ *PLEN is set to the length of the field. */
+
+static int
+get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
+{
+ /* Get the bit number of the first 1 bit from the right, -1 if none. */
+ int pos = exact_log2 (m & -m);
+ int len = 0;
+
+ if (pos >= 0)
+ /* Now shift off the low-order zero bits and see if we have a
+ power of two minus 1. */
+ len = exact_log2 ((m >> pos) + 1);
+
+ if (len <= 0)
+ pos = -1;
+
+ *plen = len;
+ return pos;
+}
+\f
+/* If X refers to a register that equals REG in value, replace these
+ references with REG. */
+static rtx
+canon_reg_for_combine (rtx x, rtx reg)
+{
+ rtx op0, op1, op2;
+ const char *fmt;
+ int i;
+ bool copied;
+
+ enum rtx_code code = GET_CODE (x);
+ switch (GET_RTX_CLASS (code))
+ {
+ case RTX_UNARY:
+ op0 = canon_reg_for_combine (XEXP (x, 0), reg);
+ if (op0 != XEXP (x, 0))
+ return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
+ GET_MODE (reg));
+ break;
+
+ case RTX_BIN_ARITH:
+ case RTX_COMM_ARITH:
+ op0 = canon_reg_for_combine (XEXP (x, 0), reg);
+ op1 = canon_reg_for_combine (XEXP (x, 1), reg);
+ if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
+ return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
+ break;
+
+ case RTX_COMPARE:
+ case RTX_COMM_COMPARE:
+ op0 = canon_reg_for_combine (XEXP (x, 0), reg);
+ op1 = canon_reg_for_combine (XEXP (x, 1), reg);
+ if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
+ return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
+ GET_MODE (op0), op0, op1);
+ break;
+
+ case RTX_TERNARY:
+ case RTX_BITFIELD_OPS:
+ op0 = canon_reg_for_combine (XEXP (x, 0), reg);
+ op1 = canon_reg_for_combine (XEXP (x, 1), reg);
+ op2 = canon_reg_for_combine (XEXP (x, 2), reg);
+ if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
+ return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
+ GET_MODE (op0), op0, op1, op2);
+ break;
+
+ case RTX_OBJ:
+ if (REG_P (x))
+ {
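+ /* Replace this register with REG when the value tracked for one
+ of them is known to be the other register. */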
+ if (rtx_equal_p (get_last_value (reg), x)
+ || rtx_equal_p (reg, get_last_value (x)))
+ return reg;
+ else
+ break;
+ }
+
+ /* fall through */
+
+ default:
+ fmt = GET_RTX_FORMAT (code);
+ copied = false;
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ {
+ rtx op = canon_reg_for_combine (XEXP (x, i), reg);
+ if (op != XEXP (x, i))
+ {
+ if (!copied)
+ {
+ copied = true;
+ x = copy_rtx (x);
+ }
+ XEXP (x, i) = op;
+ }
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ {
+ rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
+ if (op != XVECEXP (x, i, j))
+ {
+ if (!copied)
+ {
+ copied = true;
+ x = copy_rtx (x);
+ }
+ XVECEXP (x, i, j) = op;
+ }
+ }
+ }
+
+ break;
}
- /* Now recursively process each operand of this operation. */
- fmt = GET_RTX_FORMAT (code);
- for (i = 0; i < GET_RTX_LENGTH (code); i++)
- if (fmt[i] == 'e')
- {
- new = make_compound_operation (XEXP (x, i), next_code);
- SUBST (XEXP (x, i), new);
- }
-
return x;
}
-\f
-/* Given M see if it is a value that would select a field of bits
- within an item, but not the entire word. Return -1 if not.
- Otherwise, return the starting position of the field, where 0 is the
- low-order bit.
- *PLEN is set to the length of the field. */
+/* Return X converted to MODE. If the value is already truncated to
+ MODE we can just return a subreg even though in the general case we
+ would need an explicit truncation. */
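+/* For instance, a DImode value being converted to SImode gets a simple
+ lowpart reference when SImode truncation is a no-op on the target or
+ the register is already known to be truncated, and an explicit
+ (truncate:SI ...) otherwise. */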
-static int
-get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
+static rtx
+gen_lowpart_or_truncate (enum machine_mode mode, rtx x)
{
- /* Get the bit number of the first 1 bit from the right, -1 if none. */
- int pos = exact_log2 (m & -m);
- int len = 0;
-
- if (pos >= 0)
- /* Now shift off the low-order zero bits and see if we have a
- power of two minus 1. */
- len = exact_log2 ((m >> pos) + 1);
-
- if (len <= 0)
- pos = -1;
-
- *plen = len;
- return pos;
+ if (GET_MODE_SIZE (GET_MODE (x)) <= GET_MODE_SIZE (mode)
+ || TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
+ GET_MODE_BITSIZE (GET_MODE (x)))
+ || (REG_P (x) && reg_truncated_to_mode (mode, x)))
+ return gen_lowpart (mode, x);
+ else
+ return simplify_gen_unary (TRUNCATE, mode, x, GET_MODE (x));
}
-\f
+
/* See if X can be simplified knowing that we will only refer to it in
MODE and will only refer to those bits that are nonzero in MASK.
If other bits are being computed or if masking operations are done
Return a possibly simplified expression, but always convert X to
MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
- Also, if REG is nonzero and X is a register equal in value to REG,
- replace X with REG.
-
If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
are all off in X. This is used when X will be complemented, by either
NOT, NEG, or XOR. */
static rtx
force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
- rtx reg, int just_select)
+ int just_select)
{
enum rtx_code code = GET_CODE (x);
int next_select = just_select || code == XOR || code == NOT || code == NEG;
nonzero = nonzero_bits (x, mode);
/* If none of the bits in X are needed, return a zero. */
- if (! just_select && (nonzero & mask) == 0)
+ if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
x = const0_rtx;
/* If X is a CONST_INT, return a new one. Do this here since the
if (GET_CODE (x) == CONST_INT)
{
if (SCALAR_INT_MODE_P (mode))
- return gen_int_mode (INTVAL (x) & mask, mode);
+ return gen_int_mode (INTVAL (x) & mask, mode);
else
{
x = GEN_INT (INTVAL (x) & mask);
generating something that won't match. */
return x;
- case USE:
- /* X is a (use (mem ..)) that was made from a bit-field extraction that
- spanned the boundary of the MEM. If we are now masking so it is
- within that boundary, we don't need the USE any more. */
- if (! BITS_BIG_ENDIAN
- && (mask & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
- return force_to_mode (XEXP (x, 0), mode, mask, reg, next_select);
- break;
-
case SIGN_EXTEND:
case ZERO_EXTEND:
case ZERO_EXTRACT:
case SIGN_EXTRACT:
x = expand_compound_operation (x);
if (GET_CODE (x) != code)
- return force_to_mode (x, mode, mask, reg, next_select);
- break;
-
- case REG:
- if (reg != 0 && (rtx_equal_p (get_last_value (reg), x)
- || rtx_equal_p (reg, get_last_value (x))))
- x = reg;
+ return force_to_mode (x, mode, mask, next_select);
break;
case SUBREG:
|| (0 == (mask
& GET_MODE_MASK (GET_MODE (x))
& ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
- return force_to_mode (SUBREG_REG (x), mode, mask, reg, next_select);
+ return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
break;
case AND:
&& (INTVAL (XEXP (x, 1)) & ~smask) != 0)
return force_to_mode (plus_constant (XEXP (x, 0),
(INTVAL (XEXP (x, 1)) & smask)),
- mode, smask, reg, next_select);
+ mode, smask, next_select);
}
/* ... fall through ... */
{
x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
GET_MODE (x));
- return force_to_mode (x, mode, mask, reg, next_select);
+ return force_to_mode (x, mode, mask, next_select);
}
/* Similarly, if C contains every bit in the fuller_mask, then we may
{
x = simplify_gen_unary (NOT, GET_MODE (x),
XEXP (x, 1), GET_MODE (x));
- return force_to_mode (x, mode, mask, reg, next_select);
+ return force_to_mode (x, mode, mask, next_select);
}
mask = fuller_mask;
XEXP (XEXP (x, 0), 0), temp);
x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
XEXP (XEXP (x, 0), 1));
- return force_to_mode (x, mode, mask, reg, next_select);
+ return force_to_mode (x, mode, mask, next_select);
}
binop:
/* For most binary operations, just propagate into the operation and
change the mode if we have an operation of that mode. */
- op0 = gen_lowpart (op_mode,
- force_to_mode (XEXP (x, 0), mode, mask,
- reg, next_select));
- op1 = gen_lowpart (op_mode,
- force_to_mode (XEXP (x, 1), mode, mask,
- reg, next_select));
+ op0 = gen_lowpart_or_truncate (op_mode,
+ force_to_mode (XEXP (x, 0), mode, mask,
+ next_select));
+ op1 = gen_lowpart_or_truncate (op_mode,
+ force_to_mode (XEXP (x, 1), mode, mask,
+ next_select));
if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
x = simplify_gen_binary (code, op_mode, op0, op1);
else
mask = fuller_mask;
- op0 = gen_lowpart (op_mode,
- force_to_mode (XEXP (x, 0), op_mode,
- mask, reg, next_select));
+ op0 = gen_lowpart_or_truncate (op_mode,
+ force_to_mode (XEXP (x, 0), op_mode,
+ mask, next_select));
if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
op_mode = GET_MODE (x);
- inner = force_to_mode (inner, op_mode, inner_mask, reg, next_select);
+ inner = force_to_mode (inner, op_mode, inner_mask, next_select);
if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
if (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
&& (mask == ((unsigned HOST_WIDE_INT) 1
<< (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
- return force_to_mode (XEXP (x, 0), mode, mask, reg, next_select);
+ return force_to_mode (XEXP (x, 0), mode, mask, next_select);
/* If this is a shift by a constant, get a mask that contains those bits
that are not copies of the sign bit. We then have two cases: If
if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) >= 0
&& INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
{
- int i = -1;
+ int i;
/* If the considered data is wider than HOST_WIDE_INT, we can't
represent a mask for all its bits in a single scalar.
nonzero >>= INTVAL (XEXP (x, 1));
}
- if ((mask & ~nonzero) == 0
- || (i = exact_log2 (mask)) >= 0)
+ if ((mask & ~nonzero) == 0)
+ {
+ x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
+ XEXP (x, 0), INTVAL (XEXP (x, 1)));
+ if (GET_CODE (x) != ASHIFTRT)
+ return force_to_mode (x, mode, mask, next_select);
+ }
+
+ else if ((i = exact_log2 (mask)) >= 0)
{
x = simplify_shift_const
- (x, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
- i < 0 ? INTVAL (XEXP (x, 1))
- : GET_MODE_BITSIZE (GET_MODE (x)) - 1 - i);
+ (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
+ GET_MODE_BITSIZE (GET_MODE (x)) - 1 - i);
if (GET_CODE (x) != ASHIFTRT)
- return force_to_mode (x, mode, mask, reg, next_select);
+ return force_to_mode (x, mode, mask, next_select);
}
}
&& GET_CODE (XEXP (x, 0)) == ASHIFT
&& XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
- reg, next_select);
+ next_select);
break;
if (temp && GET_CODE (temp) == CONST_INT)
SUBST (XEXP (x, 0),
force_to_mode (XEXP (x, 0), GET_MODE (x),
- INTVAL (temp), reg, next_select));
+ INTVAL (temp), next_select));
}
break;
/* If we just want the low-order bit, the NEG isn't needed since it
won't change the low-order bit. */
if (mask == 1)
- return force_to_mode (XEXP (x, 0), mode, mask, reg, just_select);
+ return force_to_mode (XEXP (x, 0), mode, mask, just_select);
/* We need any bits less significant than the most significant bit in
MASK since carries from those bits will affect the bits we are
x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
temp, XEXP (XEXP (x, 0), 1));
- return force_to_mode (x, mode, mask, reg, next_select);
+ return force_to_mode (x, mode, mask, next_select);
}
/* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
mask = fuller_mask;
unop:
- op0 = gen_lowpart (op_mode,
- force_to_mode (XEXP (x, 0), mode, mask,
- reg, next_select));
+ op0 = gen_lowpart_or_truncate (op_mode,
+ force_to_mode (XEXP (x, 0), mode, mask,
+ next_select));
if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
x = simplify_gen_unary (code, op_mode, op0, op_mode);
break;
&& exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0
&& (nonzero_bits (XEXP (x, 0), mode)
== (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
- return force_to_mode (XEXP (x, 0), mode, mask, reg, next_select);
+ return force_to_mode (XEXP (x, 0), mode, mask, next_select);
break;
written in a narrower mode. We play it safe and do not do so. */
SUBST (XEXP (x, 1),
- gen_lowpart (GET_MODE (x),
+ gen_lowpart_or_truncate (GET_MODE (x),
force_to_mode (XEXP (x, 1), mode,
- mask, reg, next_select)));
+ mask, next_select)));
SUBST (XEXP (x, 2),
- gen_lowpart (GET_MODE (x),
+ gen_lowpart_or_truncate (GET_MODE (x),
force_to_mode (XEXP (x, 2), mode,
- mask, reg, next_select)));
+ mask, next_select)));
break;
default:
}
/* Ensure we return a value of the proper mode. */
- return gen_lowpart (mode, x);
+ return gen_lowpart_or_truncate (mode, x);
}
\f
/* Return nonzero if X is an expression that has one of two values depending on
*ptrue = simplify_gen_relational (code, mode, VOIDmode,
true0, true1);
*pfalse = simplify_gen_relational (code, mode, VOIDmode,
- false0, false1);
+ false0, false1);
}
else
{
if (XEXP (x, 0) != r)
{
/* We must simplify the zero_extend here, before we lose
- track of the original inner_mode. */
+ track of the original inner_mode. */
new = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
r, inner_mode);
if (new)
/* Shift OTHER right POS places and make it the source, restricting it
to the proper length and mode. */
- src = force_to_mode (simplify_shift_const (NULL_RTX, LSHIFTRT,
- GET_MODE (src), other, pos),
- mode,
+ src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
+ GET_MODE (src),
+ other, pos),
+ dest);
+ src = force_to_mode (src, mode,
GET_MODE_BITSIZE (mode) >= HOST_BITS_PER_WIDE_INT
? ~(unsigned HOST_WIDE_INT) 0
: ((unsigned HOST_WIDE_INT) 1 << len) - 1,
- dest, 0);
+ 0);
/* If SRC is masked by an AND that does not make a difference in
the value being stored, strip it. */
break;
case SUBREG:
- /* Non-paradoxical SUBREGs distributes over all operations, provided
- the inner modes and byte offsets are the same, this is an extraction
- of a low-order part, we don't convert an fp operation to int or
- vice versa, and we would not be converting a single-word
- operation into a multi-word operation. The latter test is not
- required, but it prevents generating unneeded multi-word operations.
- Some of the previous tests are redundant given the latter test, but
- are retained because they are required for correctness.
+ /* Non-paradoxical SUBREGs distribute over all operations,
+ provided the inner modes and byte offsets are the same, this
+ is an extraction of a low-order part, we don't convert an fp
+ operation to int or vice versa, this is not a vector mode,
+ and we would not be converting a single-word operation into a
+ multi-word operation. The latter test is not required, but
+ it prevents generating unneeded multi-word operations. Some
+ of the previous tests are redundant given the latter test,
+ but are retained because they are required for correctness.
We produce the result slightly differently in this case. */
!= GET_MODE_CLASS (GET_MODE (SUBREG_REG (lhs))))
|| (GET_MODE_SIZE (GET_MODE (lhs))
> GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))))
- || GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))) > UNITS_PER_WORD)
+ || VECTOR_MODE_P (GET_MODE (lhs))
+ || GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))) > UNITS_PER_WORD
+ /* Result might need to be truncated. Don't change mode if
+ explicit truncation is needed. */
+ || !TRULY_NOOP_TRUNCATION
+ (GET_MODE_BITSIZE (GET_MODE (x)),
+ GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (lhs)))))
return x;
tem = simplify_gen_binary (code, GET_MODE (SUBREG_REG (lhs)),
expanding a bit field assignment. When we apply the distributive
law to this, we get (ior (and (A (not B))) (and (B (not B)))),
which then simplifies to (and (A (not B))).
-
+
Note that no checks happen on the validity of applying the inverse
distributive law. This is pointless since we can do it in the
few places where this routine is called.
return NULL_RTX;
}
\f
-/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
- in MODE.
-
- Return an equivalent form, if different from X. Otherwise, return X. If
- X is zero, we are to always construct the equivalent form. */
+/* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
+ in MODE. Return an equivalent form, if different from (and VAROP
+ (const_int CONSTOP)). Otherwise, return NULL_RTX. */
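+/* For example, (and:SI (zero_extend:SI (reg:QI N)) (const_int 255))
+ masks only bits that are already known to be zero, so the AND is
+ redundant and can be dropped. */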
static rtx
-simplify_and_const_int (rtx x, enum machine_mode mode, rtx varop,
- unsigned HOST_WIDE_INT constop)
+simplify_and_const_int_1 (enum machine_mode mode, rtx varop,
+ unsigned HOST_WIDE_INT constop)
{
unsigned HOST_WIDE_INT nonzero;
+ unsigned HOST_WIDE_INT orig_constop;
+ rtx orig_varop;
int i;
+ orig_varop = varop;
+ orig_constop = constop;
+ if (GET_CODE (varop) == CLOBBER)
+ return NULL_RTX;
+
/* Simplify VAROP knowing that we will be only looking at some of the
bits in it.
CONSTOP are not significant and will never be examined. We must
ensure that is the case by explicitly masking out those bits
before returning. */
- varop = force_to_mode (varop, mode, constop, NULL_RTX, 0);
+ varop = force_to_mode (varop, mode, constop, 0);
/* If VAROP is a CLOBBER, we will fail so return it. */
if (GET_CODE (varop) == CLOBBER)
XEXP (varop, 1),
constop))));
- /* If VAROP is PLUS, and the constant is a mask of low bite, distribute
+ /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
the AND and see if one of the operands simplifies to zero. If so, we
may eliminate it. */
return o0;
}
- /* Get VAROP in MODE. Try to get a SUBREG if not. Don't make a new SUBREG
- if we already had one (just check for the simplest cases). */
- if (x && GET_CODE (XEXP (x, 0)) == SUBREG
- && GET_MODE (XEXP (x, 0)) == mode
- && SUBREG_REG (XEXP (x, 0)) == varop)
- varop = XEXP (x, 0);
- else
- varop = gen_lowpart (mode, varop);
-
- /* If we can't make the SUBREG, try to return what we were given. */
- if (GET_CODE (varop) == CLOBBER)
- return x ? x : varop;
+ /* Make a SUBREG if necessary. If we can't make it, fail. */
+ varop = gen_lowpart (mode, varop);
+ if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
+ return NULL_RTX;
/* If we are only masking insignificant bits, return VAROP. */
if (constop == nonzero)
- x = varop;
- else
- {
- /* Otherwise, return an AND. */
- constop = trunc_int_for_mode (constop, mode);
- /* See how much, if any, of X we can use. */
- if (x == 0 || GET_CODE (x) != AND || GET_MODE (x) != mode)
- x = simplify_gen_binary (AND, mode, varop, GEN_INT (constop));
+ return varop;
- else
- {
- if (GET_CODE (XEXP (x, 1)) != CONST_INT
- || (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) != constop)
- SUBST (XEXP (x, 1), GEN_INT (constop));
+ if (varop == orig_varop && constop == orig_constop)
+ return NULL_RTX;
- SUBST (XEXP (x, 0), varop);
- }
- }
+ /* Otherwise, return an AND. */
+ return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
+}
+
+
+/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
+ in MODE.
+
+ Return an equivalent form, if different from X. Otherwise, return X. If
+ X is zero, we are to always construct the equivalent form. */
+
+static rtx
+simplify_and_const_int (rtx x, enum machine_mode mode, rtx varop,
+ unsigned HOST_WIDE_INT constop)
+{
+ rtx tem = simplify_and_const_int_1 (mode, varop, constop);
+ if (tem)
+ return tem;
+ if (!x)
+ x = simplify_gen_binary (AND, GET_MODE (varop), varop,
+ gen_int_mode (constop, mode));
+ if (GET_MODE (x) != mode)
+ x = gen_lowpart (mode, x);
return x;
}
\f
if (reg_stat[REGNO (x)].last_set_value != 0
&& (reg_stat[REGNO (x)].last_set_mode == mode
- || (GET_MODE_CLASS (reg_stat[REGNO (x)].last_set_mode) == MODE_INT
+ || (GET_MODE_CLASS (reg_stat[REGNO (x)].last_set_mode) == MODE_INT
&& GET_MODE_CLASS (mode) == MODE_INT))
- && (reg_stat[REGNO (x)].last_set_label == label_tick
+ && ((reg_stat[REGNO (x)].last_set_label >= label_tick_ebb_start
+ && reg_stat[REGNO (x)].last_set_label < label_tick)
+ || (reg_stat[REGNO (x)].last_set_label == label_tick
+ && DF_INSN_LUID (reg_stat[REGNO (x)].last_set) < subst_low_luid)
|| (REGNO (x) >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (REGNO (x)) == 1
- && ! REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start,
- REGNO (x))))
- && INSN_CUID (reg_stat[REGNO (x)].last_set) < subst_low_cuid)
+ && !REGNO_REG_SET_P
+ (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
{
*nonzero &= reg_stat[REGNO (x)].last_set_nonzero_bits;
return NULL;
{
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
/* If X is narrower than MODE and TEM is a non-negative
- constant that would appear negative in the mode of X,
- sign-extend it for use in reg_nonzero_bits because some
- machines (maybe most) will actually do the sign-extension
- and this is the conservative approach.
+ constant that would appear negative in the mode of X,
+ sign-extend it for use in reg_nonzero_bits because some
+ machines (maybe most) will actually do the sign-extension
+ and this is the conservative approach.
- ??? For 2.5, try to tighten up the MD files in this regard
- instead of this kludge. */
+ ??? For 2.5, try to tighten up the MD files in this regard
+ instead of this kludge. */
if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode)
&& GET_CODE (tem) == CONST_INT
unsigned HOST_WIDE_INT mask = reg_stat[REGNO (x)].nonzero_bits;
if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode))
- /* We don't know anything about the upper bits. */
- mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
+ /* We don't know anything about the upper bits. */
+ mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
*nonzero &= mask;
}
if (reg_stat[REGNO (x)].last_set_value != 0
&& reg_stat[REGNO (x)].last_set_mode == mode
- && (reg_stat[REGNO (x)].last_set_label == label_tick
- || (REGNO (x) >= FIRST_PSEUDO_REGISTER
+ && ((reg_stat[REGNO (x)].last_set_label >= label_tick_ebb_start
+ && reg_stat[REGNO (x)].last_set_label < label_tick)
+ || (reg_stat[REGNO (x)].last_set_label == label_tick
+ && DF_INSN_LUID (reg_stat[REGNO (x)].last_set) < subst_low_luid)
+ || (REGNO (x) >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (REGNO (x)) == 1
- && ! REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start,
- REGNO (x))))
- && INSN_CUID (reg_stat[REGNO (x)].last_set) < subst_low_cuid)
+ && !REGNO_REG_SET_P
+ (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
{
*result = reg_stat[REGNO (x)].last_set_sign_bit_copies;
return NULL;
if (nonzero_sign_valid && reg_stat[REGNO (x)].sign_bit_copies != 0
&& GET_MODE_BITSIZE (GET_MODE (x)) == GET_MODE_BITSIZE (mode))
*result = reg_stat[REGNO (x)].sign_bit_copies;
-
+
return NULL;
}
\f
}
\f
/* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
- The result of the shift is RESULT_MODE. X, if nonzero, is an expression
- that we started with.
+ The result of the shift is RESULT_MODE. Return NULL_RTX if we cannot
+ simplify it. Otherwise, return a simplified value.
The shift is normally computed in the widest mode we find in VAROP, as
long as it isn't a different number of words than RESULT_MODE. Exceptions
- are ASHIFTRT and ROTATE, which are always done in their original mode, */
+ are ASHIFTRT and ROTATE, which are always done in their original mode. */
static rtx
-simplify_shift_const (rtx x, enum rtx_code code,
- enum machine_mode result_mode, rtx varop,
- int orig_count)
+simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
+ rtx varop, int orig_count)
{
enum rtx_code orig_code = code;
- unsigned int count;
- int signed_count;
+ rtx orig_varop = varop;
+ int count;
enum machine_mode mode = result_mode;
enum machine_mode shift_mode, tmode;
unsigned int mode_words
/* We form (outer_op (code varop count) (outer_const)). */
enum rtx_code outer_op = UNKNOWN;
HOST_WIDE_INT outer_const = 0;
- rtx const_rtx;
int complement_p = 0;
- rtx new;
+ rtx new, x;
/* Make sure and truncate the "natural" shift on the way in. We don't
want to do this inside the loop as it makes it more difficult to
what was requested. */
if (orig_count < 0 || orig_count >= (int) GET_MODE_BITSIZE (mode))
- {
- if (x)
- return x;
-
- return gen_rtx_fmt_ee (code, mode, varop, GEN_INT (orig_count));
- }
+ return NULL_RTX;
count = orig_count;
while (count != 0)
{
- /* If we have an operand of (clobber (const_int 0)), just return that
- value. */
+ /* If we have an operand of (clobber (const_int 0)), fail. */
if (GET_CODE (varop) == CLOBBER)
- return varop;
+ return NULL_RTX;
/* If we discovered we had to complement VAROP, leave. Making a NOT
here would cause an infinite loop. */
multiple operations, each of which are defined, we know what the
result is supposed to be. */
- if (count > (unsigned int) (GET_MODE_BITSIZE (shift_mode) - 1))
+ if (count > (GET_MODE_BITSIZE (shift_mode) - 1))
{
if (code == ASHIFTRT)
count = GET_MODE_BITSIZE (shift_mode) - 1;
== 0))
code = LSHIFTRT;
- if (code == LSHIFTRT
- && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
- && !(nonzero_bits (varop, shift_mode) >> count))
- varop = const0_rtx;
- if (code == ASHIFT
- && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
- && !((nonzero_bits (varop, shift_mode) << count)
- & GET_MODE_MASK (shift_mode)))
+ if (((code == LSHIFTRT
+ && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
+ && !(nonzero_bits (varop, shift_mode) >> count))
+ || (code == ASHIFT
+ && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
+ && !((nonzero_bits (varop, shift_mode) << count)
+ & GET_MODE_MASK (shift_mode))))
+ && !side_effects_p (varop))
varop = const0_rtx;
switch (GET_CODE (varop))
}
break;
- case USE:
- /* Similar to the case above, except that we can only do this if
- the resulting mode is the same as that of the underlying
- MEM and adjust the address depending on the *bits* endianness
- because of the way that bit-field extract insns are defined. */
- if ((code == ASHIFTRT || code == LSHIFTRT)
- && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
- MODE_INT, 1)) != BLKmode
- && tmode == GET_MODE (XEXP (varop, 0)))
- {
- if (BITS_BIG_ENDIAN)
- new = XEXP (varop, 0);
- else
- {
- new = copy_rtx (XEXP (varop, 0));
- SUBST (XEXP (new, 0),
- plus_constant (XEXP (new, 0),
- count / BITS_PER_UNIT));
- }
-
- varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
- : ZERO_EXTEND, mode, new);
- count = 0;
- continue;
- }
- break;
-
case SUBREG:
/* If VAROP is a SUBREG, strip it as long as the inner operand has
the same number of words as what we've seen so far. Then store
interpreted as the sign bit in a narrower mode, so, if
the result is narrower, don't discard the shift. */
if (code == LSHIFTRT
- && count == (unsigned int) (GET_MODE_BITSIZE (result_mode) - 1)
+ && count == (GET_MODE_BITSIZE (result_mode) - 1)
&& (GET_MODE_BITSIZE (result_mode)
>= GET_MODE_BITSIZE (GET_MODE (varop))))
{
&& INTVAL (XEXP (varop, 1)) >= 0
&& INTVAL (XEXP (varop, 1)) < GET_MODE_BITSIZE (GET_MODE (varop))
&& GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && !VECTOR_MODE_P (result_mode))
{
enum rtx_code first_code = GET_CODE (varop);
unsigned int first_count = INTVAL (XEXP (varop, 1));
(ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0 C2) C3) C1).
This simplifies certain SIGN_EXTEND operations. */
if (code == ASHIFT && first_code == ASHIFTRT
- && count == (unsigned int)
- (GET_MODE_BITSIZE (result_mode)
+ && count == (GET_MODE_BITSIZE (result_mode)
- GET_MODE_BITSIZE (GET_MODE (varop))))
{
/* C3 has the low-order C1 bits zero. */
> first_count))
{
varop = XEXP (varop, 0);
-
- signed_count = count - first_count;
- if (signed_count < 0)
- count = -signed_count, code = ASHIFT;
- else
- count = signed_count;
+ count -= first_count;
+ if (count < 0)
+ {
+ count = -count;
+ code = ASHIFT;
+ }
continue;
}
mask_rtx = GEN_INT (nonzero_bits (varop, GET_MODE (varop)));
mask_rtx
- = simplify_binary_operation (code, result_mode, mask_rtx,
- GEN_INT (count));
+ = simplify_const_binary_operation (code, result_mode, mask_rtx,
+ GEN_INT (count));
/* Give up if we can't compute an outer operation to use. */
if (mask_rtx == 0
/* If the shifts are in the same direction, we add the
counts. Otherwise, we subtract them. */
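+ /* For instance, (lshiftrt (lshiftrt X 2) 3) becomes a single
+ (lshiftrt X 5), provided the combined count still fits the mode. */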
- signed_count = count;
if ((code == ASHIFTRT || code == LSHIFTRT)
== (first_code == ASHIFTRT || first_code == LSHIFTRT))
- signed_count += first_count;
+ count += first_count;
else
- signed_count -= first_count;
+ count -= first_count;
/* If COUNT is positive, the new shift is usually CODE,
except for the two exceptions below, in which case it is
FIRST_CODE. If the count is negative, FIRST_CODE should
always be used */
- if (signed_count > 0
+ if (count > 0
&& ((first_code == ROTATE && code == ASHIFT)
|| (first_code == ASHIFTRT && code == LSHIFTRT)))
- code = first_code, count = signed_count;
- else if (signed_count < 0)
- code = first_code, count = -signed_count;
- else
- count = signed_count;
+ code = first_code;
+ else if (count < 0)
+ code = first_code, count = -count;
varop = XEXP (varop, 0);
continue;
B is not a constant. */
else if (GET_CODE (varop) == code
- && GET_CODE (XEXP (varop, 1)) != CONST_INT
- && 0 != (new
- = simplify_binary_operation (code, mode,
- XEXP (varop, 0),
- GEN_INT (count))))
+ && GET_CODE (XEXP (varop, 0)) == CONST_INT
+ && GET_CODE (XEXP (varop, 1)) != CONST_INT)
{
+ rtx new = simplify_const_binary_operation (code, mode,
+ XEXP (varop, 0),
+ GEN_INT (count));
varop = gen_rtx_fmt_ee (code, mode, new, XEXP (varop, 1));
count = 0;
continue;
&& XEXP (XEXP (varop, 0), 1) == constm1_rtx
&& (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
&& (code == LSHIFTRT || code == ASHIFTRT)
- && count == (unsigned int)
- (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
+ && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
&& rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
{
count = 0;
&& !(code == ASHIFTRT && GET_CODE (varop) == XOR
&& 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
shift_mode))
- && (new = simplify_binary_operation (code, result_mode,
- XEXP (varop, 1),
- GEN_INT (count))) != 0
+ && (new = simplify_const_binary_operation (code, result_mode,
+ XEXP (varop, 1),
+ GEN_INT (count))) != 0
&& GET_CODE (new) == CONST_INT
&& merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
INTVAL (new), result_mode, &complement_p))
for some (ashiftrt (xor)). */
if (GET_CODE (XEXP (varop, 1)) == CONST_INT
&& !(code == ASHIFTRT && GET_CODE (varop) == XOR
- && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
+ && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
shift_mode)))
{
rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
varop = apply_distributive_law (varop);
count = 0;
- continue;
+ continue;
}
break;
if (code == LSHIFTRT
&& XEXP (varop, 1) == const0_rtx
&& GET_MODE (XEXP (varop, 0)) == result_mode
- && count == (unsigned int) (GET_MODE_BITSIZE (result_mode) - 1)
+ && count == (GET_MODE_BITSIZE (result_mode) - 1)
&& GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
- && ((STORE_FLAG_VALUE
- & ((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (result_mode) - 1))))
+ && STORE_FLAG_VALUE == -1
&& nonzero_bits (XEXP (varop, 0), result_mode) == 1
&& merge_outer_ops (&outer_op, &outer_const, XOR,
(HOST_WIDE_INT) 1, result_mode,
/* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
than the number of bits in the mode is equivalent to A. */
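+ /* For instance, in SImode (lshiftrt:SI (neg:SI A) (const_int 31))
+ is 0 when A is 0 and 1 when A is 1, i.e. it is just A. */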
if (code == LSHIFTRT
- && count == (unsigned int) (GET_MODE_BITSIZE (result_mode) - 1)
+ && count == (GET_MODE_BITSIZE (result_mode) - 1)
&& nonzero_bits (XEXP (varop, 0), result_mode) == 1)
{
varop = XEXP (varop, 0);
is one less than the number of bits in the mode is
equivalent to (xor A 1). */
if (code == LSHIFTRT
- && count == (unsigned int) (GET_MODE_BITSIZE (result_mode) - 1)
+ && count == (GET_MODE_BITSIZE (result_mode) - 1)
&& XEXP (varop, 1) == constm1_rtx
&& nonzero_bits (XEXP (varop, 0), result_mode) == 1
&& merge_outer_ops (&outer_op, &outer_const, XOR,
/* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
if (code == ASHIFT
&& GET_CODE (XEXP (varop, 1)) == CONST_INT
- && (new = simplify_binary_operation (ASHIFT, result_mode,
- XEXP (varop, 1),
- GEN_INT (count))) != 0
+ && (new = simplify_const_binary_operation (ASHIFT, result_mode,
+ XEXP (varop, 1),
+ GEN_INT (count))) != 0
&& GET_CODE (new) == CONST_INT
&& merge_outer_ops (&outer_op, &outer_const, PLUS,
INTVAL (new), result_mode, &complement_p))
if (code == LSHIFTRT
&& GET_CODE (XEXP (varop, 1)) == CONST_INT
&& mode_signbit_p (result_mode, XEXP (varop, 1))
- && (new = simplify_binary_operation (code, result_mode,
- XEXP (varop, 1),
- GEN_INT (count))) != 0
+ && (new = simplify_const_binary_operation (code, result_mode,
+ XEXP (varop, 1),
+ GEN_INT (count))) != 0
&& GET_CODE (new) == CONST_INT
&& merge_outer_ops (&outer_op, &outer_const, XOR,
INTVAL (new), result_mode, &complement_p))
if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
&& GET_CODE (XEXP (varop, 0)) == ASHIFTRT
- && count == (unsigned int)
- (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
+ && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
&& (code == LSHIFTRT || code == ASHIFTRT)
&& GET_CODE (XEXP (XEXP (varop, 0), 1)) == CONST_INT
- && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (varop, 0), 1))
- == count
+ && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
&& rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
{
count = 0;
a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
to the result of the shift. OUTER_CONST is the relevant constant,
- but we must turn off all bits turned off in the shift.
-
- If we were passed a value for X, see if we can use any pieces of
- it. If not, make new rtx. */
-
- if (x && GET_RTX_CLASS (GET_CODE (x)) == RTX_BIN_ARITH
- && GET_CODE (XEXP (x, 1)) == CONST_INT
- && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) == count)
- const_rtx = XEXP (x, 1);
- else
- const_rtx = GEN_INT (count);
+ but we must turn off all bits turned off in the shift. */
- if (x && GET_CODE (XEXP (x, 0)) == SUBREG
- && GET_MODE (XEXP (x, 0)) == shift_mode
- && SUBREG_REG (XEXP (x, 0)) == varop)
- varop = XEXP (x, 0);
- else if (GET_MODE (varop) != shift_mode)
- varop = gen_lowpart (shift_mode, varop);
-
- /* If we can't make the SUBREG, try to return what we were given. */
- if (GET_CODE (varop) == CLOBBER)
- return x ? x : varop;
+ if (outer_op == UNKNOWN
+ && orig_code == code && orig_count == count
+ && varop == orig_varop
+ && shift_mode == GET_MODE (varop))
+ return NULL_RTX;
- new = simplify_binary_operation (code, shift_mode, varop, const_rtx);
- if (new != 0)
- x = new;
- else
- x = gen_rtx_fmt_ee (code, shift_mode, varop, const_rtx);
+ /* Make a SUBREG if necessary. If we can't make it, fail. */
+ varop = gen_lowpart (shift_mode, varop);
+ if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
+ return NULL_RTX;
/* If we have an outer operation and we just made a shift, it is
possible that we could have simplified the shift were it not
for the outer operation. So try to do the simplification
recursively. */
- if (outer_op != UNKNOWN && GET_CODE (x) == code
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
- x = simplify_shift_const (x, code, shift_mode, XEXP (x, 0),
- INTVAL (XEXP (x, 1)));
+ if (outer_op != UNKNOWN)
+ x = simplify_shift_const_1 (code, shift_mode, varop, count);
+ else
+ x = NULL_RTX;
+
+ if (x == NULL_RTX)
+ x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
/* If we were doing an LSHIFTRT in a wider mode than it was originally,
turn off all the bits that the shift would have turned off. */
GET_MODE_MASK (result_mode) >> orig_count);
/* Do the remainder of the processing in RESULT_MODE. */
- x = gen_lowpart (result_mode, x);
+ x = gen_lowpart_or_truncate (result_mode, x);
/* If COMPLEMENT_P is set, we have to complement X before doing the outer
operation. */
if (outer_op == AND)
x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
else if (outer_op == SET)
- /* This means that we have determined that the result is
- equivalent to a constant. This should be rare. */
- x = GEN_INT (outer_const);
+ {
+ /* This means that we have determined that the result is
+ equivalent to a constant. This should be rare. */
+ if (!side_effects_p (x))
+ x = GEN_INT (outer_const);
+ }
else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
else
return x;
}
+
+/* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
+ The result of the shift is RESULT_MODE. If we cannot simplify it,
+ return X or, if it is NULL, synthesize the expression with
+ simplify_gen_binary. Otherwise, return a simplified value.
+
+ The shift is normally computed in the widest mode we find in VAROP, as
+ long as it isn't a different number of words than RESULT_MODE. Exceptions
+ are ASHIFTRT and ROTATE, which are always done in their original mode. */
+
+static rtx
+simplify_shift_const (rtx x, enum rtx_code code, enum machine_mode result_mode,
+ rtx varop, int count)
+{
+ rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
+ if (tem)
+ return tem;
+
+ if (!x)
+ x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
+ if (GET_MODE (x) != result_mode)
+ x = gen_lowpart (result_mode, x);
+ return x;
+}
+
\f
/* Like recog, but we receive the address of a pointer to a new pattern.
We try to match the rtx that the pointer points to.
REG_NOTES (insn) = 0;
insn_code_number = recog (pat, insn, &num_clobbers_to_add);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ if (insn_code_number < 0)
+ fputs ("Failed to match this instruction:\n", dump_file);
+ else
+ fputs ("Successfully matched this instruction:\n", dump_file);
+ print_rtl_single (dump_file, pat);
+ }
/* If it isn't, there is the possibility that we previously had an insn
that clobbered some register as a side effect, but the combined
PATTERN (insn) = pat;
insn_code_number = recog (pat, insn, &num_clobbers_to_add);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ if (insn_code_number < 0)
+ fputs ("Failed to match this instruction:\n", dump_file);
+ else
+ fputs ("Successfully matched this instruction:\n", dump_file);
+ print_rtl_single (dump_file, pat);
+ }
}
PATTERN (insn) = old_pat;
REG_NOTES (insn) = old_notes;
if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
&& ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
return -1;
- notes = gen_rtx_EXPR_LIST (REG_UNUSED,
- XEXP (XVECEXP (newpat, 0, i), 0), notes);
+ if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
+ {
+ gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
+ notes = gen_rtx_EXPR_LIST (REG_UNUSED,
+ XEXP (XVECEXP (newpat, 0, i), 0), notes);
+ }
}
pat = newpat;
}
result = gen_lowpart_common (omode, x);
-#ifdef CANNOT_CHANGE_MODE_CLASS
- if (result != 0 && GET_CODE (result) == SUBREG)
- record_subregs_of_mode (result);
-#endif
-
if (result)
return result;
op0 = force_to_mode (op0, mode,
((HOST_WIDE_INT) 1
<< (GET_MODE_BITSIZE (mode) - 1)),
- NULL_RTX, 0);
+ 0);
/* Now try cases based on the opcode of OP0. If none of the cases
does a "continue", we exit this loop immediately after the
&& ! unsigned_comparison_p
&& (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
&& ((unsigned HOST_WIDE_INT) const_op
- < (((unsigned HOST_WIDE_INT) 1
+ < (((unsigned HOST_WIDE_INT) 1
<< (GET_MODE_BITSIZE (mode) - 1))))
&& cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
{
HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
if ((c1 > 0
- && (unsigned HOST_WIDE_INT) c1
+ && (unsigned HOST_WIDE_INT) c1
< (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)
&& (equality_comparison_p || unsigned_comparison_p)
/* (A - C1) zero-extends if it is positive and sign-extends
/* (A - C1) always sign-extends, like C2. */
&& num_sign_bit_copies (a, inner_mode)
> (unsigned int) (GET_MODE_BITSIZE (inner_mode)
- - mode_width - 1)))
+ - (mode_width - 1))))
{
op0 = SUBREG_REG (op0);
continue;
- }
+ }
}
/* If the inner mode is narrower and we are extracting the low part,
case UNEQ: case LTGT:
case LT: case LTU: case UNLT: case LE: case LEU: case UNLE:
case GT: case GTU: case UNGT: case GE: case GEU: case UNGE:
- case UNORDERED: case ORDERED:
+ case UNORDERED: case ORDERED:
/* We can't do anything if OP0 is a condition code value, rather
than an actual data value. */
if (const_op != 0
&& XEXP (XEXP (op0, 0), 0) == const1_rtx)
{
op0 = simplify_and_const_int
- (op0, mode, gen_rtx_LSHIFTRT (mode,
- XEXP (op0, 1),
- XEXP (XEXP (op0, 0), 1)),
+ (NULL_RTX, mode, gen_rtx_LSHIFTRT (mode,
+ XEXP (op0, 1),
+ XEXP (XEXP (op0, 0), 1)),
(HOST_WIDE_INT) 1);
continue;
}
/* If this AND operation is really a ZERO_EXTEND from a narrower
mode, the constant fits within that mode, and this is either an
equality or unsigned comparison, try to do this comparison in
- the narrower mode. */
+ the narrower mode.
+
+ Note that in:
+
+ (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
+ -> (ne:DI (reg:SI 4) (const_int 0))
+
+ unless TRULY_NOOP_TRUNCATION allows it or the register is
+ known to hold a value of the required mode, the
+ transformation is invalid. */
if ((equality_comparison_p || unsigned_comparison_p)
&& GET_CODE (XEXP (op0, 1)) == CONST_INT
&& (i = exact_log2 ((INTVAL (XEXP (op0, 1))
& GET_MODE_MASK (mode))
+ 1)) >= 0
&& const_op >> i == 0
- && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode)
+ && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode
+ && (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (tmode),
+ GET_MODE_BITSIZE (GET_MODE (op0)))
+ || (REG_P (XEXP (op0, 0))
+ && reg_truncated_to_mode (tmode, XEXP (op0, 0)))))
{
op0 = gen_lowpart (tmode, XEXP (op0, 0));
continue;
{
/* For paradoxical subregs, allow case 1 as above. Case 3 isn't
implemented. */
- if (REG_P (SUBREG_REG (op0)))
+ if (REG_P (SUBREG_REG (op0)))
{
op0 = SUBREG_REG (op0);
op1 = gen_lowpart (GET_MODE (op0), op1);
if (code == REG)
{
unsigned int regno = REGNO (x);
- unsigned int endregno
- = regno + (regno < FIRST_PSEUDO_REGISTER
- ? hard_regno_nregs[regno][GET_MODE (x)] : 1);
+ unsigned int endregno = END_REGNO (x);
unsigned int r;
for (r = regno; r < endregno; r++)
record_value_for_reg (rtx reg, rtx insn, rtx value)
{
unsigned int regno = REGNO (reg);
- unsigned int endregno
- = regno + (regno < FIRST_PSEUDO_REGISTER
- ? hard_regno_nregs[regno][GET_MODE (reg)] : 1);
+ unsigned int endregno = END_REGNO (reg);
unsigned int i;
/* If VALUE contains REG and we have a previous value for REG, substitute
/* Set things up so get_last_value is allowed to see anything set up to
our insn. */
- subst_low_cuid = INSN_CUID (insn);
+ subst_low_luid = DF_INSN_LUID (insn);
tem = get_last_value (reg);
/* If TEM is simply a binary operation with two CLOBBERs as operands,
reg_stat[i].last_set_nonzero_bits = 0;
reg_stat[i].last_set_sign_bit_copies = 0;
reg_stat[i].last_death = 0;
+ reg_stat[i].truncated_to_mode = 0;
}
/* Mark registers that are being referenced in this value. */
for (i = regno; i < endregno; i++)
{
reg_stat[i].last_set_label = label_tick;
- if (value && reg_stat[i].last_set_table_tick == label_tick)
+ if (!insn
+ || (value && reg_stat[i].last_set_table_tick >= label_tick_ebb_start))
reg_stat[i].last_set_invalid = 1;
else
reg_stat[i].last_set_invalid = 0;
if (value)
{
enum machine_mode mode = GET_MODE (reg);
- subst_low_cuid = INSN_CUID (insn);
+ subst_low_luid = DF_INSN_LUID (insn);
reg_stat[regno].last_set_mode = mode;
if (GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
if (GET_CODE (dest) == SUBREG)
dest = SUBREG_REG (dest);
+ if (!record_dead_insn)
+ {
+ if (REG_P (dest))
+ record_value_for_reg (dest, NULL_RTX, NULL_RTX);
+ return;
+ }
+
if (REG_P (dest))
{
/* If we are setting the whole register, we know its value. Otherwise
else if (MEM_P (dest)
/* Ignore pushes, they clobber nothing. */
&& ! push_operand (dest, GET_MODE (dest)))
- mem_last_set = INSN_CUID (record_dead_insn);
+ mem_last_set = DF_INSN_LUID (record_dead_insn);
}
/* Update the records of when each REG was most recently set or killed
We update reg_stat[], in particular fields last_set, last_set_value,
last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
last_death, and also the similar information mem_last_set (which insn
- most recently modified memory) and last_call_cuid (which insn was the
+ most recently modified memory) and last_call_luid (which insn was the
most recent subroutine call). */
static void
&& REG_P (XEXP (link, 0)))
{
unsigned int regno = REGNO (XEXP (link, 0));
- unsigned int endregno
- = regno + (regno < FIRST_PSEUDO_REGISTER
- ? hard_regno_nregs[regno][GET_MODE (XEXP (link, 0))]
- : 1);
+ unsigned int endregno = END_REGNO (XEXP (link, 0));
for (i = regno; i < endregno; i++)
reg_stat[i].last_death = insn;
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
{
+ reg_stat[i].last_set_invalid = 1;
+ reg_stat[i].last_set = insn;
reg_stat[i].last_set_value = 0;
reg_stat[i].last_set_mode = 0;
reg_stat[i].last_set_nonzero_bits = 0;
reg_stat[i].last_set_sign_bit_copies = 0;
reg_stat[i].last_death = 0;
+ reg_stat[i].truncated_to_mode = 0;
}
- last_call_cuid = mem_last_set = INSN_CUID (insn);
+ last_call_luid = mem_last_set = DF_INSN_LUID (insn);
- /* Don't bother recording what this insn does. It might set the
- return value register, but we can't combine into a call
- pattern anyway, so there's no point trying (and it may cause
- a crash, if e.g. we wind up asking for last_set_value of a
- SUBREG of the return value register). */
- return;
+ /* We can't combine into a call pattern. Remember, though, that
+ the return value register is set at this LUID. We could
+ still replace a register with the return value from the
+ wrong subroutine call! */
+ note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
}
-
- note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
+ else
+ note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
}
/* If a SUBREG has the promoted bit set, it is in fact a property of the
}
}
-/* Scan X for promoted SUBREGs. For each one found,
- note what it implies to the registers used in it. */
+/* Check if X, a register, is known to contain a value already
+ truncated to MODE. In this case we can use a subreg to refer to
+ the truncated value even though in the generic case we would need
+ an explicit truncation. */
+
+static bool
+reg_truncated_to_mode (enum machine_mode mode, rtx x)
+{
+ enum machine_mode truncated = reg_stat[REGNO (x)].truncated_to_mode;
+
+ if (truncated == 0
+ || reg_stat[REGNO (x)].truncation_label < label_tick_ebb_start)
+ return false;
+ if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
+ return true;
+ if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
+ GET_MODE_BITSIZE (truncated)))
+ return true;
+ return false;
+}
+
+/* X is a REG or a SUBREG. If X is some sort of a truncation, record
+ it. For non-TRULY_NOOP_TRUNCATION targets we might be able to turn
+ a truncate into a subreg using this information. */
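+
+/* For example, on a target where truncating DImode to SImode is not a
+ no-op, once (subreg:SI (reg:DI N) 0) has been seen in this extended
+ basic block, (reg:DI N) is recorded as truncated to SImode and
+ gen_lowpart_or_truncate can then use a SUBREG instead of an explicit
+ TRUNCATE for its low part. */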
+
+static void
+record_truncated_value (rtx x)
+{
+ enum machine_mode truncated_mode;
+
+ if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
+ {
+ enum machine_mode original_mode = GET_MODE (SUBREG_REG (x));
+ truncated_mode = GET_MODE (x);
+
+ if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
+ return;
+
+ if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (truncated_mode),
+ GET_MODE_BITSIZE (original_mode)))
+ return;
+
+ x = SUBREG_REG (x);
+ }
+ /* ??? For hard-regs we now record everything. We might be able to
+ optimize this using last_set_mode. */
+ else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
+ truncated_mode = GET_MODE (x);
+ else
+ return;
+
+ if (reg_stat[REGNO (x)].truncated_to_mode == 0
+ || reg_stat[REGNO (x)].truncation_label < label_tick_ebb_start
+ || (GET_MODE_SIZE (truncated_mode)
+ < GET_MODE_SIZE (reg_stat[REGNO (x)].truncated_to_mode)))
+ {
+ reg_stat[REGNO (x)].truncated_to_mode = truncated_mode;
+ reg_stat[REGNO (x)].truncation_label = label_tick;
+ }
+}
+
+/* Scan X for promoted SUBREGs and truncated REGs. For each one
+ found, note what it implies to the registers used in it. */
static void
-check_promoted_subreg (rtx insn, rtx x)
+check_conversions (rtx insn, rtx x)
{
- if (GET_CODE (x) == SUBREG && SUBREG_PROMOTED_VAR_P (x)
- && REG_P (SUBREG_REG (x)))
- record_promoted_value (insn, x);
+ if (GET_CODE (x) == SUBREG || REG_P (x))
+ {
+ if (GET_CODE (x) == SUBREG
+ && SUBREG_PROMOTED_VAR_P (x)
+ && REG_P (SUBREG_REG (x)))
+ record_promoted_value (insn, x);
+
+ record_truncated_value (x);
+ }
else
{
const char *format = GET_RTX_FORMAT (GET_CODE (x));
switch (format[i])
{
case 'e':
- check_promoted_subreg (insn, XEXP (x, i));
+ check_conversions (insn, XEXP (x, i));
break;
case 'V':
case 'E':
if (XVEC (x, i) != 0)
for (j = 0; j < XVECLEN (x, i); j++)
- check_promoted_subreg (insn, XVECEXP (x, i, j));
+ check_conversions (insn, XVECEXP (x, i, j));
break;
}
}
if (REG_P (x))
{
unsigned int regno = REGNO (x);
- unsigned int endregno
- = regno + (regno < FIRST_PSEUDO_REGISTER
- ? hard_regno_nregs[regno][GET_MODE (x)] : 1);
+ unsigned int endregno = END_REGNO (x);
unsigned int j;
for (j = regno; j < endregno; j++)
live at the beginning of the function, it is always valid. */
|| (! (regno >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (regno) == 1
- && (! REGNO_REG_SET_P
- (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, regno)))
+ && !REGNO_REG_SET_P
+ (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno))
&& reg_stat[j].last_set_label > tick))
{
if (replace)
no stores after it that might have clobbered the value. We don't
have alias info, so we assume any store invalidates it. */
else if (MEM_P (x) && !MEM_READONLY_P (x)
- && INSN_CUID (insn) <= mem_last_set)
+ && DF_INSN_LUID (insn) <= mem_last_set)
{
if (replace)
*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
block. */
if (value == 0
- || (reg_stat[regno].last_set_label != label_tick
+ || (reg_stat[regno].last_set_label < label_tick_ebb_start
&& (regno < FIRST_PSEUDO_REGISTER
|| REG_N_SETS (regno) != 1
- || (REGNO_REG_SET_P
- (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, regno)))))
+ || REGNO_REG_SET_P
+ (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno))))
return 0;
/* If the value was set in a later insn than the ones we are processing,
we can't use it even if the register was only set once. */
- if (INSN_CUID (reg_stat[regno].last_set) >= subst_low_cuid)
+ if (reg_stat[regno].last_set_label == label_tick
+ && DF_INSN_LUID (reg_stat[regno].last_set) >= subst_low_luid)
return 0;
/* If the value has all its registers valid, return it. */
}
\f
/* Return nonzero if expression X refers to a REG or to memory
- that is set in an instruction more recent than FROM_CUID. */
+ that is set in an instruction more recent than FROM_LUID. */
static int
-use_crosses_set_p (rtx x, int from_cuid)
+use_crosses_set_p (rtx x, int from_luid)
{
const char *fmt;
int i;
if (code == REG)
{
unsigned int regno = REGNO (x);
- unsigned endreg = regno + (regno < FIRST_PSEUDO_REGISTER
- ? hard_regno_nregs[regno][GET_MODE (x)] : 1);
+ unsigned endreg = END_REGNO (x);
#ifdef PUSH_ROUNDING
/* Don't allow uses of the stack pointer to be moved,
#endif
for (; regno < endreg; regno++)
if (reg_stat[regno].last_set
- && INSN_CUID (reg_stat[regno].last_set) > from_cuid)
+ && reg_stat[regno].last_set_label == label_tick
+ && DF_INSN_LUID (reg_stat[regno].last_set) > from_luid)
return 1;
return 0;
}
- if (code == MEM && mem_last_set > from_cuid)
+ if (code == MEM && mem_last_set > from_luid)
return 1;
fmt = GET_RTX_FORMAT (code);
{
int j;
for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- if (use_crosses_set_p (XVECEXP (x, i, j), from_cuid))
+ if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
return 1;
}
else if (fmt[i] == 'e'
- && use_crosses_set_p (XEXP (x, i), from_cuid))
+ && use_crosses_set_p (XEXP (x, i), from_luid))
return 1;
}
return 0;
return;
regno = REGNO (dest);
- endregno = regno + (regno < FIRST_PSEUDO_REGISTER
- ? hard_regno_nregs[regno][GET_MODE (dest)] : 1);
-
+ endregno = END_REGNO (dest);
if (reg_dead_endregno > regno && reg_dead_regno < endregno)
reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
}
/* Set variables for reg_dead_at_p_1. */
reg_dead_regno = REGNO (reg);
- reg_dead_endregno = reg_dead_regno + (reg_dead_regno < FIRST_PSEUDO_REGISTER
- ? hard_regno_nregs[reg_dead_regno]
- [GET_MODE (reg)]
- : 1);
+ reg_dead_endregno = END_REGNO (reg);
reg_dead_flag = 0;
}
for (i = reg_dead_regno; i < reg_dead_endregno; i++)
- if (REGNO_REG_SET_P (block->global_live_at_start, i))
+ if (REGNO_REG_SET_P (df_get_live_in (block), i))
return 0;
return 1;
}
\f
-/* Note hard registers in X that are used. This code is similar to
- that in flow.c, but much simpler since we don't care about pseudos. */
+/* Note hard registers in X that are used. */
static void
mark_used_regs_combine (rtx x)
If so, mark all of them just like the first. */
if (regno < FIRST_PSEUDO_REGISTER)
{
- unsigned int endregno, r;
-
/* None of this applies to the stack, frame or arg pointers. */
if (regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
|| regno == FRAME_POINTER_REGNUM)
return;
- endregno = regno + hard_regno_nregs[regno][GET_MODE (x)];
- for (r = regno; r < endregno; r++)
- SET_HARD_REG_BIT (newpat_used_regs, r);
+ add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
}
return;
rtx note = find_regno_note (insn, REG_DEAD, regno);
if (note)
- {
- REG_N_DEATHS (regno)--;
- remove_note (insn, note);
- }
+ remove_note (insn, note);
return note;
}
/* For each register (hardware or pseudo) used within expression X, if its
- death is in an instruction with cuid between FROM_CUID (inclusive) and
+ death is in an instruction with luid between FROM_LUID (inclusive) and
TO_INSN (exclusive), put a REG_DEAD note for that register in the
list headed by PNOTES.
notes will then be distributed as needed. */
static void
-move_deaths (rtx x, rtx maybe_kill_insn, int from_cuid, rtx to_insn,
+move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx to_insn,
rtx *pnotes)
{
const char *fmt;
{
unsigned int regno = REGNO (x);
rtx where_dead = reg_stat[regno].last_death;
- rtx before_dead, after_dead;
/* Don't move the register if it gets killed in between from and to. */
if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
&& ! reg_referenced_p (x, maybe_kill_insn))
return;
- /* WHERE_DEAD could be a USE insn made by combine, so first we
- make sure that we have insns with valid INSN_CUID values. */
- before_dead = where_dead;
- while (before_dead && INSN_UID (before_dead) > max_uid_cuid)
- before_dead = PREV_INSN (before_dead);
-
- after_dead = where_dead;
- while (after_dead && INSN_UID (after_dead) > max_uid_cuid)
- after_dead = NEXT_INSN (after_dead);
-
- if (before_dead && after_dead
- && INSN_CUID (before_dead) >= from_cuid
- && (INSN_CUID (after_dead) < INSN_CUID (to_insn)
- || (where_dead != after_dead
- && INSN_CUID (after_dead) == INSN_CUID (to_insn))))
+ if (where_dead
+ && DF_INSN_LUID (where_dead) >= from_luid
+ && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
{
rtx note = remove_death (regno, where_dead);
> GET_MODE_SIZE (GET_MODE (x))))
{
unsigned int deadregno = REGNO (XEXP (note, 0));
- unsigned int deadend
- = (deadregno + hard_regno_nregs[deadregno]
- [GET_MODE (XEXP (note, 0))]);
- unsigned int ourend
- = regno + hard_regno_nregs[regno][GET_MODE (x)];
+ unsigned int deadend = END_HARD_REGNO (XEXP (note, 0));
+ unsigned int ourend = END_HARD_REGNO (x);
unsigned int i;
for (i = deadregno; i < deadend; i++)
&& regno < FIRST_PSEUDO_REGISTER
&& hard_regno_nregs[regno][GET_MODE (x)] > 1)
{
- unsigned int ourend
- = regno + hard_regno_nregs[regno][GET_MODE (x)];
+ unsigned int ourend = END_HARD_REGNO (x);
unsigned int i, offset;
rtx oldnotes = 0;
for (i = regno + offset; i < ourend; i++)
move_deaths (regno_reg_rtx[i],
- maybe_kill_insn, from_cuid, to_insn, &oldnotes);
+ maybe_kill_insn, from_luid, to_insn, &oldnotes);
}
if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
}
else
*pnotes = gen_rtx_EXPR_LIST (REG_DEAD, x, *pnotes);
-
- REG_N_DEATHS (regno)++;
}
return;
{
rtx dest = SET_DEST (x);
- move_deaths (SET_SRC (x), maybe_kill_insn, from_cuid, to_insn, pnotes);
+ move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
/* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
that accesses one word of a multi-word item, some
== ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
+ UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
{
- move_deaths (dest, maybe_kill_insn, from_cuid, to_insn, pnotes);
+ move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
return;
}
being replaced so the old value is not used in this insn. */
if (MEM_P (dest))
- move_deaths (XEXP (dest, 0), maybe_kill_insn, from_cuid,
+ move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
to_insn, pnotes);
return;
}
{
int j;
for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_cuid,
+ move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
to_insn, pnotes);
}
else if (fmt[i] == 'e')
- move_deaths (XEXP (x, i), maybe_kill_insn, from_cuid, to_insn, pnotes);
+ move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
}
}
\f
if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
return target == x;
- endtregno = tregno + hard_regno_nregs[tregno][GET_MODE (target)];
- endregno = regno + hard_regno_nregs[regno][GET_MODE (x)];
+ endtregno = end_hard_regno (GET_MODE (target), tregno);
+ endregno = end_hard_regno (GET_MODE (x), regno);
return endregno > tregno && regno < endtregno;
}
as appropriate. I3 and I2 are the insns resulting from the combination
insns including FROM (I2 may be zero).
+ ELIM_I2 and ELIM_I1 are either zero or registers that we know will
+ not need REG_DEAD notes because they are being substituted for. This
+ saves searching in the most common cases.
+
Each note in the list is either ignored or placed on some insns, depending
on the type of note. */
static void
-distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2)
+distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2, rtx elim_i2,
+ rtx elim_i1)
{
rtx note, next_note;
rtx tem;
{
rtx place = 0, place2 = 0;
- /* If this NOTE references a pseudo register, ensure it references
- the latest copy of that register. */
- if (XEXP (note, 0) && REG_P (XEXP (note, 0))
- && REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER)
- XEXP (note, 0) = regno_reg_rtx[REGNO (XEXP (note, 0))];
-
next_note = XEXP (note, 1);
switch (REG_NOTE_KIND (note))
{
if (place && JUMP_P (place))
{
rtx label = JUMP_LABEL (place);
-
+
if (!label)
JUMP_LABEL (place) = XEXP (note, 0);
else
if (place2 && JUMP_P (place2))
{
rtx label = JUMP_LABEL (place2);
-
+
if (!label)
JUMP_LABEL (place2) = XEXP (note, 0);
else
to simply delete it. */
break;
+ case REG_LIBCALL_ID:
+ /* If the insn previously containing this note still exists,
+ put it back where it was. Otherwise move it to the previous
+ insn. */
+ if (!NOTE_P (from_insn))
+ place = from_insn;
+ else
+ place = prev_real_insn (from_insn);
+ break;
case REG_RETVAL:
/* If the insn previously containing this note still exists,
put it back where it was. Otherwise move it to the previous
break;
case REG_DEAD:
- /* If the register is used as an input in I3, it dies there.
+ /* If we replaced the right hand side of FROM_INSN with a
+ REG_EQUAL note, the original use of the dying register
+ will not have been combined into I3 and I2. In such cases,
+ FROM_INSN is guaranteed to be the first of the combined
+ instructions, so we simply need to search back before
+ FROM_INSN for the previous use or set of this register,
+ then alter the notes there appropriately.
+
+ If the register is used as an input in I3, it dies there.
Similarly for I2, if it is nonzero and adjacent to I3.
If the register is not used as an input in either I3 or I2
use of A and put the death note there. */
if (from_insn
- && CALL_P (from_insn)
- && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
- place = from_insn;
- else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
- place = i3;
- else if (i2 != 0 && next_nonnote_insn (i2) == i3
- && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
- place = i2;
+ && from_insn == i2mod
+ && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
+ tem = from_insn;
+ else
+ {
+ if (from_insn
+ && CALL_P (from_insn)
+ && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
+ place = from_insn;
+ else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
+ place = i3;
+ else if (i2 != 0 && next_nonnote_insn (i2) == i3
+ && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
+ place = i2;
+ else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
+ && !(i2mod
+ && reg_overlap_mentioned_p (XEXP (note, 0),
+ i2mod_old_rhs)))
+ || rtx_equal_p (XEXP (note, 0), elim_i1))
+ break;
+ tem = i3;
+ }
if (place == 0)
{
basic_block bb = this_basic_block;
- for (tem = PREV_INSN (i3); place == 0; tem = PREV_INSN (tem))
+ for (tem = PREV_INSN (tem); place == 0; tem = PREV_INSN (tem))
{
if (! INSN_P (tem))
{
PATTERN (tem) = pc_rtx;
REG_NOTES (tem) = NULL;
- distribute_notes (old_notes, tem, tem, NULL_RTX);
+ distribute_notes (old_notes, tem, tem, NULL_RTX,
+ NULL_RTX, NULL_RTX);
distribute_links (LOG_LINKS (tem));
SET_INSN_DELETED (tem);
REG_NOTES (cc0_setter) = NULL;
distribute_notes (old_notes, cc0_setter,
- cc0_setter, NULL_RTX);
+ cc0_setter, NULL_RTX,
+ NULL_RTX, NULL_RTX);
distribute_links (LOG_LINKS (cc0_setter));
SET_INSN_DELETED (cc0_setter);
i2 but does not die in i2, and place is between i2
and i3, then we may need to move a link from place to
i2. */
- if (i2 && INSN_UID (place) <= max_uid_cuid
- && INSN_CUID (place) > INSN_CUID (i2)
+ if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
&& from_insn
- && INSN_CUID (from_insn) > INSN_CUID (i2)
+ && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
&& reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
{
rtx links = LOG_LINKS (place);
break;
}
- /* We haven't found an insn for the death note and it
- is still a REG_DEAD note, but we have hit the beginning
- of the block. If the existing life info says the reg
- was dead, there's nothing left to do. Otherwise, we'll
- need to do a global life update after combine. */
- if (REG_NOTE_KIND (note) == REG_DEAD && place == 0
- && REGNO_REG_SET_P (bb->global_live_at_start,
- REGNO (XEXP (note, 0))))
- SET_BIT (refresh_blocks, this_basic_block->index);
}
/* If the register is set or already dead at PLACE, we needn't do
{
unsigned int regno = REGNO (XEXP (note, 0));
- /* Similarly, if the instruction on which we want to place
- the note is a noop, we'll need do a global live update
- after we remove them in delete_noop_moves. */
- if (noop_move_p (place))
- SET_BIT (refresh_blocks, this_basic_block->index);
if (dead_or_set_p (place, XEXP (note, 0))
|| reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
if (place && regno < FIRST_PSEUDO_REGISTER
&& hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))] > 1)
{
- unsigned int endregno
- = regno + hard_regno_nregs[regno]
- [GET_MODE (XEXP (note, 0))];
+ unsigned int endregno = END_HARD_REGNO (XEXP (note, 0));
int all_used = 1;
unsigned int i;
= gen_rtx_EXPR_LIST (REG_DEAD, piece, NULL_RTX);
distribute_notes (new_note, place, place,
- NULL_RTX);
+ NULL_RTX, NULL_RTX, NULL_RTX);
}
else if (! refers_to_regno_p (i, i + 1,
PATTERN (place), 0)
if (! INSN_P (tem))
{
if (tem == BB_HEAD (bb))
- {
- SET_BIT (refresh_blocks,
- this_basic_block->index);
- break;
- }
+ break;
continue;
}
if (dead_or_set_p (tem, piece)
XEXP (note, 1) = REG_NOTES (place);
REG_NOTES (place) = note;
}
- else if ((REG_NOTE_KIND (note) == REG_DEAD
- || REG_NOTE_KIND (note) == REG_UNUSED)
- && REG_P (XEXP (note, 0)))
- REG_N_DEATHS (REGNO (XEXP (note, 0)))--;
if (place2)
- {
- if ((REG_NOTE_KIND (note) == REG_DEAD
- || REG_NOTE_KIND (note) == REG_UNUSED)
- && REG_P (XEXP (note, 0)))
- REG_N_DEATHS (REGNO (XEXP (note, 0)))++;
-
- REG_NOTES (place2) = gen_rtx_fmt_ee (GET_CODE (note),
- REG_NOTE_KIND (note),
- XEXP (note, 0),
- REG_NOTES (place2));
- }
+ REG_NOTES (place2)
+ = gen_rtx_fmt_ee (GET_CODE (note), REG_NOTE_KIND (note),
+ XEXP (note, 0), REG_NOTES (place2));
}
}
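/* A minimal sketch of how the widened interface is intended to be used,
   with hypothetical names: a caller that has just substituted for the
   destinations of I2 and I1 passes those registers as ELIM_I2/ELIM_I1 so
   their stale REG_DEAD notes can be discarded without searching, while a
   caller with no such knowledge passes NULL_RTX for both, exactly as the
   recursive calls above do.  */

static void
sketch_distribute_after_subst (rtx notes, rtx from_insn, rtx i3, rtx i2,
			       rtx substituted_i2dest, rtx substituted_i1dest)
{
  distribute_notes (notes, from_insn, i3, i2,
		    substituted_i2dest, substituted_i1dest);
}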
\f
/* Set added_links_insn to the earliest insn we added a
link to. */
if (added_links_insn == 0
- || INSN_CUID (added_links_insn) > INSN_CUID (place))
+ || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
added_links_insn = place;
}
}
return for_each_rtx (&equiv, unmentioned_reg_p_1, expr);
}
\f
-/* Compute INSN_CUID for INSN, which is an insn made by combine. */
-
-static int
-insn_cuid (rtx insn)
-{
- while (insn != 0 && INSN_UID (insn) > max_uid_cuid
- && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == USE)
- insn = NEXT_INSN (insn);
-
- gcc_assert (INSN_UID (insn) <= max_uid_cuid);
-
- return INSN_CUID (insn);
-}
-\f
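/* A minimal sketch of the replacement for the removed cuid machinery:
   insn ordering queries now go through the dataflow framework's logical
   uids.  Assuming DF_INSN_LUID is the df.h accessor for an insn's luid
   (valid for comparing insns within the same basic block, which is how
   combine uses it), a "does A come before B?" test looks roughly like
   this hypothetical helper:  */

static bool
sketch_insn_precedes_p (rtx a, rtx b)
{
  return DF_INSN_LUID (a) < DF_INSN_LUID (b);
}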
void
dump_combine_stats (FILE *file)
{
- fnotice
+ fprintf
(file,
";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
combine_attempts, combine_merges, combine_extras, combine_successes);
void
dump_combine_total_stats (FILE *file)
{
- fnotice
+ fprintf
(file,
"\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
total_attempts, total_merges, total_extras, total_successes);
}
+\f
+static bool
+gate_handle_combine (void)
+{
+ return (optimize > 0);
+}
+
+/* Try combining insns through substitution. */
+static unsigned int
+rest_of_handle_combine (void)
+{
+ int rebuild_jump_labels_after_combine;
+
+ df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
+ df_note_add_problem ();
+ df_analyze ();
+
+ regstat_init_n_sets_and_refs ();
+
+ rebuild_jump_labels_after_combine
+ = combine_instructions (get_insns (), max_reg_num ());
+
+ /* Combining insns may have turned an indirect jump into a
+ direct jump. Rebuild the JUMP_LABEL fields of jumping
+ instructions. */
+ if (rebuild_jump_labels_after_combine)
+ {
+ timevar_push (TV_JUMP);
+ rebuild_jump_labels (get_insns ());
+ cleanup_cfg (0);
+ timevar_pop (TV_JUMP);
+ }
+
+ regstat_free_n_sets_and_refs ();
+ return 0;
+}
+
+struct tree_opt_pass pass_combine =
+{
+ "combine", /* name */
+ gate_handle_combine, /* gate */
+ rest_of_handle_combine, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_COMBINE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_func |
+ TODO_df_finish |
+ TODO_ggc_collect, /* todo_flags_finish */
+ 'c' /* letter */
+};
+
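/* A minimal sketch, assuming the usual registration mechanism: the new
   pass descriptor is expected to be wired into the RTL pass list in
   passes.c via the NEXT_PASS idiom used by init_optimization_passes,
   roughly:

     NEXT_PASS (pass_combine);

   after which gate_handle_combine and rest_of_handle_combine above are
   invoked by the pass manager whenever optimization is enabled.  */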