/* Optimize by combining instructions for GNU compiler.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
This file is part of GCC.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
/* This module is essentially the "combiner" phase of the U. of Arizona
Portable Optimizer, but redone to work on our list-structured
flow.c aren't completely updated:
- reg_live_length is not updated
+ - reg_n_refs is not adjusted in the rare case when a register is
+ no longer required in a computation
+ - there are extremely rare cases (see distribute_regnotes) when a
+ REG_DEAD note is lost
- a LOG_LINKS entry that refers to an insn with multiple SETs may be
removed because there is no way to know which register it was
linking
#include "real.h"
#include "toplev.h"
#include "target.h"
+#include "optabs.h"
+#include "insn-codes.h"
#include "rtlhooks-def.h"
/* Include output.h for dump_file. */
#include "output.h"
+#include "params.h"
+#include "timevar.h"
+#include "tree-pass.h"
/* Number of attempts to combine instructions in this function. */
static int rtx_equal_for_field_assignment_p (rtx, rtx);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
+static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int (rtx, enum machine_mode, rtx,
unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
int);
static int recog_for_combine (rtx *, rtx, rtx *);
static rtx gen_lowpart_for_combine (enum machine_mode, rtx);
-static rtx gen_binary (enum rtx_code, enum machine_mode, rtx, rtx);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx, rtx);
static int reg_dead_at_p (rtx, rtx);
static void move_deaths (rtx, rtx, int, rtx, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
-static void distribute_notes (rtx, rtx, rtx, rtx);
+static void distribute_notes (rtx, rtx, rtx, rtx, rtx, rtx);
static void distribute_links (rtx);
static void mark_used_regs_combine (rtx);
static int insn_cuid (rtx);
static void record_promoted_value (rtx, rtx);
-static rtx reversed_comparison (rtx, enum machine_mode, rtx, rtx);
-static enum rtx_code combine_reversed_comparison_code (rtx);
static int unmentioned_reg_p_1 (rtx *, void *);
static bool unmentioned_reg_p (rtx, rtx);
\f
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
+/* Our implementation of gen_lowpart never emits a new pseudo. */
+#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
+#define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
+
#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
new_i2_cost = 0;
}
+ if (undobuf.other_insn)
+ {
+ int old_other_cost, new_other_cost;
+
+ old_other_cost = (INSN_UID (undobuf.other_insn) <= last_insn_cost
+ ? uid_insn_cost[INSN_UID (undobuf.other_insn)] : 0);
+ new_other_cost = insn_rtx_cost (PATTERN (undobuf.other_insn));
+ if (old_other_cost > 0 && new_other_cost > 0)
+ {
+ old_cost += old_other_cost;
+ new_cost += new_other_cost;
+ }
+ else
+ old_cost = 0;
+ }
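+
+ /* A worked example of this gating: if I2 and I3 together cost 8 and
+ the combined pattern costs 10, the combination is rejected below,
+ since 10 > 8. When other_insn changes too, its old and new costs
+ first join the same comparison; an unknown cost (zero) on either
+ side clears old_cost and disables the rejection test. */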
+
/* Disallow this recombination if both new_cost and old_cost are
greater than zero, and new_cost is greater than old_cost. */
- if (!undobuf.other_insn
- && old_cost > 0
+ if (old_cost > 0
&& new_cost > old_cost)
{
if (dump_file)
rtx prev;
#endif
int i;
+ unsigned int j = 0;
rtx links, nextlinks;
+ sbitmap_iterator sbi;
int new_direct_jump_p = 0;
rtx temp = XEXP (links, 0);
if ((set = single_set (temp)) != 0
&& (note = find_reg_equal_equiv_note (temp)) != 0
- && GET_CODE (XEXP (note, 0)) != EXPR_LIST
+ && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
/* Avoid using a register that may already have been
marked dead by an earlier instruction. */
- && ! unmentioned_reg_p (XEXP (note, 0), SET_SRC (set)))
+ && ! unmentioned_reg_p (note, SET_SRC (set))
+ && (GET_MODE (note) == VOIDmode
+ ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
+ : GET_MODE (SET_DEST (set)) == GET_MODE (note)))
{
/* Temporarily replace the set's source with the
contents of the REG_EQUAL note. The insn will
be deleted or recognized by try_combine. */
rtx orig = SET_SRC (set);
- SET_SRC (set) = XEXP (note, 0);
+ SET_SRC (set) = note;
next = try_combine (insn, temp, NULL_RTX,
&new_direct_jump_p);
if (next)
}
clear_bb_flags ();
- EXECUTE_IF_SET_IN_SBITMAP (refresh_blocks, 0, i,
- BASIC_BLOCK (i)->flags |= BB_DIRTY);
- new_direct_jump_p |= purge_all_dead_edges (0);
+ EXECUTE_IF_SET_IN_SBITMAP (refresh_blocks, 0, j, sbi)
+ BASIC_BLOCK (j)->flags |= BB_DIRTY;
+ new_direct_jump_p |= purge_all_dead_edges ();
delete_noop_moves ();
update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
&& REGNO (x) >= FIRST_PSEUDO_REGISTER
/* If this register is undefined at the start of the file, we can't
say what its contents were. */
- && ! REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, REGNO (x))
+ && ! REGNO_REG_SET_P
+ (ENTRY_BLOCK_PTR->next_bb->il.rtl->global_live_at_start, REGNO (x))
&& GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
{
if (set == 0 || GET_CODE (set) == CLOBBER)
/* If the clobber represents an earlyclobber operand, we must not
substitute an expression containing the clobbered register.
- As we do not analyse the constraint strings here, we have to
+ As we do not analyze the constraint strings here, we have to
make the conservative assumption. However, if the register is
a fixed hard reg, the clobber cannot represent any operand;
we leave it up to the machine description to either accept or
if (INSN_P (p) && p != succ && volatile_insn_p (PATTERN (p)))
return 0;
- /* If INSN or I2 contains an autoincrement or autodecrement,
- make sure that register is not used between there and I3,
- and not already used in I3 either.
+ /* If INSN contains an autoincrement or autodecrement, make sure that
+ register is not used between there and I3, and not already used in
+ I3 either. Neither must it be used in PRED or SUCC, if they exist.
Also insist that I3 not be a jump; if it were one
and the incremented register were spilled, we would lose. */
if (REG_NOTE_KIND (link) == REG_INC
&& (JUMP_P (i3)
|| reg_used_between_p (XEXP (link, 0), insn, i3)
+ || (pred != NULL_RTX
+ && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
+ || (succ != NULL_RTX
+ && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
|| reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
return 0;
#endif
/* Never combine loads and stores involving hard regs that are likely
to be spilled. The register allocator can usually handle such
reg-reg moves by tying. If we allow the combiner to make
- substitutions of likely-spilled regs, we may abort in reload.
+ substitutions of likely-spilled regs, reload might die.
As an exception, we allow combinations involving fixed regs; these are
not available to the register allocator so there's no risk involved. */
return 0;
}
+struct likely_spilled_retval_info
+{
+ unsigned regno, nregs;
+ unsigned mask;
+};
+
+/* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
+ hard registers that are known to be written to / clobbered in full. */
+static void
+likely_spilled_retval_1 (rtx x, rtx set, void *data)
+{
+ struct likely_spilled_retval_info *info = data;
+ unsigned regno, nregs;
+ unsigned new_mask;
+
+ if (!REG_P (XEXP (set, 0)))
+ return;
+ regno = REGNO (x);
+ if (regno >= info->regno + info->nregs)
+ return;
+ nregs = hard_regno_nregs[regno][GET_MODE (x)];
+ if (regno + nregs <= info->regno)
+ return;
+ new_mask = (2U << (nregs - 1)) - 1;
+ if (regno < info->regno)
+ new_mask >>= info->regno - regno;
+ else
+ new_mask <<= regno - info->regno;
+ info->mask &= ~new_mask;
+}
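+
+/* To illustrate the mask arithmetic above: with info->regno == 0 and
+ info->nregs == 4 (mask 0xf), a full store to a two-register value
+ starting at hard reg 1 gives nregs == 2 and new_mask == 3, shifted
+ left to 6; info->mask &= ~6 then leaves 0x9, i.e. regs 0 and 3 are
+ still unwritten. (2U << (nregs - 1)) - 1 is used instead of
+ (1U << nregs) - 1 so the shift stays well defined even for values
+ spanning all 32 registers assumed below. */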
+
+/* Return nonzero iff part of the return value is live during INSN, and
+ it is likely spilled. This can happen when more than one insn is needed
+ to copy the return value, e.g. when we consider combining into the
+ second copy insn for a complex value. */
+
+static int
+likely_spilled_retval_p (rtx insn)
+{
+ rtx use = BB_END (this_basic_block);
+ rtx reg, p;
+ unsigned regno, nregs;
+ /* We assume here that no machine mode needs more than
+ 32 hard registers when the value overlaps with a register
+ for which FUNCTION_VALUE_REGNO_P is true. */
+ unsigned mask;
+ struct likely_spilled_retval_info info;
+
+ if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
+ return 0;
+ reg = XEXP (PATTERN (use), 0);
+ if (!REG_P (reg) || !FUNCTION_VALUE_REGNO_P (REGNO (reg)))
+ return 0;
+ regno = REGNO (reg);
+ nregs = hard_regno_nregs[regno][GET_MODE (reg)];
+ if (nregs == 1)
+ return 0;
+ mask = (2U << (nregs - 1)) - 1;
+
+ /* Disregard parts of the return value that are set later. */
+ info.regno = regno;
+ info.nregs = nregs;
+ info.mask = mask;
+ for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
+ if (INSN_P (p))
+ note_stores (PATTERN (p), likely_spilled_retval_1, &info);
+ mask = info.mask;
+
+ /* Check if any of the (probably) live return value registers is
+ likely spilled. */
+ nregs--;
+ do
+ {
+ if ((mask & 1 << nregs)
+ && CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (regno + nregs)))
+ return 1;
+ } while (nregs--);
+ return 0;
+}
+
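+/* As an illustration of the case guarded against: a value returned in
+ a register pair (e.g. a complex number) may be copied out by two
+ separate insns. When combine considers the second copy, part of the
+ hard register pair is still live, and if it belongs to a class that
+ is likely spilled, substituting into the copy could leave reload
+ with an allocation it cannot satisfy. */
+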
/* Adjust INSN after we made a change to its destination.
Changing the destination can invalidate notes that say something about
distribute_links (gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX));
}
+/* Return TRUE if combine can reuse reg X in mode MODE.
+ ADDED_SETS is nonzero if the original set is still required. */
+static bool
+can_change_dest_mode (rtx x, int added_sets, enum machine_mode mode)
+{
+ unsigned int regno;
+
+ if (!REG_P (x))
+ return false;
+
+ regno = REGNO (x);
+ /* Allow hard registers if the new mode is legal, and occupies no more
+ registers than the old mode. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ return (HARD_REGNO_MODE_OK (regno, mode)
+ && (hard_regno_nregs[regno][GET_MODE (x)]
+ >= hard_regno_nregs[regno][mode]));
+
+ /* Or a pseudo that is only used once. */
+ return (REG_N_SETS (regno) == 1 && !added_sets
+ && !REG_USERVAR_P (x));
+}
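+
+/* For example, combine may want to retarget a SET_DEST pseudo from
+ DImode to SImode while rewriting a COMPARE; that is allowed only if
+ the pseudo has a single set, the original set need not be kept
+ (ADDED_SETS is zero), and it is not a user variable. A hard register
+ must additionally satisfy HARD_REGNO_MODE_OK without needing more
+ hard registers in the new mode than in the old one. */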
+
/* Try to combine the insns I1 and I2 into I3.
Here I1 and I2 appear earlier than I3.
I1 can be zero; then we combine just I2 into I3.
{
/* New patterns for I3 and I2, respectively. */
rtx newpat, newi2pat = 0;
+ rtvec newpat_vec_with_clobbers = 0;
int substed_i2 = 0, substed_i1 = 0;
/* Indicates need to preserve SET in I1 or I2 in I3 if it is not dead. */
int added_sets_1, added_sets_2;
rtx i2pat;
/* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC. */
int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
+ int i2dest_killed = 0, i1dest_killed = 0;
int i1_feeds_i3 = 0;
/* Notes that must be added to REG_NOTES in I3 and I2. */
rtx new_i3_notes, new_i2_notes;
if (cant_combine_insn_p (i3)
|| cant_combine_insn_p (i2)
|| (i1 && cant_combine_insn_p (i1))
+ || likely_spilled_retval_p (i3)
/* We also can't do anything if I3 has a
REG_LIBCALL note since we don't want to disrupt the contiguity of a
libcall. */
added_sets_2 = added_sets_1 = 0;
i2dest = SET_SRC (PATTERN (i3));
+ i2dest_killed = dead_or_set_p (i2, i2dest);
/* Replace the dest in I2 with our dest and make the resulting
insn the new pattern for I3. Then skip to where we
subst_low_cuid = INSN_CUID (i2);
added_sets_2 = added_sets_1 = 0;
i2dest = SET_DEST (temp);
+ i2dest_killed = dead_or_set_p (i2, i2dest);
SUBST (SET_SRC (temp),
immed_double_const (lo, hi, GET_MODE (SET_DEST (temp))));
i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
+ i2dest_killed = dead_or_set_p (i2, i2dest);
+ i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
/* See if I1 directly feeds into I3. It does if I1DEST is not used
in I2SRC. */
i2src, const0_rtx))
!= GET_MODE (SET_DEST (newpat))))
{
- unsigned int regno = REGNO (SET_DEST (newpat));
- rtx new_dest = gen_rtx_REG (compare_mode, regno);
-
- if (regno < FIRST_PSEUDO_REGISTER
- || (REG_N_SETS (regno) == 1 && ! added_sets_2
- && ! REG_USERVAR_P (SET_DEST (newpat))))
+ if (can_change_dest_mode (SET_DEST (newpat), added_sets_2,
+ compare_mode))
{
+ unsigned int regno = REGNO (SET_DEST (newpat));
+ rtx new_dest = gen_rtx_REG (compare_mode, regno);
+
if (regno >= FIRST_PSEUDO_REGISTER)
SUBST (regno_reg_rtx[regno], new_dest);
|| (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
&& (n_occurrences + added_sets_1 + (added_sets_2 && ! i1_feeds_i3)
> 1))
- /* Fail if we tried to make a new register (we used to abort, but there's
- really no reason to). */
+ /* Fail if we tried to make a new register. */
|| max_reg_num () != maxreg
/* Fail if we couldn't do something and have a CLOBBER. */
|| GET_CODE (newpat) == CLOBBER
/* Note which hard regs this insn has as inputs. */
mark_used_regs_combine (newpat);
+ /* If recog_for_combine fails, it strips existing clobbers. If we
+ later consider splitting this pattern, we might need these clobbers. */
+ if (i1 && GET_CODE (newpat) == PARALLEL
+ && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
+ {
+ int len = XVECLEN (newpat, 0);
+
+ newpat_vec_with_clobbers = rtvec_alloc (len);
+ for (i = 0; i < len; i++)
+ RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
+ }
+
/* Is the result of combination a valid instruction? */
insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
if (m_split == 0 && ! reg_overlap_mentioned_p (ni2dest, newpat))
{
+ enum machine_mode new_mode = GET_MODE (SET_DEST (newpat));
/* If I2DEST is a hard register or the only use of a pseudo,
we can change its mode. */
- if (GET_MODE (SET_DEST (newpat)) != GET_MODE (i2dest)
- && GET_MODE (SET_DEST (newpat)) != VOIDmode
- && REG_P (i2dest)
- && (REGNO (i2dest) < FIRST_PSEUDO_REGISTER
- || (REG_N_SETS (REGNO (i2dest)) == 1 && ! added_sets_2
- && ! REG_USERVAR_P (i2dest))))
+ if (new_mode != GET_MODE (i2dest)
+ && new_mode != VOIDmode
+ && can_change_dest_mode (i2dest, added_sets_2, new_mode))
ni2dest = gen_rtx_REG (GET_MODE (SET_DEST (newpat)),
REGNO (i2dest));
}
}
+ /* If recog_for_combine has discarded clobbers, try to use them
+ again for the split. */
+ if (m_split == 0 && newpat_vec_with_clobbers)
+ m_split
+ = split_insns (gen_rtx_PARALLEL (VOIDmode,
+ newpat_vec_with_clobbers), i3);
+
if (m_split && NEXT_INSN (m_split) == NULL_RTX)
{
m_split = PATTERN (m_split);
&& REG_P (i2dest)
#endif
/* We need I2DEST in the proper mode. If it is a hard register
- or the only use of a pseudo, we can change its mode. */
+ or the only use of a pseudo, we can change its mode.
+ Make sure we don't change a hard register to have a mode that
+ isn't valid for it, or change the number of registers. */
&& (GET_MODE (*split) == GET_MODE (i2dest)
|| GET_MODE (*split) == VOIDmode
- || REGNO (i2dest) < FIRST_PSEUDO_REGISTER
- || (REG_N_SETS (REGNO (i2dest)) == 1 && ! added_sets_2
- && ! REG_USERVAR_P (i2dest)))
+ || can_change_dest_mode (i2dest, added_sets_2,
+ GET_MODE (*split)))
&& (next_real_insn (i2) == i3
|| ! use_crosses_set_p (*split, INSN_CUID (i2)))
/* We can't overwrite I2DEST if its value is still used by
SUBST (*split, newdest);
i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
+ /* recog_for_combine might have added CLOBBERs to newi2pat.
+ Make sure NEWPAT does not depend on the clobbered regs. */
+ if (GET_CODE (newi2pat) == PARALLEL)
+ for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
+ {
+ rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
+ if (reg_overlap_mentioned_p (reg, newpat))
+ {
+ undo_all ();
+ return 0;
+ }
+ }
+
/* If the split point was a MULT and we didn't have one before,
don't use one now. */
if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
REG_N_DEATHS (REGNO (XEXP (note, 0)))++;
distribute_notes (new_other_notes, undobuf.other_insn,
- undobuf.other_insn, NULL_RTX);
+ undobuf.other_insn, NULL_RTX, NULL_RTX, NULL_RTX);
}
#ifdef HAVE_cc0
/* If I2 is the CC0 setter and I3 is the CC0 user then check whether
rtx i3links, i2links, i1links = 0;
rtx midnotes = 0;
unsigned int regno;
+ /* Compute which registers we expect to eliminate. newi2pat may be setting
+ either i3dest or i2dest, so we must check it. Also, i1dest may be the
+ same as i3dest, in which case newi2pat may be setting i1dest. */
+ rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
+ || i2dest_in_i2src || i2dest_in_i1src
+ || !i2dest_killed
+ ? 0 : i2dest);
+ rtx elim_i1 = (i1 == 0 || i1dest_in_i1src
+ || (newi2pat && reg_set_p (i1dest, newi2pat))
+ || !i1dest_killed
+ ? 0 : i1dest);
/* Get the old REG_NOTES and LOG_LINKS from all our insns and
clear them. */
/* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
if (i3notes)
- distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL_RTX);
+ distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
if (i2notes)
- distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL_RTX);
+ distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
if (i1notes)
- distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL_RTX);
+ distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
if (midnotes)
- distribute_notes (midnotes, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX);
+ distribute_notes (midnotes, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
/* Distribute any notes added to I2 or I3 by recog_for_combine. We
know these are REG_UNUSED and want them to go to the desired insn,
if (REG_P (XEXP (temp, 0)))
REG_N_DEATHS (REGNO (XEXP (temp, 0)))++;
- distribute_notes (new_i2_notes, i2, i2, NULL_RTX);
+ distribute_notes (new_i2_notes, i2, i2, NULL_RTX, NULL_RTX, NULL_RTX);
}
if (new_i3_notes)
if (REG_P (XEXP (temp, 0)))
REG_N_DEATHS (REGNO (XEXP (temp, 0)))++;
- distribute_notes (new_i3_notes, i3, i3, NULL_RTX);
+ distribute_notes (new_i3_notes, i3, i3, NULL_RTX, NULL_RTX, NULL_RTX);
}
/* If I3DEST was used in I3SRC, it really died in I3. We may need to
if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i3dest_killed,
NULL_RTX),
- NULL_RTX, i2, NULL_RTX);
+ NULL_RTX, i2, NULL_RTX, elim_i2, elim_i1);
else
distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i3dest_killed,
NULL_RTX),
- NULL_RTX, i3, newi2pat ? i2 : NULL_RTX);
+ NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
}
if (i2dest_in_i2src)
if (newi2pat && reg_set_p (i2dest, newi2pat))
distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i2dest, NULL_RTX),
- NULL_RTX, i2, NULL_RTX);
+ NULL_RTX, i2, NULL_RTX, NULL_RTX, NULL_RTX);
else
distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i2dest, NULL_RTX),
- NULL_RTX, i3, newi2pat ? i2 : NULL_RTX);
+ NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
+ NULL_RTX, NULL_RTX);
}
if (i1dest_in_i1src)
if (newi2pat && reg_set_p (i1dest, newi2pat))
distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i1dest, NULL_RTX),
- NULL_RTX, i2, NULL_RTX);
+ NULL_RTX, i2, NULL_RTX, NULL_RTX, NULL_RTX);
else
distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i1dest, NULL_RTX),
- NULL_RTX, i3, newi2pat ? i2 : NULL_RTX);
+ NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
+ NULL_RTX, NULL_RTX);
}
distribute_links (i3links);
if (src == mask)
SUBST (SET_SRC (x),
- gen_binary (IOR, mode, dest, GEN_INT (src << pos)));
+ simplify_gen_binary (IOR, mode, dest, GEN_INT (src << pos)));
else
- SUBST (SET_SRC (x),
- gen_binary (IOR, mode,
- gen_binary (AND, mode, dest,
- gen_int_mode (~(mask << pos),
- mode)),
- GEN_INT (src << pos)));
+ {
+ rtx negmask = gen_int_mode (~(mask << pos), mode);
+ SUBST (SET_SRC (x),
+ simplify_gen_binary (IOR, mode,
+ simplify_gen_binary (AND, mode,
+ dest, negmask),
+ GEN_INT (src << pos)));
+ }
SUBST (SET_DEST (x), dest);
/* If this is a register being set, ignore it. */
new = XEXP (x, i);
if (in_dest
- && (code == SUBREG || code == STRICT_LOW_PART
- || code == ZERO_EXTRACT)
&& i == 0
- && REG_P (new))
+ && (((code == SUBREG || code == ZERO_EXTRACT)
+ && REG_P (new))
+ || code == STRICT_LOW_PART))
;
else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
SUBST (XEXP (x, 1), temp);
}
- /* If this is a PLUS, MINUS, or MULT, and the first operand is the
- sign extension of a PLUS with a constant, reverse the order of the sign
- extension and the addition. Note that this not the same as the original
- code, but overflow is undefined for signed values. Also note that the
- PLUS will have been partially moved "inside" the sign-extension, so that
- the first operand of X will really look like:
- (ashiftrt (plus (ashift A C4) C5) C4).
- We convert this to
- (plus (ashiftrt (ashift A C4) C2) C4)
- and replace the first operand of X with that expression. Later parts
- of this function may simplify the expression further.
-
- For example, if we start with (mult (sign_extend (plus A C1)) C2),
- we swap the SIGN_EXTEND and PLUS. Later code will apply the
- distributive law to produce (plus (mult (sign_extend X) C1) C3).
-
- We do this to simplify address expressions. */
-
- if ((code == PLUS || code == MINUS || code == MULT)
- && GET_CODE (XEXP (x, 0)) == ASHIFTRT
- && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
- && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ASHIFT
- && GET_CODE (XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 1)) == CONST_INT
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
- && XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 1) == XEXP (XEXP (x, 0), 1)
- && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
- && (temp = simplify_binary_operation (ASHIFTRT, mode,
- XEXP (XEXP (XEXP (x, 0), 0), 1),
- XEXP (XEXP (x, 0), 1))) != 0)
- {
- rtx new
- = simplify_shift_const (NULL_RTX, ASHIFT, mode,
- XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 0),
- INTVAL (XEXP (XEXP (x, 0), 1)));
-
- new = simplify_shift_const (NULL_RTX, ASHIFTRT, mode, new,
- INTVAL (XEXP (XEXP (x, 0), 1)));
-
- SUBST (XEXP (x, 0), gen_binary (PLUS, mode, new, temp));
- }
-
/* If this is a simple operation applied to an IF_THEN_ELSE, try
applying it to the arms of the IF_THEN_ELSE. This often simplifies
things. Check for cases where both arms are testing the same
/* If the result values are STORE_FLAG_VALUE and zero, we can
just make the comparison operation. */
if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
- x = gen_binary (cond_code, mode, cond, cop1);
+ x = simplify_gen_relational (cond_code, mode, VOIDmode,
+ cond, cop1);
else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
&& ((reversed = reversed_comparison_code_parts
(cond_code, cond, cop1, NULL))
!= UNKNOWN))
- x = gen_binary (reversed, mode, cond, cop1);
+ x = simplify_gen_relational (reversed, mode, VOIDmode,
+ cond, cop1);
/* Likewise, we can make the negate of a comparison operation
if the result values are - STORE_FLAG_VALUE and zero. */
&& INTVAL (true_rtx) == - STORE_FLAG_VALUE
&& false_rtx == const0_rtx)
x = simplify_gen_unary (NEG, mode,
- gen_binary (cond_code, mode, cond,
- cop1),
+ simplify_gen_relational (cond_code,
+ mode, VOIDmode,
+ cond, cop1),
mode);
else if (GET_CODE (false_rtx) == CONST_INT
&& INTVAL (false_rtx) == - STORE_FLAG_VALUE
(cond_code, cond, cop1, NULL))
!= UNKNOWN))
x = simplify_gen_unary (NEG, mode,
- gen_binary (reversed, mode,
- cond, cop1),
+ simplify_gen_relational (reversed,
+ mode, VOIDmode,
+ cond, cop1),
mode);
else
return gen_rtx_IF_THEN_ELSE (mode,
- gen_binary (cond_code, VOIDmode,
- cond, cop1),
+ simplify_gen_relational (cond_code,
+ mode,
+ VOIDmode,
+ cond,
+ cop1),
true_rtx, false_rtx);
code = GET_CODE (x);
}
if (inner)
- return gen_binary (code, mode, other, inner);
+ return simplify_gen_binary (code, mode, other, inner);
}
}
if (GET_CODE (XEXP (x, 0)) == XOR
&& XEXP (XEXP (x, 0), 1) == const1_rtx
&& nonzero_bits (XEXP (XEXP (x, 0), 0), mode) == 1)
- return gen_binary (PLUS, mode, XEXP (XEXP (x, 0), 0), constm1_rtx);
+ return simplify_gen_binary (PLUS, mode, XEXP (XEXP (x, 0), 0),
+ constm1_rtx);
temp = expand_compound_operation (XEXP (x, 0));
in1 = XEXP (XEXP (XEXP (x, 0), 0), 0);
in2 = XEXP (XEXP (x, 0), 1);
- return gen_binary (MINUS, mode, XEXP (x, 1),
- gen_binary (MULT, mode, in1, in2));
+ return simplify_gen_binary (MINUS, mode, XEXP (x, 1),
+ simplify_gen_binary (MULT, mode,
+ in1, in2));
}
/* If we have (plus (plus (A const) B)), associate it so that CONST is
they are now checked elsewhere. */
if (GET_CODE (XEXP (x, 0)) == PLUS
&& CONSTANT_ADDRESS_P (XEXP (XEXP (x, 0), 1)))
- return gen_binary (PLUS, mode,
- gen_binary (PLUS, mode, XEXP (XEXP (x, 0), 0),
- XEXP (x, 1)),
- XEXP (XEXP (x, 0), 1));
+ return simplify_gen_binary (PLUS, mode,
+ simplify_gen_binary (PLUS, mode,
+ XEXP (XEXP (x, 0), 0),
+ XEXP (x, 1)),
+ XEXP (XEXP (x, 0), 1));
/* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
when c is (const_int (pow2 + 1) / 2) is a sign extension of a
if (COMPARISON_P (XEXP (x, 0))
&& ((STORE_FLAG_VALUE == -1 && XEXP (x, 1) == const1_rtx)
|| (STORE_FLAG_VALUE == 1 && XEXP (x, 1) == constm1_rtx))
- && (reversed = reversed_comparison (XEXP (x, 0), mode,
- XEXP (XEXP (x, 0), 0),
- XEXP (XEXP (x, 0), 1))))
+ && (reversed = reversed_comparison (XEXP (x, 0), mode)))
return
simplify_gen_unary (NEG, mode, reversed, mode);
& nonzero_bits (XEXP (x, 1), mode)) == 0)
{
/* Try to simplify the expression further. */
- rtx tor = gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
+ rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
temp = combine_simplify_rtx (tor, mode, in_dest);
/* If we could, great. If not, do not go ahead with the IOR
if (STORE_FLAG_VALUE == 1
&& XEXP (x, 0) == const1_rtx
&& COMPARISON_P (XEXP (x, 1))
- && (reversed = reversed_comparison (XEXP (x, 1), mode,
- XEXP (XEXP (x, 1), 0),
- XEXP (XEXP (x, 1), 1))))
+ && (reversed = reversed_comparison (XEXP (x, 1), mode)))
return reversed;
/* (minus <foo> (and <foo> (const_int -pow2))) becomes
in1 = XEXP (XEXP (XEXP (x, 1), 0), 0);
in2 = XEXP (XEXP (x, 1), 1);
- return gen_binary (PLUS, mode, gen_binary (MULT, mode, in1, in2),
- XEXP (x, 0));
+ return simplify_gen_binary (PLUS, mode,
+ simplify_gen_binary (MULT, mode,
+ in1, in2),
+ XEXP (x, 0));
}
/* Canonicalize (minus (neg A) (mult B C)) to
in1 = simplify_gen_unary (NEG, mode, XEXP (XEXP (x, 1), 0), mode);
in2 = XEXP (XEXP (x, 1), 1);
- return gen_binary (MINUS, mode, gen_binary (MULT, mode, in1, in2),
- XEXP (XEXP (x, 0), 0));
+ return simplify_gen_binary (MINUS, mode,
+ simplify_gen_binary (MULT, mode,
+ in1, in2),
+ XEXP (XEXP (x, 0), 0));
}
/* Canonicalize (minus A (plus B C)) to (minus (minus A B) C) for
integers. */
if (GET_CODE (XEXP (x, 1)) == PLUS && INTEGRAL_MODE_P (mode))
- return gen_binary (MINUS, mode,
- gen_binary (MINUS, mode, XEXP (x, 0),
- XEXP (XEXP (x, 1), 0)),
- XEXP (XEXP (x, 1), 1));
+ return simplify_gen_binary (MINUS, mode,
+ simplify_gen_binary (MINUS, mode,
+ XEXP (x, 0),
+ XEXP (XEXP (x, 1), 0)),
+ XEXP (XEXP (x, 1), 1));
break;
case MULT:
if (GET_CODE (XEXP (x, 0)) == PLUS)
{
- x = apply_distributive_law
- (gen_binary (PLUS, mode,
- gen_binary (MULT, mode,
- XEXP (XEXP (x, 0), 0), XEXP (x, 1)),
- gen_binary (MULT, mode,
- XEXP (XEXP (x, 0), 1),
- copy_rtx (XEXP (x, 1)))));
-
- if (GET_CODE (x) != MULT)
- return x;
+ rtx result = distribute_and_simplify_rtx (x, 0);
+ if (result)
+ return result;
}
+
/* Try simplify a*(b/c) as (a*b)/c. */
if (FLOAT_MODE_P (mode) && flag_unsafe_math_optimizations
&& GET_CODE (XEXP (x, 0)) == DIV)
XEXP (XEXP (x, 0), 0),
XEXP (x, 1));
if (tem)
- return gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
+ return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
}
break;
&& nonzero_bits (op0, mode) == 1)
{
op0 = expand_compound_operation (op0);
- return gen_binary (XOR, mode,
- gen_lowpart (mode, op0),
- const1_rtx);
+ return simplify_gen_binary (XOR, mode,
+ gen_lowpart (mode, op0),
+ const1_rtx);
}
else if (STORE_FLAG_VALUE == 1
if (GET_CODE (op0) == VEC_CONCAT)
{
HOST_WIDE_INT op0_size = GET_MODE_SIZE (GET_MODE (XEXP (op0, 0)));
- if (op0_size < offset)
+ if (offset < op0_size)
op0 = XEXP (op0, 0);
else
{
/* Simplify storing of the truth value. */
if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
- return gen_binary (true_code, mode, XEXP (cond, 0), XEXP (cond, 1));
+ return simplify_gen_relational (true_code, mode, VOIDmode,
+ XEXP (cond, 0), XEXP (cond, 1));
/* Also when the truth value has to be reversed. */
if (comparison_p
&& true_rtx == const0_rtx && false_rtx == const_true_rtx
- && (reversed = reversed_comparison (cond, mode, XEXP (cond, 0),
- XEXP (cond, 1))))
+ && (reversed = reversed_comparison (cond, mode)))
return reversed;
/* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
comparisons and see if that says anything about the value of each arm. */
if (comparison_p
- && ((false_code = combine_reversed_comparison_code (cond))
+ && ((false_code = reversed_comparison_code (cond, NULL))
!= UNKNOWN)
&& REG_P (XEXP (cond, 0)))
{
the false arm is more complicated than the true arm. */
if (comparison_p
- && combine_reversed_comparison_code (cond) != UNKNOWN
+ && reversed_comparison_code (cond, NULL) != UNKNOWN
&& (true_rtx == pc_rtx
|| (CONSTANT_P (true_rtx)
&& GET_CODE (false_rtx) != CONST_INT && false_rtx != pc_rtx)
|| rtx_equal_p (false_rtx, XEXP (cond, 0))))
{
true_code = reversed_comparison_code (cond, NULL);
- SUBST (XEXP (x, 0),
- reversed_comparison (cond, GET_MODE (cond), XEXP (cond, 0),
- XEXP (cond, 1)));
-
+ SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
SUBST (XEXP (x, 1), false_rtx);
SUBST (XEXP (x, 2), true_rtx);
{
case GE:
case GT:
- return gen_binary (SMAX, mode, true_rtx, false_rtx);
+ return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
case LE:
case LT:
- return gen_binary (SMIN, mode, true_rtx, false_rtx);
+ return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
case GEU:
case GTU:
- return gen_binary (UMAX, mode, true_rtx, false_rtx);
+ return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
case LEU:
case LTU:
- return gen_binary (UMIN, mode, true_rtx, false_rtx);
+ return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
default:
break;
}
if (z)
{
- temp = subst (gen_binary (true_code, m, cond_op0, cond_op1),
+ temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
+ cond_op0, cond_op1),
pc_rtx, pc_rtx, 0, 0);
- temp = gen_binary (MULT, m, temp,
- gen_binary (MULT, m, c1, const_true_rtx));
+ temp = simplify_gen_binary (MULT, m, temp,
+ simplify_gen_binary (MULT, m, c1,
+ const_true_rtx));
temp = subst (temp, pc_rtx, pc_rtx, 0, 0);
- temp = gen_binary (op, m, gen_lowpart (m, z), temp);
+ temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
if (extend_op != UNKNOWN)
temp = simplify_gen_unary (extend_op, mode, temp, m);
which case we can safely change its mode. */
if (compare_mode != GET_MODE (dest))
{
- unsigned int regno = REGNO (dest);
- rtx new_dest = gen_rtx_REG (compare_mode, regno);
-
- if (regno < FIRST_PSEUDO_REGISTER
- || (REG_N_SETS (regno) == 1 && ! REG_USERVAR_P (dest)))
+ if (can_change_dest_mode (dest, 0, compare_mode))
{
+ unsigned int regno = REGNO (dest);
+ rtx new_dest = gen_rtx_REG (compare_mode, regno);
+
if (regno >= FIRST_PSEUDO_REGISTER)
SUBST (regno_reg_rtx[regno], new_dest);
PUT_CODE (*cc_use, old_code);
other_changed = 0;
- op0 = gen_binary (XOR, GET_MODE (op0), op0, GEN_INT (mask));
+ op0 = simplify_gen_binary (XOR, GET_MODE (op0),
+ op0, GEN_INT (mask));
}
}
}
SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
src = SET_SRC (x);
}
+ else if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
+ {
+ SUBST (SET_SRC (x), op0);
+ src = SET_SRC (x);
+ }
else
{
/* Otherwise, update the COMPARE if needed. */
&& rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
- term2 = gen_binary (AND, GET_MODE (src),
- XEXP (XEXP (src, 0), 0), true_rtx);
- term3 = gen_binary (AND, GET_MODE (src),
- simplify_gen_unary (NOT, GET_MODE (src),
- XEXP (XEXP (src, 0), 0),
- GET_MODE (src)),
- false_rtx);
+ term2 = simplify_gen_binary (AND, GET_MODE (src),
+ XEXP (XEXP (src, 0), 0), true_rtx);
+ term3 = simplify_gen_binary (AND, GET_MODE (src),
+ simplify_gen_unary (NOT, GET_MODE (src),
+ XEXP (XEXP (src, 0), 0),
+ GET_MODE (src)),
+ false_rtx);
SUBST (SET_SRC (x),
- gen_binary (IOR, GET_MODE (src),
- gen_binary (IOR, GET_MODE (src), term1, term2),
- term3));
+ simplify_gen_binary (IOR, GET_MODE (src),
+ simplify_gen_binary (IOR, GET_MODE (src),
+ term1, term2),
+ term3));
src = SET_SRC (x);
}
if (GET_CODE (op0) == XOR
&& rtx_equal_p (XEXP (op0, 0), op1)
&& ! side_effects_p (op1))
- x = gen_binary (AND, mode,
- simplify_gen_unary (NOT, mode, XEXP (op0, 1), mode),
- op1);
+ x = simplify_gen_binary (AND, mode,
+ simplify_gen_unary (NOT, mode,
+ XEXP (op0, 1), mode),
+ op1);
if (GET_CODE (op0) == XOR
&& rtx_equal_p (XEXP (op0, 1), op1)
&& ! side_effects_p (op1))
- x = gen_binary (AND, mode,
- simplify_gen_unary (NOT, mode, XEXP (op0, 0), mode),
- op1);
+ x = simplify_gen_binary (AND, mode,
+ simplify_gen_unary (NOT, mode,
+ XEXP (op0, 0), mode),
+ op1);
/* Similarly for (~(A ^ B)) & A. */
if (GET_CODE (op0) == NOT
&& GET_CODE (XEXP (op0, 0)) == XOR
&& rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
&& ! side_effects_p (op1))
- x = gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
+ x = simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
if (GET_CODE (op0) == NOT
&& GET_CODE (XEXP (op0, 0)) == XOR
&& rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
&& ! side_effects_p (op1))
- x = gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
+ x = simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
/* We can call simplify_and_const_int only if we don't lose
any (sign) bits when converting INTVAL (op1) to
&& GET_CODE (XEXP (op0, 1)) == CONST_INT
&& GET_CODE (op1) == CONST_INT
&& (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
- return gen_binary (IOR, mode,
- gen_binary (AND, mode, XEXP (op0, 0),
+ return simplify_gen_binary (IOR, mode,
+ simplify_gen_binary
+ (AND, mode, XEXP (op0, 0),
GEN_INT (INTVAL (XEXP (op0, 1))
& ~INTVAL (op1))), op1);
&& ! side_effects_p (XEXP (op0, 1)))
return op1;
- /* In the following group of tests (and those in case IOR below),
- we start with some combination of logical operations and apply
- the distributive law followed by the inverse distributive law.
- Most of the time, this results in no change. However, if some of
- the operands are the same or inverses of each other, simplifications
- will result.
-
- For example, (and (ior A B) (not B)) can occur as the result of
- expanding a bit field assignment. When we apply the distributive
- law to this, we get (ior (and (A (not B))) (and (B (not B)))),
- which then simplifies to (and (A (not B))).
-
- If we have (and (ior A B) C), apply the distributive law and then
- the inverse distributive law to see if things simplify. */
-
+ /* If we have any of (and (ior A B) C) or (and (xor A B) C),
+ apply the distributive law and then the inverse distributive
+ law to see if things simplify. */
if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
{
- x = apply_distributive_law
- (gen_binary (GET_CODE (op0), mode,
- gen_binary (AND, mode, XEXP (op0, 0), op1),
- gen_binary (AND, mode, XEXP (op0, 1),
- copy_rtx (op1))));
- if (GET_CODE (x) != AND)
- return x;
+ rtx result = distribute_and_simplify_rtx (x, 0);
+ if (result)
+ return result;
}
-
if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
- return apply_distributive_law
- (gen_binary (GET_CODE (op1), mode,
- gen_binary (AND, mode, XEXP (op1, 0), op0),
- gen_binary (AND, mode, XEXP (op1, 1),
- copy_rtx (op0))));
-
- /* Similarly, taking advantage of the fact that
- (and (not A) (xor B C)) == (xor (ior A B) (ior A C)) */
-
- if (GET_CODE (op0) == NOT && GET_CODE (op1) == XOR)
- return apply_distributive_law
- (gen_binary (XOR, mode,
- gen_binary (IOR, mode, XEXP (op0, 0), XEXP (op1, 0)),
- gen_binary (IOR, mode, copy_rtx (XEXP (op0, 0)),
- XEXP (op1, 1))));
-
- else if (GET_CODE (op1) == NOT && GET_CODE (op0) == XOR)
- return apply_distributive_law
- (gen_binary (XOR, mode,
- gen_binary (IOR, mode, XEXP (op1, 0), XEXP (op0, 0)),
- gen_binary (IOR, mode, copy_rtx (XEXP (op1, 0)), XEXP (op0, 1))));
+ {
+ rtx result = distribute_and_simplify_rtx (x, 1);
+ if (result)
+ return result;
+ }
break;
case IOR:
if (GET_CODE (op0) == AND)
{
- x = apply_distributive_law
- (gen_binary (AND, mode,
- gen_binary (IOR, mode, XEXP (op0, 0), op1),
- gen_binary (IOR, mode, XEXP (op0, 1),
- copy_rtx (op1))));
-
- if (GET_CODE (x) != IOR)
- return x;
+ rtx result = distribute_and_simplify_rtx (x, 0);
+ if (result)
+ return result;
}
if (GET_CODE (op1) == AND)
{
- x = apply_distributive_law
- (gen_binary (AND, mode,
- gen_binary (IOR, mode, XEXP (op1, 0), op0),
- gen_binary (IOR, mode, XEXP (op1, 1),
- copy_rtx (op0))));
-
- if (GET_CODE (x) != IOR)
- return x;
+ rtx result = distribute_and_simplify_rtx (x, 1);
+ if (result)
+ return result;
}
/* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
&& (nonzero_bits (op0, mode)
& nonzero_bits (op1, mode)) == 0)
- return (gen_binary (IOR, mode, op0, op1));
+ return (simplify_gen_binary (IOR, mode, op0, op1));
/* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
}
else if (num_negated == 1)
return
- simplify_gen_unary (NOT, mode, gen_binary (XOR, mode, op0, op1),
+ simplify_gen_unary (NOT, mode,
+ simplify_gen_binary (XOR, mode, op0, op1),
mode);
}
if (GET_CODE (op0) == AND
&& rtx_equal_p (XEXP (op0, 1), op1)
&& ! side_effects_p (op1))
- return gen_binary (AND, mode,
- simplify_gen_unary (NOT, mode, XEXP (op0, 0), mode),
- op1);
+ return simplify_gen_binary (AND, mode,
+ simplify_gen_unary (NOT, mode,
+ XEXP (op0, 0), mode),
+ op1);
else if (GET_CODE (op0) == AND
&& rtx_equal_p (XEXP (op0, 0), op1)
&& ! side_effects_p (op1))
- return gen_binary (AND, mode,
- simplify_gen_unary (NOT, mode, XEXP (op0, 1), mode),
- op1);
+ return simplify_gen_binary (AND, mode,
+ simplify_gen_unary (NOT, mode,
+ XEXP (op0, 1), mode),
+ op1);
/* (xor (comparison foo bar) (const_int 1)) can become the reversed
comparison if STORE_FLAG_VALUE is 1. */
if (STORE_FLAG_VALUE == 1
&& op1 == const1_rtx
&& COMPARISON_P (op0)
- && (reversed = reversed_comparison (op0, mode, XEXP (op0, 0),
- XEXP (op0, 1))))
+ && (reversed = reversed_comparison (op0, mode)))
return reversed;
/* (lshiftrt foo C) where C is the number of bits in FOO minus 1
== (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
&& op1 == const_true_rtx
&& COMPARISON_P (op0)
- && (reversed = reversed_comparison (op0, mode, XEXP (op0, 0),
- XEXP (op0, 1))))
+ && (reversed = reversed_comparison (op0, mode)))
return reversed;
break;
case ZERO_EXTRACT:
unsignedp = 1;
+
+ /* ... fall through ... */
+
case SIGN_EXTRACT:
/* If the operand is a CLOBBER, just return it. */
if (GET_CODE (XEXP (x, 0)) == CLOBBER)
rtx inner;
rtx pos; /* Always counts from low bit. */
int len;
- rtx mask;
+ rtx mask, cleared, masked;
enum machine_mode compute_mode;
/* Loop until we find something we can't simplify. */
/* If position is ADJUST - X, new position is X. */
pos = XEXP (pos, 0);
else
- pos = gen_binary (MINUS, GET_MODE (pos),
- GEN_INT (GET_MODE_BITSIZE (GET_MODE (inner))
- - len),
- pos);
+ pos = simplify_gen_binary (MINUS, GET_MODE (pos),
+ GEN_INT (GET_MODE_BITSIZE (
+ GET_MODE (inner))
+ - len),
+ pos);
}
}
}
/* Compute a mask of LEN bits, if we can do this on the host machine. */
- if (len < HOST_BITS_PER_WIDE_INT)
- mask = GEN_INT (((HOST_WIDE_INT) 1 << len) - 1);
- else
+ if (len >= HOST_BITS_PER_WIDE_INT)
break;
/* Now compute the equivalent expression. Make a copy of INNER
for the SET_DEST in case it is a MEM into which we will substitute;
we don't want shared RTL in that case. */
- x = gen_rtx_SET
- (VOIDmode, copy_rtx (inner),
- gen_binary (IOR, compute_mode,
- gen_binary (AND, compute_mode,
- simplify_gen_unary (NOT, compute_mode,
- gen_binary (ASHIFT,
- compute_mode,
- mask, pos),
- compute_mode),
- inner),
- gen_binary (ASHIFT, compute_mode,
- gen_binary (AND, compute_mode,
- gen_lowpart
- (compute_mode, SET_SRC (x)),
- mask),
- pos)));
+ mask = GEN_INT (((HOST_WIDE_INT) 1 << len) - 1);
+ cleared = simplify_gen_binary (AND, compute_mode,
+ simplify_gen_unary (NOT, compute_mode,
+ simplify_gen_binary (ASHIFT,
+ compute_mode,
+ mask, pos),
+ compute_mode),
+ inner);
+ masked = simplify_gen_binary (ASHIFT, compute_mode,
+ simplify_gen_binary (
+ AND, compute_mode,
+ gen_lowpart (compute_mode, SET_SRC (x)),
+ mask),
+ pos);
+
+ x = gen_rtx_SET (VOIDmode, copy_rtx (inner),
+ simplify_gen_binary (IOR, compute_mode,
+ cleared, masked));
}
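+ /* In effect the rewritten store computes
+ inner = (inner & ~(MASK << POS)) | ((SRC & MASK) << POS);
+ e.g. for an 8-bit field at bit 4, MASK is 0xff and the IOR deposits
+ (SRC & 0xff) << 4 into exactly the range the AND cleared. */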
return x;
/* Avoid creating invalid subregs, for example when
simplifying (x>>32)&255. */
- if (final_word >= GET_MODE_SIZE (inner_mode))
+ if (!validate_subreg (tmode, inner_mode, inner, final_word))
return NULL_RTX;
new = gen_rtx_SUBREG (tmode, inner, final_word);
&& GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
- /* If this is a constant position, we can move to the desired byte. */
+ /* If this is a constant position, we can move to the desired byte.
+ Be careful not to go beyond the original object and maintain the
+ natural alignment of the memory. */
if (pos_rtx == 0)
{
- offset += pos / BITS_PER_UNIT;
- pos %= GET_MODE_BITSIZE (wanted_inner_mode);
+ enum machine_mode bfmode = smallest_mode_for_size (len, MODE_INT);
+ offset += (pos / GET_MODE_BITSIZE (bfmode)) * GET_MODE_SIZE (bfmode);
+ pos %= GET_MODE_BITSIZE (bfmode);
}
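+ /* Example: LEN == 8 and POS == 13 give bfmode == QImode, so OFFSET
+ advances by (13 / 8) * 1 == 1 byte and POS becomes 13 % 8 == 5,
+ keeping the access within the object at QImode's natural
+ alignment. */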
if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
if (GET_CODE (XEXP (x, 1)) == CONST_INT
&& (INTVAL (XEXP (x, 1)) & ((((HOST_WIDE_INT) 1 << count)) - 1)) == 0
&& (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
- return gen_binary (code, mode, tem,
- GEN_INT (INTVAL (XEXP (x, 1)) >> count));
+ return simplify_gen_binary (code, mode, tem,
+ GEN_INT (INTVAL (XEXP (x, 1)) >> count));
break;
what it originally did, do this SUBREG as a force_to_mode. */
tem = make_compound_operation (SUBREG_REG (x), in_code);
- if (GET_CODE (tem) != GET_CODE (SUBREG_REG (x))
- && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (tem))
- && subreg_lowpart_p (x))
- {
- rtx newer = force_to_mode (tem, mode, ~(HOST_WIDE_INT) 0,
- NULL_RTX, 0);
- /* If we have something other than a SUBREG, we might have
- done an expansion, so rerun ourselves. */
- if (GET_CODE (newer) != SUBREG)
- newer = make_compound_operation (newer, in_code);
+ {
+ rtx simplified;
+ simplified = simplify_subreg (GET_MODE (x), tem, GET_MODE (tem),
+ SUBREG_BYTE (x));
- return newer;
- }
+ if (simplified)
+ tem = simplified;
- /* If this is a paradoxical subreg, and the new code is a sign or
- zero extension, omit the subreg and widen the extension. If it
- is a regular subreg, we can still get rid of the subreg by not
- widening so much, or in fact removing the extension entirely. */
- if ((GET_CODE (tem) == SIGN_EXTEND
- || GET_CODE (tem) == ZERO_EXTEND)
- && subreg_lowpart_p (x))
- {
- if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (tem))
- || (GET_MODE_SIZE (mode) >
- GET_MODE_SIZE (GET_MODE (XEXP (tem, 0)))))
- {
- if (! SCALAR_INT_MODE_P (mode))
- break;
- tem = gen_rtx_fmt_e (GET_CODE (tem), mode, XEXP (tem, 0));
- }
- else
- tem = gen_lowpart (mode, XEXP (tem, 0));
+ if (GET_CODE (tem) != GET_CODE (SUBREG_REG (x))
+ && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (tem))
+ && subreg_lowpart_p (x))
+ {
+ rtx newer = force_to_mode (tem, mode, ~(HOST_WIDE_INT) 0,
+ NULL_RTX, 0);
+
+ /* If we have something other than a SUBREG, we might have
+ done an expansion, so rerun ourselves. */
+ if (GET_CODE (newer) != SUBREG)
+ newer = make_compound_operation (newer, in_code);
+
+ return newer;
+ }
+
+ if (simplified)
return tem;
- }
+ }
break;
default:
&& (cval & ((HOST_WIDE_INT) 1 << (width - 1))) != 0)
cval |= (HOST_WIDE_INT) -1 << width;
- y = gen_binary (AND, GET_MODE (x), XEXP (x, 0), GEN_INT (cval));
+ y = simplify_gen_binary (AND, GET_MODE (x),
+ XEXP (x, 0), GEN_INT (cval));
if (rtx_cost (y, SET) < rtx_cost (x, SET))
x = y;
}
{
temp = GEN_INT ((INTVAL (XEXP (x, 1)) & mask)
<< INTVAL (XEXP (XEXP (x, 0), 1)));
- temp = gen_binary (GET_CODE (x), GET_MODE (x),
- XEXP (XEXP (x, 0), 0), temp);
- x = gen_binary (LSHIFTRT, GET_MODE (x), temp,
- XEXP (XEXP (x, 0), 1));
+ temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
+ XEXP (XEXP (x, 0), 0), temp);
+ x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
+ XEXP (XEXP (x, 0), 1));
return force_to_mode (x, mode, mask, reg, next_select);
}
reg, next_select));
if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
- x = gen_binary (code, op_mode, op0, op1);
+ x = simplify_gen_binary (code, op_mode, op0, op1);
break;
case ASHIFT:
mask, reg, next_select));
if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
- x = gen_binary (code, op_mode, op0, XEXP (x, 1));
+ x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
break;
case LSHIFTRT:
inner = force_to_mode (inner, op_mode, inner_mask, reg, next_select);
if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
- x = gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
+ x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
}
/* If we have (and (lshiftrt FOO C1) C2) where the combination of the
/* Must be more sign bit copies than the mask needs. */
&& ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
>= exact_log2 (mask + 1)))
- x = gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
- GEN_INT (GET_MODE_BITSIZE (GET_MODE (x))
- - exact_log2 (mask + 1)));
+ x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
+ GEN_INT (GET_MODE_BITSIZE (GET_MODE (x))
+ - exact_log2 (mask + 1)));
goto shiftrt;
/* If MASK is 1, convert this to an LSHIFTRT. This can be done
even if the shift count isn't a constant. */
if (mask == 1)
- x = gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0), XEXP (x, 1));
+ x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
+ XEXP (x, 0), XEXP (x, 1));
shiftrt:
{
temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
GET_MODE (x));
- temp = gen_binary (XOR, GET_MODE (x), XEXP (XEXP (x, 0), 0), temp);
- x = gen_binary (LSHIFTRT, GET_MODE (x), temp, XEXP (XEXP (x, 0), 1));
+ temp = simplify_gen_binary (XOR, GET_MODE (x),
+ XEXP (XEXP (x, 0), 0), temp);
+ x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
+ temp, XEXP (XEXP (x, 0), 1));
return force_to_mode (x, mode, mask, reg, next_select);
}
else if (cond1 == 0)
true1 = copy_rtx (true1);
- *ptrue = gen_binary (code, mode, true0, true1);
- *pfalse = gen_binary (code, mode, false0, false1);
+ if (COMPARISON_P (x))
+ {
+ *ptrue = simplify_gen_relational (code, mode, VOIDmode,
+ true0, true1);
+ *pfalse = simplify_gen_relational (code, mode, VOIDmode,
+ false0, false1);
+ }
+ else
+ {
+ *ptrue = simplify_gen_binary (code, mode, true0, true1);
+ *pfalse = simplify_gen_binary (code, mode, false0, false1);
+ }
+
return cond0 ? cond0 : cond1;
}
if (COMPARISON_P (cond0)
&& COMPARISON_P (cond1)
- && ((GET_CODE (cond0) == combine_reversed_comparison_code (cond1)
+ && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
&& rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
&& rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
|| ((swap_condition (GET_CODE (cond0))
- == combine_reversed_comparison_code (cond1))
+ == reversed_comparison_code (cond1, NULL))
&& rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
&& rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
&& ! side_effects_p (x))
{
- *ptrue = gen_binary (MULT, mode, op0, const_true_rtx);
- *pfalse = gen_binary (MULT, mode,
- (code == MINUS
- ? simplify_gen_unary (NEG, mode, op1,
- mode)
- : op1),
- const_true_rtx);
+ *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
+ *pfalse = simplify_gen_binary (MULT, mode,
+ (code == MINUS
+ ? simplify_gen_unary (NEG, mode,
+ op1, mode)
+ : op1),
+ const_true_rtx);
return cond0;
}
}
if (COMPARISON_P (cond0)
&& COMPARISON_P (cond1)
- && ((GET_CODE (cond0) == combine_reversed_comparison_code (cond1)
+ && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
&& rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
&& rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
|| ((swap_condition (GET_CODE (cond0))
- == combine_reversed_comparison_code (cond1))
+ == reversed_comparison_code (cond1, NULL))
&& rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
&& rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
&& ! side_effects_p (x))
if (comparison_dominates_p (cond, code))
return const_true_rtx;
- code = combine_reversed_comparison_code (x);
+ code = reversed_comparison_code (x, NULL);
if (code != UNKNOWN
&& comparison_dominates_p (cond, code))
return const0_rtx;
return x;
}
- else if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
- && subreg_lowpart_p (XEXP (src, 0))
- && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
- < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
- && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
- && GET_CODE (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == CONST_INT
- && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
- && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
+ if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
+ && subreg_lowpart_p (XEXP (src, 0))
+ && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
+ && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
+ && GET_CODE (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == CONST_INT
+ && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
+ && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
{
assign = make_extraction (VOIDmode, dest, 0,
XEXP (SUBREG_REG (XEXP (src, 0)), 1),
/* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
one-bit field. */
- else if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
- && XEXP (XEXP (src, 0), 0) == const1_rtx
- && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
+ if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
+ && XEXP (XEXP (src, 0), 0) == const1_rtx
+ && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
{
assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
1, 1, 1, 0);
return x;
}
+ /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
+ SRC is an AND with all bits of that field set, then we can discard
+ the AND. */
+ if (GET_CODE (dest) == ZERO_EXTRACT
+ && GET_CODE (XEXP (dest, 1)) == CONST_INT
+ && GET_CODE (src) == AND
+ && GET_CODE (XEXP (src, 1)) == CONST_INT)
+ {
+ HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
+ unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
+ unsigned HOST_WIDE_INT ze_mask;
+
+ if (width >= HOST_BITS_PER_WIDE_INT)
+ ze_mask = -1;
+ else
+ ze_mask = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
+
+ /* Complete overlap. We can remove the source AND. */
+ if ((and_mask & ze_mask) == ze_mask)
+ return gen_rtx_SET (VOIDmode, dest, XEXP (src, 0));
+
+ /* Partial overlap. We can reduce the source AND. */
+ if ((and_mask & ze_mask) != and_mask)
+ {
+ mode = GET_MODE (src);
+ src = gen_rtx_AND (mode, XEXP (src, 0),
+ gen_int_mode (and_mask & ze_mask, mode));
+ return gen_rtx_SET (VOIDmode, dest, src);
+ }
+ }
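+
+ /* For instance, with an 8-bit field ze_mask is 0xff: an AND mask of
+ 0x1ff covers the field completely and the AND is removed, while a
+ mask of 0x10f is trimmed to 0x0f, the only bits that can actually
+ reach the field. */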
+
/* The other case we handle is assignments into a constant-position
field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
a mask that has all one bits except for a group of zero bits and
break;
case SUBREG:
- /* Non-paradoxical SUBREGs distributes over all operations, provided
- the inner modes and byte offsets are the same, this is an extraction
- of a low-order part, we don't convert an fp operation to int or
- vice versa, and we would not be converting a single-word
- operation into a multi-word operation. The latter test is not
- required, but it prevents generating unneeded multi-word operations.
- Some of the previous tests are redundant given the latter test, but
- are retained because they are required for correctness.
+ /* Non-paradoxical SUBREGs distribute over all operations,
+ provided the inner modes and byte offsets are the same, this
+ is an extraction of a low-order part, we don't convert an fp
+ operation to int or vice versa, this is not a vector mode,
+ and we would not be converting a single-word operation into a
+ multi-word operation. The latter test is not required, but
+ it prevents generating unneeded multi-word operations. Some
+ of the previous tests are redundant given the latter test,
+ but are retained because they are required for correctness.
We produce the result slightly differently in this case. */
!= GET_MODE_CLASS (GET_MODE (SUBREG_REG (lhs))))
|| (GET_MODE_SIZE (GET_MODE (lhs))
> GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))))
+ || VECTOR_MODE_P (GET_MODE (lhs))
|| GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))) > UNITS_PER_WORD)
return x;
- tem = gen_binary (code, GET_MODE (SUBREG_REG (lhs)),
- SUBREG_REG (lhs), SUBREG_REG (rhs));
+ tem = simplify_gen_binary (code, GET_MODE (SUBREG_REG (lhs)),
+ SUBREG_REG (lhs), SUBREG_REG (rhs));
return gen_lowpart (GET_MODE (x), tem);
default:
return x;
/* Form the new inner operation, seeing if it simplifies first. */
- tem = gen_binary (code, GET_MODE (x), lhs, rhs);
+ tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
/* There is one exception to the general way of distributing:
(a | c) ^ (b | c) -> (a ^ b) & ~c */
/* We may be able to continuing distributing the result, so call
ourselves recursively on the inner operation before forming the
outer operation, which we return. */
- return gen_binary (inner_code, GET_MODE (x),
- apply_distributive_law (tem), other);
+ return simplify_gen_binary (inner_code, GET_MODE (x),
+ apply_distributive_law (tem), other);
+}
+
+/* See if X is of the form (* (+ A B) C), and if so convert to
+ (+ (* A C) (* B C)) and try to simplify.
+
+ Most of the time, this results in no change. However, if some of
+ the operands are the same or inverses of each other, simplifications
+ will result.
+
+ For example, (and (ior A B) (not B)) can occur as the result of
+ expanding a bit field assignment. When we apply the distributive
+ law to this, we get (ior (and A (not B)) (and B (not B))),
+ which then simplifies to (and A (not B)).
+
+ Note that we do not verify here that applying the inverse
+ distributive law is valid; checking in this routine would be
+ pointless, since the few places that call it can do so themselves.
+
+ N is the index of the term that is decomposed (the arithmetic operation,
+ i.e. (+ A B) in the first example above). !N is the index of the term that
+ is distributed, i.e. of C in the first example above. */
+static rtx
+distribute_and_simplify_rtx (rtx x, int n)
+{
+ enum machine_mode mode;
+ enum rtx_code outer_code, inner_code;
+ rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
+
+ decomposed = XEXP (x, n);
+ if (!ARITHMETIC_P (decomposed))
+ return NULL_RTX;
+
+ mode = GET_MODE (x);
+ outer_code = GET_CODE (x);
+ distributed = XEXP (x, !n);
+
+ inner_code = GET_CODE (decomposed);
+ inner_op0 = XEXP (decomposed, 0);
+ inner_op1 = XEXP (decomposed, 1);
+
+ /* Special case (and (xor B C) (not A)), which is equivalent to
+ (xor (ior A B) (ior A C)) */
+ if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
+ {
+ distributed = XEXP (distributed, 0);
+ outer_code = IOR;
+ }
+
+ if (n == 0)
+ {
+ /* Distribute the second term. */
+ new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
+ new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
+ }
+ else
+ {
+ /* Distribute the first term. */
+ new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
+ new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
+ }
+
+ tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
+ new_op0, new_op1));
+ if (GET_CODE (tmp) != outer_code
+ && rtx_cost (tmp, SET) < rtx_cost (x, SET))
+ return tmp;
+
+ return NULL_RTX;
}
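+
+/* The AND/XOR/NOT special case above relies on the identity
+ (and (xor B C) (not A)) == (xor (ior A B) (ior A C)):
+ rewriting OUTER_CODE to IOR and distributing A yields (ior A B) and
+ (ior A C), which simplify_gen_binary can fold when A equals or is
+ the inverse of B or C. A candidate is returned only when its outer
+ code changed and its rtx_cost dropped, so the same expression is
+ never redistributed forever. */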
\f
/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
/* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
to VAROP and return the new constant. */
if (GET_CODE (varop) == CONST_INT)
- return GEN_INT (trunc_int_for_mode (INTVAL (varop) & constop, mode));
+ return gen_int_mode (INTVAL (varop) & constop, mode);
/* See what bits may be nonzero in VAROP. Unlike the general case of
a call to nonzero_bits, here we don't care about bits outside
gen_lowpart
(mode,
apply_distributive_law
- (gen_binary (GET_CODE (varop), GET_MODE (varop),
- simplify_and_const_int (NULL_RTX, GET_MODE (varop),
- XEXP (varop, 0), constop),
- simplify_and_const_int (NULL_RTX, GET_MODE (varop),
- XEXP (varop, 1), constop))));
+ (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
+ simplify_and_const_int (NULL_RTX,
+ GET_MODE (varop),
+ XEXP (varop, 0),
+ constop),
+ simplify_and_const_int (NULL_RTX,
+ GET_MODE (varop),
+ XEXP (varop, 1),
+ constop))));
/* If VAROP is PLUS, and the constant is a mask of low bits, distribute
the AND and see if one of the operands simplifies to zero. If so, we
constop = trunc_int_for_mode (constop, mode);
/* See how much, if any, of X we can use. */
if (x == 0 || GET_CODE (x) != AND || GET_MODE (x) != mode)
- x = gen_binary (AND, mode, varop, GEN_INT (constop));
+ x = simplify_gen_binary (AND, mode, varop, GEN_INT (constop));
else
{
&& (reg_stat[REGNO (x)].last_set_label == label_tick
|| (REGNO (x) >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (REGNO (x)) == 1
- && ! REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start,
- REGNO (x))))
+ && ! REGNO_REG_SET_P
+ (ENTRY_BLOCK_PTR->next_bb->il.rtl->global_live_at_start,
+ REGNO (x))))
&& INSN_CUID (reg_stat[REGNO (x)].last_set) < subst_low_cuid)
{
*nonzero &= reg_stat[REGNO (x)].last_set_nonzero_bits;
&& (reg_stat[REGNO (x)].last_set_label == label_tick
|| (REGNO (x) >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (REGNO (x)) == 1
- && ! REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start,
- REGNO (x))))
+ && ! REGNO_REG_SET_P
+ (ENTRY_BLOCK_PTR->next_bb->il.rtl->global_live_at_start,
+ REGNO (x))))
&& INSN_CUID (reg_stat[REGNO (x)].last_set) < subst_low_cuid)
{
*result = reg_stat[REGNO (x)].last_set_sign_bit_copies;
&& exact_log2 (INTVAL (XEXP (varop, 1))) >= 0)
{
varop
- = gen_binary (ASHIFT, GET_MODE (varop), XEXP (varop, 0),
- GEN_INT (exact_log2 (INTVAL (XEXP (varop, 1)))));
+ = simplify_gen_binary (ASHIFT, GET_MODE (varop),
+ XEXP (varop, 0),
+ GEN_INT (exact_log2 (
+ INTVAL (XEXP (varop, 1)))));
continue;
}
break;
&& exact_log2 (INTVAL (XEXP (varop, 1))) >= 0)
{
varop
- = gen_binary (LSHIFTRT, GET_MODE (varop), XEXP (varop, 0),
- GEN_INT (exact_log2 (INTVAL (XEXP (varop, 1)))));
+ = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
+ XEXP (varop, 0),
+ GEN_INT (exact_log2 (
+ INTVAL (XEXP (varop, 1)))));
continue;
}
break;
rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
XEXP (varop, 1), count);
- varop = gen_binary (GET_CODE (varop), shift_mode, lhs, rhs);
+ varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
+ lhs, rhs);
varop = apply_distributive_law (varop);
count = 0;
else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
else
- x = gen_binary (outer_op, result_mode, x, GEN_INT (outer_const));
+ x = simplify_gen_binary (outer_op, result_mode, x,
+ GEN_INT (outer_const));
}
return x;
An insn containing that will not be recognized. */
static rtx
-gen_lowpart_for_combine (enum machine_mode mode, rtx x)
+gen_lowpart_for_combine (enum machine_mode omode, rtx x)
{
+ enum machine_mode imode = GET_MODE (x);
+ unsigned int osize = GET_MODE_SIZE (omode);
+ unsigned int isize = GET_MODE_SIZE (imode);
rtx result;
- if (GET_MODE (x) == mode)
+ if (omode == imode)
return x;
- /* Return identity if this is a CONST or symbolic
- reference. */
- if (mode == Pmode
+ /* Return identity if this is a CONST or symbolic reference. */
+ if (omode == Pmode
&& (GET_CODE (x) == CONST
|| GET_CODE (x) == SYMBOL_REF
|| GET_CODE (x) == LABEL_REF))
/* We can only support MODE being wider than a word if X is a
constant integer or has a mode the same size. */
-
- if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
- && ! ((GET_MODE (x) == VOIDmode
+ if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
+ && ! ((imode == VOIDmode
&& (GET_CODE (x) == CONST_INT
|| GET_CODE (x) == CONST_DOUBLE))
- || GET_MODE_SIZE (GET_MODE (x)) == GET_MODE_SIZE (mode)))
- return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
+ || isize == osize))
+ goto fail;
/* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
won't know what to do. So we will strip off the SUBREG here and
if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
{
x = SUBREG_REG (x);
- if (GET_MODE (x) == mode)
+
+ /* We just changed x, so update the mode and size we have
+ recorded for it (imode and isize), in case we fall through
+ to the address adjustments further below. */
+ imode = GET_MODE (x);
+
+ if (imode == omode)
return x;
+
+ isize = GET_MODE_SIZE (imode);
}
- result = gen_lowpart_common (mode, x);
+ result = gen_lowpart_common (omode, x);
+
#ifdef CANNOT_CHANGE_MODE_CLASS
if (result != 0 && GET_CODE (result) == SUBREG)
record_subregs_of_mode (result);
/* Refuse to work on a volatile memory ref or one with a mode-dependent
address. */
if (MEM_VOLATILE_P (x) || mode_dependent_address_p (XEXP (x, 0)))
- return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
+ goto fail;
/* If we want to refer to something bigger than the original memref,
generate a paradoxical subreg instead. That will force a reload
of the original memref X. */
- if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode))
- return gen_rtx_SUBREG (mode, x, 0);
+ if (isize < osize)
+ return gen_rtx_SUBREG (omode, x, 0);
if (WORDS_BIG_ENDIAN)
- offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
- - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
+ offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);
+ /* Adjust the address so that the address-after-the-data is
+ unchanged. */
if (BYTES_BIG_ENDIAN)
- {
- /* Adjust the address so that the address-after-the-data is
- unchanged. */
- offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode))
- - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x))));
- }
+ offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);
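+ /* Illustrative arithmetic: reading an SImode low part of a DImode
+ MEM on a 32-bit WORDS_BIG_ENDIAN target gives
+ offset = MAX (8, 4) - MAX (4, 4) = 4, i.e. the second word; for
+ a QImode part the BYTES_BIG_ENDIAN correction then subtracts
+ MIN (4, 1) - MIN (4, 8) = -3, landing on the last byte. */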
- return adjust_address_nv (x, mode, offset);
+ return adjust_address_nv (x, omode, offset);
}
/* If X is a comparison operator, rewrite it in a new mode. This
probably won't match, but may allow further simplifications. */
else if (COMPARISON_P (x))
- return gen_rtx_fmt_ee (GET_CODE (x), mode, XEXP (x, 0), XEXP (x, 1));
+ return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
/* If we couldn't simplify X any other way, just enclose it in a
SUBREG. Normally, this SUBREG won't match, but some patterns may
{
int offset = 0;
rtx res;
- enum machine_mode sub_mode = GET_MODE (x);
- offset = subreg_lowpart_offset (mode, sub_mode);
- if (sub_mode == VOIDmode)
+ offset = subreg_lowpart_offset (omode, imode);
+ if (imode == VOIDmode)
{
- sub_mode = int_mode_for_mode (mode);
- x = gen_lowpart_common (sub_mode, x);
- if (x == 0)
- return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
+ imode = int_mode_for_mode (omode);
+ x = gen_lowpart_common (imode, x);
+ if (x == NULL)
+ goto fail;
}
- res = simplify_gen_subreg (mode, x, sub_mode, offset);
+ res = simplify_gen_subreg (omode, x, imode, offset);
if (res)
return res;
- return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
- }
-}
-\f
-/* These routines make binary and unary operations by first seeing if they
- fold; if not, a new expression is allocated. */
-
-static rtx
-gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0, rtx op1)
-{
- rtx result;
- rtx tem;
-
- if (GET_CODE (op0) == CLOBBER)
- return op0;
- else if (GET_CODE (op1) == CLOBBER)
- return op1;
-
- if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
- && swap_commutative_operands_p (op0, op1))
- tem = op0, op0 = op1, op1 = tem;
-
- if (GET_RTX_CLASS (code) == RTX_COMPARE
- || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
- {
- enum machine_mode op_mode = GET_MODE (op0);
-
- /* Strip the COMPARE from (REL_OP (compare X Y) 0) to get
- just (REL_OP X Y). */
- if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
- {
- op1 = XEXP (op0, 1);
- op0 = XEXP (op0, 0);
- op_mode = GET_MODE (op0);
- }
-
- if (op_mode == VOIDmode)
- op_mode = GET_MODE (op1);
- result = simplify_relational_operation (code, mode, op_mode, op0, op1);
}
- else
- result = simplify_binary_operation (code, mode, op0, op1);
-
- if (result)
- return result;
-
- /* Put complex operands first and constants second. */
- if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
- && swap_commutative_operands_p (op0, op1))
- return gen_rtx_fmt_ee (code, mode, op1, op0);
- /* If we are turning off bits already known off in OP0, we need not do
- an AND. */
- else if (code == AND && GET_CODE (op1) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
- return op0;
-
- return gen_rtx_fmt_ee (code, mode, op0, op1);
+ fail:
+ return gen_rtx_CLOBBER (imode, const0_rtx);
}
\f
/* Simplify a comparison between *POP0 and *POP1 where CODE is the
break;
case SIGN_EXTEND:
- /* Can simplify (compare (zero/sign_extend FOO) CONST)
- to (compare FOO CONST) if CONST fits in FOO's mode and we
- are either testing inequality or have an unsigned comparison
- with ZERO_EXTEND or a signed comparison with SIGN_EXTEND. */
- if (! unsigned_comparison_p
- && (GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0)))
- <= HOST_BITS_PER_WIDE_INT)
+ /* Can simplify (compare (zero/sign_extend FOO) CONST) to
+ (compare FOO CONST) if CONST fits in FOO's mode and we
+ are either testing inequality or have an unsigned
+ comparison with ZERO_EXTEND or a signed comparison with
+ SIGN_EXTEND. But don't do it if we don't have a compare
+ insn of the given mode, since we'd have to revert it
+ later on, and then we wouldn't know whether to sign- or
+ zero-extend. */
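+ /* Illustrative: (compare (sign_extend:SI (reg:HI R))
+ (const_int 100)) can be narrowed to (compare (reg:HI R)
+ (const_int 100)), since 100 fits in HImode, but only if the
+ target actually has an HImode compare insn. */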
+ mode = GET_MODE (XEXP (op0, 0));
+ if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
+ && ! unsigned_comparison_p
+ && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
&& ((unsigned HOST_WIDE_INT) const_op
- < (((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))) - 1)))))
+ < (((unsigned HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (mode) - 1))))
+ && cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
{
op0 = XEXP (op0, 0);
continue;
break;
case SUBREG:
- /* Check for the case where we are comparing A - C1 with C2,
- both constants are smaller than 1/2 the maximum positive
- value in MODE, and the comparison is equality or unsigned.
- In that case, if A is either zero-extended to MODE or has
- sufficient sign bits so that the high-order bit in MODE
- is a copy of the sign in the inner mode, we can prove that it is
- safe to do the operation in the wider mode. This simplifies
- many range checks. */
+ /* Check for the case where we are comparing A - C1 with C2, that is
+
+ (subreg:MODE (plus (A) (-C1))) op (C2)
+
+ with C1 a constant, and try to lift the SUBREG, i.e. to do the
+ comparison in the wider mode. One of the following two conditions
+ must be true in order for this to be valid:
+
+ 1. The mode extension results in the same bit pattern being added
+ on both sides and the comparison is equality or unsigned. As
+ C2 has been truncated to fit in MODE, the pattern can only be
+ all 0s or all 1s.
+
+ 2. The mode extension results in the sign bit being copied on
+ each side.
+
+ The difficulty here is that we have predicates for A but not for
+ (A - C1), so we need to check that C1 is within proper bounds so
+ as to perturb A as little as possible. */
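+ /* An illustrative instance of condition 1: mode QImode, inner mode
+ SImode, testing (subreg:QI (plus:SI A (const_int -3))) == 10.
+ If nonzero_bits shows A fits in 8 bits, then A - 3 zero-extends
+ when A >= 3 and sign-extends otherwise, and with C2 = 10 >= 0 the
+ comparison can be done on (plus:SI A (const_int -3)) directly. */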
if (mode_width <= HOST_BITS_PER_WIDE_INT
&& subreg_lowpart_p (op0)
+ && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) > mode_width
&& GET_CODE (SUBREG_REG (op0)) == PLUS
- && GET_CODE (XEXP (SUBREG_REG (op0), 1)) == CONST_INT
- && INTVAL (XEXP (SUBREG_REG (op0), 1)) < 0
- && (-INTVAL (XEXP (SUBREG_REG (op0), 1))
- < (HOST_WIDE_INT) (GET_MODE_MASK (mode) / 2))
- && (unsigned HOST_WIDE_INT) const_op < GET_MODE_MASK (mode) / 2
- && (0 == (nonzero_bits (XEXP (SUBREG_REG (op0), 0),
- GET_MODE (SUBREG_REG (op0)))
- & ~GET_MODE_MASK (mode))
- || (num_sign_bit_copies (XEXP (SUBREG_REG (op0), 0),
- GET_MODE (SUBREG_REG (op0)))
- > (unsigned int)
- (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
- - GET_MODE_BITSIZE (mode)))))
- {
- op0 = SUBREG_REG (op0);
- continue;
+ && GET_CODE (XEXP (SUBREG_REG (op0), 1)) == CONST_INT)
+ {
+ enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
+ rtx a = XEXP (SUBREG_REG (op0), 0);
+ HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
+
+ if ((c1 > 0
+ && (unsigned HOST_WIDE_INT) c1
+ < (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)
+ && (equality_comparison_p || unsigned_comparison_p)
+ /* (A - C1) zero-extends if it is positive and sign-extends
+ if it is negative, while C2 both zero- and sign-extends. */
+ && ((0 == (nonzero_bits (a, inner_mode)
+ & ~GET_MODE_MASK (mode))
+ && const_op >= 0)
+ /* (A - C1) sign-extends if it is positive and 1-extends
+ if it is negative, while C2 both sign- and 1-extends. */
+ || (num_sign_bit_copies (a, inner_mode)
+ > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
+ - mode_width)
+ && const_op < 0)))
+ || ((unsigned HOST_WIDE_INT) c1
+ < (unsigned HOST_WIDE_INT) 1 << (mode_width - 2)
+ /* (A - C1) always sign-extends, like C2. */
+ && num_sign_bit_copies (a, inner_mode)
+ > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
+ - (mode_width - 1))))
+ {
+ op0 = SUBREG_REG (op0);
+ continue;
+ }
}
/* If the inner mode is narrower and we are extracting the low part,
/* ... fall through ... */
case ZERO_EXTEND:
- if ((unsigned_comparison_p || equality_comparison_p)
- && (GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0)))
- <= HOST_BITS_PER_WIDE_INT)
- && ((unsigned HOST_WIDE_INT) const_op
- < GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))))
+ mode = GET_MODE (XEXP (op0, 0));
+ if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
+ && (unsigned_comparison_p || equality_comparison_p)
+ && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ && ((unsigned HOST_WIDE_INT) const_op < GET_MODE_MASK (mode))
+ && cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
{
op0 = XEXP (op0, 0);
continue;
if (code == LT || code == NE)
new_code = GET_CODE (op0);
else
- new_code = combine_reversed_comparison_code (op0);
+ new_code = reversed_comparison_code (op0, NULL);
if (new_code != UNKNOWN)
{
&& c1 != mask
&& c1 != GET_MODE_MASK (tmode))
{
- op0 = gen_binary (AND, tmode,
- SUBREG_REG (XEXP (op0, 0)),
- gen_int_mode (c1, tmode));
+ op0 = simplify_gen_binary (AND, tmode,
+ SUBREG_REG (XEXP (op0, 0)),
+ gen_int_mode (c1, tmode));
op0 = gen_lowpart (mode, op0);
continue;
}
{
rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
rtx add_const = XEXP (XEXP (op0, 0), 1);
- rtx new_const = gen_binary (ASHIFTRT, GET_MODE (op0), add_const,
- XEXP (op0, 1));
+ rtx new_const = simplify_gen_binary (ASHIFTRT, GET_MODE (op0),
+ add_const, XEXP (op0, 1));
- op0 = gen_binary (PLUS, tmode,
- gen_lowpart (tmode, inner),
- new_const);
+ op0 = simplify_gen_binary (PLUS, tmode,
+ gen_lowpart (tmode, inner),
+ new_const);
continue;
}
make a new AND in the proper mode. */
if (GET_CODE (op0) == AND
&& !have_insn_for (AND, mode))
- op0 = gen_binary (AND, tmode,
- gen_lowpart (tmode,
- XEXP (op0, 0)),
- gen_lowpart (tmode,
- XEXP (op0, 1)));
+ op0 = simplify_gen_binary (AND, tmode,
+ gen_lowpart (tmode,
+ XEXP (op0, 0)),
+ gen_lowpart (tmode,
+ XEXP (op0, 1)));
op0 = gen_lowpart (tmode, op0);
if (zero_extended && GET_CODE (op1) == CONST_INT)
if (op1 == const0_rtx && (code == LT || code == GE)
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
{
- op0 = gen_binary (AND, tmode,
- gen_lowpart (tmode, op0),
- GEN_INT ((HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (mode) - 1)));
+ op0 = simplify_gen_binary (AND, tmode,
+ gen_lowpart (tmode, op0),
+ GEN_INT ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (mode)
+ - 1)));
code = (code == LT) ? NE : EQ;
break;
}
return code;
}
\f
-/* Like jump.c' reversed_comparison_code, but use combine infrastructure for
- searching backward. */
-static enum rtx_code
-combine_reversed_comparison_code (rtx exp)
+/* Utility function for record_value_for_reg. Count the number of
+ rtxs in X. */
+static int
+count_rtxs (rtx x)
{
- enum rtx_code code1 = reversed_comparison_code (exp, NULL);
- rtx x;
-
- if (code1 != UNKNOWN
- || GET_MODE_CLASS (GET_MODE (XEXP (exp, 0))) != MODE_CC)
- return code1;
- /* Otherwise try and find where the condition codes were last set and
- use that. */
- x = get_last_value (XEXP (exp, 0));
- if (!x || GET_CODE (x) != COMPARE)
- return UNKNOWN;
- return reversed_comparison_code_parts (GET_CODE (exp),
- XEXP (x, 0), XEXP (x, 1), NULL);
-}
+ enum rtx_code code = GET_CODE (x);
+ const char *fmt;
+ int i, ret = 1;
-/* Return comparison with reversed code of EXP and operands OP0 and OP1.
- Return NULL_RTX in case we fail to do the reversal. */
-static rtx
-reversed_comparison (rtx exp, enum machine_mode mode, rtx op0, rtx op1)
-{
- enum rtx_code reversed_code = combine_reversed_comparison_code (exp);
- if (reversed_code == UNKNOWN)
- return NULL_RTX;
- else
- return gen_binary (reversed_code, mode, op0, op1);
+ if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
+ || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
+ {
+ rtx x0 = XEXP (x, 0);
+ rtx x1 = XEXP (x, 1);
+
+ if (x0 == x1)
+ return 1 + 2 * count_rtxs (x0);
+
+ if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
+ || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
+ && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
+ return 2 + 2 * count_rtxs (x0)
+ + count_rtxs (x0 == XEXP (x1, 0)
+ ? XEXP (x1, 1) : XEXP (x1, 0));
+
+ if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
+ || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
+ && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
+ return 2 + 2 * count_rtxs (x1)
+ + count_rtxs (x1 == XEXP (x0, 0)
+ ? XEXP (x0, 1) : XEXP (x0, 0));
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ ret += count_rtxs (XEXP (x, i));
+
+ return ret;
}
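+
+/* Illustrative: for X = (plus (reg A) (reg A)) the shared-operand
+   case fires and count_rtxs returns 1 + 2 * 1 = 3, deliberately
+   charging the shared subexpression twice; (plus (reg A) (reg B))
+   also counts 3, but via the generic format-string walk. */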
\f
/* Utility function for following routine. Called when X is part of a value
&& GET_CODE (XEXP (tem, 0)) == CLOBBER
&& GET_CODE (XEXP (tem, 1)) == CLOBBER)
tem = XEXP (tem, 0);
+ else if (count_occurrences (value, reg, 1) >= 2)
+ {
+ /* If there are two or more occurrences of REG in VALUE,
+ prevent the value from growing too much. */
+ if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
+ tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
+ }
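+ /* Illustrative: each substitution of TEM for REG below can roughly
+ square the size of VALUE when REG occurs several times, so the
+ cap above (MAX_LAST_VALUE_RTL, assumed here to be a
+ params.h-controlled limit) keeps recorded values from exploding. */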
value = replace_rtx (copy_rtx (value), reg, tem);
}
|| (! (regno >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (regno) == 1
&& (! REGNO_REG_SET_P
- (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, regno)))
+ (ENTRY_BLOCK_PTR->next_bb->il.rtl->global_live_at_start,
+ regno)))
&& reg_stat[j].last_set_label > tick))
{
if (replace)
&& (regno < FIRST_PSEUDO_REGISTER
|| REG_N_SETS (regno) != 1
|| (REGNO_REG_SET_P
- (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, regno)))))
+ (ENTRY_BLOCK_PTR->next_bb->il.rtl->global_live_at_start,
+ regno)))))
return 0;
/* If the value was set in a later insn than the ones we are processing,
}
for (i = reg_dead_regno; i < reg_dead_endregno; i++)
- if (REGNO_REG_SET_P (block->global_live_at_start, i))
+ if (REGNO_REG_SET_P (block->il.rtl->global_live_at_start, i))
return 0;
return 1;
while (GET_CODE (testreg) == SUBREG
|| GET_CODE (testreg) == ZERO_EXTRACT
- || GET_CODE (testreg) == SIGN_EXTRACT
|| GET_CODE (testreg) == STRICT_LOW_PART)
testreg = XEXP (testreg, 0);
as appropriate. I3 and I2 are the insns resulting from the combination
insns including FROM (I2 may be zero).
+ ELIM_I2 and ELIM_I1 are either zero or registers that we know will
+ not need REG_DEAD notes because they are being substituted for. This
+ saves searching in the most common cases.
+
Each note in the list is either ignored or placed on some insns, depending
on the type of note. */
static void
-distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2)
+distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2, rtx elim_i2,
+ rtx elim_i1)
{
rtx note, next_note;
rtx tem;
}
break;
- case REG_ALWAYS_RETURN:
case REG_NORETURN:
case REG_SETJMP:
/* These notes must remain with the call. It should not be
&& reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
place = i2;
+ if (place == 0
+ && (rtx_equal_p (XEXP (note, 0), elim_i2)
+ || rtx_equal_p (XEXP (note, 0), elim_i1)))
+ break;
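+ /* Illustrative: when try_combine has substituted I2 entirely into
+ I3, it can pass I2's destination as ELIM_I2; a REG_DEAD note
+ naming that register is stale, so it is dropped here instead of
+ searching for an insn to attach it to. */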
+
if (place == 0)
{
basic_block bb = this_basic_block;
PATTERN (tem) = pc_rtx;
REG_NOTES (tem) = NULL;
- distribute_notes (old_notes, tem, tem, NULL_RTX);
+ distribute_notes (old_notes, tem, tem, NULL_RTX,
+ NULL_RTX, NULL_RTX);
distribute_links (LOG_LINKS (tem));
SET_INSN_DELETED (tem);
REG_NOTES (cc0_setter) = NULL;
distribute_notes (old_notes, cc0_setter,
- cc0_setter, NULL_RTX);
+ cc0_setter, NULL_RTX,
+ NULL_RTX, NULL_RTX);
distribute_links (LOG_LINKS (cc0_setter));
SET_INSN_DELETED (cc0_setter);
was dead, there's nothing left to do. Otherwise, we'll
need to do a global life update after combine. */
if (REG_NOTE_KIND (note) == REG_DEAD && place == 0
- && REGNO_REG_SET_P (bb->global_live_at_start,
+ && REGNO_REG_SET_P (bb->il.rtl->global_live_at_start,
REGNO (XEXP (note, 0))))
SET_BIT (refresh_blocks, this_basic_block->index);
}
= gen_rtx_EXPR_LIST (REG_DEAD, piece, NULL_RTX);
distribute_notes (new_note, place, place,
- NULL_RTX);
+ NULL_RTX, NULL_RTX, NULL_RTX);
}
else if (! refers_to_regno_p (i, i + 1,
PATTERN (place), 0)
reg = SET_DEST (set);
while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
- || GET_CODE (reg) == SIGN_EXTRACT
|| GET_CODE (reg) == STRICT_LOW_PART)
reg = XEXP (reg, 0);
void
dump_combine_stats (FILE *file)
{
- fnotice
+ fprintf
(file,
";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
combine_attempts, combine_merges, combine_extras, combine_successes);
void
dump_combine_total_stats (FILE *file)
{
- fnotice
+ fprintf
(file,
"\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
total_attempts, total_merges, total_extras, total_successes);
}
+\f
+
+static bool
+gate_handle_combine (void)
+{
+ return (optimize > 0);
+}
+
+/* Try combining insns through substitution. */
+static void
+rest_of_handle_combine (void)
+{
+ int rebuild_jump_labels_after_combine
+ = combine_instructions (get_insns (), max_reg_num ());
+
+ /* Combining insns may have turned an indirect jump into a
+ direct jump. Rebuild the JUMP_LABEL fields of jumping
+ instructions. */
+ if (rebuild_jump_labels_after_combine)
+ {
+ timevar_push (TV_JUMP);
+ rebuild_jump_labels (get_insns ());
+ timevar_pop (TV_JUMP);
+
+ delete_dead_jumptables ();
+ cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_UPDATE_LIFE);
+ }
+}
+
+struct tree_opt_pass pass_combine =
+{
+ "combine", /* name */
+ gate_handle_combine, /* gate */
+ rest_of_handle_combine, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_COMBINE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_func |
+ TODO_ggc_collect, /* todo_flags_finish */
+ 'c' /* letter */
+};
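+
+/* pass_combine is assumed to be registered with the pass manager
+   elsewhere (not shown in this patch), e.g. via
+   NEXT_PASS (pass_combine) in init_optimization_passes. */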
+