/* Medium-level subroutines: convert bit-field store and extract
and shifts, multiplies and divides to rtl instructions.
- Copyright (C) 1987, 88, 89, 92-6, 1997 Free Software Foundation, Inc.
+ Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
+ 1999, 2000 Free Software Foundation, Inc.
This file is part of GNU CC.
#include "config.h"
+#include "system.h"
+#include "toplev.h"
#include "rtl.h"
#include "tree.h"
+#include "tm_p.h"
#include "flags.h"
#include "insn-flags.h"
#include "insn-codes.h"
#include "real.h"
#include "recog.h"
-static void store_fixed_bit_field PROTO((rtx, int, int, int, rtx, int));
-static void store_split_bit_field PROTO((rtx, int, int, rtx, int));
-static rtx extract_fixed_bit_field PROTO((enum machine_mode, rtx, int,
- int, int, rtx, int, int));
-static rtx mask_rtx PROTO((enum machine_mode, int,
- int, int));
-static rtx lshift_value PROTO((enum machine_mode, rtx,
- int, int));
-static rtx extract_split_bit_field PROTO((rtx, int, int, int, int));
-
-#define CEIL(x,y) (((x) + (y) - 1) / (y))
+static void store_fixed_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT, rtx,
+ unsigned int));
+static void store_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT, rtx,
+ unsigned int));
+static rtx extract_fixed_bit_field PARAMS ((enum machine_mode, rtx,
+ unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ rtx, int, unsigned int));
+static rtx mask_rtx PARAMS ((enum machine_mode, int,
+ int, int));
+static rtx lshift_value PARAMS ((enum machine_mode, rtx,
+ int, int));
+static rtx extract_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT, int,
+ unsigned int));
+static void do_cmp_and_jump PARAMS ((rtx, rtx, enum rtx_code,
+ enum machine_mode, rtx));
/* Non-zero means divides or modulus operations are relatively cheap for
powers of two, so don't use branches; emit the operation instead.
static int sdiv_pow2_cheap, smod_pow2_cheap;
#ifndef SLOW_UNALIGNED_ACCESS
-#define SLOW_UNALIGNED_ACCESS STRICT_ALIGNMENT
+#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif
/* For compilers that support multiple targets with different word sizes,
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif
-/* Cost of various pieces of RTL. Note that some of these are indexed by shift count,
- and some by mode. */
+/* Cost of various pieces of RTL. Note that some of these are indexed by
+ shift count and some by mode. */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
void
init_expmed ()
{
- char *free_point;
/* This is "some random pseudo register" for purposes of calling recog
to see what insns exist. */
- rtx reg = gen_rtx (REG, word_mode, 10000);
+ rtx reg = gen_rtx_REG (word_mode, 10000);
rtx shift_insn, shiftadd_insn, shiftsub_insn;
int dummy;
int m;
start_sequence ();
- /* Since we are on the permanent obstack, we must be sure we save this
- spot AFTER we call start_sequence, since it will reuse the rtl it
- makes. */
-
- free_point = (char *) oballoc (0);
+ reg = gen_rtx_REG (word_mode, 10000);
zero_cost = rtx_cost (const0_rtx, 0);
- add_cost = rtx_cost (gen_rtx (PLUS, word_mode, reg, reg), SET);
-
- shift_insn = emit_insn (gen_rtx (SET, VOIDmode, reg,
- gen_rtx (ASHIFT, word_mode, reg,
- const0_rtx)));
-
- shiftadd_insn = emit_insn (gen_rtx (SET, VOIDmode, reg,
- gen_rtx (PLUS, word_mode,
- gen_rtx (MULT, word_mode,
- reg, const0_rtx),
- reg)));
-
- shiftsub_insn = emit_insn (gen_rtx (SET, VOIDmode, reg,
- gen_rtx (MINUS, word_mode,
- gen_rtx (MULT, word_mode,
- reg, const0_rtx),
- reg)));
+ add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);
+
+ shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
+ gen_rtx_ASHIFT (word_mode, reg,
+ const0_rtx)));
+
+ shiftadd_insn
+ = emit_insn (gen_rtx_SET (VOIDmode, reg,
+ gen_rtx_PLUS (word_mode,
+ gen_rtx_MULT (word_mode,
+ reg, const0_rtx),
+ reg)));
+
+ shiftsub_insn
+ = emit_insn (gen_rtx_SET (VOIDmode, reg,
+ gen_rtx_MINUS (word_mode,
+ gen_rtx_MULT (word_mode,
+ reg, const0_rtx),
+ reg)));
init_recog ();
shift_cost[0] = 0;
shiftadd_cost[0] = shiftsub_cost[0] = add_cost;
- for (m = 1; m < BITS_PER_WORD; m++)
+ for (m = 1; m < MAX_BITS_PER_WORD; m++)
{
shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;
shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
}
- negate_cost = rtx_cost (gen_rtx (NEG, word_mode, reg), SET);
+ negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);
sdiv_pow2_cheap
- = (rtx_cost (gen_rtx (DIV, word_mode, reg, GEN_INT (32)), SET)
+ = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
<= 2 * add_cost);
smod_pow2_cheap
- = (rtx_cost (gen_rtx (MOD, word_mode, reg, GEN_INT (32)), SET)
+ = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
<= 2 * add_cost);
for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
{
- reg = gen_rtx (REG, mode, 10000);
- div_cost[(int) mode] = rtx_cost (gen_rtx (UDIV, mode, reg, reg), SET);
- mul_cost[(int) mode] = rtx_cost (gen_rtx (MULT, mode, reg, reg), SET);
+ reg = gen_rtx_REG (mode, 10000);
+ div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
+ mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
wider_mode = GET_MODE_WIDER_MODE (mode);
if (wider_mode != VOIDmode)
{
mul_widen_cost[(int) wider_mode]
- = rtx_cost (gen_rtx (MULT, wider_mode,
- gen_rtx (ZERO_EXTEND, wider_mode, reg),
- gen_rtx (ZERO_EXTEND, wider_mode, reg)),
+ = rtx_cost (gen_rtx_MULT (wider_mode,
+ gen_rtx_ZERO_EXTEND (wider_mode, reg),
+ gen_rtx_ZERO_EXTEND (wider_mode, reg)),
SET);
mul_highpart_cost[(int) mode]
- = rtx_cost (gen_rtx (TRUNCATE, mode,
- gen_rtx (LSHIFTRT, wider_mode,
- gen_rtx (MULT, wider_mode,
- gen_rtx (ZERO_EXTEND, wider_mode, reg),
- gen_rtx (ZERO_EXTEND, wider_mode, reg)),
- GEN_INT (GET_MODE_BITSIZE (mode)))),
+ = rtx_cost (gen_rtx_TRUNCATE
+ (mode,
+ gen_rtx_LSHIFTRT (wider_mode,
+ gen_rtx_MULT (wider_mode,
+ gen_rtx_ZERO_EXTEND
+ (wider_mode, reg),
+ gen_rtx_ZERO_EXTEND
+ (wider_mode, reg)),
+ GEN_INT (GET_MODE_BITSIZE (mode)))),
SET);
}
}
- /* Free the objects we just allocated. */
end_sequence ();
- obfree (free_point);
}
/* Return an rtx representing minus the value of X.
into a bit-field within structure STR_RTX
containing BITSIZE bits starting at bit BITNUM.
FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
- ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
+ ALIGN is the alignment that STR_RTX is known to have.
TOTAL_SIZE is the size of the structure in bytes, or -1 if varying. */
/* ??? Note that there are two different ideas here for how
to determine the size to count bits within, for a register.
One is BITS_PER_WORD, and the other is the size of operand 3
- of the insv pattern. (The latter assumes that an n-bit machine
- will be able to insert bit fields up to n bits wide.)
- It isn't certain that either of these is right.
- extract_bit_field has the same quandary. */
+ of the insv pattern.
+
+ If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD
+ else, we use the mode of operand 3. */
rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
rtx str_rtx;
- register int bitsize;
- int bitnum;
+ unsigned HOST_WIDE_INT bitsize;
+ unsigned HOST_WIDE_INT bitnum;
enum machine_mode fieldmode;
rtx value;
- int align;
- int total_size;
+ unsigned int align;
+ HOST_WIDE_INT total_size;
{
- int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
- register int offset = bitnum / unit;
- register int bitpos = bitnum % unit;
+ unsigned int unit
+ = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
+ unsigned HOST_WIDE_INT offset = bitnum / unit;
+ unsigned HOST_WIDE_INT bitpos = bitnum % unit;
register rtx op0 = str_rtx;
+#ifdef HAVE_insv
+ unsigned HOST_WIDE_INT insv_bitsize;
+ enum machine_mode op_mode;
+
+ op_mode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
+ if (op_mode == VOIDmode)
+ op_mode = word_mode;
+ insv_bitsize = GET_MODE_BITSIZE (op_mode);
+#endif
if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
abort ();
op0 = SUBREG_REG (op0);
}
+ /* Make sure we are playing with integral modes. Pun with subregs
+ if we aren't. */
+ {
+ enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
+ if (imode != GET_MODE (op0))
+ {
+ if (GET_CODE (op0) == MEM)
+ op0 = change_address (op0, imode, NULL_RTX);
+ else if (imode != BLKmode)
+ op0 = gen_lowpart (imode, op0);
+ else
+ abort ();
+ }
+ }
+
/* If OP0 is a register, BITPOS must count within a word.
But as we have it, it counts within whatever size OP0 now has.
On a bigendian machine, these are not the same, so convert. */
if (flag_force_mem)
value = force_not_mem (value);
- /* Note that the adjustment of BITPOS above has no effect on whether
- BITPOS is 0 in a REG bigger than a word. */
- if (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
+ if ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
+ || (GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode)
+ && GET_MODE_SIZE (fieldmode) != 0))
&& (GET_CODE (op0) != MEM
- || ! SLOW_UNALIGNED_ACCESS
+ || ! SLOW_UNALIGNED_ACCESS (fieldmode, align)
|| (offset * BITS_PER_UNIT % bitsize == 0
- && align % GET_MODE_SIZE (fieldmode) == 0))
- && bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode))
+ && align % GET_MODE_BITSIZE (fieldmode) == 0))
+ && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
+ && bitsize == GET_MODE_BITSIZE (fieldmode))
{
/* Storing in a full-word or multi-word field in a register
+ can be done with just SUBREG. Also, storing in the entire object
can be done with just SUBREG. */
if (GET_MODE (op0) != fieldmode)
{
+ if (GET_CODE (op0) == SUBREG)
+ {
+ if (GET_MODE (SUBREG_REG (op0)) == fieldmode
+ || GET_MODE_CLASS (fieldmode) == MODE_INT
+ || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
+ op0 = SUBREG_REG (op0);
+ else
+ /* Else we've got some float mode source being extracted into
+ a different float mode destination -- this combination of
+ subregs results in Severe Tire Damage. */
+ abort ();
+ }
if (GET_CODE (op0) == REG)
- op0 = gen_rtx (SUBREG, fieldmode, op0, offset);
+ op0 = gen_rtx_SUBREG (fieldmode, op0, offset);
else
op0 = change_address (op0, fieldmode,
plus_constant (XEXP (op0, 0), offset));
if (GET_CODE (op0) != MEM
&& (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
&& bitsize == GET_MODE_BITSIZE (fieldmode)
- && (GET_MODE (op0) == fieldmode
- || (movstrict_optab->handlers[(int) fieldmode].insn_code
- != CODE_FOR_nothing)))
+ && (movstrict_optab->handlers[(int) fieldmode].insn_code
+ != CODE_FOR_nothing))
{
+ int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
+
/* Get appropriate low part of the value being stored. */
if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
value = gen_lowpart (fieldmode, value);
|| GET_CODE (value) == CONST))
value = convert_to_mode (fieldmode, value, 0);
- if (GET_MODE (op0) == fieldmode)
- emit_move_insn (op0, value);
- else
+ if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
+ value = copy_to_mode_reg (fieldmode, value);
+
+ if (GET_CODE (op0) == SUBREG)
{
- int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
- if(! (*insn_operand_predicate[icode][1]) (value, fieldmode))
- value = copy_to_mode_reg (fieldmode, value);
- emit_insn (GEN_FCN (icode)
- (gen_rtx (SUBREG, fieldmode, op0, offset), value));
+ if (GET_MODE (SUBREG_REG (op0)) == fieldmode
+ || GET_MODE_CLASS (fieldmode) == MODE_INT
+ || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
+ op0 = SUBREG_REG (op0);
+ else
+ /* Else we've got some float mode source being extracted into
+ a different float mode destination -- this combination of
+ subregs results in Severe Tire Damage. */
+ abort ();
}
+
+ emit_insn (GEN_FCN (icode)
+ (gen_rtx_SUBREG (fieldmode, op0, offset), value));
+
return value;
}
be less than full.
However, only do that if the value is not BLKmode. */
- int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
-
- int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
- int i;
+ unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
+ unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
+ unsigned int i;
/* This is the mode we must force value to, so that there will be enough
subwords to extract. Note that fieldmode will often (always?) be
{
/* If I is 0, use the low-order word in both field and target;
if I is 1, use the next to lowest word; and so on. */
- int wordnum = (backwards ? nwords - i - 1 : i);
- int bit_offset = (backwards
- ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
- : i * BITS_PER_WORD);
+ unsigned int wordnum = (backwards ? nwords - i - 1 : i);
+ unsigned int bit_offset = (backwards
+ ? MAX ((int) bitsize - ((int) i + 1)
+ * BITS_PER_WORD,
+ 0)
+ : (int) i * BITS_PER_WORD);
+
store_bit_field (op0, MIN (BITS_PER_WORD,
bitsize - i * BITS_PER_WORD),
bitnum + bit_offset, word_mode,
/* OFFSET is the number of words or bytes (UNIT says which)
from STR_RTX to the first word or byte containing part of the field. */
- if (GET_CODE (op0) == REG)
+ if (GET_CODE (op0) != MEM)
{
if (offset != 0
|| GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
- op0 = gen_rtx (SUBREG, TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
- op0, offset);
+ {
+ if (GET_CODE (op0) != REG)
+ {
+ /* Since this is a destination (lvalue), we can't copy it to a
+ pseudo. We can trivially remove a SUBREG that does not
+ change the size of the operand. Such a SUBREG may have been
+ added above. Otherwise, abort. */
+ if (GET_CODE (op0) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (op0))
+ == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
+ op0 = SUBREG_REG (op0);
+ else
+ abort ();
+ }
+ op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
+ op0, offset);
+ }
offset = 0;
}
else
{
if (GET_CODE (value) != REG)
value = copy_to_reg (value);
- value = gen_rtx (SUBREG, word_mode, value, 0);
+ value = gen_rtx_SUBREG (word_mode, value, 0);
}
/* Now OFFSET is nonzero only if OP0 is memory
&& GET_MODE (value) != BLKmode
&& !(bitsize == 1 && GET_CODE (value) == CONST_INT)
/* Ensure insv's size is wide enough for this field. */
- && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3])
- >= bitsize)
+ && (insv_bitsize >= bitsize)
&& ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
- && (bitsize + bitpos
- > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3]))))
+ && (bitsize + bitpos > insv_bitsize)))
{
int xbitpos = bitpos;
rtx value1;
rtx xop0 = op0;
rtx last = get_last_insn ();
rtx pat;
- enum machine_mode maxmode
- = insn_operand_mode[(int) CODE_FOR_insv][3];
-
+ enum machine_mode maxmode;
int save_volatile_ok = volatile_ok;
+
+ maxmode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
+ if (maxmode == VOIDmode)
+ maxmode = word_mode;
+
volatile_ok = 1;
/* If this machine's insv can only insert into a register, copy OP0
/* This used to check flag_force_mem, but that was a serious
de-optimization now that flag_force_mem is enabled by -O2. */
if (GET_CODE (op0) == MEM
- && ! ((*insn_operand_predicate[(int) CODE_FOR_insv][0])
+ && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
(op0, VOIDmode)))
{
rtx tempreg;
if (GET_MODE (op0) == BLKmode
|| GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
bestmode
- = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
+ = get_best_mode (bitsize, bitnum, align, maxmode,
MEM_VOLATILE_P (op0));
else
bestmode = GET_MODE (op0);
if (bestmode == VOIDmode
- || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
+ || (SLOW_UNALIGNED_ACCESS (bestmode, align)
+ && GET_MODE_BITSIZE (bestmode) > align))
goto insv_loses;
/* Adjust address to point to the containing unit of that mode. */
op0 = change_address (op0, bestmode,
plus_constant (XEXP (op0, 0), offset));
- /* Fetch that unit, store the bitfield in it, then store the unit. */
+ /* Fetch that unit, store the bitfield in it, then store
+ the unit. */
tempreg = copy_to_reg (op0);
store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
align, total_size);
if (GET_CODE (xop0) == SUBREG)
/* We can't just change the mode, because this might clobber op0,
and we will need the original value of op0 if insv fails. */
- xop0 = gen_rtx (SUBREG, maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
+ xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
- xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);
+ xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
/* On big-endian machines, we count bits from the most significant.
If the bit field insn does not, we must invert. */
{
/* Avoid making subreg of a subreg, or of a mem. */
if (GET_CODE (value1) != REG)
- value1 = copy_to_reg (value1);
- value1 = gen_rtx (SUBREG, maxmode, value1, 0);
+ value1 = copy_to_reg (value1);
+ value1 = gen_rtx_SUBREG (maxmode, value1, 0);
}
else
value1 = gen_lowpart (maxmode, value1);
/* If this machine's insv insists on a register,
get VALUE1 into a register. */
- if (! ((*insn_operand_predicate[(int) CODE_FOR_insv][3])
+ if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
(value1, maxmode)))
value1 = force_reg (maxmode, value1);
(If OP0 is a register, it may be a full word or a narrower mode,
but BITPOS still counts within a full word,
which is significant on bigendian machines.)
- STRUCT_ALIGN is the alignment the structure is known to have (in bytes).
+ STRUCT_ALIGN is the alignment the structure is known to have.
Note that protect_from_queue has already been done on OP0 and VALUE. */
static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
register rtx op0;
- register int offset, bitsize, bitpos;
+ unsigned HOST_WIDE_INT offset, bitsize, bitpos;
register rtx value;
- int struct_align;
+ unsigned int struct_align;
{
register enum machine_mode mode;
- int total_bits = BITS_PER_WORD;
+ unsigned int total_bits = BITS_PER_WORD;
rtx subtarget, temp;
int all_zero = 0;
int all_one = 0;
- if (! SLOW_UNALIGNED_ACCESS)
- struct_align = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ if (! SLOW_UNALIGNED_ACCESS (word_mode, struct_align))
+ struct_align = BIGGEST_ALIGNMENT;
/* There is a case not handled here:
a structure with a known alignment of just a halfword
a word, we won't be doing the extraction the normal way. */
mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
- struct_align * BITS_PER_UNIT, word_mode,
+ struct_align, word_mode,
GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));
if (mode == VOIDmode)
total_bits = GET_MODE_BITSIZE (mode);
/* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
- be be in the range 0 to total_bits-1, and put any excess bytes in
+ be in the range 0 to total_bits-1, and put any excess bytes in
OFFSET. */
if (bitpos >= total_bits)
{
BITSIZE is the field width; BITPOS the position of its first bit
(within the word).
VALUE is the value to store.
- ALIGN is the known alignment of OP0, measured in bytes.
+ ALIGN is the known alignment of OP0.
This is also the size of the memory objects to be used.
This does not yet handle fields wider than BITS_PER_WORD. */
static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
rtx op0;
- int bitsize, bitpos;
+ unsigned HOST_WIDE_INT bitsize, bitpos;
rtx value;
- int align;
+ unsigned int align;
{
- int unit;
- int bitsdone = 0;
+ unsigned int unit;
+ unsigned int bitsdone = 0;
/* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
much at a time. */
if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
unit = BITS_PER_WORD;
else
- unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);
+ unit = MIN (align, BITS_PER_WORD);
/* If VALUE is a constant other than a CONST_INT, get it into a register in
WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
? GET_MODE (value)
: word_mode, value));
}
+ else if (GET_CODE (value) == ADDRESSOF)
+ value = copy_to_reg (value);
while (bitsdone < bitsize)
{
- int thissize;
+ unsigned HOST_WIDE_INT thissize;
rtx part, word;
- int thispos;
- int offset;
+ unsigned HOST_WIDE_INT thispos;
+ unsigned HOST_WIDE_INT offset;
offset = (bitpos + bitsdone) / unit;
thispos = (bitpos + bitsdone) % unit;
GET_MODE (value) == VOIDmode
? UNITS_PER_WORD
: (GET_MODE (value) == BLKmode
- ? 1
- : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
+ ? 1 : GET_MODE_ALIGNMENT (GET_MODE (value))));
}
else
{
GET_MODE (value) == VOIDmode
? UNITS_PER_WORD
: (GET_MODE (value) == BLKmode
- ? 1
- : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
+ ? 1 : GET_MODE_ALIGNMENT (GET_MODE (value))));
}
/* If OP0 is a register, then handle OFFSET here.
TMODE is the mode the caller would like the value to have;
but the value may be returned with type MODE instead.
- ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
+ ALIGN is the alignment that STR_RTX is known to have.
TOTAL_SIZE is the size in bytes of the containing structure,
or -1 if varying.
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
target, mode, tmode, align, total_size)
rtx str_rtx;
- register int bitsize;
- int bitnum;
+ unsigned HOST_WIDE_INT bitsize;
+ unsigned HOST_WIDE_INT bitnum;
int unsignedp;
rtx target;
enum machine_mode mode, tmode;
- int align;
- int total_size;
+ unsigned int align;
+ HOST_WIDE_INT total_size;
{
- int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
- register int offset = bitnum / unit;
- register int bitpos = bitnum % unit;
+ unsigned int unit
+ = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
+ unsigned HOST_WIDE_INT offset = bitnum / unit;
+ unsigned HOST_WIDE_INT bitpos = bitnum % unit;
register rtx op0 = str_rtx;
rtx spec_target = target;
rtx spec_target_subreg = 0;
+ enum machine_mode int_mode;
+#ifdef HAVE_extv
+ unsigned HOST_WIDE_INT extv_bitsize;
+ enum machine_mode extv_mode;
+#endif
+#ifdef HAVE_extzv
+ unsigned HOST_WIDE_INT extzv_bitsize;
+ enum machine_mode extzv_mode;
+#endif
+
+#ifdef HAVE_extv
+ extv_mode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
+ if (extv_mode == VOIDmode)
+ extv_mode = word_mode;
+ extv_bitsize = GET_MODE_BITSIZE (extv_mode);
+#endif
+
+#ifdef HAVE_extzv
+ extzv_mode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
+ if (extzv_mode == VOIDmode)
+ extzv_mode = word_mode;
+ extzv_bitsize = GET_MODE_BITSIZE (extzv_mode);
+#endif
/* Discount the part of the structure before the desired byte.
We need to know how many bytes are safe to reference after it. */
offset += SUBREG_WORD (op0);
+ inner_size = MIN (inner_size, BITS_PER_WORD);
+
if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
{
bitpos += inner_size - outer_size;
op0 = SUBREG_REG (op0);
}
+ /* Make sure we are playing with integral modes. Pun with subregs
+ if we aren't. */
+ {
+ enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
+ if (imode != GET_MODE (op0))
+ {
+ if (GET_CODE (op0) == MEM)
+ op0 = change_address (op0, imode, NULL_RTX);
+ else if (imode != BLKmode)
+ op0 = gen_lowpart (imode, op0);
+ else
+ abort ();
+ }
+ }
+
/* ??? We currently assume TARGET is at least as big as BITSIZE.
If that's wrong, the solution is to test for it and set TARGET to 0
if needed. */
/* If OP0 is a register, BITPOS must count within a word.
But as we have it, it counts within whatever size OP0 now has.
On a bigendian machine, these are not the same, so convert. */
- if (BYTES_BIG_ENDIAN &&
- GET_CODE (op0) != MEM
+ if (BYTES_BIG_ENDIAN
+ && GET_CODE (op0) != MEM
&& unit > GET_MODE_BITSIZE (GET_MODE (op0)))
bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
So too extracting a subword value in
the least significant part of the register. */
- if (((GET_CODE (op0) == REG
+ if (((GET_CODE (op0) != MEM
&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
GET_MODE_BITSIZE (GET_MODE (op0))))
|| (GET_CODE (op0) == MEM
- && (! SLOW_UNALIGNED_ACCESS
+ && (! SLOW_UNALIGNED_ACCESS (mode, align)
|| (offset * BITS_PER_UNIT % bitsize == 0
- && align * BITS_PER_UNIT % bitsize == 0))))
+ && align % bitsize == 0))))
&& ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
&& bitpos % BITS_PER_WORD == 0)
|| (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
+ /* ??? The big endian test here is wrong. This is correct
+ if the value is in a register, and if mode_for_size is not
+ the same mode as op0. This causes us to get unnecessarily
+ inefficient code from the Thumb port when -mbig-endian. */
&& (BYTES_BIG_ENDIAN
? bitpos + bitsize == BITS_PER_WORD
: bitpos == 0))))
{
enum machine_mode mode1
- = mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);
+ = (VECTOR_MODE_P (tmode) ? mode
+ : mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0));
if (mode1 != GET_MODE (op0))
{
+ if (GET_CODE (op0) == SUBREG)
+ {
+ if (GET_MODE (SUBREG_REG (op0)) == mode1
+ || GET_MODE_CLASS (mode1) == MODE_INT
+ || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
+ op0 = SUBREG_REG (op0);
+ else
+ /* Else we've got some float mode source being extracted into
+ a different float mode destination -- this combination of
+ subregs results in Severe Tire Damage. */
+ abort ();
+ }
if (GET_CODE (op0) == REG)
- op0 = gen_rtx (SUBREG, mode1, op0, offset);
+ op0 = gen_rtx_SUBREG (mode1, op0, offset);
else
op0 = change_address (op0, mode1,
plus_constant (XEXP (op0, 0), offset));
This is because the most significant word is the one which may
be less than full. */
- int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
- int i;
+ unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
+ unsigned int i;
if (target == 0 || GET_CODE (target) != REG)
target = gen_reg_rtx (mode);
/* Indicate for flow that the entire target reg is being set. */
- emit_insn (gen_rtx (CLOBBER, VOIDmode, target));
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
for (i = 0; i < nwords; i++)
{
/* If I is 0, use the low-order word in both field and target;
if I is 1, use the next to lowest word; and so on. */
/* Word number in TARGET to use. */
- int wordnum = (WORDS_BIG_ENDIAN
- ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
- : i);
+ unsigned int wordnum
+ = (WORDS_BIG_ENDIAN
+ ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
+ : i);
/* Offset from start of field in OP0. */
- int bit_offset = (WORDS_BIG_ENDIAN
- ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
- : i * BITS_PER_WORD);
+ unsigned int bit_offset = (WORDS_BIG_ENDIAN
+ ? MAX (0, ((int) bitsize - ((int) i + 1)
+ * (int) BITS_PER_WORD))
+ : (int) i * BITS_PER_WORD);
rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
rtx result_part
= extract_bit_field (op0, MIN (BITS_PER_WORD,
bitsize - i * BITS_PER_WORD),
- bitnum + bit_offset,
- 1, target_part, mode, word_mode,
- align, total_size);
+ bitnum + bit_offset, 1, target_part, mode,
+ word_mode, align, total_size);
if (target_part == 0)
abort ();
need to be zero'd out. */
if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
{
- int i,total_words;
+ unsigned int i, total_words;
total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
for (i = nwords; i < total_words; i++)
NULL_RTX, 0);
}
- /* From here on we know the desired field is smaller than a word
- so we can assume it is an integer. So we can safely extract it as one
- size of integer, if necessary, and then truncate or extend
- to the size that is wanted. */
+ /* From here on we know the desired field is smaller than a word. */
+
+ /* Check if there is a correspondingly-sized integer field, so we can
+ safely extract it as one size of integer, if necessary; then
+ truncate or extend to the size that is wanted; then use SUBREGs or
+ convert_to_mode to get one of the modes we really wanted. */
+
+ int_mode = int_mode_for_mode (tmode);
+ if (int_mode == BLKmode)
+ int_mode = int_mode_for_mode (mode);
+ if (int_mode == BLKmode)
+ abort(); /* Should probably push op0 out to memory and then
+ do a load. */
/* OFFSET is the number of words or bytes (UNIT says which)
from STR_RTX to the first word or byte containing part of the field. */
- if (GET_CODE (op0) == REG)
+ if (GET_CODE (op0) != MEM)
{
if (offset != 0
|| GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
- op0 = gen_rtx (SUBREG, TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
- op0, offset);
+ {
+ if (GET_CODE (op0) != REG)
+ op0 = copy_to_reg (op0);
+ op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
+ op0, offset);
+ }
offset = 0;
}
else
{
#ifdef HAVE_extzv
if (HAVE_extzv
- && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0])
- >= bitsize)
+ && (extzv_bitsize >= bitsize)
&& ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
- && (bitsize + bitpos
- > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0]))))
+ && (bitsize + bitpos > extzv_bitsize)))
{
- int xbitpos = bitpos, xoffset = offset;
+ unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
rtx bitsize_rtx, bitpos_rtx;
- rtx last = get_last_insn();
+ rtx last = get_last_insn ();
rtx xop0 = op0;
rtx xtarget = target;
rtx xspec_target = spec_target;
rtx xspec_target_subreg = spec_target_subreg;
rtx pat;
- enum machine_mode maxmode
- = insn_operand_mode[(int) CODE_FOR_extzv][0];
+ enum machine_mode maxmode;
+
+ maxmode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
+ if (maxmode == VOIDmode)
+ maxmode = word_mode;
if (GET_CODE (xop0) == MEM)
{
volatile_ok = 1;
/* Is the memory operand acceptable? */
- if (flag_force_mem
- || ! ((*insn_operand_predicate[(int) CODE_FOR_extzv][1])
- (xop0, GET_MODE (xop0))))
+ if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
+ (xop0, GET_MODE (xop0))))
{
/* No, load into a reg and extract from there. */
enum machine_mode bestmode;
if (GET_MODE (xop0) == BLKmode
|| (GET_MODE_SIZE (GET_MODE (op0))
> GET_MODE_SIZE (maxmode)))
- bestmode = get_best_mode (bitsize, bitnum,
- align * BITS_PER_UNIT, maxmode,
+ bestmode = get_best_mode (bitsize, bitnum, align, maxmode,
MEM_VOLATILE_P (xop0));
else
bestmode = GET_MODE (xop0);
if (bestmode == VOIDmode
- || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
+ || (SLOW_UNALIGNED_ACCESS (bestmode, align)
+ && GET_MODE_BITSIZE (bestmode) > align))
goto extzv_loses;
/* Compute offset as multiple of this unit,
/* If op0 is a register, we need it in MAXMODE (which is usually
SImode). to make it acceptable to the format of extzv. */
if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
- abort ();
+ goto extzv_loses;
if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
- xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);
+ xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
/* On big-endian machines, we count bits from the most significant.
If the bit field insn does not, we must invert. */
/* If this machine's extzv insists on a register target,
make sure we have one. */
- if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
+ if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
(xtarget, maxmode)))
xtarget = gen_reg_rtx (maxmode);
else
{
delete_insns_since (last);
- target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
+ target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
bitpos, target, 1, align);
}
}
else
extzv_loses:
#endif
- target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
- target, 1, align);
+ target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
+ bitpos, target, 1, align);
}
else
{
#ifdef HAVE_extv
if (HAVE_extv
- && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0])
- >= bitsize)
+ && (extv_bitsize >= bitsize)
&& ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
- && (bitsize + bitpos
- > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0]))))
+ && (bitsize + bitpos > extv_bitsize)))
{
int xbitpos = bitpos, xoffset = offset;
rtx bitsize_rtx, bitpos_rtx;
- rtx last = get_last_insn();
+ rtx last = get_last_insn ();
rtx xop0 = op0, xtarget = target;
rtx xspec_target = spec_target;
rtx xspec_target_subreg = spec_target_subreg;
rtx pat;
- enum machine_mode maxmode
- = insn_operand_mode[(int) CODE_FOR_extv][0];
+ enum machine_mode maxmode;
+
+ maxmode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
+ if (maxmode == VOIDmode)
+ maxmode = word_mode;
if (GET_CODE (xop0) == MEM)
{
/* Is the memory operand acceptable? */
- if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][1])
+ if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
(xop0, GET_MODE (xop0))))
{
/* No, load into a reg and extract from there. */
if (GET_MODE (xop0) == BLKmode
|| (GET_MODE_SIZE (GET_MODE (op0))
> GET_MODE_SIZE (maxmode)))
- bestmode = get_best_mode (bitsize, bitnum,
- align * BITS_PER_UNIT, maxmode,
+ bestmode = get_best_mode (bitsize, bitnum, align, maxmode,
MEM_VOLATILE_P (xop0));
else
bestmode = GET_MODE (xop0);
if (bestmode == VOIDmode
- || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
+ || (SLOW_UNALIGNED_ACCESS (bestmode, align)
+ && GET_MODE_BITSIZE (bestmode) > align))
goto extv_loses;
/* Compute offset as multiple of this unit,
/* If op0 is a register, we need it in MAXMODE (which is usually
SImode) to make it acceptable to the format of extv. */
if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
- abort ();
+ goto extv_loses;
if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
- xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);
+ xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
/* On big-endian machines, we count bits from the most significant.
If the bit field insn does not, we must invert. */
/* If this machine's extv insists on a register target,
make sure we have one. */
- if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][0])
+ if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
(xtarget, maxmode)))
xtarget = gen_reg_rtx (maxmode);
else
{
delete_insns_since (last);
- target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
+ target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
bitpos, target, 0, align);
}
}
else
extv_loses:
#endif
- target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
- target, 0, align);
+ target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
+ bitpos, target, 0, align);
}
if (target == spec_target)
return target;
target, unsignedp);
if (GET_CODE (target) != REG)
target = copy_to_reg (target);
- return gen_rtx (SUBREG, tmode, target, 0);
+ return gen_rtx_SUBREG (tmode, target, 0);
}
else
return convert_to_mode (tmode, target, unsignedp);
and return TARGET, but this is not guaranteed.
If TARGET is not used, create a pseudo-reg of mode TMODE for the value.
- ALIGN is the alignment that STR_RTX is known to have, measured in bytes. */
+ ALIGN is the alignment that STR_RTX is known to have. */
static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
target, unsignedp, align)
enum machine_mode tmode;
register rtx op0, target;
- register int offset, bitsize, bitpos;
+ unsigned HOST_WIDE_INT offset, bitsize, bitpos;
int unsignedp;
- int align;
+ unsigned int align;
{
- int total_bits = BITS_PER_WORD;
+ unsigned int total_bits = BITS_PER_WORD;
enum machine_mode mode;
if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
includes the entire field. If such a mode would be larger than
a word, we won't be doing the extraction the normal way. */
- mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
- align * BITS_PER_UNIT, word_mode,
+ mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT, align,
+ word_mode,
GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));
if (mode == VOIDmode)
total_bits = GET_MODE_BITSIZE (mode);
/* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
- be be in the range 0 to total_bits-1, and put any excess bytes in
+ be in the range 0 to total_bits-1, and put any excess bytes in
OFFSET. */
if (bitpos >= total_bits)
{
BITSIZE is the field width; BITPOS, position of its first bit, in the word.
UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.
- ALIGN is the known alignment of OP0, measured in bytes.
- This is also the size of the memory objects to be used. */
+ ALIGN is the known alignment of OP0. This is also the size of the
+ memory objects to be used. */
static rtx
extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
rtx op0;
- int bitsize, bitpos, unsignedp, align;
+ unsigned HOST_WIDE_INT bitsize, bitpos;
+ int unsignedp;
+ unsigned int align;
{
- int unit;
- int bitsdone = 0;
- rtx result;
+ unsigned int unit;
+ unsigned int bitsdone = 0;
+ rtx result = NULL_RTX;
int first = 1;
/* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
unit = BITS_PER_WORD;
else
- unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);
+ unit = MIN (align, BITS_PER_WORD);
while (bitsdone < bitsize)
{
- int thissize;
+ unsigned HOST_WIDE_INT thissize;
rtx part, word;
- int thispos;
- int offset;
+ unsigned HOST_WIDE_INT thispos;
+ unsigned HOST_WIDE_INT offset;
offset = (bitpos + bitsdone) / unit;
thispos = (bitpos + bitsdone) % unit;
op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
#ifdef SHIFT_COUNT_TRUNCATED
- if (SHIFT_COUNT_TRUNCATED
- && GET_CODE (op1) == CONST_INT
- && (unsigned HOST_WIDE_INT) INTVAL (op1) >= GET_MODE_BITSIZE (mode))
- op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
- % GET_MODE_BITSIZE (mode));
+ if (SHIFT_COUNT_TRUNCATED)
+ {
+ if (GET_CODE (op1) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
+ (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
+ op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
+ % GET_MODE_BITSIZE (mode));
+ else if (GET_CODE (op1) == SUBREG
+ && SUBREG_WORD (op1) == 0)
+ op1 = SUBREG_REG (op1);
+ }
#endif
if (op1 == const0_rtx)
char log[MAX_BITS_PER_WORD];
};
+static void synth_mult PARAMS ((struct algorithm *,
+ unsigned HOST_WIDE_INT,
+ int));
+static unsigned HOST_WIDE_INT choose_multiplier PARAMS ((unsigned HOST_WIDE_INT,
+ int, int,
+ unsigned HOST_WIDE_INT *,
+ int *, int *));
+static unsigned HOST_WIDE_INT invert_mod2n PARAMS ((unsigned HOST_WIDE_INT,
+ int));
/* Compute and return the best algorithm for multiplying by T.
The algorithm must cost less than cost_limit
If retval.cost >= COST_LIMIT, no algorithm was found and all
{
int m;
struct algorithm *alg_in, *best_alg;
- unsigned int cost;
+ int cost;
unsigned HOST_WIDE_INT q;
/* Indicate that no algorithm is yet found. If no algorithm
for (w = 1; (w & t) != 0; w <<= 1)
;
- if (w > 2
- /* Reject the case where t is 3.
- Thus we prefer addition in that case. */
- && t != 3)
+ /* If T was -1, then W will be zero after the loop. This is another
+ case where T ends with ...111. Handling this with (T + 1) and
+ subtract 1 produces slightly better code and results in algorithm
+ selection much faster than treating it like the ...0111 case
+ below. */
+ if (w == 0
+ || (w > 2
+ /* Reject the case where t is 3.
+ Thus we prefer addition in that case. */
+ && t != 3))
{
/* T ends with ...111. Multiply by (T + 1) and subtract 1. */
and then negate, do the multiplication directly, or do multiplication
by OP1 - 1. */
- mult_cost = rtx_cost (gen_rtx (MULT, mode, op0, op1), SET);
+ mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
mult_cost = MIN (12 * add_cost, mult_cost);
synth_mult (&alg, val, mult_cost);
rtx shift_subtarget = preserve ? 0 : accum;
rtx add_target
= (opno == alg.ops - 1 && target != 0 && variant != add_variant
- ? target : 0);
+ && ! preserve)
+ ? target : 0;
rtx accum_target = preserve ? 0 : accum;
switch (alg.op[opno])
case alg_add_t_m2:
tem = expand_shift (LSHIFT_EXPR, mode, op0,
build_int_2 (log, 0), NULL_RTX, 0);
- accum = force_operand (gen_rtx (PLUS, mode, accum, tem),
- add_target ? add_target : accum_target);
+ accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
+ add_target
+ ? add_target : accum_target);
val_so_far += (HOST_WIDE_INT) 1 << log;
break;
case alg_sub_t_m2:
tem = expand_shift (LSHIFT_EXPR, mode, op0,
build_int_2 (log, 0), NULL_RTX, 0);
- accum = force_operand (gen_rtx (MINUS, mode, accum, tem),
- add_target ? add_target : accum_target);
+ accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
+ add_target
+ ? add_target : accum_target);
val_so_far -= (HOST_WIDE_INT) 1 << log;
break;
accum = expand_shift (LSHIFT_EXPR, mode, accum,
build_int_2 (log, 0), shift_subtarget,
0);
- accum = force_operand (gen_rtx (PLUS, mode, accum, op0),
- add_target ? add_target : accum_target);
+ accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
+ add_target
+ ? add_target : accum_target);
val_so_far = (val_so_far << log) + 1;
break;
accum = expand_shift (LSHIFT_EXPR, mode, accum,
build_int_2 (log, 0), shift_subtarget,
0);
- accum = force_operand (gen_rtx (MINUS, mode, accum, op0),
- add_target ? add_target : accum_target);
+ accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
+ add_target
+ ? add_target : accum_target);
val_so_far = (val_so_far << log) - 1;
break;
case alg_add_factor:
tem = expand_shift (LSHIFT_EXPR, mode, accum,
build_int_2 (log, 0), NULL_RTX, 0);
- accum = force_operand (gen_rtx (PLUS, mode, accum, tem),
- add_target ? add_target : accum_target);
+ accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
+ add_target
+ ? add_target : accum_target);
val_so_far += val_so_far << log;
break;
case alg_sub_factor:
tem = expand_shift (LSHIFT_EXPR, mode, accum,
build_int_2 (log, 0), NULL_RTX, 0);
- accum = force_operand (gen_rtx (MINUS, mode, tem, accum),
+ accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
(add_target ? add_target
: preserve ? 0 : tem));
val_so_far = (val_so_far << log) - val_so_far;
break;
default:
- abort ();;
+ abort ();
}
/* Write a REG_EQUAL note on the last insn so that we can cse
multiplication sequences. */
insn = get_last_insn ();
- REG_NOTES (insn)
- = gen_rtx (EXPR_LIST, REG_EQUAL,
- gen_rtx (MULT, mode, op0, GEN_INT (val_so_far)),
- REG_NOTES (insn));
+ set_unique_reg_note (insn,
+ REG_EQUAL,
+ gen_rtx_MULT (mode, op0,
+ GEN_INT (val_so_far)));
}
if (variant == negate_variant)
else if (variant == add_variant)
{
val_so_far = val_so_far + 1;
- accum = force_operand (gen_rtx (PLUS, mode, accum, op0), target);
+ accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
}
if (val != val_so_far)
int *post_shift_ptr;
int *lgup_ptr;
{
- unsigned HOST_WIDE_INT mhigh_hi, mhigh_lo;
- unsigned HOST_WIDE_INT mlow_hi, mlow_lo;
+ HOST_WIDE_INT mhigh_hi, mlow_hi;
+ unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
int lgup, post_shift;
int pow, pow2;
- unsigned HOST_WIDE_INT nh, nl, dummy1, dummy2;
+ unsigned HOST_WIDE_INT nl, dummy1;
+ HOST_WIDE_INT nh, dummy2;
/* lgup = ceil(log2(divisor)); */
lgup = ceil_log2 (d);
/* mlow = 2^(N + lgup)/d */
if (pow >= HOST_BITS_PER_WIDE_INT)
{
- nh = (unsigned HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
+ nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
nl = 0;
}
else
/* mhigh = (2^(N + lgup) + 2^N + lgup - precision)/d */
if (pow2 >= HOST_BITS_PER_WIDE_INT)
- nh |= (unsigned HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
+ nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
else
nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
NULL_RTX, 0);
tem = expand_and (tem, op1, NULL_RTX);
- adj_operand = force_operand (gen_rtx (adj_code, mode, adj_operand, tem),
- adj_operand);
+ adj_operand
+ = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
+ adj_operand);
tem = expand_shift (RSHIFT_EXPR, mode, op1,
build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
NULL_RTX, 0);
tem = expand_and (tem, op0, NULL_RTX);
- target = force_operand (gen_rtx (adj_code, mode, adj_operand, tem), target);
+ target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
+ target);
return target;
}
{
mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
target = expand_binop (mode, mul_highpart_optab,
- op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
+ op0, op1, target, unsignedp, OPTAB_DIRECT);
if (target)
return target;
}
{
mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
target = expand_binop (mode, mul_highpart_optab,
- op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
+ op0, op1, target, unsignedp, OPTAB_DIRECT);
if (target)
/* We used the wrong signedness. Adjust the result. */
return expand_mult_highpart_adjust (mode, target, op0,
This could optimize to a bfexts instruction.
But C doesn't use these operations, so their optimizations are
left for later. */
+/* ??? For modulo, we don't actually need the highpart of the first product,
+ the low part will do nicely. And for small divisors, the second multiply
+ can also be a low-part only multiply or even be completely left out.
+ E.g. to calculate the remainder of a division by 3 with a 32 bit
+ multiply, multiply with 0x55555556 and extract the upper two bits;
+ the result is exact for inputs up to 0x1fffffff.
+ The input range can be reduced by using cross-sum rules.
+ For odd divisors >= 3, the following table gives right shift counts
+ so that if a number is shifted by an integer multiple of the given
+ amount, the remainder stays the same:
+ 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
+ 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
+ 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
+ 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
+ 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
+
+ Cross-sum rules for even numbers can be derived by leaving as many bits
+ to the right alone as the divisor has zeros to the right.
+ E.g. if x is an unsigned 32 bit number:
+ (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
+ */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
optab optab1, optab2;
int op1_is_constant, op1_is_pow2;
int max_cost, extra_cost;
+ static HOST_WIDE_INT last_div_const = 0;
op1_is_constant = GET_CODE (op1) == CONST_INT;
op1_is_pow2 = (op1_is_constant
&& ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
- || EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))));
+ || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
/*
This is the structure of expand_divmod:
size = GET_MODE_BITSIZE (mode);
#endif
+ /* Only deduct something for a REM if the last divide done was
+ for a different constant. Then set the constant of the last
+ divide. */
max_cost = div_cost[(int) compute_mode]
- - (rem_flag ? mul_cost[(int) compute_mode] + add_cost : 0);
+ - (rem_flag && ! (last_div_const != 0 && op1_is_constant
+ && INTVAL (op1) == last_div_const)
+ ? mul_cost[(int) compute_mode] + add_cost : 0);
+
+ last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
/* Now convert to the best mode to use. */
if (compute_mode != mode)
{
op0 = convert_modes (compute_mode, mode, op0, unsignedp);
op1 = convert_modes (compute_mode, mode, op1, unsignedp);
+
+ /* convert_modes may have placed op1 into a register, so we
+ must recompute the following. */
+ op1_is_constant = GET_CODE (op1) == CONST_INT;
+ op1_is_pow2 = (op1_is_constant
+ && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
+ || (! unsignedp
+ && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
}
/* If one of the operands is a volatile MEM, copy it into a register. */
code = TRUNC_DIV_EXPR;
if (code == FLOOR_MOD_EXPR)
code = TRUNC_MOD_EXPR;
+ if (code == EXACT_DIV_EXPR && op1_is_pow2)
+ code = TRUNC_DIV_EXPR;
}
if (op1 != const0_rtx)
pre_shift = floor_log2 (d);
if (rem_flag)
{
- remainder =
- expand_binop (compute_mode, and_optab, op0,
- GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
- remainder, 1,
- OPTAB_LIB_WIDEN);
+ remainder
+ = expand_binop (compute_mode, and_optab, op0,
+ GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
+ remainder, 1,
+ OPTAB_LIB_WIDEN);
if (remainder)
return gen_lowpart (mode, remainder);
}
max_cost - extra_cost);
if (t1 == 0)
goto fail1;
- t2 = force_operand (gen_rtx (MINUS, compute_mode,
- op0, t1),
+ t2 = force_operand (gen_rtx_MINUS (compute_mode,
+ op0, t1),
NULL_RTX);
t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
build_int_2 (1, 0), NULL_RTX,1);
- t4 = force_operand (gen_rtx (PLUS, compute_mode,
- t1, t3),
+ t4 = force_operand (gen_rtx_PLUS (compute_mode,
+ t1, t3),
NULL_RTX);
- quotient =
- expand_shift (RSHIFT_EXPR, compute_mode, t4,
- build_int_2 (post_shift - 1, 0),
- tquotient, 1);
+ quotient
+ = expand_shift (RSHIFT_EXPR, compute_mode, t4,
+ build_int_2 (post_shift - 1, 0),
+ tquotient, 1);
}
else
{
max_cost - extra_cost);
if (t2 == 0)
goto fail1;
- quotient =
- expand_shift (RSHIFT_EXPR, compute_mode, t2,
- build_int_2 (post_shift, 0),
- tquotient, 1);
+ quotient
+ = expand_shift (RSHIFT_EXPR, compute_mode, t2,
+ build_int_2 (post_shift, 0),
+ tquotient, 1);
}
}
}
if (insn != last
&& (set = single_set (insn)) != 0
&& SET_DEST (set) == quotient)
- REG_NOTES (insn)
- = gen_rtx (EXPR_LIST, REG_EQUAL,
- gen_rtx (UDIV, compute_mode, op0, op1),
- REG_NOTES (insn));
+ set_unique_reg_note (insn,
+ REG_EQUAL,
+ gen_rtx_UDIV (compute_mode, op0, op1));
}
else /* TRUNC_DIV, signed */
{
rtx t1;
t1 = copy_to_mode_reg (compute_mode, op0);
- emit_cmp_insn (t1, const0_rtx, GE,
- NULL_RTX, compute_mode, 0, 0);
- emit_jump_insn (gen_bge (label));
+ do_cmp_and_jump (t1, const0_rtx, GE,
+ compute_mode, label);
expand_inc (t1, GEN_INT (abs_d - 1));
emit_label (label);
quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
build_int_2 (size - lgup, 0),
NULL_RTX, 1);
- t3 = force_operand (gen_rtx (PLUS, compute_mode,
- op0, t2),
+ t3 = force_operand (gen_rtx_PLUS (compute_mode,
+ op0, t2),
NULL_RTX);
quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
build_int_2 (lgup, 0),
insn = get_last_insn ();
if (insn != last
&& (set = single_set (insn)) != 0
- && SET_DEST (set) == quotient)
- REG_NOTES (insn)
- = gen_rtx (EXPR_LIST, REG_EQUAL,
- gen_rtx (DIV, compute_mode, op0,
- GEN_INT (abs_d)),
- REG_NOTES (insn));
+ && SET_DEST (set) == quotient
+ && abs_d < ((unsigned HOST_WIDE_INT) 1
+ << (HOST_BITS_PER_WIDE_INT - 1)))
+ set_unique_reg_note (insn,
+ REG_EQUAL,
+ gen_rtx_DIV (compute_mode,
+ op0,
+ GEN_INT (abs_d)));
quotient = expand_unop (compute_mode, neg_optab,
quotient, quotient, 0);
t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
build_int_2 (size - 1, 0), NULL_RTX, 0);
if (d < 0)
- quotient = force_operand (gen_rtx (MINUS, compute_mode, t3, t2),
- tquotient);
+ quotient
+ = force_operand (gen_rtx_MINUS (compute_mode,
+ t3, t2),
+ tquotient);
else
- quotient = force_operand (gen_rtx (MINUS, compute_mode, t2, t3),
- tquotient);
+ quotient
+ = force_operand (gen_rtx_MINUS (compute_mode,
+ t2, t3),
+ tquotient);
}
else
{
max_cost - extra_cost);
if (t1 == 0)
goto fail1;
- t2 = force_operand (gen_rtx (PLUS, compute_mode, t1, op0),
+ t2 = force_operand (gen_rtx_PLUS (compute_mode,
+ t1, op0),
NULL_RTX);
t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
- build_int_2 (post_shift, 0), NULL_RTX, 0);
+ build_int_2 (post_shift, 0),
+ NULL_RTX, 0);
t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
- build_int_2 (size - 1, 0), NULL_RTX, 0);
+ build_int_2 (size - 1, 0),
+ NULL_RTX, 0);
if (d < 0)
- quotient = force_operand (gen_rtx (MINUS, compute_mode, t4, t3),
- tquotient);
+ quotient
+ = force_operand (gen_rtx_MINUS (compute_mode,
+ t4, t3),
+ tquotient);
else
- quotient = force_operand (gen_rtx (MINUS, compute_mode, t3, t4),
- tquotient);
+ quotient
+ = force_operand (gen_rtx_MINUS (compute_mode,
+ t3, t4),
+ tquotient);
}
}
else /* Too wide mode to use tricky code */
if (insn != last
&& (set = single_set (insn)) != 0
&& SET_DEST (set) == quotient)
- REG_NOTES (insn)
- = gen_rtx (EXPR_LIST, REG_EQUAL,
- gen_rtx (DIV, compute_mode, op0, op1),
- REG_NOTES (insn));
+ set_unique_reg_note (insn,
+ REG_EQUAL,
+ gen_rtx_DIV (compute_mode, op0, op1));
}
break;
}
else
{
rtx nsign, t1, t2, t3, t4;
- t1 = force_operand (gen_rtx (PLUS, compute_mode,
- op0, constm1_rtx), NULL_RTX);
+ t1 = force_operand (gen_rtx_PLUS (compute_mode,
+ op0, constm1_rtx), NULL_RTX);
t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
0, OPTAB_WIDEN);
nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
build_int_2 (size - 1, 0), NULL_RTX, 0);
- t3 = force_operand (gen_rtx (MINUS, compute_mode, t1, nsign),
+ t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
NULL_RTX);
t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
NULL_RTX, 0);
rtx t5;
t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
NULL_RTX, 0);
- quotient = force_operand (gen_rtx (PLUS, compute_mode,
- t4, t5),
+ quotient = force_operand (gen_rtx_PLUS (compute_mode,
+ t4, t5),
tquotient);
}
}
Save that for later. */
rtx tem;
rtx label = gen_label_rtx ();
- emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
- compute_mode, 0, 0);
- emit_jump_insn (gen_beq (label));
+ do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
tem = expand_binop (compute_mode, xor_optab, op0, op1,
NULL_RTX, 0, OPTAB_WIDEN);
- emit_cmp_insn (tem, const0_rtx, GE, NULL_RTX, compute_mode, 0, 0);
- emit_jump_insn (gen_bge (label));
+ do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
expand_dec (quotient, const1_rtx);
expand_inc (remainder, op1);
emit_label (label);
label3 = gen_label_rtx ();
label4 = gen_label_rtx ();
label5 = gen_label_rtx ();
- emit_cmp_insn (op1, const0_rtx, LT, NULL_RTX, compute_mode, 0, 0);
- emit_jump_insn (gen_blt (label2));
- emit_cmp_insn (adjusted_op0, const0_rtx, LT, NULL_RTX,
- compute_mode, 0, 0);
- emit_jump_insn (gen_blt (label1));
+ do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
+ do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
quotient, 0, OPTAB_LIB_WIDEN);
if (tem != quotient)
emit_jump_insn (gen_jump (label4));
emit_barrier ();
emit_label (label2);
- emit_cmp_insn (adjusted_op0, const0_rtx, GT, NULL_RTX,
- compute_mode, 0, 0);
- emit_jump_insn (gen_bgt (label3));
+ do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
quotient, 0, OPTAB_LIB_WIDEN);
if (tem != quotient)
{
rtx lab;
lab = gen_label_rtx ();
- emit_cmp_insn (t2, const0_rtx, EQ, NULL_RTX,
- compute_mode, 0, 0);
- emit_jump_insn (gen_beq (lab));
+ do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
expand_inc (t1, const1_rtx);
emit_label (lab);
quotient = t1;
}
else
- quotient = force_operand (gen_rtx (PLUS, compute_mode,
- t1, t3),
+ quotient = force_operand (gen_rtx_PLUS (compute_mode,
+ t1, t3),
tquotient);
break;
}
/* This could be computed with a branch-less sequence.
Save that for later. */
rtx label = gen_label_rtx ();
- emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
- compute_mode, 0, 0);
- emit_jump_insn (gen_beq (label));
+ do_cmp_and_jump (remainder, const0_rtx, EQ,
+ compute_mode, label);
expand_inc (quotient, const1_rtx);
expand_dec (remainder, op1);
emit_label (label);
adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
label1 = gen_label_rtx ();
label2 = gen_label_rtx ();
- emit_cmp_insn (adjusted_op0, const0_rtx, NE, NULL_RTX,
- compute_mode, 0, 0);
- emit_jump_insn (gen_bne (label1));
+ do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
+ compute_mode, label1);
emit_move_insn (quotient, const0_rtx);
emit_jump_insn (gen_jump (label2));
emit_barrier ();
{
rtx lab;
lab = gen_label_rtx ();
- emit_cmp_insn (t2, const0_rtx, EQ, NULL_RTX,
- compute_mode, 0, 0);
- emit_jump_insn (gen_beq (lab));
+ do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
expand_inc (t1, const1_rtx);
emit_label (lab);
quotient = t1;
}
else
- quotient = force_operand (gen_rtx (PLUS, compute_mode,
- t1, t3),
+ quotient = force_operand (gen_rtx_PLUS (compute_mode,
+ t1, t3),
tquotient);
break;
}
Save that for later. */
rtx tem;
rtx label = gen_label_rtx ();
- emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
- compute_mode, 0, 0);
- emit_jump_insn (gen_beq (label));
+ do_cmp_and_jump (remainder, const0_rtx, EQ,
+ compute_mode, label);
tem = expand_binop (compute_mode, xor_optab, op0, op1,
NULL_RTX, 0, OPTAB_WIDEN);
- emit_cmp_insn (tem, const0_rtx, LT, NULL_RTX,
- compute_mode, 0, 0);
- emit_jump_insn (gen_blt (label));
+ do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
expand_inc (quotient, const1_rtx);
expand_dec (remainder, op1);
emit_label (label);
label3 = gen_label_rtx ();
label4 = gen_label_rtx ();
label5 = gen_label_rtx ();
- emit_cmp_insn (op1, const0_rtx, LT, NULL_RTX,
- compute_mode, 0, 0);
- emit_jump_insn (gen_blt (label2));
- emit_cmp_insn (adjusted_op0, const0_rtx, GT, NULL_RTX,
- compute_mode, 0, 0);
- emit_jump_insn (gen_bgt (label1));
+ do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
+ do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
+ compute_mode, label1);
tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
quotient, 0, OPTAB_LIB_WIDEN);
if (tem != quotient)
emit_jump_insn (gen_jump (label4));
emit_barrier ();
emit_label (label2);
- emit_cmp_insn (adjusted_op0, const0_rtx, LT, NULL_RTX,
- compute_mode, 0, 0);
- emit_jump_insn (gen_blt (label3));
+ do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
+ compute_mode, label3);
tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
quotient, 0, OPTAB_LIB_WIDEN);
if (tem != quotient)
NULL_RTX, unsignedp);
insn = get_last_insn ();
- REG_NOTES (insn)
- = gen_rtx (EXPR_LIST, REG_EQUAL,
- gen_rtx (unsignedp ? UDIV : DIV, compute_mode,
- op0, op1),
- REG_NOTES (insn));
+ set_unique_reg_note (insn,
+ REG_EQUAL,
+ gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
+ compute_mode,
+ op0, op1));
}
break;
tem = plus_constant (op1, -1);
tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
build_int_2 (1, 0), NULL_RTX, 1);
- emit_cmp_insn (remainder, tem, LEU, NULL_RTX, compute_mode, 0, 0);
- emit_jump_insn (gen_bleu (label));
+ do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
expand_inc (quotient, const1_rtx);
expand_dec (remainder, op1);
emit_label (label);
remainder = expand_binop (compute_mode, sub_optab, op0, tem,
remainder, 0, OPTAB_LIB_WIDEN);
}
- abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0, 0);
- abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0, 0);
+ abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0);
+ abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0);
tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
build_int_2 (1, 0), NULL_RTX, 1);
- emit_cmp_insn (tem, abs_op1, LTU, NULL_RTX, compute_mode, 0, 0);
- emit_jump_insn (gen_bltu (label));
+ do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
tem = expand_binop (compute_mode, xor_optab, op0, op1,
NULL_RTX, 0, OPTAB_WIDEN);
mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
emit_label (label);
}
return gen_lowpart (mode, rem_flag ? remainder : quotient);
+
+ default:
+ abort ();
}
if (quotient == 0)
if (rem_flag)
{
- /* Try to produce the remainder directly without a library call. */
- remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
- op0, op1, target,
- unsignedp, OPTAB_WIDEN);
+ /* Try to produce the remainder without producing the quotient.
+ If we seem to have a divmod pattern that does not require widening,
+ don't try widening here. We should really have a WIDEN argument
+ to expand_twoval_binop, since what we'd really like to do here is
+ 1) try a mod insn in compute_mode
+ 2) try a divmod insn in compute_mode
+ 3) try a div insn in compute_mode and multiply-subtract to get
+ remainder
+ 4) try the same things with widening allowed. */
+ remainder
+ = sign_expand_binop (compute_mode, umod_optab, smod_optab,
+ op0, op1, target,
+ unsignedp,
+ ((optab2->handlers[(int) compute_mode].insn_code
+ != CODE_FOR_nothing)
+ ? OPTAB_DIRECT : OPTAB_WIDEN));
if (remainder == 0)
{
/* No luck there. Can we do remainder and divide at once
{
case CONST_INT:
t = build_int_2 (INTVAL (x),
- TREE_UNSIGNED (type) || INTVAL (x) >= 0 ? 0 : -1);
+ (TREE_UNSIGNED (type)
+ && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
+ || INTVAL (x) >= 0 ? 0 : -1);
TREE_TYPE (t) = type;
return t;
rtx last = get_last_insn ();
rtx pattern, comparison;
+ if (unsignedp)
+ code = unsigned_condition (code);
+
/* If one operand is constant, make it the second one. Only do this
if the other operand is not constant as well. */
if (op1 == const1_rtx)
op1 = const0_rtx, code = EQ;
break;
+ default:
+ break;
+ }
+
+ /* If we are comparing a double-word integer with zero, we can convert
+ the comparison into one involving a single word. */
+ if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
+ && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx)
+ {
+ if (code == EQ || code == NE)
+ {
+ /* Do a logical OR of the two words and compare the result. */
+ rtx op0h = gen_highpart (word_mode, op0);
+ rtx op0l = gen_lowpart (word_mode, op0);
+ rtx op0both = expand_binop (word_mode, ior_optab, op0h, op0l,
+ NULL_RTX, unsignedp, OPTAB_DIRECT);
+ if (op0both != 0)
+ return emit_store_flag (target, code, op0both, op1, word_mode,
+ unsignedp, normalizep);
+ }
+ else if (code == LT || code == GE)
+ /* If testing the sign bit, can just test on high word. */
+ return emit_store_flag (target, code, gen_highpart (word_mode, op0),
+ op1, word_mode, unsignedp, normalizep);
}
/* From now on, we won't change CODE, so set ICODE now. */
if (icode != CODE_FOR_nothing)
{
+ insn_operand_predicate_fn pred;
+
/* We think we may be able to do this with a scc insn. Emit the
comparison and then the scc insn.
compare_from_rtx may call emit_queue, which would be deleted below
- if the scc insn fails. So call it ourselves before setting LAST. */
+ if the scc insn fails. So call it ourselves before setting LAST.
+ Likewise for do_pending_stack_adjust. */
emit_queue ();
+ do_pending_stack_adjust ();
last = get_last_insn ();
comparison
abort ();
/* Get a reference to the target in the proper mode for this insn. */
- compare_mode = insn_operand_mode[(int) icode][0];
+ compare_mode = insn_data[(int) icode].operand[0].mode;
subtarget = target;
+ pred = insn_data[(int) icode].operand[0].predicate;
if (preserve_subexpressions_p ()
- || ! (*insn_operand_predicate[(int) icode][0]) (subtarget, compare_mode))
+ || ! (*pred) (subtarget, compare_mode))
subtarget = gen_reg_rtx (compare_mode);
pattern = GEN_FCN (icode) (subtarget);
we don't have to do anything. */
if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
;
- else if (normalizep == - STORE_FLAG_VALUE)
+ /* STORE_FLAG_VALUE might be the most negative number, so write
+ the comparison this way to avoid a compiler-time warning. */
+ else if (- normalizep == STORE_FLAG_VALUE)
op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
/* We don't want to use STORE_FLAG_VALUE < 0 below since this
else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
&& ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
- == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
+ == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
;
else
return 0;
target = gen_reg_rtx (GET_MODE (target));
emit_move_insn (target, const1_rtx);
- tem = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
- if (GET_CODE (tem) == CONST_INT)
- return tem;
-
label = gen_label_rtx ();
- if (bcc_gen_fctn[(int) code] == 0)
- abort ();
+ do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, 0,
+ NULL_RTX, label);
- emit_jump_insn ((*bcc_gen_fctn[(int) code]) (label));
emit_move_insn (target, const0_rtx);
emit_label (label);
return target;
}
+\f
+/* Perform possibly multi-word comparison and conditional jump to LABEL
+ if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE
+
+ The algorithm is based on the code in expr.c:do_jump.
+
+ Note that this does not perform a general comparison. Only variants
+ generated within expmed.c are correctly handled, others abort (but could
+ be handled if needed). */
+
+static void
+do_cmp_and_jump (arg1, arg2, op, mode, label)
+ rtx arg1, arg2, label;
+ enum rtx_code op;
+ enum machine_mode mode;
+{
+ /* If this mode is an integer too wide to compare properly,
+ compare word by word. Rely on cse to optimize constant cases. */
+
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && ! can_compare_p (op, mode, ccp_jump))
+ {
+ /* LABEL2 is the fall-through target: control reaches it when the
+ branch to LABEL is not taken. */
+ rtx label2 = gen_label_rtx ();
+
+ /* NOTE(review): the cases below assume do_jump_by_parts_greater_rtx
+ takes (mode, unsignedp, op0, op1, if_false_label, if_true_label)
+ and branches on op0 > op1; each case rewrites ARG1 OP ARG2 into
+ that form (swapping operands and/or labels). Confirm against the
+ declaration in expr.c. */
+ switch (op)
+ {
+ case LTU:
+ do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
+ break;
+
+ case LEU:
+ do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
+ break;
+
+ case LT:
+ do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
+ break;
+
+ case GT:
+ do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
+ break;
+
+ case GE:
+ do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
+ break;
+
+ /* do_jump_by_parts_equality_rtx compares with zero. Luckily
+ that's the only equality operation we do. */
+ case EQ:
+ if (arg2 != const0_rtx || mode != GET_MODE(arg1))
+ abort();
+ do_jump_by_parts_equality_rtx (arg1, label2, label);
+ break;
+
+ case NE:
+ if (arg2 != const0_rtx || mode != GET_MODE(arg1))
+ abort();
+ do_jump_by_parts_equality_rtx (arg1, label, label2);
+ break;
+
+ /* Other rtx_codes are never generated within expmed.c (see the
+ header comment above this function). */
+ default:
+ abort();
+ }
+
+ emit_label (label2);
+ }
+ else
+ {
+ /* The mode is directly comparable: emit a single compare-and-branch
+ to LABEL. */
+ emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, 0, label);
+ }
+}