/* Medium-level subroutines: convert bit-field store and extract
and shifts, multiplies and divides to rtl instructions.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000 Free Software Foundation, Inc.
+ 1999, 2000, 2001 Free Software Foundation, Inc.
-This file is part of GNU CC.
+This file is part of GCC.
-GNU CC is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
-any later version.
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
-GNU CC is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
#include "config.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
-#include "insn-flags.h"
-#include "insn-codes.h"
#include "insn-config.h"
#include "expr.h"
+#include "optabs.h"
#include "real.h"
#include "recog.h"
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif
+/* Reduce conditional compilation elsewhere. */
+#ifndef HAVE_insv
+#define HAVE_insv 0
+#define CODE_FOR_insv CODE_FOR_nothing
+#define gen_insv(a,b,c,d) NULL_RTX
+#endif
+#ifndef HAVE_extv
+#define HAVE_extv 0
+#define CODE_FOR_extv CODE_FOR_nothing
+#define gen_extv(a,b,c,d) NULL_RTX
+#endif
+#ifndef HAVE_extzv
+#define HAVE_extzv 0
+#define CODE_FOR_extzv CODE_FOR_nothing
+#define gen_extzv(a,b,c,d) NULL_RTX
+#endif
+
/* Cost of various pieces of RTL. Note that some of these are indexed by
shift count and some by mode. */
static int add_cost, negate_cost, zero_cost;
void
init_expmed ()
{
- char *free_point;
/* This is "some random pseudo register" for purposes of calling recog
to see what insns exist. */
rtx reg = gen_rtx_REG (word_mode, 10000);
start_sequence ();
- /* Since we are on the permanent obstack, we must be sure we save this
- spot AFTER we call start_sequence, since it will reuse the rtl it
- makes. */
- free_point = (char *) oballoc (0);
-
reg = gen_rtx_REG (word_mode, 10000);
zero_cost = rtx_cost (const0_rtx, 0);
}
}
- /* Free the objects we just allocated. */
end_sequence ();
- obfree (free_point);
}
/* Return an rtx representing minus the value of X.
return result;
}
+
+/* Report on the availability of insv/extv/extzv and the desired mode
+   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
+   is false; else the mode of the specified operand.  If OPNO is -1,
+   all the caller cares about is whether the insn is available.  */
+enum machine_mode
+mode_for_extraction (pattern, opno)
+     enum extraction_pattern pattern;
+     int opno;
+{
+  const struct insn_data *data;
+
+  /* Map the requested pattern to its insn_data entry; bail out with
+     MAX_MACHINE_MODE when the target does not provide the insn.  */
+  switch (pattern)
+    {
+    case EP_insv:
+      if (HAVE_insv)
+	{
+	  data = &insn_data[CODE_FOR_insv];
+	  break;
+	}
+      return MAX_MACHINE_MODE;
+
+    case EP_extv:
+      if (HAVE_extv)
+	{
+	  data = &insn_data[CODE_FOR_extv];
+	  break;
+	}
+      return MAX_MACHINE_MODE;
+
+    case EP_extzv:
+      if (HAVE_extzv)
+	{
+	  data = &insn_data[CODE_FOR_extzv];
+	  break;
+	}
+      return MAX_MACHINE_MODE;
+
+    default:
+      /* No other extraction patterns exist.  */
+      abort ();
+    }
+
+  /* The insn exists; if the caller only asked about availability
+     (OPNO == -1), any mode other than MAX_MACHINE_MODE will do.  */
+  if (opno == -1)
+    return VOIDmode;
+
+  /* Everyone who uses this function used to follow it with
+     if (result == VOIDmode) result = word_mode; */
+  if (data->operand[opno].mode == VOIDmode)
+    return word_mode;
+  return data->operand[opno].mode;
+}
+
+
\f
/* Generate code to store value from rtx VALUE
into a bit-field within structure STR_RTX
= (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
unsigned HOST_WIDE_INT offset = bitnum / unit;
unsigned HOST_WIDE_INT bitpos = bitnum % unit;
- register rtx op0 = str_rtx;
-#ifdef HAVE_insv
- unsigned HOST_WIDE_INT insv_bitsize;
- enum machine_mode op_mode;
-
- op_mode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
- if (op_mode == VOIDmode)
- op_mode = word_mode;
- insv_bitsize = GET_MODE_BITSIZE (op_mode);
-#endif
+ rtx op0 = str_rtx;
- if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
+ enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
+
+ /* It is wrong to have align==0, since every object is aligned at
+ least at a bit boundary. This usually means a bug elsewhere. */
+ if (align == 0)
abort ();
/* Discount the part of the structure before the desired byte.
meaningful at a much higher level; when structures are copied
between memory and regs, the higher-numbered regs
always get higher addresses. */
- offset += SUBREG_WORD (op0);
+ offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
/* We used to adjust BITPOS here, but now we do the whole adjustment
right after the loop. */
op0 = SUBREG_REG (op0);
}
- /* Make sure we are playing with integral modes. Pun with subregs
- if we aren't. */
- {
- enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
- if (imode != GET_MODE (op0))
- {
- if (GET_CODE (op0) == MEM)
- op0 = change_address (op0, imode, NULL_RTX);
- else if (imode != BLKmode)
- op0 = gen_lowpart (imode, op0);
- else
- abort ();
- }
- }
-
- /* If OP0 is a register, BITPOS must count within a word.
- But as we have it, it counts within whatever size OP0 now has.
- On a bigendian machine, these are not the same, so convert. */
- if (BYTES_BIG_ENDIAN
- && GET_CODE (op0) != MEM
- && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
- bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
-
value = protect_from_queue (value, 0);
if (flag_force_mem)
value = force_not_mem (value);
- if ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
- || (GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode)
- && GET_MODE_SIZE (fieldmode) != 0))
+ /* If the target is a register, overwriting the entire object, or storing
+ a full-word or multi-word field can be done with just a SUBREG.
+
+ If the target is memory, storing any naturally aligned field can be
+ done with a simple store. For targets that support fast unaligned
+ memory, any naturally sized, unit aligned field can be done directly. */
+
+ if (bitpos == 0
+ && bitsize == GET_MODE_BITSIZE (fieldmode)
&& (GET_CODE (op0) != MEM
- || ! SLOW_UNALIGNED_ACCESS (fieldmode, align)
- || (offset * BITS_PER_UNIT % bitsize == 0
- && align % GET_MODE_BITSIZE (fieldmode) == 0))
- && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
- && bitsize == GET_MODE_BITSIZE (fieldmode))
+ ? (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
+ || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
+ : (! SLOW_UNALIGNED_ACCESS (fieldmode, align)
+ || (offset * BITS_PER_UNIT % bitsize == 0
+ && align % GET_MODE_BITSIZE (fieldmode) == 0))))
{
- /* Storing in a full-word or multi-word field in a register
- can be done with just SUBREG. Also, storing in the entire object
- can be done with just SUBREG. */
if (GET_MODE (op0) != fieldmode)
{
if (GET_CODE (op0) == SUBREG)
abort ();
}
if (GET_CODE (op0) == REG)
- op0 = gen_rtx_SUBREG (fieldmode, op0, offset);
+ op0 = gen_rtx_SUBREG (fieldmode, op0,
+ (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
+ + (offset * UNITS_PER_WORD));
else
- op0 = change_address (op0, fieldmode,
- plus_constant (XEXP (op0, 0), offset));
+ op0 = adjust_address (op0, fieldmode, offset);
}
emit_move_insn (op0, value);
return value;
}
+ /* Make sure we are playing with integral modes. Pun with subregs
+ if we aren't. This must come after the entire register case above,
+ since that case is valid for any mode. The following cases are only
+ valid for integral modes. */
+ {
+ enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
+ if (imode != GET_MODE (op0))
+ {
+ if (GET_CODE (op0) == MEM)
+ op0 = adjust_address (op0, imode, 0);
+ else if (imode != BLKmode)
+ op0 = gen_lowpart (imode, op0);
+ else
+ abort ();
+ }
+ }
+
+ /* If OP0 is a register, BITPOS must count within a word.
+ But as we have it, it counts within whatever size OP0 now has.
+ On a bigendian machine, these are not the same, so convert. */
+ if (BYTES_BIG_ENDIAN
+ && GET_CODE (op0) != MEM
+ && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
+ bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
+
/* Storing an lsb-aligned field in a register
can be done with a movestrict instruction. */
}
emit_insn (GEN_FCN (icode)
- (gen_rtx_SUBREG (fieldmode, op0, offset), value));
+ (gen_rtx_SUBREG (fieldmode, op0,
+ (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
+ + (offset * UNITS_PER_WORD)),
+ value));
return value;
}
VOIDmode, because that is what store_field uses to indicate that this
is a bit field, but passing VOIDmode to operand_subword_force will
result in an abort. */
- fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);
+ fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);
for (i = 0; i < nwords; i++)
{
abort ();
}
op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
- op0, offset);
+ op0, (offset * UNITS_PER_WORD));
}
offset = 0;
}
/* Now OFFSET is nonzero only if OP0 is memory
and is therefore always measured in bytes. */
-#ifdef HAVE_insv
if (HAVE_insv
&& GET_MODE (value) != BLKmode
&& !(bitsize == 1 && GET_CODE (value) == CONST_INT)
/* Ensure insv's size is wide enough for this field. */
- && (insv_bitsize >= bitsize)
+ && (GET_MODE_BITSIZE (op_mode) >= bitsize)
&& ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
- && (bitsize + bitpos > insv_bitsize)))
+ && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
{
int xbitpos = bitpos;
rtx value1;
rtx xop0 = op0;
rtx last = get_last_insn ();
rtx pat;
- enum machine_mode maxmode;
+ enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
int save_volatile_ok = volatile_ok;
- maxmode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
- if (maxmode == VOIDmode)
- maxmode = word_mode;
-
volatile_ok = 1;
/* If this machine's insv can only insert into a register, copy OP0
/* Compute offset as multiple of this unit, counting in bytes. */
offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
bitpos = bitnum % unit;
- op0 = change_address (op0, bestmode,
- plus_constant (XEXP (op0, 0), offset));
+ op0 = adjust_address (op0, bestmode, offset);
/* Fetch that unit, store the bitfield in it, then store
the unit. */
/* Add OFFSET into OP0's address. */
if (GET_CODE (xop0) == MEM)
- xop0 = change_address (xop0, byte_mode,
- plus_constant (XEXP (xop0, 0), offset));
+ xop0 = adjust_address (xop0, byte_mode, offset);
/* If xop0 is a register, we need it in MAXMODE
to make it acceptable to the format of insv. */
if (GET_CODE (xop0) == SUBREG)
/* We can't just change the mode, because this might clobber op0,
and we will need the original value of op0 if insv fails. */
- xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
+ xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
else
value1 = gen_lowpart (maxmode, value1);
}
+ else if (GET_CODE (value) == CONST_INT)
+ value1 = GEN_INT (trunc_int_for_mode (INTVAL (value), maxmode));
else if (!CONSTANT_P (value))
/* Parse phase is supposed to make VALUE's data type
match that of the component reference, which is a type
}
else
insv_loses:
-#endif
/* Insv is not available; store using shifts and boolean ops. */
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
return value;
static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
- register rtx op0;
+ rtx op0;
unsigned HOST_WIDE_INT offset, bitsize, bitpos;
- register rtx value;
+ rtx value;
unsigned int struct_align;
{
- register enum machine_mode mode;
+ enum machine_mode mode;
unsigned int total_bits = BITS_PER_WORD;
rtx subtarget, temp;
int all_zero = 0;
{
/* Get the proper mode to use for this field. We want a mode that
includes the entire field. If such a mode would be larger than
- a word, we won't be doing the extraction the normal way. */
+ a word, we won't be doing the extraction the normal way.
+ We don't want a mode bigger than the destination. */
+ mode = GET_MODE (op0);
+ if (GET_MODE_BITSIZE (mode) == 0
+ || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
+ mode = word_mode;
mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
- struct_align, word_mode,
+ struct_align, mode,
GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));
if (mode == VOIDmode)
Then alter OP0 to refer to that word. */
bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
offset -= (offset % (total_bits / BITS_PER_UNIT));
- op0 = change_address (op0, mode,
- plus_constant (XEXP (op0, 0), offset));
+ op0 = adjust_address (op0, mode, offset);
}
mode = GET_MODE (op0);
if (GET_CODE (value) == CONST_INT)
{
- register HOST_WIDE_INT v = INTVAL (value);
+ HOST_WIDE_INT v = INTVAL (value);
if (bitsize < HOST_BITS_PER_WIDE_INT)
v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
the current word starting from the base register. */
if (GET_CODE (op0) == SUBREG)
{
- word = operand_subword_force (SUBREG_REG (op0),
- SUBREG_WORD (op0) + offset,
+ int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
+ word = operand_subword_force (SUBREG_REG (op0), word_offset,
GET_MODE (SUBREG_REG (op0)));
offset = 0;
}
= (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
unsigned HOST_WIDE_INT offset = bitnum / unit;
unsigned HOST_WIDE_INT bitpos = bitnum % unit;
- register rtx op0 = str_rtx;
+ rtx op0 = str_rtx;
rtx spec_target = target;
rtx spec_target_subreg = 0;
enum machine_mode int_mode;
-#ifdef HAVE_extv
- unsigned HOST_WIDE_INT extv_bitsize;
- enum machine_mode extv_mode;
-#endif
-#ifdef HAVE_extzv
- unsigned HOST_WIDE_INT extzv_bitsize;
- enum machine_mode extzv_mode;
-#endif
-
-#ifdef HAVE_extv
- extv_mode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
- if (extv_mode == VOIDmode)
- extv_mode = word_mode;
- extv_bitsize = GET_MODE_BITSIZE (extv_mode);
-#endif
-
-#ifdef HAVE_extzv
- extzv_mode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
- if (extzv_mode == VOIDmode)
- extzv_mode = word_mode;
- extzv_bitsize = GET_MODE_BITSIZE (extzv_mode);
-#endif
+ enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
+ enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
/* Discount the part of the structure before the desired byte.
We need to know how many bytes are safe to reference after it. */
int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));
- offset += SUBREG_WORD (op0);
+ offset += SUBREG_BYTE (op0) / UNITS_PER_WORD;
inner_size = MIN (inner_size, BITS_PER_WORD);
op0 = SUBREG_REG (op0);
}
+ if (GET_CODE (op0) == REG
+ && mode == GET_MODE (op0)
+ && bitnum == 0
+ && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
+ {
+ /* We're trying to extract a full register from itself. */
+ return op0;
+ }
+
/* Make sure we are playing with integral modes. Pun with subregs
if we aren't. */
{
if (imode != GET_MODE (op0))
{
if (GET_CODE (op0) == MEM)
- op0 = change_address (op0, imode, NULL_RTX);
+ op0 = adjust_address (op0, imode, 0);
else if (imode != BLKmode)
op0 = gen_lowpart (imode, op0);
else
: bitpos == 0))))
{
enum machine_mode mode1
- = mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);
+ = (VECTOR_MODE_P (tmode) ? mode
+ : mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0));
if (mode1 != GET_MODE (op0))
{
abort ();
}
if (GET_CODE (op0) == REG)
- op0 = gen_rtx_SUBREG (mode1, op0, offset);
+ op0 = gen_rtx_SUBREG (mode1, op0,
+ (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
+ + (offset * UNITS_PER_WORD));
else
- op0 = change_address (op0, mode1,
- plus_constant (XEXP (op0, 0), offset));
+ op0 = adjust_address (op0, mode1, offset);
}
if (mode1 != mode)
return convert_to_mode (tmode, op0, unsignedp);
if (GET_CODE (op0) != REG)
op0 = copy_to_reg (op0);
op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
- op0, offset);
+ op0, (offset * UNITS_PER_WORD));
}
offset = 0;
}
if (unsignedp)
{
-#ifdef HAVE_extzv
if (HAVE_extzv
- && (extzv_bitsize >= bitsize)
+ && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
&& ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
- && (bitsize + bitpos > extzv_bitsize)))
+ && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
{
unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
rtx bitsize_rtx, bitpos_rtx;
rtx xspec_target = spec_target;
rtx xspec_target_subreg = spec_target_subreg;
rtx pat;
- enum machine_mode maxmode;
-
- maxmode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
- if (maxmode == VOIDmode)
- maxmode = word_mode;
+ enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);
if (GET_CODE (xop0) == MEM)
{
unit = GET_MODE_BITSIZE (bestmode);
xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
xbitpos = bitnum % unit;
- xop0 = change_address (xop0, bestmode,
- plus_constant (XEXP (xop0, 0),
- xoffset));
+ xop0 = adjust_address (xop0, bestmode, xoffset);
+
/* Fetch it to a register in that size. */
xop0 = force_reg (bestmode, xop0);
}
else
/* Get ref to first byte containing part of the field. */
- xop0 = change_address (xop0, byte_mode,
- plus_constant (XEXP (xop0, 0), xoffset));
+ xop0 = adjust_address (xop0, byte_mode, xoffset);
volatile_ok = save_volatile_ok;
}
}
else
extzv_loses:
-#endif
target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
bitpos, target, 1, align);
}
else
{
-#ifdef HAVE_extv
if (HAVE_extv
- && (extv_bitsize >= bitsize)
+ && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
&& ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
- && (bitsize + bitpos > extv_bitsize)))
+ && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
{
int xbitpos = bitpos, xoffset = offset;
rtx bitsize_rtx, bitpos_rtx;
rtx xspec_target = spec_target;
rtx xspec_target_subreg = spec_target_subreg;
rtx pat;
- enum machine_mode maxmode;
-
- maxmode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
- if (maxmode == VOIDmode)
- maxmode = word_mode;
+ enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);
if (GET_CODE (xop0) == MEM)
{
unit = GET_MODE_BITSIZE (bestmode);
xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
xbitpos = bitnum % unit;
- xop0 = change_address (xop0, bestmode,
- plus_constant (XEXP (xop0, 0),
- xoffset));
+ xop0 = adjust_address (xop0, bestmode, xoffset);
+
/* Fetch it to a register in that size. */
xop0 = force_reg (bestmode, xop0);
}
else
/* Get ref to first byte containing part of the field. */
- xop0 = change_address (xop0, byte_mode,
- plus_constant (XEXP (xop0, 0), xoffset));
+ xop0 = adjust_address (xop0, byte_mode, xoffset);
}
/* If op0 is a register, we need it in MAXMODE (which is usually
}
else
extv_loses:
-#endif
target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
bitpos, target, 0, align);
}
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
target, unsignedp, align)
enum machine_mode tmode;
- register rtx op0, target;
+ rtx op0, target;
unsigned HOST_WIDE_INT offset, bitsize, bitpos;
int unsignedp;
unsigned int align;
Then alter OP0 to refer to that word. */
bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
offset -= (offset % (total_bits / BITS_PER_UNIT));
- op0 = change_address (op0, mode,
- plus_constant (XEXP (op0, 0), offset));
+ op0 = adjust_address (op0, mode, offset);
}
mode = GET_MODE (op0);
the current word starting from the base register. */
if (GET_CODE (op0) == SUBREG)
{
- word = operand_subword_force (SUBREG_REG (op0),
- SUBREG_WORD (op0) + offset,
+ int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
+ word = operand_subword_force (SUBREG_REG (op0), word_offset,
GET_MODE (SUBREG_REG (op0)));
offset = 0;
}
rtx
expand_shift (code, mode, shifted, amount, target, unsignedp)
enum tree_code code;
- register enum machine_mode mode;
+ enum machine_mode mode;
rtx shifted;
tree amount;
- register rtx target;
+ rtx target;
int unsignedp;
{
- register rtx op1, temp = 0;
- register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
- register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
+ rtx op1, temp = 0;
+ int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
+ int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
int try;
/* Previously detected shift-counts computed by NEGATE_EXPR
op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
% GET_MODE_BITSIZE (mode));
else if (GET_CODE (op1) == SUBREG
- && SUBREG_WORD (op1) == 0)
+ && SUBREG_BYTE (op1) == 0)
op1 = SUBREG_REG (op1);
}
#endif
that is in range, try a rotate in the opposite direction. */
if (temp == 0 && GET_CODE (op1) == CONST_INT
- && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
+ && INTVAL (op1) > 0
+ && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
temp = expand_binop (mode,
left ? rotr_optab : rotl_optab,
shifted,
if ((t & 1) == 0)
{
m = floor_log2 (t & -t); /* m = number of low zero bits */
- q = t >> m;
- cost = shift_cost[m];
- synth_mult (alg_in, q, cost_limit - cost);
-
- cost += alg_in->cost;
- if (cost < cost_limit)
+ if (m < BITS_PER_WORD)
{
- struct algorithm *x;
- x = alg_in, alg_in = best_alg, best_alg = x;
- best_alg->log[best_alg->ops] = m;
- best_alg->op[best_alg->ops] = alg_shift;
- cost_limit = cost;
+ q = t >> m;
+ cost = shift_cost[m];
+ synth_mult (alg_in, q, cost_limit - cost);
+
+ cost += alg_in->cost;
+ if (cost < cost_limit)
+ {
+ struct algorithm *x;
+ x = alg_in, alg_in = best_alg, best_alg = x;
+ best_alg->log[best_alg->ops] = m;
+ best_alg->op[best_alg->ops] = alg_shift;
+ cost_limit = cost;
+ }
}
}
unsigned HOST_WIDE_INT d;
d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
- if (t % d == 0 && t > d)
+ if (t % d == 0 && t > d && m < BITS_PER_WORD)
{
cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
synth_mult (alg_in, t / d, cost_limit - cost);
}
d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
- if (t % d == 0 && t > d)
+ if (t % d == 0 && t > d && m < BITS_PER_WORD)
{
cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
synth_mult (alg_in, t / d, cost_limit - cost);
q = t - 1;
q = q & -q;
m = exact_log2 (q);
- if (m >= 0)
+ if (m >= 0 && m < BITS_PER_WORD)
{
cost = shiftadd_cost[m];
synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
q = t + 1;
q = q & -q;
m = exact_log2 (q);
- if (m >= 0)
+ if (m >= 0 && m < BITS_PER_WORD)
{
cost = shiftsub_cost[m];
synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
best_alg is normally undefined, and this is a critical function. */
alg_out->ops = best_alg->ops + 1;
alg_out->cost = cost_limit;
- bcopy ((char *) best_alg->op, (char *) alg_out->op,
- alg_out->ops * sizeof *alg_out->op);
- bcopy ((char *) best_alg->log, (char *) alg_out->log,
- alg_out->ops * sizeof *alg_out->log);
+ memcpy (alg_out->op, best_alg->op,
+ alg_out->ops * sizeof *alg_out->op);
+ memcpy (alg_out->log, best_alg->log,
+ alg_out->ops * sizeof *alg_out->log);
}
\f
/* Perform a multiplication and return an rtx for the result.
rtx
expand_mult (mode, op0, op1, target, unsignedp)
enum machine_mode mode;
- register rtx op0, op1, target;
+ rtx op0, op1, target;
int unsignedp;
{
rtx const_op1 = op1;
But this causes such a terrible slowdown sometimes
that it seems better to use synth_mult always. */
- if (const_op1 && GET_CODE (const_op1) == CONST_INT)
+ if (const_op1 && GET_CODE (const_op1) == CONST_INT
+ && (unsignedp || ! flag_trapv))
{
struct algorithm alg;
struct algorithm alg2;
/* We found something cheaper than a multiply insn. */
int opno;
rtx accum, tem;
+ enum machine_mode nmode;
op0 = protect_from_queue (op0, 0);
}
/* Write a REG_EQUAL note on the last insn so that we can cse
- multiplication sequences. */
+ multiplication sequences. Note that if ACCUM is a SUBREG,
+ we've set the inner register and must properly indicate
+ that. */
+
+ tem = op0, nmode = mode;
+ if (GET_CODE (accum) == SUBREG)
+ {
+ nmode = GET_MODE (SUBREG_REG (accum));
+ tem = gen_lowpart (nmode, op0);
+ }
insn = get_last_insn ();
set_unique_reg_note (insn,
REG_EQUAL,
- gen_rtx_MULT (mode, op0,
+ gen_rtx_MULT (nmode, tem,
GEN_INT (val_so_far)));
}
/* This used to use umul_optab if unsigned, but for non-widening multiply
there is no difference between signed and unsigned. */
- op0 = expand_binop (mode, smul_optab,
+ op0 = expand_binop (mode,
+ ! unsignedp
+ && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
+ ? smulv_optab : smul_optab,
op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
if (op0 == 0)
abort ();
int *post_shift_ptr;
int *lgup_ptr;
{
- unsigned HOST_WIDE_INT mhigh_hi, mhigh_lo;
- unsigned HOST_WIDE_INT mlow_hi, mlow_lo;
+ HOST_WIDE_INT mhigh_hi, mlow_hi;
+ unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
int lgup, post_shift;
int pow, pow2;
- unsigned HOST_WIDE_INT nh, nl, dummy1, dummy2;
+ unsigned HOST_WIDE_INT nl, dummy1;
+ HOST_WIDE_INT nh, dummy2;
/* lgup = ceil(log2(divisor)); */
lgup = ceil_log2 (d);
/* mlow = 2^(N + lgup)/d */
if (pow >= HOST_BITS_PER_WIDE_INT)
{
- nh = (unsigned HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
+ nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
nl = 0;
}
else
/* mhigh = (2^(N + lgup) + 2^N + lgup - precision)/d */
if (pow2 >= HOST_BITS_PER_WIDE_INT)
- nh |= (unsigned HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
+ nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
else
nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
rtx
expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
enum machine_mode mode;
- register rtx adj_operand, op0, op1, target;
+ rtx adj_operand, op0, op1, target;
int unsignedp;
{
rtx tem;
rtx
expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
enum machine_mode mode;
- register rtx op0, target;
+ rtx op0, target;
unsigned HOST_WIDE_INT cnst1;
int unsignedp;
int max_cost;
if (size > HOST_BITS_PER_WIDE_INT)
abort ();
- op1 = GEN_INT (cnst1);
+ op1 = GEN_INT (trunc_int_for_mode (cnst1, mode));
if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
wide_op1 = op1;
multiply. Maybe change expand_binop to handle widening multiply? */
op0 = convert_to_mode (wider_mode, op0, unsignedp);
- tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, unsignedp);
+ /* We know that this can't have signed overflow, so pretend this is
+ an unsigned multiply. */
+ tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, 0);
tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
build_int_2 (size, 0), NULL_RTX, 1);
return convert_modes (mode, wider_mode, tem, unsignedp);
{
mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
target = expand_binop (mode, mul_highpart_optab,
- op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
+ op0, op1, target, unsignedp, OPTAB_DIRECT);
if (target)
return target;
}
/* Secondly, same as above, but use sign flavor opposite of unsignedp.
Need to adjust the result after the multiplication. */
- if (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost < max_cost)
+ if (size - 1 < BITS_PER_WORD
+ && (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost
+ < max_cost))
{
mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
target = expand_binop (mode, mul_highpart_optab,
- op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
+ op0, op1, target, unsignedp, OPTAB_DIRECT);
if (target)
/* We used the wrong signedness. Adjust the result. */
return expand_mult_highpart_adjust (mode, target, op0,
/* Try widening the mode and perform a non-widening multiplication. */
moptab = smul_optab;
if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
+ && size - 1 < BITS_PER_WORD
&& mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
{
op1 = wide_op1;
/* Try widening multiplication of opposite signedness, and adjust. */
moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
+ && size - 1 < BITS_PER_WORD
&& (mul_widen_cost[(int) wider_mode]
+ 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
{
int rem_flag;
enum tree_code code;
enum machine_mode mode;
- register rtx op0, op1, target;
+ rtx op0, op1, target;
int unsignedp;
{
enum machine_mode compute_mode;
- register rtx tquotient;
+ rtx tquotient;
rtx quotient = 0, remainder = 0;
rtx last;
int size;
if (op1 == const1_rtx)
return rem_flag ? const0_rtx : op0;
+ /* When dividing by -1, we could get an overflow.
+ negv_optab can handle overflows. */
+ if (! unsignedp && op1 == constm1_rtx)
+ {
+ if (rem_flag)
+ return const0_rtx;
+ return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
+ ? negv_optab : neg_optab, op0, target, 0);
+ }
+
if (target
/* Don't use the function value register as a target
since we have to read it as well as write it,
{
rtx t1, t2, t3, t4;
+ if (post_shift - 1 >= BITS_PER_WORD)
+ goto fail1;
+
extra_cost = (shift_cost[post_shift - 1]
+ shift_cost[1] + 2 * add_cost);
t1 = expand_mult_highpart (compute_mode, op0, ml,
{
rtx t1, t2;
+ if (pre_shift >= BITS_PER_WORD
+ || post_shift >= BITS_PER_WORD)
+ goto fail1;
+
t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
build_int_2 (pre_shift, 0),
NULL_RTX, 1);
if (rem_flag && d < 0)
{
d = abs_d;
- op1 = GEN_INT (abs_d);
+ op1 = GEN_INT (trunc_int_for_mode (abs_d, compute_mode));
}
if (d == 1)
else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
{
lgup = floor_log2 (abs_d);
- if (abs_d != 2 && BRANCH_COST < 3)
+ if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
{
rtx label = gen_label_rtx ();
rtx t1;
t1 = copy_to_mode_reg (compute_mode, op0);
do_cmp_and_jump (t1, const0_rtx, GE,
compute_mode, label);
- expand_inc (t1, GEN_INT (abs_d - 1));
+ expand_inc (t1, GEN_INT (trunc_int_for_mode
+ (abs_d - 1, compute_mode)));
emit_label (label);
quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
build_int_2 (lgup, 0),
REG_EQUAL,
gen_rtx_DIV (compute_mode,
op0,
- GEN_INT (abs_d)));
+ GEN_INT
+ (trunc_int_for_mode
+ (abs_d,
+ compute_mode))));
quotient = expand_unop (compute_mode, neg_optab,
quotient, quotient, 0);
{
rtx t1, t2, t3;
+ if (post_shift >= BITS_PER_WORD
+ || size - 1 >= BITS_PER_WORD)
+ goto fail1;
+
extra_cost = (shift_cost[post_shift]
+ shift_cost[size - 1] + add_cost);
t1 = expand_mult_highpart (compute_mode, op0, ml,
{
rtx t1, t2, t3, t4;
+ if (post_shift >= BITS_PER_WORD
+ || size - 1 >= BITS_PER_WORD)
+ goto fail1;
+
ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
extra_cost = (shift_cost[post_shift]
+ shift_cost[size - 1] + 2 * add_cost);
if (mh)
abort ();
- t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
- build_int_2 (size - 1, 0), NULL_RTX, 0);
- t2 = expand_binop (compute_mode, xor_optab, op0, t1,
- NULL_RTX, 0, OPTAB_WIDEN);
- extra_cost = (shift_cost[post_shift]
- + shift_cost[size - 1] + 2 * add_cost);
- t3 = expand_mult_highpart (compute_mode, t2, ml,
- NULL_RTX, 1,
- max_cost - extra_cost);
- if (t3 != 0)
+ if (post_shift < BITS_PER_WORD
+ && size - 1 < BITS_PER_WORD)
{
- t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
- build_int_2 (post_shift, 0),
- NULL_RTX, 1);
- quotient = expand_binop (compute_mode, xor_optab,
- t4, t1, tquotient, 0,
- OPTAB_WIDEN);
+ t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (size - 1, 0),
+ NULL_RTX, 0);
+ t2 = expand_binop (compute_mode, xor_optab, op0, t1,
+ NULL_RTX, 0, OPTAB_WIDEN);
+ extra_cost = (shift_cost[post_shift]
+ + shift_cost[size - 1] + 2 * add_cost);
+ t3 = expand_mult_highpart (compute_mode, t2, ml,
+ NULL_RTX, 1,
+ max_cost - extra_cost);
+ if (t3 != 0)
+ {
+ t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
+ build_int_2 (post_shift, 0),
+ NULL_RTX, 1);
+ quotient = expand_binop (compute_mode, xor_optab,
+ t4, t1, tquotient, 0,
+ OPTAB_WIDEN);
+ }
}
}
}
{
HOST_WIDE_INT d = INTVAL (op1);
unsigned HOST_WIDE_INT ml;
- int post_shift;
+ int pre_shift;
rtx t1;
- post_shift = floor_log2 (d & -d);
- ml = invert_mod2n (d >> post_shift, size);
- t1 = expand_mult (compute_mode, op0, GEN_INT (ml), NULL_RTX,
- unsignedp);
- quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
- build_int_2 (post_shift, 0),
- NULL_RTX, unsignedp);
+ pre_shift = floor_log2 (d & -d);
+ ml = invert_mod2n (d >> pre_shift, size);
+ t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
+ quotient = expand_mult (compute_mode, t1,
+ GEN_INT (trunc_int_for_mode
+ (ml, compute_mode)),
+ NULL_RTX, 0);
insn = get_last_insn ();
set_unique_reg_note (insn,
remainder = expand_binop (compute_mode, sub_optab, op0, tem,
remainder, 0, OPTAB_LIB_WIDEN);
}
- abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0);
- abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0);
+ abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
+ abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
build_int_2 (1, 0), NULL_RTX, 1);
do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
/* Return a tree node with data type TYPE, describing the value of X.
Usually this is an RTL_EXPR, if there is no obvious better choice.
X may be an expression, however we only support those expressions
- generated by loop.c. */
+ generated by loop.c. */
tree
make_tree (type, x)
default:
t = make_node (RTL_EXPR);
TREE_TYPE (t) = type;
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+ /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
+ ptr_mode. So convert. */
+ if (POINTER_TYPE_P (type) && GET_MODE (x) != TYPE_MODE (type))
+ x = convert_memory_address (TYPE_MODE (type), x);
+#endif
+
RTL_EXPR_RTL (t) = x;
/* There are no insns to be output
when this rtl_expr is used. */
/* If one operand is constant, make it the second one. Only do this
if the other operand is not constant as well. */
- if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
- || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
+ if (swap_commutative_operands_p (op0, op1))
{
tem = op0;
op0 = op1;
break;
}
+ /* If we are comparing a double-word integer with zero, we can convert
+ the comparison into one involving a single word. */
+ if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
+ && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx)
+ {
+ if (code == EQ || code == NE)
+ {
+ /* Do a logical OR of the two words and compare the result. */
+ rtx op0h = gen_highpart (word_mode, op0);
+ rtx op0l = gen_lowpart (word_mode, op0);
+ rtx op0both = expand_binop (word_mode, ior_optab, op0h, op0l,
+ NULL_RTX, unsignedp, OPTAB_DIRECT);
+ if (op0both != 0)
+ return emit_store_flag (target, code, op0both, op1, word_mode,
+ unsignedp, normalizep);
+ }
+ else if (code == LT || code == GE)
+ /* If testing the sign bit, can just test on high word. */
+ return emit_store_flag (target, code, gen_highpart (word_mode, op0),
+ op1, word_mode, unsignedp, normalizep);
+ }
+
/* From now on, we won't change CODE, so set ICODE now. */
icode = setcc_gen_code[(int) code];
we can use zero-extension to the wider mode (an unsigned conversion)
as the operation. */
+ /* Note that ABS doesn't yield a positive number for INT_MIN, but
+ that is compensated by the subsequent overflow when subtracting
+ one / negating. */
+
if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)