/* Medium-level subroutines: convert bit-field store and extract
and shifts, multiplies and divides to rtl instructions.
- Copyright (C) 1987, 88, 89, 92, 93, 94, 1995 Free Software Foundation, Inc.
+ Copyright (C) 1987, 88, 89, 92-5, 1996 Free Software Foundation, Inc.
This file is part of GNU CC.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
#include "config.h"
enum machine_mode mode;
rtx x;
{
- if (GET_CODE (x) == CONST_INT)
- {
- HOST_WIDE_INT val = - INTVAL (x);
- if (GET_MODE_BITSIZE (mode) < HOST_BITS_PER_WIDE_INT)
- {
- /* Sign extend the value from the bits that are significant. */
- if (val & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
- val |= (HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (mode);
- else
- val &= ((HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (mode)) - 1;
- }
- return GEN_INT (val);
- }
- else
- return expand_unop (GET_MODE (x), neg_optab, x, NULL_RTX, 0);
+ rtx result = simplify_unary_operation (NEG, mode, x, mode);
+
+ if (result == 0)
+ result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
+
+ return result;
}
\f
/* Generate code to store value from rtx VALUE
#ifdef HAVE_insv
if (HAVE_insv
+ && GET_MODE (value) != BLKmode
&& !(bitsize == 1 && GET_CODE (value) == CONST_INT)
/* Ensure insv's size is wide enough for this field. */
&& (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3])
value = word;
else
value = gen_lowpart_common (word_mode,
- force_reg (GET_MODE (value), value));
+ force_reg (GET_MODE (value) != VOIDmode
+ ? GET_MODE (value)
+ : word_mode, value));
}
while (bitsdone < bitsize)
if (BYTES_BIG_ENDIAN)
{
+ int total_bits;
+
+ /* We must do an endian conversion exactly the same way as it is
+ done in extract_bit_field, so that the two calls to
+ extract_fixed_bit_field will have comparable arguments. */
+ if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
+ total_bits = BITS_PER_WORD;
+ else
+ total_bits = GET_MODE_BITSIZE (GET_MODE (value));
+
/* Fetch successively less significant portions. */
if (GET_CODE (value) == CONST_INT)
part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
else
/* The args are chosen so that the last part includes the
lsb. Give extract_bit_field the value it needs (with
- endianness compensation) to fetch the piece we want. */
- part = extract_fixed_bit_field (word_mode, value, 0, thissize,
- GET_MODE_BITSIZE (GET_MODE (value))
- - bitsize + bitsdone,
- NULL_RTX, 1, align);
+ endianness compensation) to fetch the piece we want.
+
+ ??? We have no idea what the alignment of VALUE is, so
+ we have to use a guess. */
+ part
+ = extract_fixed_bit_field
+ (word_mode, value, 0, thissize,
+ total_bits - bitsize + bitsdone, NULL_RTX, 1,
+ GET_MODE (value) == VOIDmode
+ ? UNITS_PER_WORD
+ : (GET_MODE (value) == BLKmode
+ ? 1
+ : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
}
else
{
>> bitsdone)
& (((HOST_WIDE_INT) 1 << thissize) - 1));
else
- part = extract_fixed_bit_field (word_mode, value, 0, thissize,
- bitsdone, NULL_RTX, 1, align);
+ part
+ = extract_fixed_bit_field
+ (word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
+ GET_MODE (value) == VOIDmode
+ ? UNITS_PER_WORD
+ : (GET_MODE (value) == BLKmode
+ ? 1
+ : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
}
/* If OP0 is a register, then handle OFFSET here.
rtx spec_target = target;
rtx spec_target_subreg = 0;
- if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
- abort ();
-
/* Discount the part of the structure before the desired byte.
We need to know how many bytes are safe to reference after it. */
if (total_size >= 0)
tmode = mode;
while (GET_CODE (op0) == SUBREG)
{
+ int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
+ int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));
+
offset += SUBREG_WORD (op0);
+
+ if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
+ {
+ bitpos += inner_size - outer_size;
+ if (bitpos > unit)
+ {
+ offset += (bitpos / unit);
+ bitpos %= unit;
+ }
+ }
+
op0 = SUBREG_REG (op0);
}
if (target == 0 || GET_CODE (target) != REG)
target = gen_reg_rtx (mode);
+ /* Indicate for flow that the entire target reg is being set. */
+ emit_insn (gen_rtx (CLOBBER, VOIDmode, target));
+
for (i = 0; i < nwords; i++)
{
/* If I is 0, use the low-order word in both field and target;
op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
-#if SHIFT_COUNT_TRUNCATED
+#ifdef SHIFT_COUNT_TRUNCATED
if (SHIFT_COUNT_TRUNCATED
&& GET_CODE (op1) == CONST_INT
&& (unsigned HOST_WIDE_INT) INTVAL (op1) >= GET_MODE_BITSIZE (mode))
{
mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
target = expand_binop (mode, mul_highpart_optab,
- op0, op1, target, unsignedp, OPTAB_DIRECT);
+ op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
if (target)
return target;
}
{
mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
target = expand_binop (mode, mul_highpart_optab,
- op0, op1, target, unsignedp, OPTAB_DIRECT);
+ op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
if (target)
/* We used the wrong signedness. Adjust the result. */
return expand_mult_highpart_adjust (mode, target, op0,
moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
&& mul_widen_cost[(int) wider_mode] < max_cost)
- goto try;
+ {
+ op1 = force_reg (mode, op1);
+ goto try;
+ }
/* Try widening the mode and perform a non-widening multiplication. */
moptab = smul_optab;
if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
&& mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
- goto try;
+ {
+ op1 = wide_op1;
+ goto try;
+ }
/* Try widening multiplication of opposite signedness, and adjust. */
moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
&& (mul_widen_cost[(int) wider_mode]
+ 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
{
- tem = expand_binop (wider_mode, moptab, op0, wide_op1,
+ rtx regop1 = force_reg (mode, op1);
+ tem = expand_binop (wider_mode, moptab, op0, regop1,
NULL_RTX, ! unsignedp, OPTAB_WIDEN);
if (tem != 0)
{
try:
/* Pass NULL_RTX as target since TARGET has wrong mode. */
- tem = expand_binop (wider_mode, moptab, op0, wide_op1,
+ tem = expand_binop (wider_mode, moptab, op0, op1,
NULL_RTX, unsignedp, OPTAB_WIDEN);
if (tem == 0)
return 0;
/* Extract the high half of the just generated product. */
- tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
- build_int_2 (size, 0), NULL_RTX, 1);
- return convert_modes (mode, wider_mode, tem, unsignedp);
+ if (mode == word_mode)
+ {
+ return gen_highpart (mode, tem);
+ }
+ else
+ {
+ tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
+ build_int_2 (size, 0), NULL_RTX, 1);
+ return convert_modes (mode, wider_mode, tem, unsignedp);
+ }
}
\f
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
last = get_last_insn ();
- /* Promote floor rouding to trunc rounding for unsigned operations. */
+ /* Promote floor rounding to trunc rounding for unsigned operations. */
if (unsignedp)
{
if (code == FLOOR_DIV_EXPR)
{
case TRUNC_MOD_EXPR:
case TRUNC_DIV_EXPR:
- if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
+ if (op1_is_constant)
{
- if (unsignedp
- || (INTVAL (op1)
- == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (compute_mode) - 1)))
+ if (unsignedp)
{
unsigned HOST_WIDE_INT mh, ml;
int pre_shift, post_shift;
pre_shift = floor_log2 (d);
if (rem_flag)
{
- remainder = expand_binop (compute_mode, and_optab, op0,
- GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
- remainder, 1,
- OPTAB_LIB_WIDEN);
+ remainder =
+ expand_binop (compute_mode, and_optab, op0,
+ GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
+ remainder, 1,
+ OPTAB_LIB_WIDEN);
if (remainder)
return gen_lowpart (mode, remainder);
}
build_int_2 (pre_shift, 0),
tquotient, 1);
}
- else if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
- {
- /* Most significant bit of divisor is set, emit a scc insn.
- emit_store_flag needs to be passed a place for the
- result. */
- quotient = emit_store_flag (tquotient, GEU, op0, op1,
- compute_mode, 1, 1);
- /* Can emit_store_flag have failed? */
- if (quotient == 0)
- goto fail1;
- }
- else
+ else if (size <= HOST_BITS_PER_WIDE_INT)
{
- /* Find a suitable multiplier and right shift count instead
- of multiplying with D. */
-
- mh = choose_multiplier (d, size, size,
- &ml, &post_shift, &dummy);
-
- /* If the suggested multiplier is more than SIZE bits, we
- can do better for even divisors, using an initial right
- shift. */
- if (mh != 0 && (d & 1) == 0)
+ if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
{
- pre_shift = floor_log2 (d & -d);
- mh = choose_multiplier (d >> pre_shift, size,
- size - pre_shift,
- &ml, &post_shift, &dummy);
- if (mh)
- abort ();
- }
- else
- pre_shift = 0;
-
- if (mh != 0)
- {
- rtx t1, t2, t3, t4;
-
- extra_cost = (shift_cost[post_shift - 1]
- + shift_cost[1] + 2 * add_cost);
- t1 = expand_mult_highpart (compute_mode, op0, ml,
- NULL_RTX, 1,
- max_cost - extra_cost);
- if (t1 == 0)
+ /* Most significant bit of divisor is set; emit an scc
+ insn. */
+ quotient = emit_store_flag (tquotient, GEU, op0, op1,
+ compute_mode, 1, 1);
+ if (quotient == 0)
goto fail1;
- t2 = force_operand (gen_rtx (MINUS, compute_mode,
- op0, t1),
- NULL_RTX);
- t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
- build_int_2 (1, 0), NULL_RTX, 1);
- t4 = force_operand (gen_rtx (PLUS, compute_mode,
- t1, t3),
- NULL_RTX);
- quotient = expand_shift (RSHIFT_EXPR, compute_mode, t4,
- build_int_2 (post_shift - 1,
- 0),
- tquotient, 1);
}
else
{
- rtx t1, t2;
+ /* Find a suitable multiplier and right shift count
+ instead of multiplying with D. */
- t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
- build_int_2 (pre_shift, 0),
- NULL_RTX, 1);
- extra_cost = (shift_cost[pre_shift]
- + shift_cost[post_shift]);
- t2 = expand_mult_highpart (compute_mode, t1, ml,
- NULL_RTX, 1,
- max_cost - extra_cost);
- if (t2 == 0)
- goto fail1;
- quotient = expand_shift (RSHIFT_EXPR, compute_mode, t2,
- build_int_2 (post_shift, 0),
- tquotient, 1);
+ mh = choose_multiplier (d, size, size,
+ &ml, &post_shift, &dummy);
+
+ /* If the suggested multiplier is more than SIZE bits,
+ we can do better for even divisors, using an
+ initial right shift. */
+ if (mh != 0 && (d & 1) == 0)
+ {
+ pre_shift = floor_log2 (d & -d);
+ mh = choose_multiplier (d >> pre_shift, size,
+ size - pre_shift,
+ &ml, &post_shift, &dummy);
+ if (mh)
+ abort ();
+ }
+ else
+ pre_shift = 0;
+
+ if (mh != 0)
+ {
+ rtx t1, t2, t3, t4;
+
+ extra_cost = (shift_cost[post_shift - 1]
+ + shift_cost[1] + 2 * add_cost);
+ t1 = expand_mult_highpart (compute_mode, op0, ml,
+ NULL_RTX, 1,
+ max_cost - extra_cost);
+ if (t1 == 0)
+ goto fail1;
+ t2 = force_operand (gen_rtx (MINUS, compute_mode,
+ op0, t1),
+ NULL_RTX);
+ t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
+ build_int_2 (1, 0), NULL_RTX,1);
+ t4 = force_operand (gen_rtx (PLUS, compute_mode,
+ t1, t3),
+ NULL_RTX);
+ quotient =
+ expand_shift (RSHIFT_EXPR, compute_mode, t4,
+ build_int_2 (post_shift - 1, 0),
+ tquotient, 1);
+ }
+ else
+ {
+ rtx t1, t2;
+
+ t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (pre_shift, 0),
+ NULL_RTX, 1);
+ extra_cost = (shift_cost[pre_shift]
+ + shift_cost[post_shift]);
+ t2 = expand_mult_highpart (compute_mode, t1, ml,
+ NULL_RTX, 1,
+ max_cost - extra_cost);
+ if (t2 == 0)
+ goto fail1;
+ quotient =
+ expand_shift (RSHIFT_EXPR, compute_mode, t2,
+ build_int_2 (post_shift, 0),
+ tquotient, 1);
+ }
}
}
+ else /* Mode is too wide for the tricky code. */
+ break;
insn = get_last_insn ();
if (insn != last
else if (d == -1)
quotient = expand_unop (compute_mode, neg_optab, op0,
tquotient, 0);
+ else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
+ {
+ /* This case is not handled correctly below. */
+ quotient = emit_store_flag (tquotient, EQ, op0, op1,
+ compute_mode, 1, 1);
+ if (quotient == 0)
+ goto fail1;
+ }
else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
&& (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
;
quotient, quotient, 0);
}
}
- else
+ else if (size <= HOST_BITS_PER_WIDE_INT)
{
choose_multiplier (abs_d, size, size - 1,
&ml, &post_shift, &lgup);
tquotient);
}
}
+ else /* Mode is too wide for the tricky code. */
+ break;
insn = get_last_insn ();
if (insn != last
or remainder to get floor rounding, once we have the remainder.
Notice that we compute also the final remainder value here,
and return the result right away. */
- if (target == 0)
+ if (target == 0 || GET_MODE (target) != compute_mode)
target = gen_reg_rtx (compute_mode);
if (rem_flag)
quotient or remainder to get ceiling rounding, once we have the
remainder. Notice that we compute also the final remainder
value here, and return the result right away. */
- if (target == 0)
+ if (target == 0 || GET_MODE (target) != compute_mode)
target = gen_reg_rtx (compute_mode);
if (rem_flag)
quotient or remainder to get ceiling rounding, once we have the
remainder. Notice that we compute also the final remainder
value here, and return the result right away. */
- if (target == 0)
+ if (target == 0 || GET_MODE (target) != compute_mode)
target = gen_reg_rtx (compute_mode);
if (rem_flag)
{
if (quotient == 0)
{
+ if (target && GET_MODE (target) != compute_mode)
+ target = 0;
+
if (rem_flag)
{
/* Try to produce the remainder directly without a library call. */
if (rem_flag)
{
+ if (target && GET_MODE (target) != compute_mode)
+ target = 0;
+
if (quotient == 0)
/* No divide instruction either. Use library for remainder. */
remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
to perform the operation. It says to use zero-extension.
NORMALIZEP is 1 if we should convert the result to be either zero
- or one one. Normalize is -1 if we should convert the result to be
+ or one. Normalize is -1 if we should convert the result to be
either zero or -1. If NORMALIZEP is zero, the result will be left
"raw" out of the scc insn. */
enum machine_mode compare_mode;
enum machine_mode target_mode = GET_MODE (target);
rtx tem;
- rtx last = 0;
+ rtx last = get_last_insn ();
rtx pattern, comparison;
/* If one operand is constant, make it the second one. Only do this
subtarget = 0;
if (code == GE)
- op0 = expand_unop (mode, one_cmpl_optab, op0, subtarget, 0);
+ op0 = expand_unop (mode, one_cmpl_optab, op0,
+ ((STORE_FLAG_VALUE == 1 || normalizep)
+ ? 0 : subtarget), 0);
- if (normalizep || STORE_FLAG_VALUE == 1)
+ if (STORE_FLAG_VALUE == 1 || normalizep)
/* If we are supposed to produce a 0/1 value, we want to do
a logical shift from the sign bit to the low-order bit; for
a -1/0 value, we do an arithmetic shift. */
}
}
- if (last)
- delete_insns_since (last);
+ delete_insns_since (last);
- subtarget = target_mode == mode ? target : 0;
+ /* If expensive optimizations are enabled, use a different pseudo register
+ for each insn, instead of reusing the same pseudo. This leads to better
+ CSE, but slows down the compiler, since there are more pseudos. */
+ subtarget = (!flag_expensive_optimizations
+ && (target_mode == mode)) ? target : NULL_RTX;
/* If we reached here, we can't do this with a scc insn. However, there
are some comparisons that can be done directly. For example, if
if (tem && normalizep)
tem = expand_shift (RSHIFT_EXPR, mode, tem,
size_int (GET_MODE_BITSIZE (mode) - 1),
- tem, normalizep == 1);
+ subtarget, normalizep == 1);
- if (tem && GET_MODE (tem) != target_mode)
+ if (tem)
{
- convert_move (target, tem, 0);
- tem = target;
+ if (GET_MODE (tem) != target_mode)
+ {
+ convert_move (target, tem, 0);
+ tem = target;
+ }
+ else if (!subtarget)
+ {
+ emit_move_insn (target, tem);
+ tem = target;
+ }
}
-
- if (tem == 0)
+ else
delete_insns_since (last);
return tem;