static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT,
- unsigned HOST_WIDE_INT, rtx);
+ unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
- unsigned HOST_WIDE_INT, rtx);
+ unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT,
for (speed = 0; speed < 2; speed++)
{
crtl->maybe_hot_insn_p = speed;
- zero_cost[speed] = rtx_cost (const0_rtx, SET, speed);
+ zero_cost[speed] = set_src_cost (const0_rtx, speed);
for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
mode != VOIDmode;
PUT_MODE (&all.shift_sub0, mode);
PUT_MODE (&all.shift_sub1, mode);
- add_cost[speed][mode] = rtx_cost (&all.plus, SET, speed);
- neg_cost[speed][mode] = rtx_cost (&all.neg, SET, speed);
- mul_cost[speed][mode] = rtx_cost (&all.mult, SET, speed);
- sdiv_cost[speed][mode] = rtx_cost (&all.sdiv, SET, speed);
- udiv_cost[speed][mode] = rtx_cost (&all.udiv, SET, speed);
+ add_cost[speed][mode] = set_src_cost (&all.plus, speed);
+ neg_cost[speed][mode] = set_src_cost (&all.neg, speed);
+ mul_cost[speed][mode] = set_src_cost (&all.mult, speed);
+ sdiv_cost[speed][mode] = set_src_cost (&all.sdiv, speed);
+ udiv_cost[speed][mode] = set_src_cost (&all.udiv, speed);
- sdiv_pow2_cheap[speed][mode] = (rtx_cost (&all.sdiv_32, SET, speed)
+ sdiv_pow2_cheap[speed][mode] = (set_src_cost (&all.sdiv_32, speed)
<= 2 * add_cost[speed][mode]);
- smod_pow2_cheap[speed][mode] = (rtx_cost (&all.smod_32, SET, speed)
+ smod_pow2_cheap[speed][mode] = (set_src_cost (&all.smod_32, speed)
<= 4 * add_cost[speed][mode]);
wider_mode = GET_MODE_WIDER_MODE (mode);
XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));
mul_widen_cost[speed][wider_mode]
- = rtx_cost (&all.wide_mult, SET, speed);
+ = set_src_cost (&all.wide_mult, speed);
mul_highpart_cost[speed][mode]
- = rtx_cost (&all.wide_trunc, SET, speed);
+ = set_src_cost (&all.wide_trunc, speed);
}
shift_cost[speed][mode][0] = 0;
XEXP (&all.shift, 1) = cint[m];
XEXP (&all.shift_mult, 1) = pow2[m];
- shift_cost[speed][mode][m] = rtx_cost (&all.shift, SET, speed);
- shiftadd_cost[speed][mode][m] = rtx_cost (&all.shift_add, SET, speed);
- shiftsub0_cost[speed][mode][m] = rtx_cost (&all.shift_sub0, SET, speed);
- shiftsub1_cost[speed][mode][m] = rtx_cost (&all.shift_sub1, SET, speed);
+ shift_cost[speed][mode][m] = set_src_cost (&all.shift, speed);
+ shiftadd_cost[speed][mode][m] = set_src_cost (&all.shift_add,
+ speed);
+ shiftsub0_cost[speed][mode][m] = set_src_cost (&all.shift_sub0,
+ speed);
+ shiftsub1_cost[speed][mode][m] = set_src_cost (&all.shift_sub1,
+ speed);
}
}
}
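
set_src_cost is a thin wrapper introduced alongside the rtx_cost interface
change: rtx_cost now takes the operand number as an extra argument.  A
minimal sketch of the wrapper as this patch assumes it (the real helper is
declared alongside rtx_cost):

   static inline int
   set_src_cost (rtx x, bool speed_p)
   {
     /* Cost X as operand 1 (the source) of a SET.  */
     return rtx_cost (x, SET, 1, speed_p);
   }

The same interface change explains the bare rtx_cost calls further down
gaining a third argument, e.g. rtx_cost (GEN_INT (normalizep), PLUS, 1, ...).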
static bool
store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
- unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
+ unsigned HOST_WIDE_INT bitnum,
+ unsigned HOST_WIDE_INT bitregion_start,
+ unsigned HOST_WIDE_INT bitregion_end,
+ enum machine_mode fieldmode,
rtx value, bool fallback_p)
{
unsigned int unit
/* We may be accessing data outside the field, which means
we can alias adjacent data. */
+ /* ??? Not always true under the C++0x memory model, which forbids
+ touching bits outside the enclosing bit region. */
if (MEM_P (op0))
{
op0 = shallow_copy_rtx (op0);
0)
: (int) i * BITS_PER_WORD);
rtx value_word = operand_subword_force (value, wordnum, fieldmode);
-
- if (!store_bit_field_1 (op0, MIN (BITS_PER_WORD,
- bitsize - i * BITS_PER_WORD),
- bitnum + bit_offset, word_mode,
+ unsigned HOST_WIDE_INT new_bitsize
+ = MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD);
+
+ /* If the remaining chunk is smaller than a full word, we have
+ to make sure that on big-endian machines the high-order
+ bits are used. */
+ if (new_bitsize < BITS_PER_WORD && BYTES_BIG_ENDIAN && !backwards)
+ value_word = simplify_expand_binop (word_mode, lshr_optab,
+ value_word,
+ GEN_INT (BITS_PER_WORD
+ - new_bitsize),
+ NULL_RTX, true,
+ OPTAB_LIB_WIDEN);
+
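Worked example for the shift above, assuming BITS_PER_WORD == 32 and a
40-bit store: the second chunk has new_bitsize == 8.  On a big-endian
target the subword keeps its significant bits at the high-order end,
while store_bit_field_1 consumes the low-order new_bitsize bits of
value_word, hence the logical shift right by 32 - 8 == 24.
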
+ if (!store_bit_field_1 (op0, new_bitsize,
+ bitnum + bit_offset,
+ bitregion_start, bitregion_end,
+ word_mode,
value_word, fallback_p))
{
delete_insns_since (last);
&& GET_MODE (value) != BLKmode
&& bitsize > 0
&& GET_MODE_BITSIZE (op_mode) >= bitsize
+ /* Do not use insv for volatile bitfields when
+ -fstrict-volatile-bitfields is in effect. */
+ && !(MEM_P (op0) && MEM_VOLATILE_P (op0)
+ && flag_strict_volatile_bitfields > 0)
&& ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
&& (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
{
copy_back = true;
}
- /* On big-endian machines, we count bits from the most significant.
- If the bit field insn does not, we must invert. */
-
- if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
- xbitpos = unit - bitsize - xbitpos;
-
/* We have been counting XBITPOS within UNIT.
Count instead within the size of the register. */
- if (BITS_BIG_ENDIAN && !MEM_P (xop0))
+ if (BYTES_BIG_ENDIAN && !MEM_P (xop0))
xbitpos += GET_MODE_BITSIZE (op_mode) - unit;
unit = GET_MODE_BITSIZE (op_mode);
+ /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
+ "backwards" from the size of the unit we are inserting into.
+ Otherwise, we count bits from the most significant on a
+ BYTES/BITS_BIG_ENDIAN machine. */
+
+ if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
+ xbitpos = unit - bitsize - xbitpos;
+
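Concretely: with unit == 32, bitsize == 8 and xbitpos == 5, a mismatch
between BITS_BIG_ENDIAN and BYTES_BIG_ENDIAN remaps xbitpos to
32 - 8 - 5 == 19, i.e. the same field counted from the opposite end of
the unit.
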
/* Convert VALUE to op_mode (which insv insn wants) in VALUE1. */
value1 = value;
if (GET_MODE (value) != op_mode)
if (HAVE_insv && MEM_P (op0))
{
enum machine_mode bestmode;
+ unsigned HOST_WIDE_INT maxbits = MAX_FIXED_MODE_SIZE;
+
+ if (bitregion_end)
+ maxbits = bitregion_end - bitregion_start + 1;
/* Get the mode to use for inserting into this field. If OP0 is
BLKmode, get the smallest mode consistent with the alignment. If
mode. Otherwise, use the smallest mode containing the field. */
if (GET_MODE (op0) == BLKmode
+ || GET_MODE_BITSIZE (GET_MODE (op0)) > maxbits
|| (op_mode != MAX_MACHINE_MODE
&& GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode)))
- bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
+ bestmode = get_best_mode (bitsize, bitnum,
+ bitregion_start, bitregion_end,
+ MEM_ALIGN (op0),
(op_mode == MAX_MACHINE_MODE
? VOIDmode : op_mode),
MEM_VOLATILE_P (op0));
the unit. */
tempreg = copy_to_reg (xop0);
if (store_bit_field_1 (tempreg, bitsize, xbitpos,
+ bitregion_start, bitregion_end,
fieldmode, orig_value, false))
{
emit_move_insn (xop0, tempreg);
if (!fallback_p)
return false;
- store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
+ store_fixed_bit_field (op0, offset, bitsize, bitpos,
+ bitregion_start, bitregion_end, value);
return true;
}
/* Generate code to store value from rtx VALUE
into a bit-field within structure STR_RTX
containing BITSIZE bits starting at bit BITNUM.
+
+ BITREGION_START is the bitpos of the first bitfield in this region.
+ BITREGION_END is the bitpos of the last bitfield in this region.
+ Both are 0 if the C++0x memory model does not apply, or if we are
+ not interested in keeping track of bitfield regions.
+
FIELDMODE is the machine-mode of the FIELD_DECL node for this field. */
void
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
- unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
+ unsigned HOST_WIDE_INT bitnum,
+ unsigned HOST_WIDE_INT bitregion_start,
+ unsigned HOST_WIDE_INT bitregion_end,
+ enum machine_mode fieldmode,
rtx value)
{
- if (!store_bit_field_1 (str_rtx, bitsize, bitnum, fieldmode, value, true))
+ /* Under the C++0x memory model, we must not touch bits outside the
+ bit region. Adjust the address to start at the beginning of the
+ bit region. */
+ if (MEM_P (str_rtx)
+ && bitregion_start > 0)
+ {
+ enum machine_mode bestmode;
+ enum machine_mode op_mode;
+ unsigned HOST_WIDE_INT offset;
+
+ op_mode = mode_for_extraction (EP_insv, 3);
+ if (op_mode == MAX_MACHINE_MODE)
+ op_mode = VOIDmode;
+
+ offset = bitregion_start / BITS_PER_UNIT;
+ bitnum -= bitregion_start;
+ bitregion_end -= bitregion_start;
+ bitregion_start = 0;
+ bestmode = get_best_mode (bitsize, bitnum,
+ bitregion_start, bitregion_end,
+ MEM_ALIGN (str_rtx),
+ op_mode,
+ MEM_VOLATILE_P (str_rtx));
+ str_rtx = adjust_address (str_rtx, bestmode, offset);
+ }
+
+ if (!store_bit_field_1 (str_rtx, bitsize, bitnum,
+ bitregion_start, bitregion_end,
+ fieldmode, value, true))
gcc_unreachable ();
}
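
A hypothetical caller sketch for the widened interface (all values
illustrative, not taken from this patch): storing a 3-bit field at bit
offset 5 of an object whose enclosing bit region, as computed by the
front end, spans bits 0..31:

   store_bit_field (mem, /* bitsize */ 3, /* bitnum */ 5,
                    /* bitregion_start */ 0, /* bitregion_end */ 31,
                    QImode, value);

Passing 0 for both region arguments keeps the old behaviour; with a
region, maxbits becomes bitregion_end - bitregion_start + 1 == 32, so
the chosen access mode can never touch storage outside the region.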
\f
static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
unsigned HOST_WIDE_INT bitsize,
- unsigned HOST_WIDE_INT bitpos, rtx value)
+ unsigned HOST_WIDE_INT bitpos,
+ unsigned HOST_WIDE_INT bitregion_start,
+ unsigned HOST_WIDE_INT bitregion_end,
+ rtx value)
{
enum machine_mode mode;
unsigned int total_bits = BITS_PER_WORD;
/* Special treatment for a bit field split across two registers. */
if (bitsize + bitpos > BITS_PER_WORD)
{
- store_split_bit_field (op0, bitsize, bitpos, value);
+ store_split_bit_field (op0, bitsize, bitpos,
+ bitregion_start, bitregion_end,
+ value);
return;
}
}
else
{
+ unsigned HOST_WIDE_INT maxbits = MAX_FIXED_MODE_SIZE;
+
+ if (bitregion_end)
+ maxbits = bitregion_end - bitregion_start + 1;
+
/* Get the proper mode to use for this field. We want a mode that
includes the entire field. If such a mode would be larger than
a word, we won't be doing the extraction the normal way.
if (MEM_VOLATILE_P (op0)
&& GET_MODE_BITSIZE (GET_MODE (op0)) > 0
+ && GET_MODE_BITSIZE (GET_MODE (op0)) <= maxbits
&& flag_strict_volatile_bitfields > 0)
mode = GET_MODE (op0);
else
mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
+ bitregion_start, bitregion_end,
MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
if (mode == VOIDmode)
/* The only way this should occur is if the field spans word
boundaries. */
store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
- value);
+ bitregion_start, bitregion_end, value);
return;
}
static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
- unsigned HOST_WIDE_INT bitpos, rtx value)
+ unsigned HOST_WIDE_INT bitpos,
+ unsigned HOST_WIDE_INT bitregion_start,
+ unsigned HOST_WIDE_INT bitregion_end,
+ rtx value)
{
unsigned int unit;
unsigned int bitsdone = 0;
it is just an out-of-bounds access. Ignore it. */
if (word != const0_rtx)
store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
- thispos, part);
+ thispos, bitregion_start, bitregion_end, part);
bitsdone += thissize;
}
}
if (ext_mode != MAX_MACHINE_MODE
&& bitsize > 0
&& GET_MODE_BITSIZE (ext_mode) >= bitsize
+ /* Do not use extv/extzv for volatile bitfields when
+ -fstrict-volatile-bitfields is in effect. */
+ && !(MEM_P (op0) && MEM_VOLATILE_P (op0)
+ && flag_strict_volatile_bitfields > 0)
/* If op0 is a register, we need it in EXT_MODE to make it
acceptable to the format of ext(z)v. */
&& !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
/* Get ref to first byte containing part of the field. */
xop0 = adjust_address (xop0, byte_mode, xoffset);
- /* On big-endian machines, we count bits from the most significant.
- If the bit field insn does not, we must invert. */
- if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
- xbitpos = unit - bitsize - xbitpos;
-
/* Now convert from counting within UNIT to counting in EXT_MODE. */
- if (BITS_BIG_ENDIAN && !MEM_P (xop0))
+ if (BYTES_BIG_ENDIAN && !MEM_P (xop0))
xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;
unit = GET_MODE_BITSIZE (ext_mode);
+ /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
+ "backwards" from the size of the unit we are extracting from.
+ Otherwise, we count bits from the most significant on a
+ BYTES/BITS_BIG_ENDIAN machine. */
+
+ if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
+ xbitpos = unit - bitsize - xbitpos;
+
if (xtarget == 0)
xtarget = xspec_target = gen_reg_rtx (tmode);
&& TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (xtarget), ext_mode))
{
xtarget = gen_lowpart (ext_mode, xtarget);
- if (GET_MODE_SIZE (ext_mode)
- > GET_MODE_SIZE (GET_MODE (xspec_target)))
+ if (GET_MODE_PRECISION (ext_mode)
+ > GET_MODE_PRECISION (GET_MODE (xspec_target)))
xspec_target_subreg = xtarget;
}
else
if (GET_MODE (op0) == BLKmode
|| (ext_mode != MAX_MACHINE_MODE
&& GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode)))
- bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
+ bestmode = get_best_mode (bitsize, bitnum, 0, 0, MEM_ALIGN (op0),
(ext_mode == MAX_MACHINE_MODE
? VOIDmode : ext_mode),
MEM_VOLATILE_P (op0));
mode = tmode;
}
else
- mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
+ mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT, 0, 0,
MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
if (mode == VOIDmode)
if (code == LSHIFT_EXPR
&& CONST_INT_P (op1)
&& INTVAL (op1) > 0
- && INTVAL (op1) < GET_MODE_BITSIZE (mode)
+ && INTVAL (op1) < GET_MODE_PRECISION (mode)
&& INTVAL (op1) < MAX_BITS_PER_WORD
&& shift_cost[speed][mode][INTVAL (op1)] > INTVAL (op1) * add_cost[speed][mode]
&& shift_cost[speed][mode][INTVAL (op1)] != MAX_COST)
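
The guard above weighs one constant shift against a chain of additions:
a left shift by N is N doublings, so when
shift_cost[speed][mode][N] > N * add_cost[speed][mode] it is cheaper to
expand, say, x << 2 as

   t = x + x;   /* x * 2 */
   t = t + t;   /* x * 4 == x << 2 */

GET_MODE_PRECISION rather than GET_MODE_BITSIZE is the right upper bound
now that partial-integer modes have a precision smaller than their bit
size.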
else
other_amount
= simplify_gen_binary (MINUS, GET_MODE (op1),
- GEN_INT (GET_MODE_BITSIZE (mode)),
+ GEN_INT (GET_MODE_PRECISION (mode)),
op1);
shifted = force_reg (mode, shifted);
&& !optimize)
? target : 0;
rtx accum_target = optimize ? 0 : accum;
+ rtx accum_inner;
switch (alg->op[opno])
{
that. */
tem = op0, nmode = mode;
+ accum_inner = accum;
if (GET_CODE (accum) == SUBREG)
{
- nmode = GET_MODE (SUBREG_REG (accum));
+ accum_inner = SUBREG_REG (accum);
+ nmode = GET_MODE (accum_inner);
tem = gen_lowpart (nmode, op0);
}
insn = get_last_insn ();
- set_unique_reg_note (insn, REG_EQUAL,
- gen_rtx_MULT (nmode, tem,
- GEN_INT (val_so_far)));
+ set_dst_reg_note (insn, REG_EQUAL,
+ gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)),
+ accum_inner);
}
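
set_dst_reg_note folds the old open-coded single_set check into the
helper itself; a sketch of the semantics this patch relies on (the
helper itself is defined elsewhere in this series):

   rtx
   set_dst_reg_note (rtx insn, enum reg_note kind, rtx datum, rtx dst)
   {
     rtx set = single_set (insn);

     /* Attach the note only if INSN is a single set of DST.  */
     if (set && SET_DEST (set) == dst)
       return set_unique_reg_note (insn, kind, datum);
     return NULL_RTX;
   }

This is why the divmod hunks below can drop their explicit
single_set / SET_DEST tests.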
if (variant == negate_variant)
result is interpreted as an unsigned coefficient.
Exclude the cost of op0 from max_cost to match the cost
calculation of synth_mult. */
- max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed)
- - neg_cost[speed][mode];
+ max_cost = (set_src_cost (gen_rtx_MULT (mode, fake_reg, op1),
+ speed)
+ - neg_cost[speed][mode]);
if (max_cost > 0
&& choose_mult_variant (mode, -INTVAL (op1), &algorithm,
&variant, max_cost))
/* Exclude the cost of op0 from max_cost to match the cost
calculation of synth_mult. */
- max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed);
+ max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), speed);
if (choose_mult_variant (mode, coeff, &algorithm, &variant,
max_cost))
return expand_mult_const (mode, op0, coeff, target,
this_optab == umul_widen_optab))
&& CONST_INT_P (cop1)
&& (INTVAL (cop1) >= 0
- || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT))
+ || HWI_COMPUTABLE_MODE_P (mode)))
{
HOST_WIDE_INT coeff = INTVAL (cop1);
int max_cost;
/* Try widening multiplication. */
moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
- if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
+ if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
&& mul_widen_cost[speed][wider_mode] < max_cost)
{
tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
/* Try widening multiplication of opposite signedness, and adjust. */
moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
- if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
+ if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
&& size - 1 < BITS_PER_WORD
&& (mul_widen_cost[speed][wider_mode] + 2 * shift_cost[speed][mode][size-1]
+ 4 * add_cost[speed][mode] < max_cost))
gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
/* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
- gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
+ gcc_assert (HWI_COMPUTABLE_MODE_P (mode));
cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
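
HWI_COMPUTABLE_MODE_P abbreviates the recurring "representable in a
HOST_WIDE_INT" test; in sketch form its definition is

   #define HWI_COMPUTABLE_MODE_P(MODE) \
     (SCALAR_INT_MODE_P (MODE) \
      && GET_MODE_PRECISION (MODE) <= HOST_BITS_PER_WIDE_INT)

Note that it tests the mode's precision, not its bit size, which is what
makes it the right replacement for the old GET_MODE_BITSIZE comparisons
on partial-integer modes.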
temp = gen_rtx_LSHIFTRT (mode, result, shift);
if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
- || rtx_cost (temp, SET, optimize_insn_for_speed_p ()) > COSTS_N_INSNS (2))
+ || (set_src_cost (temp, optimize_insn_for_speed_p ())
+ > COSTS_N_INSNS (2)))
{
temp = expand_binop (mode, xor_optab, op0, signmask,
NULL_RTX, 1, OPTAB_LIB_WIDEN);
rtx quotient = 0, remainder = 0;
rtx last;
int size;
- rtx insn, set;
+ rtx insn;
optab optab1, optab2;
int op1_is_constant, op1_is_pow2 = 0;
int max_cost, extra_cost;
break;
insn = get_last_insn ();
- if (insn != last
- && (set = single_set (insn)) != 0
- && SET_DEST (set) == quotient)
- set_unique_reg_note (insn,
- REG_EQUAL,
- gen_rtx_UDIV (compute_mode, op0, op1));
+ if (insn != last)
+ set_dst_reg_note (insn, REG_EQUAL,
+ gen_rtx_UDIV (compute_mode, op0, op1),
+ quotient);
}
else /* TRUNC_DIV, signed */
{
{
insn = get_last_insn ();
if (insn != last
- && (set = single_set (insn)) != 0
- && SET_DEST (set) == quotient
&& abs_d < ((unsigned HOST_WIDE_INT) 1
<< (HOST_BITS_PER_WIDE_INT - 1)))
- set_unique_reg_note (insn,
- REG_EQUAL,
- gen_rtx_DIV (compute_mode,
- op0,
- GEN_INT
- (trunc_int_for_mode
- (abs_d,
- compute_mode))));
+ set_dst_reg_note (insn, REG_EQUAL,
+ gen_rtx_DIV (compute_mode, op0,
+ gen_int_mode
+ (abs_d,
+ compute_mode)),
+ quotient);
quotient = expand_unop (compute_mode, neg_optab,
quotient, quotient, 0);
break;
insn = get_last_insn ();
- if (insn != last
- && (set = single_set (insn)) != 0
- && SET_DEST (set) == quotient)
- set_unique_reg_note (insn,
- REG_EQUAL,
- gen_rtx_DIV (compute_mode, op0, op1));
+ if (insn != last)
+ set_dst_reg_note (insn, REG_EQUAL,
+ gen_rtx_DIV (compute_mode, op0, op1),
+ quotient);
}
break;
}
NULL_RTX, 1);
insn = get_last_insn ();
- set_unique_reg_note (insn,
- REG_EQUAL,
- gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
- compute_mode,
- op0, op1));
+ set_dst_reg_note (insn, REG_EQUAL,
+ gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
+ compute_mode, op0, op1),
+ quotient);
}
break;
/* For the reverse comparison, use either an addition or a XOR. */
if (want_add
- && rtx_cost (GEN_INT (normalizep), PLUS,
+ && rtx_cost (GEN_INT (normalizep), PLUS, 1,
optimize_insn_for_speed_p ()) == 0)
{
tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
target, 0, OPTAB_WIDEN);
}
else if (!want_add
- && rtx_cost (trueval, XOR,
+ && rtx_cost (trueval, XOR, 1,
optimize_insn_for_speed_p ()) == 0)
{
tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
/* Again, for the reverse comparison, use either an addition or a XOR. */
if (want_add
- && rtx_cost (GEN_INT (normalizep), PLUS,
+ && rtx_cost (GEN_INT (normalizep), PLUS, 1,
optimize_insn_for_speed_p ()) == 0)
{
tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
GEN_INT (normalizep), target, 0, OPTAB_WIDEN);
}
else if (!want_add
- && rtx_cost (trueval, XOR,
+ && rtx_cost (trueval, XOR, 1,
optimize_insn_for_speed_p ()) == 0)
{
tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,