static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
+/* Test whether a value is zero or a power of two. */
+#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
+
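Aside (not part of the patch): the macro works because subtracting 1 from a
nonzero power of two clears its single set bit and sets every lower bit, so
ANDing the two yields zero; zero itself also passes, since 0 & -1 == 0.
A few hypothetical checks:

    assert (EXACT_POWER_OF_2_OR_ZERO_P (8));     /* 8 & 7   == 0 */
    assert (!EXACT_POWER_OF_2_OR_ZERO_P (12));   /* 12 & 11 == 8 */
    assert (EXACT_POWER_OF_2_OR_ZERO_P (0));     /* 0 & -1  == 0 */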
/* Nonzero means divides or modulus operations are relatively cheap for
powers of two, so don't use branches; emit the operation instead.
Usually, this will mean that the MD file will emit non-branch
unsigned HOST_WIDE_INT bitpos = bitnum % unit;
rtx op0 = str_rtx;
int byte_offset;
+ rtx orig_value;
enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
{
if (GET_MODE (op0) != fieldmode)
{
- if (GET_CODE (op0) == SUBREG)
- {
- /* Else we've got some float mode source being extracted
- into a different float mode destination -- this
- combination of subregs results in Severe Tire
- Damage. */
- gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
- || GET_MODE_CLASS (fieldmode) == MODE_INT
- || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
- op0 = SUBREG_REG (op0);
- }
- if (REG_P (op0))
- op0 = gen_rtx_SUBREG (fieldmode, op0, byte_offset);
- else
+ if (MEM_P (op0))
op0 = adjust_address (op0, fieldmode, offset);
+ else
+ op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
+ byte_offset);
}
emit_move_insn (op0, value);
return value;
offset = 0;
}
- /* If VALUE is a floating-point mode, access it as an integer of the
- corresponding size. This can occur on a machine with 64 bit registers
- that uses SFmode for float. This can also occur for unaligned float
- structure fields. */
- if (GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
+ /* If VALUE has a floating-point or complex mode, access it as an
+ integer of the corresponding size. This can occur on a machine
+ with 64 bit registers that uses SFmode for float. It can also
+ occur for unaligned float or complex fields. */
+ orig_value = value;
+ if (GET_MODE (value) != VOIDmode
+ && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
&& GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
- value = gen_lowpart ((GET_MODE (value) == VOIDmode
- ? word_mode : int_mode_for_mode (GET_MODE (value))),
- value);
+ {
+ value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
+ emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
+ }
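In C terms, the new sequence is analogous to bit-copying a float into a
same-width integer so its bits can be shifted and masked; a rough user-level
sketch of that idea (illustrative only, the helper name is made up):

    #include <stdint.h>
    #include <string.h>

    /* Reinterpret the bits of F as a 32-bit integer, much as the integer
       pseudo above lets the bitfield code treat an SFmode value as SImode
       bits.  No numeric conversion takes place.  */
    static uint32_t
    float_bits (float f)
    {
      uint32_t u;
      memcpy (&u, &f, sizeof u);
      return u;
    }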
/* Now OFFSET is nonzero only if OP0 is memory
and is therefore always measured in bytes. */
/* Fetch that unit, store the bitfield in it, then store
the unit. */
tempreg = copy_to_reg (op0);
- store_bit_field (tempreg, bitsize, bitpos, fieldmode, value);
+ store_bit_field (tempreg, bitsize, bitpos, fieldmode, orig_value);
emit_move_insn (op0, tempreg);
return value;
}
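The "fetch the unit, store the bitfield in it, then store the unit" path above
is the usual read-modify-write expansion; a minimal C sketch of the same idea
(hypothetical helper, not GCC code; assumes BITPOS + BITSIZE <= 32):

    #include <stdint.h>

    /* Write VALUE into BITSIZE bits at BITPOS of the word *P by
       read-modify-write, leaving the other bits untouched.  */
    static void
    store_field_rmw (uint32_t *p, unsigned bitpos, unsigned bitsize,
                     uint32_t value)
    {
      uint32_t mask = (bitsize < 32 ? ((uint32_t) 1 << bitsize) : 0) - 1;
      uint32_t word = *p;                 /* fetch the unit          */
      word &= ~(mask << bitpos);          /* clear the field         */
      word |= (value & mask) << bitpos;   /* insert the new bits     */
      *p = word;                          /* store the unit back     */
    }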
enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
if (imode != GET_MODE (op0))
{
- if (MEM_P (op0))
- op0 = adjust_address (op0, imode, 0);
- else
- {
- gcc_assert (imode != BLKmode);
- op0 = gen_lowpart (imode, op0);
- }
+ op0 = gen_lowpart (imode, op0);
+
+ /* If we got a SUBREG, force it into a register since we aren't going
+ to be able to do another SUBREG on it. */
+ if (GET_CODE (op0) == SUBREG)
+ op0 = force_reg (imode, op0);
}
}
{
if (mode1 != GET_MODE (op0))
{
- if (GET_CODE (op0) == SUBREG)
+ if (MEM_P (op0))
+ op0 = adjust_address (op0, mode1, offset);
+ else
{
- if (GET_MODE (SUBREG_REG (op0)) == mode1
- || GET_MODE_CLASS (mode1) == MODE_INT
- || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
- op0 = SUBREG_REG (op0);
- else
- /* Else we've got some float mode source being extracted into
- a different float mode destination -- this combination of
- subregs results in Severe Tire Damage. */
+ rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
+ byte_offset);
+ if (sub == NULL)
goto no_subreg_mode_swap;
+ op0 = sub;
}
- if (REG_P (op0))
- op0 = gen_rtx_SUBREG (mode1, op0, byte_offset);
- else
- op0 = adjust_address (op0, mode1, offset);
}
if (mode1 != mode)
return convert_to_mode (tmode, op0, unsignedp);
return spec_target;
if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
{
- /* If the target mode is floating-point, first convert to the
+ /* If the target mode is not a scalar integral, first convert to the
integer mode of that size and then access it as a floating-point
value via a SUBREG. */
- if (GET_MODE_CLASS (tmode) != MODE_INT
- && GET_MODE_CLASS (tmode) != MODE_PARTIAL_INT)
+ if (!SCALAR_INT_MODE_P (tmode))
{
- target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
- MODE_INT, 0),
- target, unsignedp);
+ enum machine_mode smode
+ = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
+ target = convert_to_mode (smode, target, unsignedp);
+ target = force_reg (smode, target);
return gen_lowpart (tmode, target);
}
- else
- return convert_to_mode (tmode, target, unsignedp);
+
+ return convert_to_mode (tmode, target, unsignedp);
}
return target;
}
return immed_double_const (low, high, mode);
}
\f
+/* Extract a bit field from a memory by forcing the alignment of the
+ memory. This is efficient only if the field spans at least 4 boundaries.
+
+ OP0 is the MEM.
+ BITSIZE is the field width; BITPOS is the position of the first bit.
+ UNSIGNEDP is true if the result should be zero-extended. */
+
+static rtx
+extract_force_align_mem_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
+ unsigned HOST_WIDE_INT bitpos,
+ int unsignedp)
+{
+ enum machine_mode mode, dmode;
+ unsigned int m_bitsize, m_size;
+ unsigned int sign_shift_up, sign_shift_dn;
+ rtx base, a1, a2, v1, v2, comb, shift, result, start;
+
+ /* Choose a mode that will fit BITSIZE. */
+ mode = smallest_mode_for_size (bitsize, MODE_INT);
+ m_size = GET_MODE_SIZE (mode);
+ m_bitsize = GET_MODE_BITSIZE (mode);
+
+ /* Choose a mode twice as wide. Fail if no such mode exists. */
+ dmode = mode_for_size (m_bitsize * 2, MODE_INT, false);
+ if (dmode == BLKmode)
+ return NULL;
+
+ do_pending_stack_adjust ();
+ start = get_last_insn ();
+
+ /* At the end, we'll need an additional shift to deal with sign/zero
+ extension. By default this will be a left+right shift of the
+ appropriate size. But we may be able to eliminate one of them. */
+ sign_shift_up = sign_shift_dn = m_bitsize - bitsize;
+
+ if (STRICT_ALIGNMENT)
+ {
+ base = plus_constant (XEXP (op0, 0), bitpos / BITS_PER_UNIT);
+ bitpos %= BITS_PER_UNIT;
+
+ /* We load two values to be concatenated. There's an edge condition
+ that bears notice -- for an aligned value at the end of a page we can
+ only load one value lest we segfault. So the two values we load
+ are at "base & -size" and "(base + size - 1) & -size". If base
+ is unaligned, the addresses will be aligned and sequential; if
+ base is aligned, the addresses will both be equal to base. */
+
+ a1 = expand_simple_binop (Pmode, AND, force_operand (base, NULL),
+ GEN_INT (-(HOST_WIDE_INT)m_size),
+ NULL, true, OPTAB_LIB_WIDEN);
+ mark_reg_pointer (a1, m_bitsize);
+ v1 = gen_rtx_MEM (mode, a1);
+ set_mem_align (v1, m_bitsize);
+ v1 = force_reg (mode, validize_mem (v1));
+
+ a2 = plus_constant (base, GET_MODE_SIZE (mode) - 1);
+ a2 = expand_simple_binop (Pmode, AND, force_operand (a2, NULL),
+ GEN_INT (-(HOST_WIDE_INT)m_size),
+ NULL, true, OPTAB_LIB_WIDEN);
+ v2 = gen_rtx_MEM (mode, a2);
+ set_mem_align (v2, m_bitsize);
+ v2 = force_reg (mode, validize_mem (v2));
+
+ /* Combine these two values into a double-word value. */
+ if (m_bitsize == BITS_PER_WORD)
+ {
+ comb = gen_reg_rtx (dmode);
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, comb));
+ emit_move_insn (gen_rtx_SUBREG (mode, comb, 0), v1);
+ emit_move_insn (gen_rtx_SUBREG (mode, comb, m_size), v2);
+ }
+ else
+ {
+ if (BYTES_BIG_ENDIAN)
+ comb = v1, v1 = v2, v2 = comb;
+ v1 = convert_modes (dmode, mode, v1, true);
+ if (v1 == NULL)
+ goto fail;
+ v2 = convert_modes (dmode, mode, v2, true);
+ v2 = expand_simple_binop (dmode, ASHIFT, v2, GEN_INT (m_bitsize),
+ NULL, true, OPTAB_LIB_WIDEN);
+ if (v2 == NULL)
+ goto fail;
+ comb = expand_simple_binop (dmode, IOR, v1, v2, NULL,
+ true, OPTAB_LIB_WIDEN);
+ if (comb == NULL)
+ goto fail;
+ }
+
+ shift = expand_simple_binop (Pmode, AND, base, GEN_INT (m_size - 1),
+ NULL, true, OPTAB_LIB_WIDEN);
+ shift = expand_mult (Pmode, shift, GEN_INT (BITS_PER_UNIT), NULL, 1);
+
+ if (bitpos != 0)
+ {
+ if (sign_shift_up <= bitpos)
+ bitpos -= sign_shift_up, sign_shift_up = 0;
+ shift = expand_simple_binop (Pmode, PLUS, shift, GEN_INT (bitpos),
+ NULL, true, OPTAB_LIB_WIDEN);
+ }
+ }
+ else
+ {
+ unsigned HOST_WIDE_INT offset = bitpos / BITS_PER_UNIT;
+ bitpos %= BITS_PER_UNIT;
+
+ /* When strict alignment is not required, we can just load directly
+ from memory without masking. If the remaining BITPOS offset is
+ small enough, we may be able to do all operations in MODE as
+ opposed to DMODE. */
+ if (bitpos + bitsize <= m_bitsize)
+ dmode = mode;
+ comb = adjust_address (op0, dmode, offset);
+
+ if (sign_shift_up <= bitpos)
+ bitpos -= sign_shift_up, sign_shift_up = 0;
+ shift = GEN_INT (bitpos);
+ }
+
+ /* Shift down the double-word such that the requested value is at bit 0. */
+ if (shift != const0_rtx)
+ comb = expand_simple_binop (dmode, unsignedp ? LSHIFTRT : ASHIFTRT,
+ comb, shift, NULL, unsignedp, OPTAB_LIB_WIDEN);
+ if (comb == NULL)
+ goto fail;
+
+ /* If the field exactly matches MODE, then all we need to do is return the
+ lowpart. Otherwise, shift to get the sign bits set properly. */
+ result = force_reg (mode, gen_lowpart (mode, comb));
+
+ if (sign_shift_up)
+ result = expand_simple_binop (mode, ASHIFT, result,
+ GEN_INT (sign_shift_up),
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+ if (sign_shift_dn)
+ result = expand_simple_binop (mode, unsignedp ? LSHIFTRT : ASHIFTRT,
+ result, GEN_INT (sign_shift_dn),
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+
+ return result;
+
+ fail:
+ delete_insns_since (start);
+ return NULL;
+}
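As a standalone illustration of the strict-alignment path above (not the
routine itself; the fixed 32-bit word size, little-endian layout,
arithmetic right shifts of signed values, and helper name are assumptions):

    #include <stdint.h>

    /* Extract BITSIZE bits located BITPOS bits past BASE without ever
       issuing an unaligned load: read the two aligned words that cover
       the field, concatenate them, shift the field down to bit 0, then
       use a left+right shift pair for the final zero/sign extension.
       When the byte address is already aligned the two loads coincide
       (the page-boundary edge case noted above), so in that case the
       field is assumed to fit within that single word.  */
    static uint32_t
    extract_field (const unsigned char *base, unsigned bitpos,
                   unsigned bitsize, int unsignedp)
    {
      uintptr_t addr = (uintptr_t) base + bitpos / 8;
      bitpos %= 8;

      uint32_t v1 = *(const uint32_t *) (addr & ~(uintptr_t) 3);
      uint32_t v2 = *(const uint32_t *) ((addr + 3) & ~(uintptr_t) 3);
      uint64_t comb = (uint64_t) v1 | ((uint64_t) v2 << 32);

      /* Shift the requested value down to bit 0 of the double-word.  */
      unsigned shift = (addr & 3) * 8 + bitpos;
      uint32_t low = (uint32_t) (comb >> shift);

      /* Left+right shift pair zero- or sign-extends the field.  */
      unsigned fixup = 32 - bitsize;
      if (unsignedp)
        return (low << fixup) >> fixup;
      return (uint32_t) (((int32_t) (low << fixup)) >> fixup);
    }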
+
/* Extract a bit field that is split across two words
and return an RTX for the result.
if (REG_P (op0) || GET_CODE (op0) == SUBREG)
unit = BITS_PER_WORD;
else
- unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
+ {
+ unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
+ if (0 && bitsize / unit > 2)
+ {
+ rtx tmp = extract_force_align_mem_bit_field (op0, bitsize, bitpos,
+ unsignedp);
+ if (tmp)
+ return tmp;
+ }
+ }
while (bitsdone < bitsize)
{
return temp;
}
\f
-enum alg_code { alg_zero, alg_m, alg_shift,
+enum alg_code { alg_unknown, alg_zero, alg_m, alg_shift,
alg_add_t_m2, alg_sub_t_m2,
alg_add_factor, alg_sub_factor,
alg_add_t2_m, alg_sub_t2_m };
char log[MAX_BITS_PER_WORD];
};
+/* The entry for our multiplication cache/hash table. */
+struct alg_hash_entry {
+ /* The number we are multiplying by. */
+ unsigned int t;
+
+ /* The mode in which we are multiplying something by T. */
+ enum machine_mode mode;
+
+ /* The best multiplication algorithm for t. */
+ enum alg_code alg;
+};
+
+/* The number of cache/hash entries. */
+#define NUM_ALG_HASH_ENTRIES 307
+
+/* Each entry of ALG_HASH caches alg_code for some integer. This is
+ actually a hash table. If we have a collision, the older
+ entry is kicked out. */
+static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];
+
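A minimal, self-contained sketch of the caching scheme this adds (illustrative
names, not the GCC declarations): a direct-mapped table with one slot per
bucket, indexed by hashing T with the mode, where a collision simply evicts
whatever was there before.

    #define NUM_ENTRIES 307

    struct entry { unsigned int t; int mode; int alg; };  /* alg 0 == unknown */
    static struct entry cache[NUM_ENTRIES];

    static int
    lookup (unsigned int t, int mode)
    {
      unsigned int i = (t ^ (unsigned int) mode) % NUM_ENTRIES;
      if (cache[i].t == t && cache[i].mode == mode && cache[i].alg != 0)
        return cache[i].alg;            /* hit: reuse the cached choice  */
      return 0;                         /* miss: caller runs the search  */
    }

    static void
    record (unsigned int t, int mode, int alg)
    {
      unsigned int i = (t ^ (unsigned int) mode) % NUM_ENTRIES;
      cache[i].t = t;                   /* any previous occupant is lost */
      cache[i].mode = mode;
      cache[i].alg = alg;
    }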
/* Indicates the type of fixup needed after a constant multiplication.
BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
the result should be negated, and ADD_VARIANT means that the
int op_cost, op_latency;
unsigned HOST_WIDE_INT q;
int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
+ int hash_index;
+ bool cache_hit = false;
+ enum alg_code cache_alg = alg_zero;
/* Indicate that no algorithm is yet found. If no algorithm
is found, this value will be returned and indicate failure. */
alg_out->cost.cost = cost_limit->cost + 1;
+ alg_out->cost.latency = cost_limit->latency + 1;
if (cost_limit->cost < 0
|| (cost_limit->cost == 0 && cost_limit->latency <= 0))
best_alg = alloca (sizeof (struct algorithm));
best_cost = *cost_limit;
+ /* Compute the hash index. */
+ hash_index = (t ^ (unsigned int) mode) % NUM_ALG_HASH_ENTRIES;
+
+ /* See if we already know what to do for T. */
+ if (alg_hash[hash_index].t == t
+ && alg_hash[hash_index].mode == mode
+ && alg_hash[hash_index].alg != alg_unknown)
+ {
+ cache_hit = true;
+ cache_alg = alg_hash[hash_index].alg;
+ switch (cache_alg)
+ {
+ case alg_shift:
+ goto do_alg_shift;
+
+ case alg_add_t_m2:
+ case alg_sub_t_m2:
+ goto do_alg_addsub_t_m2;
+
+ case alg_add_factor:
+ case alg_sub_factor:
+ goto do_alg_addsub_factor;
+
+ case alg_add_t2_m:
+ goto do_alg_add_t2_m;
+
+ case alg_sub_t2_m:
+ goto do_alg_sub_t2_m;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
/* If we have a group of zero bits at the low-order part of T, try
multiplying by the remaining bits and then doing a shift. */
if ((t & 1) == 0)
{
+ do_alg_shift:
m = floor_log2 (t & -t); /* m = number of low zero bits */
if (m < maxm)
{
best_alg->op[best_alg->ops] = alg_shift;
}
}
+ if (cache_hit)
+ goto done;
}
/* If we have an odd number, add or subtract one. */
{
unsigned HOST_WIDE_INT w;
+ do_alg_addsub_t_m2:
for (w = 1; (w & t) != 0; w <<= 1)
;
/* If T was -1, then W will be zero after the loop. This is another
best_alg->op[best_alg->ops] = alg_add_t_m2;
}
}
+ if (cache_hit)
+ goto done;
}
/* Look for factors of t of the form
good sequence quickly, and therefore be able to prune (by decreasing
COST_LIMIT) the search. */
+ do_alg_addsub_factor:
for (m = floor_log2 (t - 1); m >= 2; m--)
{
unsigned HOST_WIDE_INT d;
d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
- if (t % d == 0 && t > d && m < maxm)
+ if (t % d == 0 && t > d && m < maxm
+ && (!cache_hit || cache_alg == alg_add_factor))
{
/* If the target has a cheap shift-and-add instruction use
that in preference to a shift insn followed by an add insn.
Assume that the shift-and-add is "atomic" with a latency
- equal to it's cost, otherwise assume that on superscalar
+ equal to its cost, otherwise assume that on superscalar
hardware the shift may be executed concurrently with the
earlier steps in the algorithm. */
op_cost = add_cost[mode] + shift_cost[mode][m];
}
d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
- if (t % d == 0 && t > d && m < maxm)
+ if (t % d == 0 && t > d && m < maxm
+ && (!cache_hit || cache_alg == alg_sub_factor))
{
/* If the target has a cheap shift-and-subtract insn use
that in preference to a shift insn followed by a sub insn.
op_latency = add_cost[mode];
new_limit.cost = best_cost.cost - op_cost;
- new_limit.cost = best_cost.cost - op_latency;
+ new_limit.latency = best_cost.latency - op_latency;
synth_mult (alg_in, t / d, &new_limit, mode);
alg_in->cost.cost += op_cost;
break;
}
}
+ if (cache_hit)
+ goto done;
/* Try shift-and-add (load effective address) instructions,
i.e. do a*3, a*5, a*9. */
if ((t & 1) != 0)
{
+ do_alg_add_t2_m:
q = t - 1;
q = q & -q;
m = exact_log2 (q);
best_alg->op[best_alg->ops] = alg_add_t2_m;
}
}
+ if (cache_hit)
+ goto done;
+ do_alg_sub_t2_m:
q = t + 1;
q = q & -q;
m = exact_log2 (q);
best_alg->op[best_alg->ops] = alg_sub_t2_m;
}
}
+ if (cache_hit)
+ goto done;
+ }
+
+ done:
+ /* If best_cost has not decreased, we have not found any algorithm. */
+ if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
+ return;
+
+ /* Cache the result. */
+ if (!cache_hit)
+ {
+ alg_hash[hash_index].t = t;
+ alg_hash[hash_index].mode = mode;
+ alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
}
/* If we are getting a too long sequence for `struct algorithm'
if (best_alg->ops == MAX_BITS_PER_WORD)
return;
- /* If best_cost has not decreased, we have not found any algorithm. */
- if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
- return;
-
/* Copy the algorithm from temporary space to the space at alg_out.
We avoid using structure assignment because the majority of
best_alg is normally undefined, and this is a critical function. */
if (const_op1 && GET_CODE (const_op1) == CONST_INT
&& (unsignedp || !flag_trapv))
{
- int mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
+ HOST_WIDE_INT coeff = INTVAL (const_op1);
+ int mult_cost;
- if (choose_mult_variant (mode, INTVAL (const_op1), &algorithm, &variant,
+ /* Special case powers of two. */
+ if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
+ {
+ if (coeff == 0)
+ return const0_rtx;
+ if (coeff == 1)
+ return op0;
+ return expand_shift (LSHIFT_EXPR, mode, op0,
+ build_int_cst (NULL_TREE, floor_log2 (coeff)),
+ target, unsignedp);
+ }
+
+ mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
+ if (choose_mult_variant (mode, coeff, &algorithm, &variant,
mult_cost))
- return expand_mult_const (mode, op0, INTVAL (const_op1), target,
+ return expand_mult_const (mode, op0, coeff, target,
&algorithm, variant);
}
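For reference, the kind of sequence this machinery aims for, written out by
hand (an illustration, not the generated RTL): the new fast path turns a
power-of-two coefficient into a single shift, while choose_mult_variant and
synth_mult break other constants into shift-and-add steps.

    /* x * 8  becomes x << 3 (the EXACT_POWER_OF_2_OR_ZERO_P fast path);
       x * 10 can be synthesized as (x << 1) + (x << 3).  */
    static long
    times8 (long x)  { return x << 3; }

    static long
    times10 (long x) { return (x << 1) + (x << 3); }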
static rtx
expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
- unsigned HOST_WIDE_INT mask;
+ unsigned HOST_WIDE_INT masklow, maskhigh;
rtx result, temp, shift, label;
int logd;
if (signmask)
{
signmask = force_reg (mode, signmask);
- mask = ((HOST_WIDE_INT) 1 << logd) - 1;
+ masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
/* Use the rtx_cost of a LSHIFTRT instruction to determine
which instruction sequence to use. If logical right shifts
are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
-
+
temp = gen_rtx_LSHIFTRT (mode, result, shift);
if (lshr_optab->handlers[mode].insn_code == CODE_FOR_nothing
|| rtx_cost (temp, SET) > COSTS_N_INSNS (2))
NULL_RTX, 1, OPTAB_LIB_WIDEN);
temp = expand_binop (mode, sub_optab, temp, signmask,
NULL_RTX, 1, OPTAB_LIB_WIDEN);
- temp = expand_binop (mode, and_optab, temp, GEN_INT (mask),
+ temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
NULL_RTX, 1, OPTAB_LIB_WIDEN);
temp = expand_binop (mode, xor_optab, temp, signmask,
NULL_RTX, 1, OPTAB_LIB_WIDEN);
temp = expand_binop (mode, add_optab, op0, signmask,
NULL_RTX, 1, OPTAB_LIB_WIDEN);
- temp = expand_binop (mode, and_optab, temp, GEN_INT (mask),
+ temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
NULL_RTX, 1, OPTAB_LIB_WIDEN);
temp = expand_binop (mode, sub_optab, temp, signmask,
NULL_RTX, 1, OPTAB_LIB_WIDEN);
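The two instruction sequences being costed above compute a signed remainder
by 2^k without a branch; in C, the XOR/SUB variant looks roughly like this
(32-bit int and arithmetic right shifts assumed; this is a sketch, not the
emitted RTL):

    /* x % (1 << k), truncating toward zero: take the absolute value with
       an xor/subtract pair, mask off the low K bits, then restore the
       original sign the same way.  S is 0 for non-negative X, -1 otherwise. */
    static int32_t
    smod_pow2 (int32_t x, int k)
    {
      int32_t s = x >> 31;
      int32_t r = ((x ^ s) - s) & (((int32_t) 1 << k) - 1);
      return (r ^ s) - s;
    }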
can avoid an explicit compare operation in the following comparison
against zero. */
- mask = (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1)
- | (((HOST_WIDE_INT) 1 << logd) - 1);
+ masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
+ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ {
+ masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
+ maskhigh = -1;
+ }
+ else
+ maskhigh = (HOST_WIDE_INT) -1
+ << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
- temp = expand_binop (mode, and_optab, op0, GEN_INT (mask), result,
- 1, OPTAB_LIB_WIDEN);
+ temp = expand_binop (mode, and_optab, op0,
+ immed_double_const (masklow, maskhigh, mode),
+ result, 1, OPTAB_LIB_WIDEN);
if (temp != result)
emit_move_insn (result, temp);
temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
0, OPTAB_LIB_WIDEN);
- mask = (HOST_WIDE_INT) -1 << logd;
- temp = expand_binop (mode, ior_optab, temp, GEN_INT (mask), result,
- 1, OPTAB_LIB_WIDEN);
+ masklow = (HOST_WIDE_INT) -1 << logd;
+ maskhigh = -1;
+ temp = expand_binop (mode, ior_optab, temp,
+ immed_double_const (masklow, maskhigh, mode),
+ result, 1, OPTAB_LIB_WIDEN);
temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
0, OPTAB_LIB_WIDEN);
if (temp != result)
{
rtx temp2;
+ /* ??? emit_conditional_move forces a stack adjustment via
+ compare_from_rtx, so if the sequence is discarded, it will
+ be lost. Do it now instead. */
+ do_pending_stack_adjust ();
+
start_sequence ();
temp2 = copy_to_mode_reg (mode, op0);
temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
(x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
*/
-#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
-
rtx
expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
rtx op0, rtx op1, rtx target, int unsignedp)
if (remainder)
return gen_lowpart (mode, remainder);
}
- quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
+
+ if (sdiv_pow2_cheap[compute_mode]
+ && ((sdiv_optab->handlers[compute_mode].insn_code
+ != CODE_FOR_nothing)
+ || (sdivmod_optab->handlers[compute_mode].insn_code
+ != CODE_FOR_nothing)))
+ quotient = expand_divmod (0, TRUNC_DIV_EXPR,
+ compute_mode, op0,
+ gen_int_mode (abs_d,
+ compute_mode),
+ NULL_RTX, 0);
+ else
+ quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
/* We have computed OP0 / abs(OP1). If OP1 is negative,
negate the quotient. */
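Similarly, the expand_sdiv_pow2 path that the new sdiv_pow2_cheap check can
bypass is built around the classic correction of negative dividends before the
arithmetic shift; a C sketch under the usual arithmetic-shift assumption (not
the routine itself):

    /* x / (1 << k), rounding toward zero: bias negative dividends by
       (1 << k) - 1 so the arithmetic right shift truncates instead of
       rounding toward minus infinity.  */
    static int32_t
    sdiv_pow2 (int32_t x, int k)
    {
      int32_t bias = (x >> 31) & (((int32_t) 1 << k) - 1);
      return (x + bias) >> k;
    }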