/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
This file is part of GCC.
See expr.h for documentation of these optabs. */
-#if GCC_VERSION >= 4000
-__extension__ struct optab optab_table[OTI_MAX]
+#if GCC_VERSION >= 4000 && HAVE_DESIGNATED_INITIALIZERS
+__extension__ struct optab_d optab_table[OTI_MAX]
= { [0 ... OTI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1].insn_code
= CODE_FOR_nothing };
#else
/* init_insn_codes will do runtime initialization otherwise. */
-struct optab optab_table[OTI_MAX];
+struct optab_d optab_table[OTI_MAX];
#endif
rtx libfunc_table[LTI_MAX];
/* Tables of patterns for converting one mode to another. */
-#if GCC_VERSION >= 4000
-__extension__ struct convert_optab convert_optab_table[COI_MAX]
+#if GCC_VERSION >= 4000 && HAVE_DESIGNATED_INITIALIZERS
+__extension__ struct convert_optab_d convert_optab_table[COI_MAX]
= { [0 ... COI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1]
[0 ... NUM_MACHINE_MODES - 1].insn_code
= CODE_FOR_nothing };
#else
/* init_convert_optab will do runtime initialization otherwise. */
-struct convert_optab convert_optab_table[COI_MAX];
+struct convert_optab_d convert_optab_table[COI_MAX];
#endif
/* Contains the optab used for each rtx code. */
optab code_to_optab[NUM_RTX_CODE + 1];
-/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
- gives the gen_function to make a branch to test that condition. */
-
-rtxfun bcc_gen_fctn[NUM_RTX_CODE];
-
-/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
- gives the insn code to make a store-condition insn
- to test that condition. */
-
-enum insn_code setcc_gen_code[NUM_RTX_CODE];
-
#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
-/* The insn generating function can not take an rtx_code argument.
- TRAP_RTX is used as an rtx argument. Its code is replaced with
- the code to be used in the trap insn and all other fields are ignored. */
-static GTY(()) rtx trap_rtx;
-
-static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
- enum machine_mode *, int *);
+static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
+ enum machine_mode *);
static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);
/* Debug facility for use in GDB. */
void debug_optab_libfuncs (void);
-#ifndef HAVE_conditional_trap
-#define HAVE_conditional_trap 0
-#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
-#endif
-
/* Prefixes for the current version of decimal floating point (BID vs. DPD) */
#if ENABLE_DECIMAL_BID_FORMAT
#define DECIMAL_PREFIX "bid_"
return vec_shr_optab;
case VEC_WIDEN_MULT_HI_EXPR:
- return TYPE_UNSIGNED (type) ?
+ return TYPE_UNSIGNED (type) ?
vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
case VEC_WIDEN_MULT_LO_EXPR:
- return TYPE_UNSIGNED (type) ?
+ return TYPE_UNSIGNED (type) ?
vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
case VEC_UNPACK_HI_EXPR:
vec_unpacku_hi_optab : vec_unpacks_hi_optab;
case VEC_UNPACK_LO_EXPR:
- return TYPE_UNSIGNED (type) ?
+ return TYPE_UNSIGNED (type) ?
vec_unpacku_lo_optab : vec_unpacks_lo_optab;
case VEC_UNPACK_FLOAT_HI_EXPR:
case VEC_UNPACK_FLOAT_LO_EXPR:
/* The signedness is determined from input operand. */
- return TYPE_UNSIGNED (type) ?
+ return TYPE_UNSIGNED (type) ?
vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
case VEC_PACK_TRUNC_EXPR:
E.g, when called to expand the following operations, this is how
the arguments will be initialized:
nops OP0 OP1 WIDE_OP
- widening-sum 2 oprnd0 - oprnd1
+ widening-sum 2 oprnd0 - oprnd1
widening-dot-product 3 oprnd0 oprnd1 oprnd2
widening-mult 2 oprnd0 oprnd1 -
type-promotion (vec-unpack) 1 oprnd0 - - */
rtx
-expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
- int unsignedp)
-{
+expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
+ rtx target, int unsignedp)
+{
tree oprnd0, oprnd1, oprnd2;
- enum machine_mode wmode = 0, tmode0, tmode1 = 0;
+ enum machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
optab widen_pattern_optab;
- int icode;
- enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
+ int icode;
+ enum machine_mode xmode0, xmode1 = VOIDmode, wxmode = VOIDmode;
rtx temp;
rtx pat;
rtx xop0, xop1, wxop;
- int nops = TREE_OPERAND_LENGTH (exp);
+ int nops = TREE_CODE_LENGTH (ops->code);
- oprnd0 = TREE_OPERAND (exp, 0);
+ oprnd0 = ops->op0;
tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
widen_pattern_optab =
- optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0), optab_default);
+ optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
icode = (int) optab_handler (widen_pattern_optab, tmode0)->insn_code;
gcc_assert (icode != CODE_FOR_nothing);
xmode0 = insn_data[icode].operand[1].mode;
if (nops >= 2)
{
- oprnd1 = TREE_OPERAND (exp, 1);
+ oprnd1 = ops->op1;
tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
xmode1 = insn_data[icode].operand[2].mode;
}
{
gcc_assert (tmode1 == tmode0);
gcc_assert (op1);
- oprnd2 = TREE_OPERAND (exp, 2);
+ oprnd2 = ops->op2;
wmode = TYPE_MODE (TREE_TYPE (oprnd2));
wxmode = insn_data[icode].operand[3].mode;
}
/* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
rtx
-expand_vec_shift_expr (tree vec_shift_expr, rtx target)
+expand_vec_shift_expr (sepops ops, rtx target)
{
enum insn_code icode;
rtx rtx_op1, rtx_op2;
enum machine_mode mode1;
enum machine_mode mode2;
- enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
- tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
- tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
+ enum machine_mode mode = TYPE_MODE (ops->type);
+ tree vec_oprnd = ops->op0;
+ tree shift_oprnd = ops->op1;
optab shift_optab;
rtx pat;
- switch (TREE_CODE (vec_shift_expr))
+ switch (ops->code)
{
case VEC_RSHIFT_EXPR:
shift_optab = vec_shr_optab;
/* If we can compute the condition at compile time, pick the
appropriate subroutine. */
tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
- if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
+ if (tmp != 0 && CONST_INT_P (tmp))
{
if (tmp == const0_rtx)
return expand_superword_shift (binoptab, outof_input, superword_op1,
NO_DEFER_POP;
do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
- 0, 0, subword_label);
+ 0, 0, subword_label, -1);
OK_DEFER_POP;
if (!expand_superword_shift (binoptab, outof_input, superword_op1,
avoid_expensive_constant (enum machine_mode mode, optab binoptab,
rtx x, bool unsignedp)
{
+ bool speed = optimize_insn_for_speed_p ();
+
if (mode != VOIDmode
&& optimize
&& CONSTANT_P (x)
- && rtx_cost (x, binoptab->code, optimize_insn_for_speed_p ())
- > COSTS_N_INSNS (1))
+ && rtx_cost (x, binoptab->code, speed) > rtx_cost (x, SET, speed))
{
- if (GET_CODE (x) == CONST_INT)
+ if (CONST_INT_P (x))
{
HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
if (intval != INTVAL (x))
rtx xop0 = op0, xop1 = op1;
rtx temp;
rtx swap;
-
+
if (target)
temp = target;
else
xop0 = xop1;
xop1 = swap;
}
-
+
/* If we are optimizing, force expensive constants into a register. */
xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
if (!shift_optab_p (binoptab))
seem that we don't need to convert CONST_INTs, but we do, so
that they're properly zero-extended, sign-extended or truncated
for their mode. */
-
+
if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
xop0 = convert_modes (mode0,
GET_MODE (xop0) != VOIDmode
? GET_MODE (xop0)
: mode,
xop0, unsignedp);
-
+
if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
xop1 = convert_modes (mode1,
GET_MODE (xop1) != VOIDmode
? GET_MODE (xop1)
: mode,
xop1, unsignedp);
-
+
/* If operation is commutative,
try to make the first operand a register.
Even better, try to make it the same as the target.
/* Now, if insn's predicates don't allow our operands, put them into
pseudo regs. */
-
+
if (!insn_data[icode].operand[1].predicate (xop0, mode0)
&& mode0 != VOIDmode)
xop0 = copy_to_mode_reg (mode0, xop0);
-
+
if (!insn_data[icode].operand[2].predicate (xop1, mode1)
&& mode1 != VOIDmode)
xop1 = copy_to_mode_reg (mode1, xop1);
-
- if (binoptab == vec_pack_trunc_optab
+
+ if (binoptab == vec_pack_trunc_optab
|| binoptab == vec_pack_usat_optab
|| binoptab == vec_pack_ssat_optab
|| binoptab == vec_pack_ufix_trunc_optab
if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
temp = gen_reg_rtx (tmp_mode);
-
+
pat = GEN_FCN (icode) (temp, xop0, xop1);
if (pat)
{
return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
unsignedp, methods);
}
-
+
emit_insn (pat);
return temp;
}
/* If subtracting an integer constant, convert this into an addition of
the negated constant. */
- if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
+ if (binoptab == sub_optab && CONST_INT_P (op1))
{
op1 = negate_rtx (mode, op1);
binoptab = add_optab;
rtx newop1;
unsigned int bits = GET_MODE_BITSIZE (mode);
- if (GET_CODE (op1) == CONST_INT)
- newop1 = GEN_INT (bits - INTVAL (op1));
+ if (CONST_INT_P (op1))
+ newop1 = GEN_INT (bits - INTVAL (op1));
else if (targetm.shift_truncation_mask (mode) == bits - 1)
- newop1 = negate_rtx (mode, op1);
+ newop1 = negate_rtx (GET_MODE (op1), op1);
else
- newop1 = expand_binop (mode, sub_optab,
+ newop1 = expand_binop (GET_MODE (op1), sub_optab,
GEN_INT (bits), op1,
NULL_RTX, unsignedp, OPTAB_DIRECT);
-
+
temp = expand_binop_directly (mode, otheroptab, op0, newop1,
target, unsignedp, methods, last);
if (temp)
{
int i;
rtx insns;
- rtx equiv_value;
/* If TARGET is the same as one of the operands, the REG_EQUAL note
won't be accurate, so use a new target. */
if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
{
- if (binoptab->code != UNKNOWN)
- equiv_value
- = gen_rtx_fmt_ee (binoptab->code, mode,
- copy_rtx (op0), copy_rtx (op1));
- else
- equiv_value = 0;
-
emit_insn (insns);
return target;
}
if ((binoptab == lshr_optab || binoptab == ashl_optab
|| binoptab == ashr_optab)
&& mclass == MODE_INT
- && (GET_CODE (op1) == CONST_INT || optimize_insn_for_speed_p ())
+ && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
&& GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
&& optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing
&& optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
/* Apply the truncation to constant shifts. */
- if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
+ if (double_shift_mask > 0 && CONST_INT_P (op1))
op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
if (op1 == CONST0_RTX (op1_mode))
/* Synthesize double word rotates from single word shifts. */
if ((binoptab == rotl_optab || binoptab == rotr_optab)
&& mclass == MODE_INT
- && GET_CODE (op1) == CONST_INT
+ && CONST_INT_P (op1)
&& GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
&& optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
&& optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
{
rtx temp;
optab direct_optab = unsignedp ? uoptab : soptab;
- struct optab wide_soptab;
+ struct optab_d wide_soptab;
/* Do it without widening, if possible. */
temp = expand_binop (mode, direct_optab, op0, op1, target,
if (temp || methods == OPTAB_WIDEN)
return temp;
- /* Use the right width lib call if that exists. */
+ /* Use the right width libcall if that exists. */
temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
if (temp || methods == OPTAB_LIB)
return temp;
- /* Must widen and use a lib call, use either signed or unsigned. */
+ /* Must widen and use a libcall, use either signed or unsigned. */
temp = expand_binop (mode, &wide_soptab, op0, op1, target,
unsignedp, methods);
if (temp != 0)
expand_ctz (enum machine_mode mode, rtx op0, rtx target)
{
rtx seq, temp;
-
+
if (optab_handler (clz_optab, mode)->insn_code == CODE_FOR_nothing)
return 0;
-
+
start_sequence ();
temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
else with the sequence used by expand_clz.
-
+
The ffs builtin promises to return zero for a zero value and ctz/clz
may have an undefined value in that case. If they do not give us a
convenient value, we have to generate a test and branch. */
if (defined_at_zero && val == -1)
/* No correction needed at zero. */;
- else
+ else
{
/* We don't try to do anything clever with the situation found
on some processors (eg Alpha) where ctz(0:mode) ==
if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
|| unoptab == popcount_optab || unoptab == parity_optab)
outmode
- = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
+ = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
+ optab_libfunc (unoptab, mode)));
start_sequence ();
NO_DEFER_POP;
do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
- NULL_RTX, NULL_RTX, op1);
+ NULL_RTX, NULL_RTX, op1, -1);
op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
target, target, 0);
return target;
}
+/* Emit code to compute the one's complement absolute value of OP0
+ (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
+ (TARGET may be NULL_RTX.) The return value says where the result
+ actually is to be found, or NULL_RTX if no non-jumping expansion
+ applies (the caller must then fall back to a branching sequence).
+
+ MODE is the mode of the operand; the mode of the result is
+ different but can be deduced from MODE. */
+
+rtx
+expand_one_cmpl_abs_nojump (enum machine_mode mode, rtx op0, rtx target)
+{
+ rtx temp;
+
+ /* Not applicable for floating point modes. */
+ if (FLOAT_MODE_P (mode))
+ return NULL_RTX;
+
+ /* If we have a MAX insn, we can do this as MAX (x, ~x): when
+ x >= 0, ~x is negative so MAX picks x; when x < 0, ~x is
+ non-negative so MAX picks ~x. */
+ if (optab_handler (smax_optab, mode)->insn_code != CODE_FOR_nothing)
+ {
+ rtx last = get_last_insn ();
+
+ temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
+ if (temp != 0)
+ temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
+ OPTAB_WIDEN);
+
+ if (temp != 0)
+ return temp;
+
+ /* The MAX expansion failed; discard any insns emitted so far. */
+ delete_insns_since (last);
+ }
+
+ /* If this machine has expensive jumps, we can do one's complement
+ absolute value of X as (((signed) x >> (W-1)) ^ x): the arithmetic
+ shift produces 0 for x >= 0 and all-ones for x < 0, and XOR with
+ all-ones is exactly ~x. */
+
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && BRANCH_COST (optimize_insn_for_speed_p (),
+ false) >= 2)
+ {
+ rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
+ size_int (GET_MODE_BITSIZE (mode) - 1),
+ NULL_RTX, 0);
+
+ temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
+ OPTAB_LIB_WIDEN);
+
+ if (temp != 0)
+ return temp;
+ }
+
+ return NULL_RTX;
+}
+
/* A subroutine of expand_copysign, perform the copysign operation using the
abs and neg primitives advertised to exist on the target. The assumption
is that we have a split register file, and leaving op0 in fp registers,
with two operands: an output TARGET and an input OP0.
TARGET *must* be nonzero, and the output is always stored there.
CODE is an rtx code such that (CODE OP0) is an rtx that describes
- the value that is stored into TARGET.
+ the value that is stored into TARGET.
Return false if expansion failed. */
emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
{
rtx final_dest = target;
- rtx prev, next, last, insn;
+ rtx next, last, insn;
/* If this is a reg with REG_USERVAR_P set, then it could possibly turn
into a MEM later. Protect the libcall block from this change. */
/* If we're using non-call exceptions, a libcall corresponding to an
operation that may trap may also trap. */
+ /* ??? See the comment in front of make_reg_eh_region_note. */
if (flag_non_call_exceptions && may_trap_p (equiv))
{
for (insn = insns; insn; insn = NEXT_INSN (insn))
if (CALL_P (insn))
{
rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
-
- if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
- remove_note (insn, note);
+ if (note)
+ {
+ int lp_nr = INTVAL (XEXP (note, 0));
+ if (lp_nr == 0 || lp_nr == INT_MIN)
+ remove_note (insn, note);
+ }
}
}
else
- /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
- reg note to indicate that this call cannot throw or execute a nonlocal
- goto (unless there is already a REG_EH_REGION note, in which case
- we update it). */
- for (insn = insns; insn; insn = NEXT_INSN (insn))
- if (CALL_P (insn))
- {
- rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
-
- if (note != 0)
- XEXP (note, 0) = constm1_rtx;
- else
- add_reg_note (insn, REG_EH_REGION, constm1_rtx);
- }
+ {
+ /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
+ reg note to indicate that this call cannot throw or execute a nonlocal
+ goto (unless there is already a REG_EH_REGION note, in which case
+ we update it). */
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (CALL_P (insn))
+ make_reg_eh_region_note_nothrow_nononlocal (insn);
+ }
/* First emit all insns that set pseudos. Remove them from the list as
we go. Avoid insns that set pseudos which were referenced in previous
break;
}
- prev = get_last_insn ();
-
/* Write the remaining insns followed by the final copy. */
-
for (insn = insns; insn; insn = next)
{
next = NEXT_INSN (insn);
{
int icode;
- if (optab_handler (cmp_optab, mode)->insn_code != CODE_FOR_nothing)
- {
- if (purpose == ccp_jump)
- return bcc_gen_fctn[(int) code] != NULL;
- else if (purpose == ccp_store_flag)
- return setcc_gen_code[(int) code] != CODE_FOR_nothing;
- else
- /* There's only one cmov entry point, and it's allowed to fail. */
- return 1;
- }
if (purpose == ccp_jump
&& (icode = optab_handler (cbranch_optab, mode)->insn_code) != CODE_FOR_nothing
&& insn_data[icode].operand[0].predicate (test, mode))
*PMODE is the mode of the inputs (in case they are const_int).
*PUNSIGNEDP nonzero says that the operands are unsigned;
- this matters if they need to be widened.
+ this matters if they need to be widened (as given by METHODS).
If they have mode BLKmode, then SIZE specifies the size of both operands.
comparisons must have already been folded. */
static void
-prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
- enum machine_mode *pmode, int *punsignedp,
- enum can_compare_purpose purpose)
+prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
+ int unsignedp, enum optab_methods methods,
+ rtx *ptest, enum machine_mode *pmode)
{
enum machine_mode mode = *pmode;
- rtx x = *px, y = *py;
- int unsignedp = *punsignedp;
- rtx libfunc;
+ rtx libfunc, test;
+ enum machine_mode cmp_mode;
+ enum mode_class mclass;
+
+ /* The other methods are not needed. */
+ gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
+ || methods == OPTAB_LIB_WIDEN);
- /* If we are inside an appropriately-short loop and we are optimizing,
- force expensive constants into a register. */
+ /* If we are optimizing, force expensive constants into a register. */
if (CONSTANT_P (x) && optimize
&& (rtx_cost (x, COMPARE, optimize_insn_for_speed_p ())
> COSTS_N_INSNS (1)))
/* Don't let both operands fail to indicate the mode. */
if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
x = force_reg (mode, x);
+ if (mode == VOIDmode)
+ mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
/* Handle all BLKmode compares. */
if (mode == BLKmode)
{
- enum machine_mode cmp_mode, result_mode;
+ enum machine_mode result_mode;
enum insn_code cmp_code;
tree length_type;
rtx libfunc;
continue;
/* Must make sure the size fits the insn's mode. */
- if ((GET_CODE (size) == CONST_INT
+ if ((CONST_INT_P (size)
&& INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
|| (GET_MODE_BITSIZE (GET_MODE (size))
> GET_MODE_BITSIZE (cmp_mode)))
size = convert_to_mode (cmp_mode, size, 1);
emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
- *px = result;
- *py = const0_rtx;
- *pmode = result_mode;
+ *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
+ *pmode = result_mode;
return;
}
+ if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
+ goto fail;
+
/* Otherwise call a library function, memcmp. */
libfunc = memcmp_libfunc;
length_type = sizetype;
XEXP (x, 0), Pmode,
XEXP (y, 0), Pmode,
size, cmp_mode);
- *px = result;
- *py = const0_rtx;
+
+ *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
*pmode = result_mode;
return;
}
y = force_reg (mode, y);
}
- *px = x;
- *py = y;
if (GET_MODE_CLASS (mode) == MODE_CC)
{
- gcc_assert (can_compare_p (*pcomparison, CCmode, purpose));
+ gcc_assert (can_compare_p (comparison, CCmode, ccp_jump));
+ *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
return;
}
- else if (can_compare_p (*pcomparison, mode, purpose))
- return;
- /* Handle a lib call just for the mode we are using. */
- libfunc = optab_libfunc (cmp_optab, mode);
- if (libfunc && !SCALAR_FLOAT_MODE_P (mode))
+ mclass = GET_MODE_CLASS (mode);
+ test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
+ cmp_mode = mode;
+ do
+ {
+ enum insn_code icode;
+ icode = optab_handler (cbranch_optab, cmp_mode)->insn_code;
+ if (icode != CODE_FOR_nothing
+ && insn_data[icode].operand[0].predicate (test, VOIDmode))
+ {
+ rtx last = get_last_insn ();
+ rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
+ rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
+ if (op0 && op1
+ && insn_data[icode].operand[1].predicate
+ (op0, insn_data[icode].operand[1].mode)
+ && insn_data[icode].operand[2].predicate
+ (op1, insn_data[icode].operand[2].mode))
+ {
+ XEXP (test, 0) = op0;
+ XEXP (test, 1) = op1;
+ *ptest = test;
+ *pmode = cmp_mode;
+ return;
+ }
+ delete_insns_since (last);
+ }
+
+ if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
+ break;
+ cmp_mode = GET_MODE_WIDER_MODE (cmp_mode);
+ }
+ while (cmp_mode != VOIDmode);
+
+ if (methods != OPTAB_LIB_WIDEN)
+ goto fail;
+
+ if (!SCALAR_FLOAT_MODE_P (mode))
{
rtx result;
+ /* Handle a libcall just for the mode we are using. */
+ libfunc = optab_libfunc (cmp_optab, mode);
+ gcc_assert (libfunc);
+
/* If we want unsigned, and this mode has a distinct unsigned
comparison routine, use that. */
if (unsignedp)
/* There are two kinds of comparison routines. Biased routines
return 0/1/2, and unbiased routines return -1/0/1. Other parts
of gcc expect that the comparison operation is equivalent
- to the modified comparison. For signed comparisons compare the
+ to the modified comparison. For signed comparisons compare the
result against 1 in the biased case, and zero in the unbiased
case. For unsigned comparisons always compare against 1 after
biasing the unbiased result by adding 1. This gives us a way to
represent LTU. */
- *px = result;
- *pmode = word_mode;
- *py = const1_rtx;
+ x = result;
+ y = const1_rtx;
if (!TARGET_LIB_INT_CMP_BIASED)
{
- if (*punsignedp)
- *px = plus_constant (result, 1);
+ if (unsignedp)
+ x = plus_constant (result, 1);
else
- *py = const0_rtx;
+ y = const0_rtx;
}
- return;
+
+ *pmode = word_mode;
+ prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
+ ptest, pmode);
}
+ else
+ prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
- gcc_assert (SCALAR_FLOAT_MODE_P (mode));
- prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
+ return;
+
+ fail:
+ *ptest = NULL_RTX;
}
/* Before emitting an insn with code ICODE, make sure that X, which is going
WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
that it is accepted by the operand predicate. Return the new value. */
-static rtx
+rtx
prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
enum machine_mode wider_mode, int unsignedp)
{
}
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
- we can do the comparison.
- The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
- be NULL_RTX which indicates that only a comparison is to be generated. */
+ we can do the branch. */
static void
-emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
- enum rtx_code comparison, int unsignedp, rtx label)
+emit_cmp_and_jump_insn_1 (rtx test, enum machine_mode mode, rtx label)
{
- rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
- enum mode_class mclass = GET_MODE_CLASS (mode);
- enum machine_mode wider_mode = mode;
-
- /* Try combined insns first. */
- do
- {
- enum machine_mode optab_mode = mclass == MODE_CC ? CCmode : wider_mode;
- enum insn_code icode;
- PUT_MODE (test, wider_mode);
-
- if (label)
- {
- icode = optab_handler (cbranch_optab, optab_mode)->insn_code;
-
- if (icode != CODE_FOR_nothing
- && insn_data[icode].operand[0].predicate (test, wider_mode))
- {
- x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
- y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
- emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
- return;
- }
- }
-
- /* Handle some compares against zero. */
- icode = optab_handler (tst_optab, optab_mode)->insn_code;
- if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
- {
- x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
- emit_insn (GEN_FCN (icode) (x));
- if (label)
- emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
- return;
- }
-
- /* Handle compares for which there is a directly suitable insn. */
-
- icode = optab_handler (cmp_optab, optab_mode)->insn_code;
- if (icode != CODE_FOR_nothing)
- {
- x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
- y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
- emit_insn (GEN_FCN (icode) (x, y));
- if (label)
- emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
- return;
- }
-
- if (!CLASS_HAS_WIDER_MODES_P (mclass))
- break;
+ enum machine_mode optab_mode;
+ enum mode_class mclass;
+ enum insn_code icode;
- wider_mode = GET_MODE_WIDER_MODE (wider_mode);
- }
- while (wider_mode != VOIDmode);
+ mclass = GET_MODE_CLASS (mode);
+ optab_mode = (mclass == MODE_CC) ? CCmode : mode;
+ icode = optab_handler (cbranch_optab, optab_mode)->insn_code;
- gcc_unreachable ();
+ gcc_assert (icode != CODE_FOR_nothing);
+ gcc_assert (insn_data[icode].operand[0].predicate (test, VOIDmode));
+ emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0), XEXP (test, 1), label));
}
/* Generate code to compare X with Y so that the condition codes are
ensure that the comparison RTL has the canonical form.
UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
- need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
- the proper branch condition code.
+ need to be widened. UNSIGNEDP is also used to select the proper
+ branch condition code.
If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
MODE is the mode of the inputs (in case they are const_int).
- COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
- be passed unchanged to emit_cmp_insn, then potentially converted into an
- unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
+ COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
+ It will be potentially converted into an unsigned variant based on
+ UNSIGNEDP to select a proper jump instruction. */
void
emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
enum machine_mode mode, int unsignedp, rtx label)
{
rtx op0 = x, op1 = y;
+ rtx test;
/* Swap operands and condition to ensure canonical RTL. */
- if (swap_commutative_operands_p (x, y))
+ if (swap_commutative_operands_p (x, y)
+ && can_compare_p (swap_condition (comparison), mode, ccp_jump))
{
- /* If we're not emitting a branch, callers are required to pass
- operands in an order conforming to canonical RTL. We relax this
- for commutative comparisons so callers using EQ don't need to do
- swapping by hand. */
- gcc_assert (label || (comparison == swap_condition (comparison)));
-
op0 = y, op1 = x;
comparison = swap_condition (comparison);
}
-#ifdef HAVE_cc0
- /* If OP0 is still a constant, then both X and Y must be constants.
- Force X into a register to create canonical RTL. */
+ /* If OP0 is still a constant, then both X and Y must be constants
+ or the opposite comparison is not supported. Force X into a register
+ to create canonical RTL. */
if (CONSTANT_P (op0))
op0 = force_reg (mode, op0);
-#endif
if (unsignedp)
comparison = unsigned_condition (comparison);
- prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
- ccp_jump);
- emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
+ prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
+ &test, &mode);
+ emit_cmp_and_jump_insn_1 (test, mode, label);
}
-/* Like emit_cmp_and_jump_insns, but generate only the comparison. */
-
-void
-emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
- enum machine_mode mode, int unsignedp)
-{
- emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
-}
\f
/* Emit a library call comparison between floating point X and Y.
COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
static void
-prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
- enum machine_mode *pmode, int *punsignedp)
+prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
+ rtx *ptest, enum machine_mode *pmode)
{
- enum rtx_code comparison = *pcomparison;
enum rtx_code swapped = swap_condition (comparison);
enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
- rtx x = *px;
- rtx y = *py;
enum machine_mode orig_mode = GET_MODE (x);
enum machine_mode mode, cmp_mode;
rtx value, target, insns, equiv;
mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
{
- if ((libfunc = optab_libfunc (code_to_optab[comparison], mode)))
+ if (code_to_optab[comparison]
+ && (libfunc = optab_libfunc (code_to_optab[comparison], mode)))
break;
- if ((libfunc = optab_libfunc (code_to_optab[swapped] , mode)))
+ if (code_to_optab[swapped]
+ && (libfunc = optab_libfunc (code_to_optab[swapped], mode)))
{
rtx tmp;
tmp = x; x = y; y = tmp;
break;
}
- if ((libfunc = optab_libfunc (code_to_optab[reversed], mode))
+ if (code_to_optab[reversed]
+ && (libfunc = optab_libfunc (code_to_optab[reversed], mode))
&& FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
{
comparison = reversed;
|| FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
comparison = reversed_p ? EQ : NE;
- *px = target;
- *py = const0_rtx;
+ *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
*pmode = cmp_mode;
- *pcomparison = comparison;
- *punsignedp = 0;
}
\f
/* Generate code to indirectly jump to a location given in the rtx LOC. */
(op3, insn_data[icode].operand[3].mode))
op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
- /* Everything should now be in the suitable form, so emit the compare insn
- and then the conditional move. */
+ /* Everything should now be in the suitable form. */
- comparison
- = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
+ code = unsignedp ? unsigned_condition (code) : code;
+ comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
- /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
/* We can get const0_rtx or const_true_rtx in some circumstances. Just
return NULL and let the caller figure out how best to deal with this
situation. */
- if (GET_CODE (comparison) != code)
+ if (!COMPARISON_P (comparison))
return NULL_RTX;
- insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
+ do_pending_stack_adjust ();
+ start_sequence ();
+ prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
+ GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
+ &comparison, &cmode);
+ if (!comparison)
+ insn = NULL_RTX;
+ else
+ insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
/* If that failed, then give up. */
if (insn == 0)
- return 0;
+ {
+ end_sequence ();
+ return 0;
+ }
emit_insn (insn);
-
+ insn = get_insns ();
+ end_sequence ();
+ emit_insn (insn);
if (subtarget != target)
convert_move (target, subtarget, 0);
(op3, insn_data[icode].operand[3].mode))
op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
- /* Everything should now be in the suitable form, so emit the compare insn
- and then the conditional move. */
+ /* Everything should now be in the suitable form. */
- comparison
- = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
+ code = unsignedp ? unsigned_condition (code) : code;
+ comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
- /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
/* We can get const0_rtx or const_true_rtx in some circumstances. Just
return NULL and let the caller figure out how best to deal with this
situation. */
- if (GET_CODE (comparison) != code)
+ if (!COMPARISON_P (comparison))
return NULL_RTX;
- insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
+ do_pending_stack_adjust ();
+ start_sequence ();
+ prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
+ GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
+ &comparison, &cmode);
+ if (!comparison)
+ insn = NULL_RTX;
+ else
+ insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
/* If that failed, then give up. */
if (insn == 0)
- return 0;
+ {
+ end_sequence ();
+ return 0;
+ }
emit_insn (insn);
-
+ insn = get_insns ();
+ end_sequence ();
+ emit_insn (insn);
if (subtarget != target)
convert_move (target, subtarget, 0);
if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
|| (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
-
+
if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
return;
if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
|| (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
-
+
if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
return;
return DECL_NAME ((const_tree) entry1) == (const_tree) entry2;
}
+/* Build a decl for a libfunc named NAME. */
+
+tree
+build_libfunc_function (const char *name)
+{
+ tree decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
+ get_identifier (name),
+ build_function_type (integer_type_node, NULL_TREE));
+ /* ??? We don't have any type information except that this is
+ a function. Pretend this is "int foo()". */
+ DECL_ARTIFICIAL (decl) = 1;
+ DECL_EXTERNAL (decl) = 1;
+ TREE_PUBLIC (decl) = 1;
+ gcc_assert (DECL_ASSEMBLER_NAME (decl));
+
+ /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
+ are the flags assigned by targetm.encode_section_info. */
+ SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);
+
+ return decl;
+}
+
rtx
init_one_libfunc (const char *name)
{
{
/* Create a new decl, so that it can be passed to
targetm.encode_section_info. */
- /* ??? We don't have any type information except for this is
- a function. Pretend this is "int foo()". */
- decl = build_decl (FUNCTION_DECL, get_identifier (name),
- build_function_type (integer_type_node, NULL_TREE));
- DECL_ARTIFICIAL (decl) = 1;
- DECL_EXTERNAL (decl) = 1;
- TREE_PUBLIC (decl) = 1;
-
- /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
- are the flags assigned by targetm.encode_section_info. */
- SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);
-
+ decl = build_libfunc_function (name);
*slot = decl;
}
return XEXP (DECL_RTL (decl), 0);
init_optabs (void)
{
unsigned int i;
- enum machine_mode int_mode;
static bool reinit;
libfunc_hash = htab_create_ggc (10, hash_libfunc, eq_libfunc, NULL);
/* Start by initializing all tables to contain CODE_FOR_nothing. */
- for (i = 0; i < NUM_RTX_CODE; i++)
- setcc_gen_code[i] = CODE_FOR_nothing;
-
#ifdef HAVE_conditional_move
for (i = 0; i < NUM_MACHINE_MODES; i++)
movcc_gen_code[i] = CODE_FOR_nothing;
vcondu_gen_code[i] = CODE_FOR_nothing;
}
-#if GCC_VERSION >= 4000
+#if GCC_VERSION >= 4000 && HAVE_DESIGNATED_INITIALIZERS
/* We statically initialize the insn_codes with CODE_FOR_nothing. */
if (reinit)
init_insn_codes ();
have_insn_for. */
init_optab (mov_optab, SET);
init_optab (movstrict_optab, STRICT_LOW_PART);
- init_optab (cmp_optab, COMPARE);
+ init_optab (cbranch_optab, COMPARE);
+
+ init_optab (cmov_optab, UNKNOWN);
+ init_optab (cstore_optab, UNKNOWN);
+ init_optab (ctrap_optab, UNKNOWN);
init_optab (storent_optab, UNKNOWN);
+ init_optab (cmp_optab, UNKNOWN);
init_optab (ucmp_optab, UNKNOWN);
- init_optab (tst_optab, UNKNOWN);
init_optab (eq_optab, EQ);
init_optab (ne_optab, NE);
init_optab (expm1_optab, UNKNOWN);
init_optab (ldexp_optab, UNKNOWN);
init_optab (scalb_optab, UNKNOWN);
+ init_optab (significand_optab, UNKNOWN);
init_optab (logb_optab, UNKNOWN);
init_optab (ilogb_optab, UNKNOWN);
init_optab (log_optab, UNKNOWN);
init_optab (isinf_optab, UNKNOWN);
init_optab (strlen_optab, UNKNOWN);
- init_optab (cbranch_optab, UNKNOWN);
- init_optab (cmov_optab, UNKNOWN);
- init_optab (cstore_optab, UNKNOWN);
init_optab (push_optab, UNKNOWN);
init_optab (reduc_smax_optab, UNKNOWN);
init_optab (ssum_widen_optab, UNKNOWN);
init_optab (usum_widen_optab, UNKNOWN);
- init_optab (sdot_prod_optab, UNKNOWN);
+ init_optab (sdot_prod_optab, UNKNOWN);
init_optab (udot_prod_optab, UNKNOWN);
init_optab (vec_extract_optab, UNKNOWN);
/* The ffs function operates on `int'. Fall back on it if we do not
have a libgcc2 function for that width. */
if (INT_TYPE_SIZE < BITS_PER_WORD)
- {
- int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
- set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
- "ffs");
- }
+ set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
+ "ffs");
/* Explicitly initialize the bswap libfuncs since we need them to be
valid for things other than word_mode. */
gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
- if (HAVE_conditional_trap)
- trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
-
/* Allow the target to add more libcalls or rename some, etc. */
targetm.init_libfuncs ();
CODE. Return 0 on failure. */
rtx
-gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
- rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
+gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
{
enum machine_mode mode = GET_MODE (op1);
enum insn_code icode;
rtx insn;
-
- if (!HAVE_conditional_trap)
- return 0;
+ rtx trap_rtx;
if (mode == VOIDmode)
return 0;
- icode = optab_handler (cmp_optab, mode)->insn_code;
+ icode = optab_handler (ctrap_optab, mode)->insn_code;
if (icode == CODE_FOR_nothing)
return 0;
+ /* Some targets only accept a zero trap code. */
+ if (insn_data[icode].operand[3].predicate
+ && !insn_data[icode].operand[3].predicate (tcode, VOIDmode))
+ return 0;
+
+ do_pending_stack_adjust ();
start_sequence ();
- op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
- op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
- if (!op1 || !op2)
+ prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
+ &trap_rtx, &mode);
+ if (!trap_rtx)
+ insn = NULL_RTX;
+ else
+ insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
+ tcode);
+
+ /* If that failed, then give up. */
+ if (insn == 0)
{
end_sequence ();
return 0;
}
- emit_insn (GEN_FCN (icode) (op1, op2));
- PUT_CODE (trap_rtx, code);
- gcc_assert (HAVE_conditional_trap);
- insn = gen_conditional_trap (trap_rtx, tcode);
- if (insn)
- {
- emit_insn (insn);
- insn = get_insns ();
- }
+ emit_insn (insn);
+ insn = get_insns ();
end_sequence ();
-
return insn;
}
return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
}
-/* Return insn code for VEC_COND_EXPR EXPR. */
+/* Return insn code for TYPE, the type of a VEC_COND_EXPR. */
static inline enum insn_code
-get_vcond_icode (tree expr, enum machine_mode mode)
+get_vcond_icode (tree type, enum machine_mode mode)
{
enum insn_code icode = CODE_FOR_nothing;
- if (TYPE_UNSIGNED (TREE_TYPE (expr)))
+ if (TYPE_UNSIGNED (type))
icode = vcondu_gen_code[mode];
else
icode = vcond_gen_code[mode];
}
/* Return TRUE iff, appropriate vector insns are available
- for vector cond expr expr in VMODE mode. */
+ for vector cond expr with type TYPE in VMODE mode. */
bool
-expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
+expand_vec_cond_expr_p (tree type, enum machine_mode vmode)
{
- if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
+ if (get_vcond_icode (type, vmode) == CODE_FOR_nothing)
return false;
return true;
}
-/* Generate insns for VEC_COND_EXPR. */
+/* Generate insns for a VEC_COND_EXPR, given its TYPE and its
+ three operands. */
rtx
-expand_vec_cond_expr (tree vec_cond_expr, rtx target)
+expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
+ rtx target)
{
enum insn_code icode;
rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
- enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
- bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
+ enum machine_mode mode = TYPE_MODE (vec_cond_type);
+ bool unsignedp = TYPE_UNSIGNED (vec_cond_type);
- icode = get_vcond_icode (vec_cond_expr, mode);
+ icode = get_vcond_icode (vec_cond_type, mode);
if (icode == CODE_FOR_nothing)
return 0;
target = gen_reg_rtx (mode);
/* Get comparison rtx. First expand both cond expr operands. */
- comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
+ comparison = vector_compare_rtx (op0,
unsignedp, icode);
cc_op0 = XEXP (comparison, 0);
cc_op1 = XEXP (comparison, 1);
/* Expand both operands and force them in reg, if required. */
- rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
+ rtx_op1 = expand_normal (op1);
if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
&& mode != VOIDmode)
rtx_op1 = force_reg (mode, rtx_op1);
- rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
+ rtx_op2 = expand_normal (op2);
if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
&& mode != VOIDmode)
rtx_op2 = force_reg (mode, rtx_op2);
emit_insn (seq);
if (cc_reg)
- return emit_store_flag (target, EQ, cc_reg, const0_rtx, VOIDmode, 0, 1);
+ return emit_store_flag_force (target, EQ, cc_reg, const0_rtx, VOIDmode, 0, 1);
else
- return emit_store_flag (target, EQ, subtarget, old_val, VOIDmode, 1, 1);
+ return emit_store_flag_force (target, EQ, subtarget, old_val, VOIDmode, 1, 1);
}
/* This is a helper function for the other atomic operations. This function