enum rtx_code, int, rtx));
static void prepare_float_lib_cmp PARAMS ((rtx *, rtx *, enum rtx_code *,
enum machine_mode *, int *));
+static rtx expand_vector_binop PARAMS ((enum machine_mode, optab,
+ rtx, rtx, rtx, int,
+ enum optab_methods));
+static rtx expand_vector_unop PARAMS ((enum machine_mode, optab, rtx, rtx,
+ int));
\f
-/* Add a REG_EQUAL note to the last insn in SEQ. TARGET is being set to
+/* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
the result of operation CODE applied to OP0 (and OP1 if it is a binary
operation).
again, ensuring that TARGET is not one of the operands. */
static int
-add_equal_note (seq, target, code, op0, op1)
- rtx seq;
+add_equal_note (insns, target, code, op0, op1)
+ rtx insns;
rtx target;
enum rtx_code code;
rtx op0, op1;
{
- rtx set;
- int i;
+ rtx last_insn, insn, set;
rtx note;
- if ((GET_RTX_CLASS (code) != '1' && GET_RTX_CLASS (code) != '2'
- && GET_RTX_CLASS (code) != 'c' && GET_RTX_CLASS (code) != '<')
- || GET_CODE (seq) != SEQUENCE
- || (set = single_set (XVECEXP (seq, 0, XVECLEN (seq, 0) - 1))) == 0
- || GET_CODE (target) == ZERO_EXTRACT
- || (! rtx_equal_p (SET_DEST (set), target)
- /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside the
- SUBREG. */
- && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
- || ! rtx_equal_p (SUBREG_REG (XEXP (SET_DEST (set), 0)),
- target))))
+ if (! insns
+ || ! INSN_P (insns)
+ || NEXT_INSN (insns) == NULL_RTX)
+ abort ();
+
+ if (GET_RTX_CLASS (code) != '1' && GET_RTX_CLASS (code) != '2'
+ && GET_RTX_CLASS (code) != 'c' && GET_RTX_CLASS (code) != '<')
+ return 1;
+
+ if (GET_CODE (target) == ZERO_EXTRACT)
+ return 1;
+
+ for (last_insn = insns;
+ NEXT_INSN (last_insn) != NULL_RTX;
+ last_insn = NEXT_INSN (last_insn))
+ ;
+
+ set = single_set (last_insn);
+ if (set == NULL_RTX)
+ return 1;
+
+ if (! rtx_equal_p (SET_DEST (set), target)
+ /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside the
+ SUBREG. */
+ && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
+ || ! rtx_equal_p (SUBREG_REG (XEXP (SET_DEST (set), 0)),
+ target)))
return 1;
/* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
besides the last insn. */
if (reg_overlap_mentioned_p (target, op0)
|| (op1 && reg_overlap_mentioned_p (target, op1)))
- for (i = XVECLEN (seq, 0) - 2; i >= 0; i--)
- if (reg_set_p (target, XVECEXP (seq, 0, i)))
- return 0;
+ {
+ insn = PREV_INSN (last_insn);
+ while (insn != NULL_RTX)
+ {
+ if (reg_set_p (target, insn))
+ return 0;
+
+ insn = PREV_INSN (insn);
+ }
+ }
if (GET_RTX_CLASS (code) == '1')
note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
else
note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
- set_unique_reg_note (XVECEXP (seq, 0, XVECLEN (seq, 0) - 1), REG_EQUAL, note);
+ set_unique_reg_note (last_insn, REG_EQUAL, note);
return 1;
}
}
/* In case the insn wants input operands in modes different from
- the result, convert the operands. It would seem that we
- don't need to convert CONST_INTs, but we do, so that they're
- a properly sign-extended for their modes; we choose the
- widest mode between mode and mode[01], so that, in a widening
- operation, we call convert_modes with different FROM and TO
- modes, which ensures the value is sign-extended. Shift
- operations are an exception, because the second operand needs
- not be extended to the mode of the result. */
-
- if (GET_MODE (op0) != mode0
- && mode0 != VOIDmode)
+ those of the actual operands, convert the operands. It would
+ seem that we don't need to convert CONST_INTs, but we do, so
+ that they're properly zero-extended or sign-extended for their
+ modes; shift operations are an exception, because the second
+ operand need not be extended to the mode of the result. */
+
+ if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
xop0 = convert_modes (mode0,
GET_MODE (op0) != VOIDmode
? GET_MODE (op0)
- : GET_MODE_SIZE (mode) > GET_MODE_SIZE (mode0)
- ? mode
- : mode0,
+ : mode,
xop0, unsignedp);
- if (GET_MODE (xop1) != mode1
- && mode1 != VOIDmode)
+ if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
xop1 = convert_modes (mode1,
GET_MODE (op1) != VOIDmode
? GET_MODE (op1)
- : (GET_MODE_SIZE (mode) > GET_MODE_SIZE (mode1)
- && ! shift_op)
- ? mode
- : mode1,
+ : (shift_op ? mode1 : mode),
xop1, unsignedp);
/* Now, if insn's predicates don't allow our operands, put them into
pat = GEN_FCN (icode) (temp, xop0, xop1);
if (pat)
{
- /* If PAT is a multi-insn sequence, try to add an appropriate
+ /* If PAT is composed of more than one insn, try to add an appropriate
REG_EQUAL note to it. If we can't because TEMP conflicts with an
operand, call ourselves again, this time without a target. */
- if (GET_CODE (pat) == SEQUENCE
+ if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
&& ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
{
delete_insns_since (last);
NULL_RTX, unsignedp, next_methods);
into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
into_input, second_shift_count,
- into_target, unsignedp, next_methods);
+ NULL_RTX, unsignedp, next_methods);
if (into_temp1 != 0 && into_temp2 != 0)
inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
NULL_RTX, unsignedp, next_methods);
outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
outof_input, second_shift_count,
- outof_target, unsignedp, next_methods);
+ NULL_RTX, unsignedp, next_methods);
if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
inter = expand_binop (word_mode, ior_optab,
if (shift_count != BITS_PER_WORD)
emit_no_conflict_block (insns, target, op0, op1, equiv_value);
else
- emit_insns (insns);
+ emit_insn (insns);
return target;
&& GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
&& binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
{
- int i;
+ unsigned int i;
optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
- unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
+ const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
- rtx xop0, xop1;
+ rtx xop0, xop1, xtarget;
/* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
value is one of those, use it. Otherwise, use 1 since it is the
xop0 = force_reg (mode, op0);
xop1 = force_reg (mode, op1);
- if (target == 0 || GET_CODE (target) != REG
- || target == xop0 || target == xop1)
- target = gen_reg_rtx (mode);
+ xtarget = gen_reg_rtx (mode);
+
+ if (target == 0 || GET_CODE (target) != REG)
+ target = xtarget;
/* Indicate for flow that the entire target reg is being set. */
if (GET_CODE (target) == REG)
- emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
/* Do the actual arithmetic. */
for (i = 0; i < nwords; i++)
{
int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
- rtx target_piece = operand_subword (target, index, 1, mode);
+ rtx target_piece = operand_subword (xtarget, index, 1, mode);
rtx op0_piece = operand_subword_force (xop0, index, mode);
rtx op1_piece = operand_subword_force (xop1, index, mode);
rtx x;
carry_in = carry_out;
}
- if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
+ if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
{
if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
{
- rtx temp = emit_move_insn (target, target);
+ rtx temp = emit_move_insn (target, xtarget);
set_unique_reg_note (temp,
REG_EQUAL,
rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh,
NULL_RTX, 0, OPTAB_DIRECT);
+ if (!REG_P (product_high))
+ product_high = force_reg (word_mode, product_high);
+
if (temp != 0)
temp = expand_binop (word_mode, add_optab, temp, product_high,
product_high, 0, next_methods);
if (temp != 0 && temp != product_high)
emit_move_insn (product_high, temp);
+ emit_move_insn (operand_subword (product, high, 1, mode), product_high);
+
if (temp != 0)
{
if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
delete_insns_since (last);
}
+ /* Open-code the vector operations if we have no hardware support
+ for them. */
+ if (class == MODE_VECTOR_INT || class == MODE_VECTOR_FLOAT)
+ return expand_vector_binop (mode, binoptab, op0, op1, target,
+ unsignedp, methods);
+
/* We need to open-code the complex type operations: '+, -, * and /' */
/* At this point we allow operations between two similar complex
else
real1 = op1;
- if (real0 == 0 || real1 == 0 || ! (imag0 != 0|| imag1 != 0))
+ if (real0 == 0 || real1 == 0 || ! (imag0 != 0 || imag1 != 0))
abort ();
switch (binoptab->code)
else if (res != realr)
emit_move_insn (realr, res);
- if (imag0 && imag1)
+ if (imag0 != 0 && imag1 != 0)
res = expand_binop (submode, binoptab, imag0, imag1,
imagr, unsignedp, methods);
- else if (imag0)
+ else if (imag0 != 0)
res = imag0;
else if (binoptab->code == MINUS)
res = expand_unop (submode,
case MULT:
/* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */
- if (imag0 && imag1)
+ if (imag0 != 0 && imag1 != 0)
{
rtx temp1, temp2;
delete_insns_since (entry_last);
return 0;
}
+
+/* Like expand_binop, but for open-coding vectors binops. */
+
+static rtx
+expand_vector_binop (mode, binoptab, op0, op1, target, unsignedp, methods)
+ enum machine_mode mode;
+ optab binoptab;
+ rtx op0, op1;
+ rtx target;
+ int unsignedp;
+ enum optab_methods methods;
+{
+ enum machine_mode submode, tmode;
+ int size, elts, subsize, subbitsize, i;
+ rtx t, a, b, res, seq;
+ enum mode_class class;
+
+ class = GET_MODE_CLASS (mode);
+
+ size = GET_MODE_SIZE (mode);
+ submode = GET_MODE_INNER (mode);
+
+ /* Search for the widest vector mode with the same inner mode that is
+ still narrower than MODE and that allows to open-code this operator.
+ Note, if we find such a mode and the handler later decides it can't
+ do the expansion, we'll be called recursively with the narrower mode. */
+ for (tmode = GET_CLASS_NARROWEST_MODE (class);
+ GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode);
+ tmode = GET_MODE_WIDER_MODE (tmode))
+ {
+ if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode)
+ && binoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing)
+ submode = tmode;
+ }
+
+ switch (binoptab->code)
+ {
+ case AND:
+ case IOR:
+ case XOR:
+ tmode = int_mode_for_mode (mode);
+ if (tmode != BLKmode)
+ submode = tmode;
+ case PLUS:
+ case MINUS:
+ case MULT:
+ case DIV:
+ subsize = GET_MODE_SIZE (submode);
+ subbitsize = GET_MODE_BITSIZE (submode);
+ elts = size / subsize;
+
+ /* If METHODS is OPTAB_DIRECT, we don't insist on the exact mode,
+ but that we operate on more than one element at a time. */
+ if (subsize == GET_MODE_UNIT_SIZE (mode) && methods == OPTAB_DIRECT)
+ return 0;
+
+ start_sequence ();
+
+ /* Errors can leave us with a const0_rtx as operand. */
+ if (GET_MODE (op0) != mode)
+ op0 = copy_to_mode_reg (mode, op0);
+ if (GET_MODE (op1) != mode)
+ op1 = copy_to_mode_reg (mode, op1);
+
+ if (!target)
+ target = gen_reg_rtx (mode);
+
+ for (i = 0; i < elts; ++i)
+ {
+ /* If this is part of a register, and not the first item in the
+ word, we can't store using a SUBREG - that would clobber
+ previous results.
+ And storing with a SUBREG is only possible for the least
+ significant part, hence we can't do it for big endian
+ (unless we want to permute the evaluation order. */
+ if (GET_CODE (target) == REG
+ && (BYTES_BIG_ENDIAN
+ ? subsize < UNITS_PER_WORD
+ : ((i * subsize) % UNITS_PER_WORD) != 0))
+ t = NULL_RTX;
+ else
+ t = simplify_gen_subreg (submode, target, mode, i * subsize);
+ if (CONSTANT_P (op0))
+ a = simplify_gen_subreg (submode, op0, mode, i * subsize);
+ else
+ a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp,
+ NULL_RTX, submode, submode, size);
+ if (CONSTANT_P (op1))
+ b = simplify_gen_subreg (submode, op1, mode, i * subsize);
+ else
+ b = extract_bit_field (op1, subbitsize, i * subbitsize, unsignedp,
+ NULL_RTX, submode, submode, size);
+
+ if (binoptab->code == DIV)
+ {
+ if (class == MODE_VECTOR_FLOAT)
+ res = expand_binop (submode, binoptab, a, b, t,
+ unsignedp, methods);
+ else
+ res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
+ a, b, t, unsignedp);
+ }
+ else
+ res = expand_binop (submode, binoptab, a, b, t,
+ unsignedp, methods);
+
+ if (res == 0)
+ break;
+
+ if (t)
+ emit_move_insn (t, res);
+ else
+ store_bit_field (target, subbitsize, i * subbitsize, submode, res,
+ size);
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ seq = get_insns ();
+ end_sequence ();
+ emit_insn (seq);
+
+ return target;
+}
+
+/* Like expand_unop but for open-coding vector unops. */
+
+static rtx
+expand_vector_unop (mode, unoptab, op0, target, unsignedp)
+ enum machine_mode mode;
+ optab unoptab;
+ rtx op0;
+ rtx target;
+ int unsignedp;
+{
+ enum machine_mode submode, tmode;
+ int size, elts, subsize, subbitsize, i;
+ rtx t, a, res, seq;
+
+ size = GET_MODE_SIZE (mode);
+ submode = GET_MODE_INNER (mode);
+
+ /* Search for the widest vector mode with the same inner mode that is
+ still narrower than MODE and that allows to open-code this operator.
+ Note, if we find such a mode and the handler later decides it can't
+ do the expansion, we'll be called recursively with the narrower mode. */
+ for (tmode = GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (mode));
+ GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode);
+ tmode = GET_MODE_WIDER_MODE (tmode))
+ {
+ if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode)
+ && unoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing)
+ submode = tmode;
+ }
+ /* If there is no negate operation, try doing a subtract from zero. */
+ if (unoptab == neg_optab && GET_MODE_CLASS (submode) == MODE_INT
+ /* Avoid infinite recursion when an
+ error has left us with the wrong mode. */
+ && GET_MODE (op0) == mode)
+ {
+ rtx temp;
+ temp = expand_binop (mode, sub_optab, CONST0_RTX (mode), op0,
+ target, unsignedp, OPTAB_DIRECT);
+ if (temp)
+ return temp;
+ }
+
+ if (unoptab == one_cmpl_optab)
+ {
+ tmode = int_mode_for_mode (mode);
+ if (tmode != BLKmode)
+ submode = tmode;
+ }
+
+ subsize = GET_MODE_SIZE (submode);
+ subbitsize = GET_MODE_BITSIZE (submode);
+ elts = size / subsize;
+
+ /* Errors can leave us with a const0_rtx as operand. */
+ if (GET_MODE (op0) != mode)
+ op0 = copy_to_mode_reg (mode, op0);
+
+ if (!target)
+ target = gen_reg_rtx (mode);
+
+ start_sequence ();
+
+ for (i = 0; i < elts; ++i)
+ {
+ /* If this is part of a register, and not the first item in the
+ word, we can't store using a SUBREG - that would clobber
+ previous results.
+ And storing with a SUBREG is only possible for the least
+ significant part, hence we can't do it for big endian
+ (unless we want to permute the evaluation order. */
+ if (GET_CODE (target) == REG
+ && (BYTES_BIG_ENDIAN
+ ? subsize < UNITS_PER_WORD
+ : ((i * subsize) % UNITS_PER_WORD) != 0))
+ t = NULL_RTX;
+ else
+ t = simplify_gen_subreg (submode, target, mode, i * subsize);
+ if (CONSTANT_P (op0))
+ a = simplify_gen_subreg (submode, op0, mode, i * subsize);
+ else
+ a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp,
+ t, submode, submode, size);
+
+ res = expand_unop (submode, unoptab, a, t, unsignedp);
+
+ if (t)
+ emit_move_insn (t, res);
+ else
+ store_bit_field (target, subbitsize, i * subbitsize, submode, res,
+ size);
+ }
+
+ seq = get_insns ();
+ end_sequence ();
+ emit_insn (seq);
+
+ return target;
+}
\f
/* Expand a binary operator which has both signed and unsigned forms.
UOPTAB is the optab for unsigned operations, and SOPTAB is for
rtx pat;
rtx xop0 = op0, xop1 = op1;
- /* In case this insn wants input operands in modes different from the
- result, convert the operands. */
- if (GET_MODE (op0) != VOIDmode && GET_MODE (op0) != mode0)
- xop0 = convert_to_mode (mode0, xop0, unsignedp);
+ /* In case the insn wants input operands in modes different from
+ those of the actual operands, convert the operands. It would
+ seem that we don't need to convert CONST_INTs, but we do, so
+ that they're properly zero-extended or sign-extended for their
+ modes. */
+
+ if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
+ xop0 = convert_modes (mode0,
+ GET_MODE (op0) != VOIDmode
+ ? GET_MODE (op0)
+ : mode,
+ xop0, unsignedp);
- if (GET_MODE (op1) != VOIDmode && GET_MODE (op1) != mode1)
- xop1 = convert_to_mode (mode1, xop1, unsignedp);
+ if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
+ xop1 = convert_modes (mode1,
+ GET_MODE (op1) != VOIDmode
+ ? GET_MODE (op1)
+ : mode,
+ xop1, unsignedp);
/* Now, if insn doesn't accept these operands, put them into pseudos. */
if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
pat = GEN_FCN (icode) (temp, xop0);
if (pat)
{
- if (GET_CODE (pat) == SEQUENCE
+ if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
&& ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
{
delete_insns_since (last);
return target;
}
+ if (class == MODE_VECTOR_FLOAT || class == MODE_VECTOR_INT)
+ return expand_vector_unop (mode, unoptab, op0, target, unsignedp);
+
/* It can't be done in this mode. Can we do it in a wider mode? */
if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
pat = GEN_FCN (icode) (temp, xop0);
if (pat)
{
- if (GET_CODE (pat) == SEQUENCE
+ if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
&& ! add_equal_note (pat, temp, this_abs_optab->code, xop0,
NULL_RTX))
{
pat = GEN_FCN (icode) (temp, op0);
- if (GET_CODE (pat) == SEQUENCE && code != UNKNOWN)
+ if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
add_equal_note (pat, temp, code, op0, NULL_RTX);
emit_insn (pat);
TARGET, OP0, and OP1 are the output and inputs of the operations,
respectively. OP1 may be zero for a unary operation.
- EQUIV, if non-zero, is an expression to be placed into a REG_EQUAL note
+ EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
on the last insn.
If TARGET is not a register, INSNS is simply emitted with no special
rtx prev, next, first, last, insn;
if (GET_CODE (target) != REG || reload_in_progress)
- return emit_insns (insns);
+ return emit_insn (insns);
else
for (insn = insns; insn; insn = NEXT_INSN (insn))
if (GET_CODE (insn) != INSN
|| find_reg_note (insn, REG_LIBCALL, NULL_RTX))
- return emit_insns (insns);
+ return emit_insn (insns);
/* First emit all insns that do not store into words of the output and remove
these from the list. */
for (insn = insns; insn; insn = next)
{
- rtx set = 0;
+ rtx set = 0, note;
int i;
next = NEXT_INSN (insn);
+ /* Some ports (cris) create libcall regions on their own. We must
+ avoid any potential nesting of LIBCALLs. */
+ if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
+ remove_note (insn, note);
+ if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
+ remove_note (insn, note);
+
if (GET_CODE (PATTERN (insn)) == SET || GET_CODE (PATTERN (insn)) == USE
|| GET_CODE (PATTERN (insn)) == CLOBBER)
set = PATTERN (insn);
for (insn = insns; insn; insn = next)
{
rtx set = single_set (insn);
+ rtx note;
+
+ /* Some ports (cris) create libcall regions on their own. We must
+ avoid any potential nesting of LIBCALLs. */
+ if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
+ remove_note (insn, note);
+ if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
+ remove_note (insn, note);
next = NEXT_INSN (insn);
#endif
{
#ifdef TARGET_MEM_FUNCTIONS
- emit_library_call (memcmp_libfunc, LCT_PURE_MAKE_BLOCK,
- TYPE_MODE (integer_type_node), 3,
- XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
- convert_to_mode (TYPE_MODE (sizetype), size,
- TREE_UNSIGNED (sizetype)),
- TYPE_MODE (sizetype));
+ result = emit_library_call_value (memcmp_libfunc, NULL_RTX, LCT_PURE_MAKE_BLOCK,
+ TYPE_MODE (integer_type_node), 3,
+ XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
+ convert_to_mode (TYPE_MODE (sizetype), size,
+ TREE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
#else
- emit_library_call (bcmp_libfunc, LCT_PURE_MAKE_BLOCK,
- TYPE_MODE (integer_type_node), 3,
- XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
- convert_to_mode (TYPE_MODE (integer_type_node),
- size,
- TREE_UNSIGNED (integer_type_node)),
- TYPE_MODE (integer_type_node));
+ result = emit_library_call_value (bcmp_libfunc, NULL_RTX, LCT_PURE_MAKE_BLOCK,
+ TYPE_MODE (integer_type_node), 3,
+ XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
+ convert_to_mode (TYPE_MODE (integer_type_node),
+ size,
+ TREE_UNSIGNED (integer_type_node)),
+ TYPE_MODE (integer_type_node));
#endif
- /* Immediately move the result of the libcall into a pseudo
- register so reload doesn't clobber the value if it needs
- the return register for a spill reg. */
- result = gen_reg_rtx (TYPE_MODE (integer_type_node));
result_mode = TYPE_MODE (integer_type_node);
- emit_move_insn (result,
- hard_libcall_value (result_mode));
}
*px = result;
*py = const0_rtx;
if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
libfunc = ucmp_optab->handlers[(int) mode].libfunc;
- emit_library_call (libfunc, LCT_CONST_MAKE_BLOCK, word_mode, 2, x, mode,
- y, mode);
-
- /* Immediately move the result of the libcall into a pseudo
- register so reload doesn't clobber the value if it needs
- the return register for a spill reg. */
- result = gen_reg_rtx (word_mode);
- emit_move_insn (result, hard_libcall_value (word_mode));
+ result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
+ word_mode, 2, x, mode, y, mode);
/* Integer comparison returns a result that must be compared against 1,
so that even if we do an unsigned compare afterward,
int *punsignedp;
{
enum rtx_code comparison = *pcomparison;
+ rtx tmp;
rtx x = *px = protect_from_queue (*px, 0);
rtx y = *py = protect_from_queue (*py, 0);
enum machine_mode mode = GET_MODE (x);
case GT:
libfunc = gthf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = LT;
+ libfunc = lthf2_libfunc;
+ }
break;
case GE:
libfunc = gehf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = LE;
+ libfunc = lehf2_libfunc;
+ }
break;
case LT:
libfunc = lthf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = GT;
+ libfunc = gthf2_libfunc;
+ }
break;
case LE:
libfunc = lehf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = GE;
+ libfunc = gehf2_libfunc;
+ }
break;
case UNORDERED:
case GT:
libfunc = gtsf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = LT;
+ libfunc = ltsf2_libfunc;
+ }
break;
case GE:
libfunc = gesf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = LE;
+ libfunc = lesf2_libfunc;
+ }
break;
case LT:
libfunc = ltsf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = GT;
+ libfunc = gtsf2_libfunc;
+ }
break;
case LE:
libfunc = lesf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = GE;
+ libfunc = gesf2_libfunc;
+ }
break;
case UNORDERED:
case GT:
libfunc = gtdf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = LT;
+ libfunc = ltdf2_libfunc;
+ }
break;
case GE:
libfunc = gedf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = LE;
+ libfunc = ledf2_libfunc;
+ }
break;
case LT:
libfunc = ltdf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = GT;
+ libfunc = gtdf2_libfunc;
+ }
break;
case LE:
libfunc = ledf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = GE;
+ libfunc = gedf2_libfunc;
+ }
break;
case UNORDERED:
case GT:
libfunc = gtxf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = LT;
+ libfunc = ltxf2_libfunc;
+ }
break;
case GE:
libfunc = gexf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = LE;
+ libfunc = lexf2_libfunc;
+ }
break;
case LT:
libfunc = ltxf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = GT;
+ libfunc = gtxf2_libfunc;
+ }
break;
case LE:
libfunc = lexf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = GE;
+ libfunc = gexf2_libfunc;
+ }
break;
case UNORDERED:
case GT:
libfunc = gttf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = LT;
+ libfunc = lttf2_libfunc;
+ }
break;
case GE:
libfunc = getf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = LE;
+ libfunc = letf2_libfunc;
+ }
break;
case LT:
libfunc = lttf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = GT;
+ libfunc = gttf2_libfunc;
+ }
break;
case LE:
libfunc = letf2_libfunc;
+ if (libfunc == NULL_RTX)
+ {
+ tmp = x; x = y; y = tmp;
+ *pcomparison = GE;
+ libfunc = getf2_libfunc;
+ }
break;
case UNORDERED:
if (libfunc == 0)
abort ();
- emit_library_call (libfunc, LCT_CONST_MAKE_BLOCK, word_mode, 2, x, mode, y,
- mode);
-
- /* Immediately move the result of the libcall into a pseudo
- register so reload doesn't clobber the value if it needs
- the return register for a spill reg. */
- result = gen_reg_rtx (word_mode);
- emit_move_insn (result, hard_libcall_value (word_mode));
+ result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
+ word_mode, 2, x, mode, y, mode);
*px = result;
*py = const0_rtx;
*pmode = word_mode;
return target;
}
-/* Return non-zero if a conditional move of mode MODE is supported.
+/* Return nonzero if a conditional move of mode MODE is supported.
This function is for combine so it can tell whether an insn that looks
like a conditional move is actually supported by the hardware. If we
}
/* Generate the body of an instruction to copy Y into X.
- It may be a SEQUENCE, if one insn isn't enough. */
+ It may be a list of insns, if one insn isn't enough. */
rtx
gen_move_insn (x, y)
start_sequence ();
emit_move_insn_1 (x, y);
- seq = gen_sequence ();
+ seq = get_insns ();
end_sequence ();
return seq;
}
}
}
-#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
-
/* Unsigned integer, and no way to convert directly.
Convert as signed, then conditionally adjust the result. */
if (unsignedp)
emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
0, label);
- /* On SCO 3.2.1, ldexp rejects values outside [0.5, 1).
- Rather than setting up a dconst_dot_5, let's hope SCO
- fixes the bug. */
- offset = REAL_VALUE_LDEXP (dconst1, GET_MODE_BITSIZE (GET_MODE (from)));
+
+ real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
temp = expand_binop (fmode, add_optab, target,
CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
target, 0, OPTAB_LIB_WIDEN);
emit_label (label);
goto done;
}
-#endif
/* No hardware instruction available; call a library routine to convert from
SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode. */
}
}
-#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
/* For an unsigned conversion, there is one more way to do it.
If we have a signed conversion, we generate code that compares
the real value to the largest representable positive number. If if
rtx limit, lab1, lab2, insn;
bitsize = GET_MODE_BITSIZE (GET_MODE (to));
- offset = REAL_VALUE_LDEXP (dconst1, bitsize - 1);
+ real_2expN (&offset, bitsize - 1);
limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
lab1 = gen_label_rtx ();
lab2 = gen_label_rtx ();
NULL_RTX, 0, OPTAB_LIB_WIDEN);
expand_fix (to, target, 0);
target = expand_binop (GET_MODE (to), xor_optab, to,
- GEN_INT (trunc_int_for_mode
- ((HOST_WIDE_INT) 1 << (bitsize - 1),
- GET_MODE (to))),
+ gen_int_mode
+ ((HOST_WIDE_INT) 1 << (bitsize - 1),
+ GET_MODE (to)),
to, 1, OPTAB_LIB_WIDEN);
if (target != to)
return;
}
-#endif
/* We can't do it with an insn, so use a library call. But first ensure
that the mode of TO is at least as wide as SImode, since those are the
new_optab ()
{
int i;
- optab op = (optab) xmalloc (sizeof (struct optab));
+ optab op = (optab) ggc_alloc (sizeof (struct optab));
for (i = 0; i < NUM_MACHINE_MODES; i++)
{
op->handlers[i].insn_code = CODE_FOR_nothing;
init_one_libfunc (name)
const char *name;
{
- /* Create a FUNCTION_DECL that can be passed to ENCODE_SECTION_INFO. */
+ /* Create a FUNCTION_DECL that can be passed to
+ targetm.encode_section_info. */
/* ??? We don't have any type information except for this is
a function. Pretend this is "int foo()". */
tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
return XEXP (DECL_RTL (decl), 0);
}
-/* Mark ARG (which is really an OPTAB *) for GC. */
-
-void
-mark_optab (arg)
- void *arg;
-{
- optab o = *(optab *) arg;
- int i;
-
- for (i = 0; i < NUM_MACHINE_MODES; ++i)
- ggc_mark_rtx (o->handlers[i].libfunc);
-}
-
/* Call this once to initialize the contents of the optabs
appropriately for the current target machine. */
sqrt_optab = init_optab (SQRT);
sin_optab = init_optab (UNKNOWN);
cos_optab = init_optab (UNKNOWN);
+ exp_optab = init_optab (UNKNOWN);
+ log_optab = init_optab (UNKNOWN);
strlen_optab = init_optab (UNKNOWN);
cbranch_optab = init_optab (UNKNOWN);
cmov_optab = init_optab (UNKNOWN);
truncxfdf2_libfunc = init_one_libfunc ("__truncxfdf2");
trunctfdf2_libfunc = init_one_libfunc ("__trunctfdf2");
+ abort_libfunc = init_one_libfunc ("abort");
memcpy_libfunc = init_one_libfunc ("memcpy");
memmove_libfunc = init_one_libfunc ("memmove");
bcopy_libfunc = init_one_libfunc ("bcopy");
/* Allow the target to add more libcalls or rename some, etc. */
INIT_TARGET_OPTABS;
#endif
-
- /* Add these GC roots. */
- ggc_add_root (optab_table, OTI_MAX, sizeof(optab), mark_optab);
- ggc_add_rtx_root (libfunc_table, LTI_MAX);
}
\f
+static GTY(()) rtx trap_rtx;
+
#ifdef HAVE_conditional_trap
/* The insn generating function can not take an rtx_code argument.
TRAP_RTX is used as an rtx argument. Its code is replaced with
the code to be used in the trap insn and all other fields are
ignored. */
-static rtx trap_rtx;
static void
init_traps ()
if (HAVE_conditional_trap)
{
trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
- ggc_add_rtx_root (&trap_rtx, 1);
}
}
#endif
if (insn)
{
emit_insn (insn);
- insn = gen_sequence ();
+ insn = get_insns ();
}
end_sequence();
return insn;
return 0;
}
+
+#include "gt-optabs.h"