/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
- Copyright (C) 1987, 88, 92, 93, 94, 1995 Free Software Foundation, Inc.
+ Copyright (C) 1987, 88, 92, 93, 94, 95, 1996 Free Software Foundation, Inc.
This file is part of GNU CC.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
#include "config.h"
/* Tables of patterns for extending one integer mode to another. */
enum insn_code extendtab[MAX_MACHINE_MODE][MAX_MACHINE_MODE][2];
-/* Tables of patterns for converting between fixed and floating point. */
+/* Tables of patterns for converting between fixed and floating point. */
enum insn_code fixtab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
enum insn_code fixtrunctab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
enum insn_code floattab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
rtx memset_libfunc;
rtx bzero_libfunc;
+rtx throw_libfunc;
+rtx sjthrow_libfunc;
+rtx sjpopnthrow_libfunc;
+rtx terminate_libfunc;
+rtx setjmp_libfunc;
+rtx longjmp_libfunc;
+rtx get_dynamic_handler_chain_libfunc;
+
rtx eqhf2_libfunc;
rtx nehf2_libfunc;
rtx gthf2_libfunc;
enum insn_code setcc_gen_code[NUM_RTX_CODE];
+#ifdef HAVE_conditional_move
+/* Indexed by the machine mode, gives the insn code to make a conditional
+ move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
+ setcc_gen_code to cut down on the number of named patterns. Consider a day
+ when a lot more rtx codes are conditional (eg: for the ARM). */
+
+enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
+#endif
+
static int add_equal_note PROTO((rtx, rtx, enum rtx_code, rtx, rtx));
static rtx widen_operand PROTO((rtx, enum machine_mode,
enum machine_mode, int, int));
temp = gen_reg_rtx (mode);
/* If it is a commutative operator and the modes would match
- if we would swap the operands, we can save the conversions. */
+ if we would swap the operands, we can save the conversions. */
if (commutative_op)
{
if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
{
/* Store carry from main add/subtract. */
carry_out = gen_reg_rtx (word_mode);
- carry_out = emit_store_flag (carry_out,
- binoptab == add_optab ? LTU : GTU,
- x, op0_piece,
- word_mode, 1, normalizep);
- if (carry_out == 0)
- break;
+ carry_out = emit_store_flag_force (carry_out,
+ (binoptab == add_optab
+ ? LTU : GTU),
+ x, op0_piece,
+ word_mode, 1, normalizep);
}
if (i > 0)
{
/* THIS CODE HAS NOT BEEN TESTED. */
/* Get out carry from adding/subtracting carry in. */
- carry_tmp = emit_store_flag (carry_tmp,
- binoptab == add_optab
- ? LTU : GTU,
- x, carry_in,
- word_mode, 1, normalizep);
+ carry_tmp = emit_store_flag_force (carry_tmp,
+ binoptab == add_optab
+ ? LTU : GTU,
+ x, carry_in,
+ word_mode, 1, normalizep);
/* Logical-ior the two poss. carry together. */
carry_out = expand_binop (word_mode, ior_optab,
if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
{
- rtx temp = emit_move_insn (target, target);
+ if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ rtx temp = emit_move_insn (target, target);
- REG_NOTES (temp) = gen_rtx (EXPR_LIST, REG_EQUAL,
- gen_rtx (binoptab->code, mode,
- copy_rtx (xop0),
- copy_rtx (xop1)),
- REG_NOTES (temp));
+ REG_NOTES (temp) = gen_rtx (EXPR_LIST, REG_EQUAL,
+ gen_rtx (binoptab->code, mode,
+ copy_rtx (xop0),
+ copy_rtx (xop1)),
+ REG_NOTES (temp));
+ }
return target;
}
else
/* If the target is the same as one of the inputs, don't use it. This
prevents problems with the REG_EQUAL note. */
- if (target == op0 || target == op1)
+ if (target == op0 || target == op1
+ || (target != 0 && GET_CODE (target) != REG))
target = 0;
/* Multiply the two lower words to get a double-word product.
if (temp != 0)
{
- temp = emit_move_insn (product, product);
- REG_NOTES (temp) = gen_rtx (EXPR_LIST, REG_EQUAL,
- gen_rtx (MULT, mode, copy_rtx (op0),
- copy_rtx (op1)),
- REG_NOTES (temp));
-
+ if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ temp = emit_move_insn (product, product);
+ REG_NOTES (temp) = gen_rtx (EXPR_LIST, REG_EQUAL,
+ gen_rtx (MULT, mode,
+ copy_rtx (op0),
+ copy_rtx (op1)),
+ REG_NOTES (temp));
+ }
return product;
}
}
op1x = convert_to_mode (word_mode, op1, 1);
}
- if (GET_MODE (op0) != mode)
+ if (GET_MODE (op0) != VOIDmode
+ && GET_MODE (op0) != mode)
op0 = convert_to_mode (mode, op0, unsignedp);
/* Pass 1 for NO_QUEUE so we don't lose any increments
}
/* If that does not win, use conditional jump and negate. */
+
+ /* It is safe to use the target if it is the same
+ as the source if this is also a pseudo register */
+ if (op0 == target && GET_CODE (op0) == REG
+ && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
+ safe = 1;
+
op1 = gen_label_rtx ();
if (target == 0 || ! safe
|| GET_MODE (target) != mode
op0 = protect_from_queue (op0, 0);
- if (flag_force_mem)
+ /* Sign and zero extension from memory is often done specially on
+ RISC machines, so forcing into a register here can pessimize
+ code. */
+ if (flag_force_mem && code != SIGN_EXTEND && code != ZERO_EXTEND)
op0 = force_not_mem (op0);
/* Now, if insn does not accept our operands, put them into pseudos. */
}
last = emit_move_insn (target, result);
- REG_NOTES (last) = gen_rtx (EXPR_LIST,
- REG_EQUAL, copy_rtx (equiv), REG_NOTES (last));
+ if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
+ != CODE_FOR_nothing)
+ REG_NOTES (last) = gen_rtx (EXPR_LIST,
+ REG_EQUAL, copy_rtx (equiv), REG_NOTES (last));
if (prev == 0)
first = get_insns ();
else
#endif
{
+ rtx result;
+
#ifdef TARGET_MEM_FUNCTIONS
emit_library_call (memcmp_libfunc, 0,
TYPE_MODE (integer_type_node), 3,
XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
- size, Pmode);
+ convert_to_mode (TYPE_MODE (sizetype), size,
+ TREE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
#else
emit_library_call (bcmp_libfunc, 0,
TYPE_MODE (integer_type_node), 3,
XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
- size, Pmode);
+ convert_to_mode (TYPE_MODE (integer_type_node),
+ size,
+ TREE_UNSIGNED (integer_type_node)),
+ TYPE_MODE (integer_type_node));
#endif
- emit_cmp_insn (hard_libcall_value (TYPE_MODE (integer_type_node)),
+
+ /* Immediately move the result of the libcall into a pseudo
+ register so reload doesn't clobber the value if it needs
+ the return register for a spill reg. */
+ result = gen_reg_rtx (TYPE_MODE (integer_type_node));
+ emit_move_insn (result,
+ hard_libcall_value (TYPE_MODE (integer_type_node)));
+ emit_cmp_insn (result,
const0_rtx, comparison, NULL_RTX,
TYPE_MODE (integer_type_node), 0, 0);
}
&& class != MODE_FLOAT)
{
rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
+ rtx result;
+
/* If we want unsigned, and this mode has a distinct unsigned
comparison routine, use that. */
if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
emit_library_call (libfunc, 1,
word_mode, 2, x, mode, y, mode);
+ /* Immediately move the result of the libcall into a pseudo
+ register so reload doesn't clobber the value if it needs
+ the return register for a spill reg. */
+ result = gen_reg_rtx (word_mode);
+ emit_move_insn (result, hard_libcall_value (word_mode));
+
/* Integer comparison returns a result that must be compared against 1,
so that even if we do an unsigned compare afterward,
there is still a value that can represent the result "less than". */
-
- emit_cmp_insn (hard_libcall_value (word_mode), const1_rtx,
+ emit_cmp_insn (result, const1_rtx,
comparison, NULL_RTX, word_mode, unsignedp, 0);
return;
}
{
enum machine_mode mode = GET_MODE (x);
rtx libfunc = 0;
+ rtx result;
if (mode == HFmode)
switch (comparison)
emit_library_call (libfunc, 1,
word_mode, 2, x, mode, y, mode);
- emit_cmp_insn (hard_libcall_value (word_mode), const0_rtx, comparison,
+ /* Immediately move the result of the libcall into a pseudo
+ register so reload doesn't clobber the value if it needs
+ the return register for a spill reg. */
+ result = gen_reg_rtx (word_mode);
+ emit_move_insn (result, hard_libcall_value (word_mode));
+
+ emit_cmp_insn (result, const0_rtx, comparison,
NULL_RTX, word_mode, 0, 0);
}
\f
emit_barrier ();
}
\f
+#ifdef HAVE_conditional_move
+
+/* Emit a conditional move instruction if the machine supports one for that
+ condition and machine mode.
+
+ OP0 and OP1 are the operands that should be compared using CODE. CMODE is
+ the mode to use should they be constants. If it is VOIDmode, they cannot
+ both be constants.
+
+ OP2 should be stored in TARGET if the comparison is true, otherwise OP3
+ should be stored there. MODE is the mode to use should they be constants.
+ If it is VOIDmode, they cannot both be constants.
+
+ The result is either TARGET (perhaps modified) or NULL_RTX if the operation
+ is not supported. */
+
+rtx
+emit_conditional_move (target, code, op0, op1, cmode, op2, op3, mode,
+		       unsignedp)
+     rtx target;
+     enum rtx_code code;
+     rtx op0, op1;
+     enum machine_mode cmode;
+     rtx op2, op3;
+     enum machine_mode mode;
+     int unsignedp;
+{
+  rtx tem, subtarget, comparison, insn;
+  enum insn_code icode;
+
+  /* If one operand is constant, make it the second one.  Only do this
+     if the other operand is not constant as well.  Swapping the
+     comparison operands requires swapping the condition code too.  */
+
+  if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
+      || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
+    {
+      tem = op0;
+      op0 = op1;
+      op1 = tem;
+      code = swap_condition (code);
+    }
+
+  /* If the caller passed VOIDmode for the comparison mode, infer it
+     from OP0 (the comment above documents that OP0 and OP1 cannot
+     then both be constants).  */
+  if (cmode == VOIDmode)
+    cmode = GET_MODE (op0);
+
+  /* Likewise canonicalize the value operands: prefer a non-constant
+     OP2.  Swapping the value operands means reversing the condition,
+     not merely swapping it.  */
+  if ((CONSTANT_P (op2) && ! CONSTANT_P (op3))
+      || (GET_CODE (op2) == CONST_INT && GET_CODE (op3) != CONST_INT))
+    {
+      tem = op2;
+      op2 = op3;
+      op3 = tem;
+      /* ??? This may not be appropriate (consider IEEE).  Perhaps we should
+	 call can_reverse_comparison_p here and bail out if necessary.
+	 It's not clear whether we need to do this canonicalization though.  */
+      code = reverse_condition (code);
+    }
+
+  if (mode == VOIDmode)
+    mode = GET_MODE (op2);
+
+  /* The machine must provide a conditional-move pattern for this mode;
+     if it does not, tell the caller the operation is unsupported.  */
+  icode = movcc_gen_code[mode];
+
+  if (icode == CODE_FOR_nothing)
+    return 0;
+
+  if (flag_force_mem)
+    {
+      op2 = force_not_mem (op2);
+      op3 = force_not_mem (op3);
+    }
+
+  if (target)
+    target = protect_from_queue (target, 1);
+  else
+    target = gen_reg_rtx (mode);
+
+  subtarget = target;
+
+  emit_queue ();
+
+  op2 = protect_from_queue (op2, 0);
+  op3 = protect_from_queue (op3, 0);
+
+  /* If the insn doesn't accept these operands, put them in pseudos.  */
+
+  if (! (*insn_operand_predicate[icode][0])
+      (subtarget, insn_operand_mode[icode][0]))
+    subtarget = gen_reg_rtx (insn_operand_mode[icode][0]);
+
+  if (! (*insn_operand_predicate[icode][2])
+      (op2, insn_operand_mode[icode][2]))
+    op2 = copy_to_mode_reg (insn_operand_mode[icode][2], op2);
+
+  if (! (*insn_operand_predicate[icode][3])
+      (op3, insn_operand_mode[icode][3]))
+    op3 = copy_to_mode_reg (insn_operand_mode[icode][3], op3);
+
+  /* Everything should now be in the suitable form, so emit the compare insn
+     and then the conditional move.  */
+
+  comparison
+    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX, 0);
+
+  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
+  if (GET_CODE (comparison) != code)
+    /* This shouldn't happen.  */
+    abort ();
+
+  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
+
+  /* If that failed, then give up.  */
+  if (insn == 0)
+    return 0;
+
+  emit_insn (insn);
+
+  /* If the pattern required a pseudo distinct from TARGET, copy the
+     result back into the caller's TARGET.  */
+  if (subtarget != target)
+    convert_move (target, subtarget, 0);
+
+  return target;
+}
+
+/* Return non-zero if a conditional move of mode MODE is supported.
+
+ This function is for combine so it can tell whether an insn that looks
+ like a conditional move is actually supported by the hardware. If we
+ guess wrong we lose a bit on optimization, but that's it. */
+/* ??? sparc64 supports conditionally moving integer values based on fp
+   comparisons, and vice versa.  How do we handle them?  */
+
+int
+can_conditionally_move_p (mode)
+     enum machine_mode mode;
+{
+  /* A conditional move is supported exactly when a movMODEcc insn
+     pattern was recorded for MODE at optab-initialization time.  */
+  if (movcc_gen_code[mode] != CODE_FOR_nothing)
+    return 1;
+
+  return 0;
+}
+
+#endif /* HAVE_conditional_move */
+\f
/* These three functions generate an insn body and return it
rather than emitting the insn.
/* There is no such mode. Pretend the target is wide enough. */
fmode = GET_MODE (to);
- /* Avoid double-rounding when TO is narrower than FROM. */
+ /* Avoid double-rounding when TO is narrower than FROM. */
if ((significand_size (fmode) + 1)
< GET_MODE_BITSIZE (GET_MODE (from)))
{
/* The sign bit is not set. Convert as signed. */
expand_float (target, from, 0);
emit_jump_insn (gen_jump (label));
+ emit_barrier ();
/* The sign bit is set.
Convert to a usable (positive signed) value by shifting right
emit_label (lab2);
- /* Make a place for a REG_NOTE and add it. */
- insn = emit_move_insn (to, to);
- REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL,
- gen_rtx (UNSIGNED_FIX, GET_MODE (to),
- copy_rtx (from)),
- REG_NOTES (insn));
-
+ if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
+ != CODE_FOR_nothing)
+ {
+ /* Make a place for a REG_NOTE and add it. */
+ insn = emit_move_insn (to, to);
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL,
+ gen_rtx (UNSIGNED_FIX, GET_MODE (to),
+ copy_rtx (from)),
+ REG_NOTES (insn));
+ }
return;
}
#endif
GET_MODE (to), from));
}
- if (GET_MODE (to) == GET_MODE (target))
- emit_move_insn (to, target);
- else
- convert_move (to, target, 0);
+ if (target != to)
+ {
+ if (GET_MODE (to) == GET_MODE (target))
+ emit_move_insn (to, target);
+ else
+ convert_move (to, target, 0);
+ }
}
\f
static optab
register int first_mode;
register int last_mode;
register char *opname;
- register char suffix;
+ register int suffix;
{
register int mode;
register unsigned opname_len = strlen (opname);
init_integral_libfuncs (optable, opname, suffix)
register optab optable;
register char *opname;
- register char suffix;
+ register int suffix;
{
init_libfuncs (optable, SImode, TImode, opname, suffix);
}
init_floating_libfuncs (optable, opname, suffix)
register optab optable;
register char *opname;
- register char suffix;
+ register int suffix;
{
init_libfuncs (optable, SFmode, TFmode, opname, suffix);
}
init_complex_libfuncs (optable, opname, suffix)
register optab optable;
register char *opname;
- register char suffix;
+ register int suffix;
{
init_libfuncs (optable, SCmode, TCmode, opname, suffix);
}
for (i = 0; i < NUM_RTX_CODE; i++)
setcc_gen_code[i] = CODE_FOR_nothing;
+#ifdef HAVE_conditional_move
+ for (i = 0; i < NUM_MACHINE_MODES; i++)
+ movcc_gen_code[i] = CODE_FOR_nothing;
+#endif
+
add_optab = init_optab (PLUS);
sub_optab = init_optab (MINUS);
smul_optab = init_optab (MULT);
for (i = 0; i < NUM_MACHINE_MODES; i++)
{
movstr_optab[i] = CODE_FOR_nothing;
+ clrstr_optab[i] = CODE_FOR_nothing;
#ifdef HAVE_SECONDARY_RELOADS
reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
smul_optab->handlers[(int) DImode].libfunc
= gen_rtx (SYMBOL_REF, Pmode, MULDI3_LIBCALL);
#endif
-#ifdef MULTI3_LIBCALL
- smul_optab->handlers[(int) TImode].libfunc
- = gen_rtx (SYMBOL_REF, Pmode, MULTI3_LIBCALL);
-#endif
#ifdef DIVSI3_LIBCALL
sdiv_optab->handlers[(int) SImode].libfunc
sdiv_optab->handlers[(int) DImode].libfunc
= gen_rtx (SYMBOL_REF, Pmode, DIVDI3_LIBCALL);
#endif
-#ifdef DIVTI3_LIBCALL
- sdiv_optab->handlers[(int) TImode].libfunc
- = gen_rtx (SYMBOL_REF, Pmode, DIVTI3_LIBCALL);
-#endif
#ifdef UDIVSI3_LIBCALL
udiv_optab->handlers[(int) SImode].libfunc
udiv_optab->handlers[(int) DImode].libfunc
= gen_rtx (SYMBOL_REF, Pmode, UDIVDI3_LIBCALL);
#endif
-#ifdef UDIVTI3_LIBCALL
- udiv_optab->handlers[(int) TImode].libfunc
- = gen_rtx (SYMBOL_REF, Pmode, UDIVTI3_LIBCALL);
-#endif
-
#ifdef MODSI3_LIBCALL
smod_optab->handlers[(int) SImode].libfunc
smod_optab->handlers[(int) DImode].libfunc
= gen_rtx (SYMBOL_REF, Pmode, MODDI3_LIBCALL);
#endif
-#ifdef MODTI3_LIBCALL
- smod_optab->handlers[(int) TImode].libfunc
- = gen_rtx (SYMBOL_REF, Pmode, MODTI3_LIBCALL);
-#endif
-
#ifdef UMODSI3_LIBCALL
umod_optab->handlers[(int) SImode].libfunc
umod_optab->handlers[(int) DImode].libfunc
= gen_rtx (SYMBOL_REF, Pmode, UMODDI3_LIBCALL);
#endif
-#ifdef UMODTI3_LIBCALL
- umod_optab->handlers[(int) TImode].libfunc
- = gen_rtx (SYMBOL_REF, Pmode, UMODTI3_LIBCALL);
-#endif
-
-/* Define library calls for quad FP instructions */
-#ifdef ADDTF3_LIBCALL
- add_optab->handlers[(int) TFmode].libfunc
- = gen_rtx (SYMBOL_REF, Pmode, ADDTF3_LIBCALL);
-#endif
-#ifdef SUBTF3_LIBCALL
- sub_optab->handlers[(int) TFmode].libfunc
- = gen_rtx (SYMBOL_REF, Pmode, SUBTF3_LIBCALL);
-#endif
-#ifdef MULTF3_LIBCALL
- smul_optab->handlers[(int) TFmode].libfunc
- = gen_rtx (SYMBOL_REF, Pmode, MULTF3_LIBCALL);
-#endif
-#ifdef DIVTF3_LIBCALL
- flodiv_optab->handlers[(int) TFmode].libfunc
- = gen_rtx (SYMBOL_REF, Pmode, DIVTF3_LIBCALL);
-#endif
-#ifdef SQRTTF2_LIBCALL
- sqrt_optab->handlers[(int) TFmode].libfunc
- = gen_rtx (SYMBOL_REF, Pmode, SQRTTF2_LIBCALL);
-#endif
/* Use cabs for DC complex abs, since systems generally have cabs.
Don't define any libcall for SCmode, so that cabs will be used. */
memset_libfunc = gen_rtx (SYMBOL_REF, Pmode, "memset");
bzero_libfunc = gen_rtx (SYMBOL_REF, Pmode, "bzero");
+ throw_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__throw");
+ sjthrow_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__sjthrow");
+ sjpopnthrow_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__sjpopnthrow");
+ terminate_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__terminate");
+#ifdef USE_BUILTIN_SETJMP
+ setjmp_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__builtin_setjmp");
+ longjmp_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__builtin_longjmp");
+#else
+ setjmp_libfunc = gen_rtx (SYMBOL_REF, Pmode, "setjmp");
+ longjmp_libfunc = gen_rtx (SYMBOL_REF, Pmode, "longjmp");
+#endif
+ get_dynamic_handler_chain_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__get_dynamic_handler_chain");
+
eqhf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__eqhf2");
nehf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__nehf2");
gthf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__gthf2");
lttf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__lttf2");
letf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__letf2");
-/* Define library calls for quad FP instructions */
-#ifdef EQTF2_LIBCALL
- eqtf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, EQTF2_LIBCALL);
-#endif
-#ifdef NETF2_LIBCALL
- netf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, NETF2_LIBCALL);
-#endif
-#ifdef GTTF2_LIBCALL
- gttf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, GTTF2_LIBCALL);
-#endif
-#ifdef GETF2_LIBCALL
- getf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, GETF2_LIBCALL);
-#endif
-#ifdef LTTF2_LIBCALL
- lttf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, LTTF2_LIBCALL);
-#endif
-#ifdef LETF2_LIBCALL
- letf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, LETF2_LIBCALL);
-#endif
-
floatsisf_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__floatsisf");
floatdisf_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__floatdisf");
floattisf_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__floattisf");
fixunstfdi_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixunstfdi");
fixunstfti_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixunstfti");
-/* Define library calls for quad FP instructions */
-#ifdef TRUNCTFSF2_LIBCALL
- trunctfsf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, TRUNCTFSF2_LIBCALL);
-#endif
-#ifdef TRUNCTFDF2_LIBCALL
- trunctfdf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, TRUNCTFDF2_LIBCALL);
-#endif
-#ifdef EXTENDSFTF2_LIBCALL
- extendsftf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, EXTENDSFTF2_LIBCALL);
-#endif
-#ifdef EXTENDDFTF2_LIBCALL
- extenddftf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, EXTENDDFTF2_LIBCALL);
-#endif
-#ifdef FLOATSITF2_LIBCALL
- floatsitf_libfunc = gen_rtx (SYMBOL_REF, Pmode, FLOATSITF2_LIBCALL);
-#endif
-#ifdef FIX_TRUNCTFSI2_LIBCALL
- fixtfsi_libfunc = gen_rtx (SYMBOL_REF, Pmode, FIX_TRUNCTFSI2_LIBCALL);
-#endif
-#ifdef FIXUNS_TRUNCTFSI2_LIBCALL
- fixunstfsi_libfunc = gen_rtx (SYMBOL_REF, Pmode, FIXUNS_TRUNCTFSI2_LIBCALL);
-#endif
-
#ifdef INIT_TARGET_OPTABS
/* Allow the target to add more libcalls or rename some, etc. */
INIT_TARGET_OPTABS;
\f
#ifdef BROKEN_LDEXP
-/* SCO 3.2 apparently has a broken ldexp. */
+/* SCO 3.2 apparently has a broken ldexp. */
double
ldexp(x,n)