/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
- Copyright (C) 1987, 1988, 1992, 1993, 1994 Free Software Foundation, Inc.
+ Copyright (C) 1987, 88, 92, 93, 94, 1995 Free Software Foundation, Inc.
This file is part of GNU CC.
optab add_optab;
optab sub_optab;
optab smul_optab;
+optab smul_highpart_optab;
+optab umul_highpart_optab;
optab smul_widen_optab;
optab umul_widen_optab;
optab sdiv_optab;
rtx memset_libfunc;
rtx bzero_libfunc;
+rtx eqhf2_libfunc;
+rtx nehf2_libfunc;
+rtx gthf2_libfunc;
+rtx gehf2_libfunc;
+rtx lthf2_libfunc;
+rtx lehf2_libfunc;
+
rtx eqsf2_libfunc;
rtx nesf2_libfunc;
rtx gtsf2_libfunc;
enum insn_code setcc_gen_code[NUM_RTX_CODE];
+#ifdef HAVE_conditional_move
+/* Indexed by the machine mode, gives the insn code to make a conditional
+ move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
+ setcc_gen_code to cut down on the number of named patterns. Consider a day
+ when a lot more rtx codes are conditional (eg: for the ARM). */
+
+enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
+#endif
+
static int add_equal_note PROTO((rtx, rtx, enum rtx_code, rtx, rtx));
static rtx widen_operand PROTO((rtx, enum machine_mode,
enum machine_mode, int, int));
op0 = force_reg (mode, op0);
if (CONSTANT_P (op1) && preserve_subexpressions_p ()
- && rtx_cost (op1, binoptab->code) > 2)
- op1 = force_reg (shift_op ? word_mode : mode, op1);
+ && ! shift_op && rtx_cost (op1, binoptab->code) > 2)
+ op1 = force_reg (mode, op1);
/* Record where to delete back to if we backtrack. */
last = get_last_insn ();
Also try to make the last operand a constant. */
if (GET_RTX_CLASS (binoptab->code) == 'c'
|| binoptab == smul_widen_optab
- || binoptab == umul_widen_optab)
+ || binoptab == umul_widen_optab
+ || binoptab == smul_highpart_optab
+ || binoptab == umul_highpart_optab)
{
commutative_op = 1;
the result, convert the operands. */
if (GET_MODE (op0) != VOIDmode
- && GET_MODE (op0) != mode0)
+ && GET_MODE (op0) != mode0
+ && mode0 != VOIDmode)
xop0 = convert_to_mode (mode0, xop0, unsignedp);
if (GET_MODE (xop1) != VOIDmode
- && GET_MODE (xop1) != mode1)
+ && GET_MODE (xop1) != mode1
+ && mode1 != VOIDmode)
xop1 = convert_to_mode (mode1, xop1, unsignedp);
/* Now, if insn's predicates don't allow our operands, put them into
pseudo regs. */
- if (! (*insn_operand_predicate[icode][1]) (xop0, mode0))
+ if (! (*insn_operand_predicate[icode][1]) (xop0, mode0)
+ && mode0 != VOIDmode)
xop0 = copy_to_mode_reg (mode0, xop0);
- if (! (*insn_operand_predicate[icode][2]) (xop1, mode1))
+ if (! (*insn_operand_predicate[icode][2]) (xop1, mode1)
+ && mode1 != VOIDmode)
xop1 = copy_to_mode_reg (mode1, xop1);
if (! (*insn_operand_predicate[icode][0]) (temp, mode))
if (carries == 0)
inter = 0;
else
- inter = expand_binop (word_mode, binoptab, outof_input,
- op1, outof_target, unsignedp, next_methods);
-
- if (inter != 0 && inter != outof_target)
- emit_move_insn (outof_target, inter);
-
- if (inter != 0)
inter = expand_binop (word_mode, unsigned_shift, into_input,
op1, 0, unsignedp, next_methods);
if (inter != 0 && inter != into_target)
emit_move_insn (into_target, inter);
+
+ if (inter != 0)
+ inter = expand_binop (word_mode, binoptab, outof_input,
+ op1, outof_target, unsignedp, next_methods);
+
+ if (inter != 0 && inter != outof_target)
+ emit_move_insn (outof_target, inter);
}
insns = get_insns ();
NULL_RTX, unsignedp, methods);
if (temp1 == 0 || temp2 == 0)
- res = expand_binop (submode, add_optab, temp1, temp2,
- imagr, unsignedp, methods);
+ break;
+
+ res = expand_binop (submode, add_optab, temp1, temp2,
+ imagr, unsignedp, methods);
if (res == 0)
break;
NULL_RTX, unsignedp, methods);
if (temp1 == 0 || temp2 == 0)
- imag_t = expand_binop (submode, sub_optab, temp1, temp2,
- NULL_RTX, unsignedp, methods);
+ break;
+
+ imag_t = expand_binop (submode, sub_optab, temp1, temp2,
+ NULL_RTX, unsignedp, methods);
if (real_t == 0 || imag_t == 0)
break;
op1x = convert_to_mode (word_mode, op1, 1);
}
+ if (GET_MODE (op0) != mode)
+ op0 = convert_to_mode (mode, op0, unsignedp);
+
/* Pass 1 for NO_QUEUE so we don't lose any increments
if the libcall is cse'd or moved. */
value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
}
}
+ /* If there is no negate operation, try doing a subtract from zero.
+ The US Software GOFAST library needs this. */
+ if (unoptab == neg_optab)
+ {
+ rtx temp;
+ temp = expand_binop (mode, sub_optab, CONST0_RTX (mode), op0,
+ target, unsignedp, OPTAB_LIB_WIDEN);
+ if (temp)
+ return temp;
+ }
+
return 0;
}
\f
MODE is the mode of the operand; the mode of the result is
different but can be deduced from MODE.
+ UNSIGNEDP is relevant if extension is needed. */
+
+rtx
+expand_abs (mode, op0, target, unsignedp, safe)
+ enum machine_mode mode;
+ rtx op0;
+ rtx target;
+ int unsignedp;
+ int safe;
+{
+ rtx temp, op1;
+
+ /* First try to do it with a special abs instruction. */
+ temp = expand_unop (mode, abs_optab, op0, target, 0);
+ if (temp != 0)
+ return temp;
+
+ /* If this machine has expensive jumps, we can do integer absolute
+ value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
+ where W is the width of MODE. */
+
+ if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
+ {
+ /* EXTENDED is the sign bit of OP0 replicated across the word:
+ all ones if OP0 is negative, all zeros otherwise. */
+ rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
+ size_int (GET_MODE_BITSIZE (mode) - 1),
+ NULL_RTX, 0);
+
+ temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
+ OPTAB_LIB_WIDEN);
+ if (temp != 0)
+ temp = expand_binop (mode, sub_optab, temp, extended, target, 0,
+ OPTAB_LIB_WIDEN);
+
+ if (temp != 0)
+ return temp;
+ }
+
+ /* If that does not win, use conditional jump and negate.
+ OP1 is reused here as the label reached when OP0 is already
+ nonnegative and the negation must be skipped. */
+ op1 = gen_label_rtx ();
+ if (target == 0 || ! safe
+ || GET_MODE (target) != mode
+ || (GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
+ || (GET_CODE (target) == REG
+ && REGNO (target) < FIRST_PSEUDO_REGISTER))
+ target = gen_reg_rtx (mode);
+
+ emit_move_insn (target, op0);
+ NO_DEFER_POP;
+
+ /* If this mode is an integer too wide to compare properly,
+ compare word by word. Rely on CSE to optimize constant cases. */
+ if (GET_MODE_CLASS (mode) == MODE_INT && ! can_compare_p (mode))
+ do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
+ NULL_RTX, op1);
+ else
+ {
+ /* Compare TARGET >= 0; compare_from_rtx may fold the comparison
+ to a constant for known operands. */
+ temp = compare_from_rtx (target, CONST0_RTX (mode), GE, 0, mode,
+ NULL_RTX, 0);
+ if (temp == const1_rtx)
+ return target;
+ else if (temp != const0_rtx)
+ {
+ if (bcc_gen_fctn[(int) GET_CODE (temp)] != 0)
+ emit_jump_insn ((*bcc_gen_fctn[(int) GET_CODE (temp)]) (op1));
+ else
+ abort ();
+ }
+ }
+
+ /* OP0 was negative; negate it in place. */
+ op0 = expand_unop (mode, neg_optab, target, target, 0);
+ if (op0 != target)
+ emit_move_insn (target, op0);
+ emit_label (op1);
+ OK_DEFER_POP;
+ return target;
+}
+\f
+/* Emit code to compute the absolute value of OP0, with result to
+ TARGET if convenient. (TARGET may be 0.) The return value says
+ where the result actually is to be found.
+
+ MODE is the mode of the operand; the mode of the result is
+ different but can be deduced from MODE.
+
UNSIGNEDP is relevant for complex integer modes. */
rtx
INSNS is a block of code generated to perform the operation, not including
the CLOBBER and final copy. All insns that compute intermediate values
- are first emitted, followed by the block as described above. Only
- INSNs are allowed in the block; no library calls or jumps may be
- present.
+ are first emitted, followed by the block as described above.
TARGET, OP0, and OP1 are the output and inputs of the operations,
respectively. OP1 may be zero for a unary operation.
on the last insn.
If TARGET is not a register, INSNS is simply emitted with no special
- processing.
+ processing. Likewise if anything in INSNS is not an INSN or if
+ there is a libcall block inside INSNS.
The final insn emitted is returned. */
if (GET_CODE (target) != REG || reload_in_progress)
return emit_insns (insns);
+ else
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) != INSN
+ || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
+ return emit_insns (insns);
/* First emit all insns that do not store into words of the output and remove
these from the list. */
next = NEXT_INSN (insn);
- if (GET_CODE (insn) != INSN)
- abort ();
-
if (GET_CODE (PATTERN (insn)) == SET)
set = PATTERN (insn);
else if (GET_CODE (PATTERN (insn)) == PARALLEL)
enum machine_mode mode = GET_MODE (x);
rtx libfunc = 0;
- if (mode == SFmode)
+ if (mode == HFmode)
+ switch (comparison)
+ {
+ case EQ:
+ libfunc = eqhf2_libfunc;
+ break;
+
+ case NE:
+ libfunc = nehf2_libfunc;
+ break;
+
+ case GT:
+ libfunc = gthf2_libfunc;
+ break;
+
+ case GE:
+ libfunc = gehf2_libfunc;
+ break;
+
+ case LT:
+ libfunc = lthf2_libfunc;
+ break;
+
+ case LE:
+ libfunc = lehf2_libfunc;
+ break;
+ }
+ else if (mode == SFmode)
switch (comparison)
{
case EQ:
emit_barrier ();
}
\f
+#ifdef HAVE_conditional_move
+
+/* Emit a conditional move instruction if the machine supports one for that
+ condition and machine mode.
+
+ OP0 and OP1 are the operands that should be compared using CODE. CMODE is
+ the mode to use should they be constants. If it is VOIDmode, they cannot
+ both be constants.
+
+ OP2 should be stored in TARGET if the comparison is true, otherwise OP3
+ should be stored there. MODE is the mode to use should they be constants.
+ If it is VOIDmode, they cannot both be constants.
+
+ The result is either TARGET (perhaps modified) or NULL_RTX if the operation
+ is not supported. */
+
+rtx
+emit_conditional_move (target, code, op0, op1, cmode, op2, op3, mode,
+ unsignedp)
+ rtx target;
+ enum rtx_code code;
+ rtx op0, op1;
+ enum machine_mode cmode;
+ rtx op2, op3;
+ enum machine_mode mode;
+ int unsignedp;
+{
+ rtx tem, subtarget, comparison, insn;
+ enum insn_code icode;
+
+ /* If one operand is constant, make it the second one. Only do this
+ if the other operand is not constant as well. */
+
+ if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
+ || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
+ {
+ tem = op0;
+ op0 = op1;
+ op1 = tem;
+ /* Keep the comparison meaning intact after the operand swap. */
+ code = swap_condition (code);
+ }
+
+ if (cmode == VOIDmode)
+ cmode = GET_MODE (op0);
+
+ if ((CONSTANT_P (op2) && ! CONSTANT_P (op3))
+ || (GET_CODE (op2) == CONST_INT && GET_CODE (op3) != CONST_INT))
+ {
+ tem = op2;
+ op2 = op3;
+ op3 = tem;
+ /* ??? This may not be appropriate (consider IEEE). Perhaps we should
+ call can_reverse_comparison_p here and bail out if necessary.
+ It's not clear whether we need to do this canonicalization though. */
+ code = reverse_condition (code);
+ }
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (op2);
+
+ /* The conditional-move pattern is indexed by machine mode; give up
+ if the target machine has none for this mode. */
+ icode = movcc_gen_code[mode];
+
+ if (icode == CODE_FOR_nothing)
+ return 0;
+
+ if (flag_force_mem)
+ {
+ op2 = force_not_mem (op2);
+ op3 = force_not_mem (op3);
+ }
+
+ if (target)
+ target = protect_from_queue (target, 1);
+ else
+ target = gen_reg_rtx (mode);
+
+ subtarget = target;
+
+ /* Emit any queued increments before generating the compare insn. */
+ emit_queue ();
+
+ op2 = protect_from_queue (op2, 0);
+ op3 = protect_from_queue (op3, 0);
+
+ /* If the insn doesn't accept these operands, put them in pseudos. */
+
+ if (! (*insn_operand_predicate[icode][0])
+ (subtarget, insn_operand_mode[icode][0]))
+ subtarget = gen_reg_rtx (insn_operand_mode[icode][0]);
+
+ if (! (*insn_operand_predicate[icode][2])
+ (op2, insn_operand_mode[icode][2]))
+ op2 = copy_to_mode_reg (insn_operand_mode[icode][2], op2);
+
+ if (! (*insn_operand_predicate[icode][3])
+ (op3, insn_operand_mode[icode][3]))
+ op3 = copy_to_mode_reg (insn_operand_mode[icode][3], op3);
+
+ /* Everything should now be in the suitable form, so emit the compare insn
+ and then the conditional move. */
+
+ comparison
+ = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX, 0);
+
+ /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
+ if (GET_CODE (comparison) != code)
+ /* This shouldn't happen. */
+ abort ();
+
+ insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
+
+ /* If that failed, then give up. */
+ if (insn == 0)
+ return 0;
+
+ emit_insn (insn);
+
+ /* If the pattern forced a different result register, copy/convert the
+ value back into the caller's TARGET. */
+ if (subtarget != target)
+ convert_move (target, subtarget, 0);
+
+ return target;
+}
+
+/* Return non-zero if a conditional move of mode MODE is supported.
+
+ This function is for combine so it can tell whether an insn that looks
+ like a conditional move is actually supported by the hardware. If we
+ guess wrong we lose a bit on optimization, but that's it. */
+/* ??? sparc64 supports conditionally moving integer values based on fp
+ comparisons, and vice versa. How do we handle them? */
+
+int
+can_conditionally_move_p (mode)
+ enum machine_mode mode;
+{
+ /* movcc_gen_code is indexed by machine mode; CODE_FOR_nothing means
+ no named conditional-move pattern exists for MODE. */
+ if (movcc_gen_code[mode] != CODE_FOR_nothing)
+ return 1;
+
+ return 0;
+}
+
+#endif /* HAVE_conditional_move */
+\f
/* These three functions generate an insn body and return it
rather than emitting the insn.
expand_float (target, temp, 0);
/* Multiply by 2 to undo the shift above. */
- target = expand_binop (fmode, add_optab, target, target,
+ temp = expand_binop (fmode, add_optab, target, target,
target, 0, OPTAB_LIB_WIDEN);
+ if (temp != target)
+ emit_move_insn (target, temp);
+
do_pending_stack_adjust ();
emit_label (label);
goto done;
}
#endif
- /* No hardware instruction available; call a library rotine to convert from
+ /* No hardware instruction available; call a library routine to convert from
SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode. */
{
rtx libfcn;
if (libfcn)
{
rtx insns;
+ rtx value;
to = protect_from_queue (to, 1);
from = protect_from_queue (from, 0);
start_sequence ();
- emit_library_call (libfcn, 1, GET_MODE (to), 1, from, GET_MODE (from));
+ value = emit_library_call_value (libfcn, NULL_RTX, 1, GET_MODE (to),
+				       1, from, GET_MODE (from));
insns = get_insns ();
end_sequence ();
- emit_libcall_block (insns, target, hard_libcall_value (GET_MODE (to)),
- gen_rtx (unsignedp ? FIX : UNSIGNED_FIX,
+ emit_libcall_block (insns, target, value,
+ gen_rtx (unsignedp ? UNSIGNED_FIX : FIX,
GET_MODE (to), from));
}
for (i = 0; i < NUM_RTX_CODE; i++)
setcc_gen_code[i] = CODE_FOR_nothing;
+#ifdef HAVE_conditional_move
+ for (i = 0; i < NUM_MACHINE_MODES; i++)
+ movcc_gen_code[i] = CODE_FOR_nothing;
+#endif
+
add_optab = init_optab (PLUS);
sub_optab = init_optab (MINUS);
smul_optab = init_optab (MULT);
+ smul_highpart_optab = init_optab (UNKNOWN);
+ umul_highpart_optab = init_optab (UNKNOWN);
smul_widen_optab = init_optab (UNKNOWN);
umul_widen_optab = init_optab (UNKNOWN);
sdiv_optab = init_optab (DIV);
init_integral_libfuncs (ashl_optab, "ashl", '3');
init_integral_libfuncs (ashr_optab, "ashr", '3');
init_integral_libfuncs (lshr_optab, "lshr", '3');
- init_integral_libfuncs (rotl_optab, "rotl", '3');
- init_integral_libfuncs (rotr_optab, "rotr", '3');
init_integral_libfuncs (smin_optab, "min", '3');
init_floating_libfuncs (smin_optab, "min", '3');
init_integral_libfuncs (smax_optab, "max", '3');
memset_libfunc = gen_rtx (SYMBOL_REF, Pmode, "memset");
bzero_libfunc = gen_rtx (SYMBOL_REF, Pmode, "bzero");
+ eqhf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__eqhf2");
+ nehf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__nehf2");
+ gthf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__gthf2");
+ gehf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__gehf2");
+ lthf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__lthf2");
+ lehf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__lehf2");
+
eqsf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__eqsf2");
nesf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__nesf2");
gtsf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__gtsf2");
#ifdef FIXUNS_TRUNCTFSI2_LIBCALL
fixunstfsi_libfunc = gen_rtx (SYMBOL_REF, Pmode, FIXUNS_TRUNCTFSI2_LIBCALL);
#endif
+
+#ifdef INIT_TARGET_OPTABS
+ /* Allow the target to add more libcalls or rename some, etc. */
+ INIT_TARGET_OPTABS;
+#endif
}
\f
#ifdef BROKEN_LDEXP