;;- Machine description for SPARC chip for GNU C compiler
-;; Copyright (C) 1987, 88, 89, 92-98, 1999 Free Software Foundation, Inc.
+;; Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
+;; 1999, 2000 Free Software Foundation, Inc.
;; Contributed by Michael Tiemann (tiemann@cygnus.com)
;; 64 bit SPARC V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
;; at Cygnus Support.
;; 9 sethh
;; 10 setlm
;; 11 embmedany_sethi, embmedany_brsum
-;; 12 movsf_const_high
;; 13 embmedany_textuhi
;; 14 embmedany_texthi
;; 15 embmedany_textulo
;; 16 embmedany_textlo
-;; 17 movsf_const_lo
;; 18 sethm
;; 19 setlo
;;
;; 2 goto_handler_and_restore
;; 3 goto_handler_and_restore_v9*
;; 4 flush
-;; 5 nonlocal_goto_receiver
;;
;; The upper 32 fp regs on the v9 can't hold SFmode values.  To deal with this
;; a second register class, EXTRA_FP_REGS, exists for the v9 chip.  The name
;; is a bit of a misnomer as it covers all 64 fp regs.  The corresponding
;; constraint letter is 'e'.  To avoid any confusion, 'e' is used instead of
;; 'f' for all DF/TFmode values, including those that are specific to the v8.
-;;
-;; -mlive-g0 is *not* supported for TARGET_ARCH64, so we don't bother to
-;; test TARGET_LIVE_G0 if we have TARGET_ARCH64.
;; Attribute for cpu type.
;; These must match the values for enum processor_type in sparc.h.
(cond [(symbol_ref "TARGET_ARCH64") (const_string "arch64bit")]
(const_string "arch32bit"))))
-;; Whether -mlive-g0 is in effect.
-(define_attr "live_g0" "no,yes"
- (const
- (cond [(symbol_ref "TARGET_LIVE_G0") (const_string "yes")]
- (const_string "no"))))
-
;; Insn type. Used to default other attribute values.
;; type "unary" insns have one input operand (1) and one output operand (0)
;; type "call_no_delay_slot" is a call followed by an unimp instruction.
(define_attr "type"
- "move,unary,binary,compare,load,sload,store,ialu,shift,uncond_branch,branch,call,call_no_delay_slot,return,address,imul,fpload,fpstore,fp,fpmove,fpcmove,fpcmp,fpmul,fpdivs,fpdivd,fpsqrts,fpsqrtd,cmove,multi,misc"
+ "move,unary,binary,compare,load,sload,store,ialu,shift,uncond_branch,branch,call,sibcall,call_no_delay_slot,return,address,imul,fpload,fpstore,fp,fpmove,fpcmove,fpcmp,fpmul,fpdivs,fpdivd,fpsqrts,fpsqrtd,cmove,multi,misc"
(const_string "binary"))
;; Set true if insn uses call-clobbered intermediate register.
;; Attributes for instruction and branch scheduling
(define_attr "in_call_delay" "false,true"
- (cond [(eq_attr "type" "uncond_branch,branch,call,call_no_delay_slot,return,multi")
+ (cond [(eq_attr "type" "uncond_branch,branch,call,sibcall,call_no_delay_slot,return,multi")
(const_string "false")
(eq_attr "type" "load,fpload,store,fpstore")
(if_then_else (eq_attr "length" "1")
(define_delay (eq_attr "type" "call")
[(eq_attr "in_call_delay" "true") (nil) (nil)])
+;; True if this insn may be placed in the delay slot of a sibling call.
+;; Delegated to the C function eligible_for_sibcall_delay (sparc.c).
+(define_attr "eligible_for_sibcall_delay" "false,true"
+  (symbol_ref "eligible_for_sibcall_delay(insn)"))
+
+;; Sibling calls have a single delay slot, never annulled.
+(define_delay (eq_attr "type" "sibcall")
+  [(eq_attr "eligible_for_sibcall_delay" "true") (nil) (nil)])
+
;; True when the current function is known to use only leaf registers;
;; mirrors the global current_function_uses_only_leaf_regs flag.
(define_attr "leaf_function" "false,true"
  (const (symbol_ref "current_function_uses_only_leaf_regs")))
;; because it prevents us from moving back the final store of inner loops.
;; True if the insn is a single-word insn that may sit in a conditional
;; branch delay slot (branches, calls and multi-insn sequences excluded).
(define_attr "in_branch_delay" "false,true"
-  (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
+  (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,sibcall,call_no_delay_slot,multi")
		     (eq_attr "length" "1"))
		(const_string "true")
		(const_string "false")))
;; Same eligibility test for the delay slot of an unconditional branch.
(define_attr "in_uncond_branch_delay" "false,true"
-  (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
+  (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,sibcall,call_no_delay_slot,multi")
		     (eq_attr "length" "1"))
		(const_string "true")
		(const_string "false")))
;; Same eligibility test for an annulled branch delay slot.
(define_attr "in_annul_branch_delay" "false,true"
-  (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
+  (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,sibcall,call_no_delay_slot,multi")
		     (eq_attr "length" "1"))
		(const_string "true")
		(const_string "false")))
(define_function_unit "ieuN" 2 0
(and (eq_attr "cpu" "ultrasparc")
- (eq_attr "type" "ialu,binary,move,unary,shift,compare,call,call_no_delay_slot,uncond_branch"))
+ (eq_attr "type" "ialu,binary,move,unary,shift,compare,call,sibcall,call_no_delay_slot,uncond_branch"))
1 1)
(define_function_unit "ieu0" 1 0
(define_function_unit "ieu1" 1 0
(and (eq_attr "cpu" "ultrasparc")
- (eq_attr "type" "compare,call,call_no_delay_slot,uncond_branch"))
+ (eq_attr "type" "compare,call,sibcall,call_no_delay_slot,uncond_branch"))
1 1)
(define_function_unit "cti" 1 0
[(set (reg:CCFP 96)
(compare:CCFP (match_operand:TF 0 "register_operand" "")
(match_operand:TF 1 "register_operand" "")))]
- "TARGET_FPU && TARGET_HARD_QUAD"
+ "TARGET_FPU"
"
{
sparc_compare_op0 = operands[0];
(parallel [(set (match_operand:SI 0 "register_operand" "")
(eq:SI (match_dup 3) (const_int 0)))
(clobber (reg:CC 100))])]
- "! TARGET_LIVE_G0"
+ ""
"{ operands[3] = gen_reg_rtx (SImode); }")
(define_expand "seqdi_special"
(parallel [(set (match_operand:SI 0 "register_operand" "")
(ne:SI (match_dup 3) (const_int 0)))
(clobber (reg:CC 100))])]
- "! TARGET_LIVE_G0"
+ ""
"{ operands[3] = gen_reg_rtx (SImode); }")
(define_expand "snedi_special"
(define_expand "seq"
[(set (match_operand:SI 0 "intreg_operand" "")
(eq:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
if (GET_MODE (sparc_compare_op0) == SImode)
emit_insn (pat);
DONE;
}
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
+ emit_jump_insn (gen_sne (operands[0]));
+ DONE;
+ }
else if (TARGET_V9)
{
if (gen_v9_scc (EQ, operands))
(define_expand "sne"
[(set (match_operand:SI 0 "intreg_operand" "")
(ne:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
if (GET_MODE (sparc_compare_op0) == SImode)
emit_insn (pat);
DONE;
}
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
+ emit_jump_insn (gen_sne (operands[0]));
+ DONE;
+ }
else if (TARGET_V9)
{
if (gen_v9_scc (NE, operands))
(define_expand "sgt"
[(set (match_operand:SI 0 "intreg_operand" "")
(gt:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
- if (TARGET_V9)
+ if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
+ emit_jump_insn (gen_sne (operands[0]));
+ DONE;
+ }
+ else if (TARGET_V9)
{
if (gen_v9_scc (GT, operands))
DONE;
(define_expand "slt"
[(set (match_operand:SI 0 "intreg_operand" "")
(lt:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
- if (TARGET_V9)
+ if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
+ emit_jump_insn (gen_sne (operands[0]));
+ DONE;
+ }
+ else if (TARGET_V9)
{
if (gen_v9_scc (LT, operands))
DONE;
(define_expand "sge"
[(set (match_operand:SI 0 "intreg_operand" "")
(ge:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
- if (TARGET_V9)
+ if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
+ emit_jump_insn (gen_sne (operands[0]));
+ DONE;
+ }
+ else if (TARGET_V9)
{
if (gen_v9_scc (GE, operands))
DONE;
(define_expand "sle"
[(set (match_operand:SI 0 "intreg_operand" "")
(le:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
- if (TARGET_V9)
+ if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
+ emit_jump_insn (gen_sne (operands[0]));
+ DONE;
+ }
+ else if (TARGET_V9)
{
if (gen_v9_scc (LE, operands))
DONE;
(define_expand "sgtu"
[(set (match_operand:SI 0 "intreg_operand" "")
(gtu:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
if (! TARGET_V9)
(define_expand "sltu"
[(set (match_operand:SI 0 "intreg_operand" "")
(ltu:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
if (TARGET_V9)
(define_expand "sgeu"
[(set (match_operand:SI 0 "intreg_operand" "")
(geu:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
if (TARGET_V9)
(define_expand "sleu"
[(set (match_operand:SI 0 "intreg_operand" "")
(leu:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
if (! TARGET_V9)
(ne:SI (match_operand:SI 1 "register_operand" "r")
(const_int 0)))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
"#"
[(set_attr "length" "2")])
(neg:SI (ne:SI (match_operand:SI 1 "register_operand" "r")
(const_int 0))))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
"#"
[(set_attr "length" "2")])
[(set (match_operand:DI 0 "register_operand" "")
(ne:DI (match_operand:DI 1 "register_operand" "")
(const_int 0)))]
- "TARGET_ARCH64"
+ "TARGET_ARCH64
+ && ! reg_overlap_mentioned_p (operands[1], operands[0])"
[(set (match_dup 0) (const_int 0))
(set (match_dup 0) (if_then_else:DI (ne:DI (match_dup 1)
(const_int 0))
[(set (match_operand:DI 0 "register_operand" "")
(neg:DI (ne:DI (match_operand:DI 1 "register_operand" "")
(const_int 0))))]
- "TARGET_ARCH64"
+ "TARGET_ARCH64
+ && ! reg_overlap_mentioned_p (operands[1], operands[0])"
[(set (match_dup 0) (const_int 0))
(set (match_dup 0) (if_then_else:DI (ne:DI (match_dup 1)
(const_int 0))
[(set (match_operand:SI 0 "register_operand" "")
(ne:SI (match_operand:DI 1 "register_operand" "")
(const_int 0)))]
- "TARGET_ARCH64"
+ "TARGET_ARCH64
+ && ! reg_overlap_mentioned_p (operands[1], operands[0])"
[(set (match_dup 0) (const_int 0))
(set (match_dup 0) (if_then_else:SI (ne:DI (match_dup 1)
(const_int 0))
(eq:SI (match_operand:SI 1 "register_operand" "r")
(const_int 0)))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
"#"
[(set_attr "length" "2")])
(neg:SI (eq:SI (match_operand:SI 1 "register_operand" "r")
(const_int 0))))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
"#"
[(set_attr "length" "2")])
[(set (match_operand:DI 0 "register_operand" "")
(eq:DI (match_operand:DI 1 "register_operand" "")
(const_int 0)))]
- "TARGET_ARCH64"
+ "TARGET_ARCH64
+ && ! reg_overlap_mentioned_p (operands[1], operands[0])"
[(set (match_dup 0) (const_int 0))
(set (match_dup 0) (if_then_else:DI (eq:DI (match_dup 1)
(const_int 0))
[(set (match_operand:DI 0 "register_operand" "")
(neg:DI (eq:DI (match_operand:DI 1 "register_operand" "")
(const_int 0))))]
- "TARGET_ARCH64"
+ "TARGET_ARCH64
+ && ! reg_overlap_mentioned_p (operands[1], operands[0])"
[(set (match_dup 0) (const_int 0))
(set (match_dup 0) (if_then_else:DI (eq:DI (match_dup 1)
(const_int 0))
[(set (match_operand:SI 0 "register_operand" "")
(eq:SI (match_operand:DI 1 "register_operand" "")
(const_int 0)))]
- "TARGET_ARCH64"
+ "TARGET_ARCH64
+ && ! reg_overlap_mentioned_p (operands[1], operands[0])"
[(set (match_dup 0) (const_int 0))
(set (match_dup 0) (if_then_else:SI (eq:DI (match_dup 1)
(const_int 0))
(const_int 0))
(match_operand:SI 2 "register_operand" "r")))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
"#"
[(set_attr "length" "2")])
(const_int 0))
(match_operand:SI 2 "register_operand" "")))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
[(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
(const_int 0)))
(set (match_dup 0) (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
(ne:SI (match_operand:SI 1 "register_operand" "r")
(const_int 0))))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
"#"
[(set_attr "length" "2")])
(ne:SI (match_operand:SI 1 "register_operand" "")
(const_int 0))))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
[(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
(const_int 0)))
(set (match_dup 0) (minus:SI (match_dup 2)
(const_int 0))
(match_operand:SI 2 "register_operand" "r")))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
"#"
[(set_attr "length" "2")])
(const_int 0))
(match_operand:SI 2 "register_operand" "")))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
[(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
(const_int 0)))
(set (match_dup 0) (plus:SI (geu:SI (reg:CC 100) (const_int 0))
(eq:SI (match_operand:SI 1 "register_operand" "r")
(const_int 0))))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
"#"
[(set_attr "length" "2")])
(eq:SI (match_operand:SI 1 "register_operand" "")
(const_int 0))))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
[(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
(const_int 0)))
(set (match_dup 0) (minus:SI (match_dup 2)
;; Materialize the unsigned-less-than result from the carry flag:
;; addx %g0, 0, dst gives 1 when carry is set (LTU), else 0.
(define_insn "*sltu_insn"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(ltu:SI (reg:CC 100) (const_int 0)))]
-  "! TARGET_LIVE_G0"
+  ""
  "addx\\t%%g0, 0, %0"
  [(set_attr "type" "misc")
   (set_attr "length" "1")])
;; Negated form: subx %g0, 0, dst gives -1 when carry is set, else 0.
(define_insn "*neg_sltu_insn"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(neg:SI (ltu:SI (reg:CC 100) (const_int 0))))]
-  "! TARGET_LIVE_G0"
+  ""
  "subx\\t%%g0, 0, %0"
  [(set_attr "type" "misc")
   (set_attr "length" "1")])
[(set (match_operand:SI 0 "register_operand" "=r")
(minus:SI (neg:SI (ltu:SI (reg:CC 100) (const_int 0)))
(match_operand:SI 1 "arith_operand" "rI")))]
- "! TARGET_LIVE_G0"
+ ""
"subx\\t%%g0, %1, %0"
[(set_attr "type" "misc")
(set_attr "length" "1")])
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
(match_operand:SI 1 "arith_operand" "rI"))))]
- "! TARGET_LIVE_G0"
+ ""
"subx\\t%%g0, %1, %0"
[(set_attr "type" "misc")
(set_attr "length" "1")])
;; Unsigned greater-or-equal from carry: subx %g0, -1, dst computes
;; 1 - C, i.e. 1 when carry is clear (GEU), else 0.
(define_insn "*sgeu_insn"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(geu:SI (reg:CC 100) (const_int 0)))]
-  "! TARGET_LIVE_G0"
+  ""
  "subx\\t%%g0, -1, %0"
  [(set_attr "type" "misc")
   (set_attr "length" "1")])
;; Negated form: addx %g0, -1, dst computes C - 1, i.e. -1 when carry is
;; clear (GEU), else 0.
(define_insn "*neg_sgeu_insn"
  [(set (match_operand:SI 0 "register_operand" "=r")
	(neg:SI (geu:SI (reg:CC 100) (const_int 0))))]
-  "! TARGET_LIVE_G0"
+  ""
  "addx\\t%%g0, -1, %0"
  [(set_attr "type" "misc")
   (set_attr "length" "1")])
[(set (match_operand:SI 0 "register_operand" "=r")
(plus:SI (ltu:SI (reg:CC 100) (const_int 0))
(match_operand:SI 1 "arith_operand" "rI")))]
- "! TARGET_LIVE_G0"
+ ""
"addx\\t%%g0, %1, %0"
[(set_attr "type" "misc")
(set_attr "length" "1")])
emit_v9_brxx_insn (EQ, sparc_compare_op0, operands[0]);
DONE;
}
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
operands[1] = gen_compare_reg (EQ, sparc_compare_op0, sparc_compare_op1);
}")
emit_v9_brxx_insn (NE, sparc_compare_op0, operands[0]);
DONE;
}
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
operands[1] = gen_compare_reg (NE, sparc_compare_op0, sparc_compare_op1);
}")
emit_v9_brxx_insn (GT, sparc_compare_op0, operands[0]);
DONE;
}
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
operands[1] = gen_compare_reg (GT, sparc_compare_op0, sparc_compare_op1);
}")
emit_v9_brxx_insn (LT, sparc_compare_op0, operands[0]);
DONE;
}
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
operands[1] = gen_compare_reg (LT, sparc_compare_op0, sparc_compare_op1);
}")
emit_v9_brxx_insn (GE, sparc_compare_op0, operands[0]);
DONE;
}
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
operands[1] = gen_compare_reg (GE, sparc_compare_op0, sparc_compare_op1);
}")
emit_v9_brxx_insn (LE, sparc_compare_op0, operands[0]);
DONE;
}
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
operands[1] = gen_compare_reg (LE, sparc_compare_op0, sparc_compare_op1);
}")
"
{ operands[1] = gen_compare_reg (LEU, sparc_compare_op0, sparc_compare_op1);
}")
+
+;; Branch if the operands compare unordered (at least one is a NaN).
+;; For TFmode without hardware quad support, route through the soft-float
+;; comparison library call and branch on its integer result.
+(define_expand "bunordered"
+  [(set (pc)
+	(if_then_else (unordered (match_dup 1) (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "
+{
+  if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+    {
+      sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1,
+				UNORDERED);
+      emit_jump_insn (gen_beq (operands[0]));
+      DONE;
+    }
+  operands[1] = gen_compare_reg (UNORDERED, sparc_compare_op0,
+				 sparc_compare_op1);
+}")
+
+;; Branch if the operands compare ordered (neither is a NaN); soft-float
+;; TFmode goes through the comparison library call.
+(define_expand "bordered"
+  [(set (pc)
+	(if_then_else (ordered (match_dup 1) (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "
+{
+  if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+    {
+      sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, ORDERED);
+      emit_jump_insn (gen_bne (operands[0]));
+      DONE;
+    }
+  operands[1] = gen_compare_reg (ORDERED, sparc_compare_op0,
+				 sparc_compare_op1);
+}")
+
+;; Branch if unordered-or-greater; soft-float TFmode uses the library
+;; comparison and then a plain GT branch on the returned value.
+(define_expand "bungt"
+  [(set (pc)
+	(if_then_else (ungt (match_dup 1) (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "
+{
+  if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+    {
+      sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNGT);
+      emit_jump_insn (gen_bgt (operands[0]));
+      DONE;
+    }
+  operands[1] = gen_compare_reg (UNGT, sparc_compare_op0, sparc_compare_op1);
+}")
+
+;; Branch if unordered-or-less; soft-float TFmode branches NE on the
+;; library comparison result.
+(define_expand "bunlt"
+  [(set (pc)
+	(if_then_else (unlt (match_dup 1) (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "
+{
+  if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+    {
+      sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNLT);
+      emit_jump_insn (gen_bne (operands[0]));
+      DONE;
+    }
+  operands[1] = gen_compare_reg (UNLT, sparc_compare_op0, sparc_compare_op1);
+}")
+
+;; Branch if unordered-or-equal; soft-float TFmode branches EQ on the
+;; library comparison result.
+(define_expand "buneq"
+  [(set (pc)
+	(if_then_else (uneq (match_dup 1) (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "
+{
+  if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+    {
+      sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNEQ);
+      emit_jump_insn (gen_beq (operands[0]));
+      DONE;
+    }
+  operands[1] = gen_compare_reg (UNEQ, sparc_compare_op0, sparc_compare_op1);
+}")
+
+;; Branch if unordered-or-greater-or-equal; soft-float TFmode branches NE
+;; on the library comparison result.
+(define_expand "bunge"
+  [(set (pc)
+	(if_then_else (unge (match_dup 1) (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "
+{
+  if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+    {
+      sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNGE);
+      emit_jump_insn (gen_bne (operands[0]));
+      DONE;
+    }
+  operands[1] = gen_compare_reg (UNGE, sparc_compare_op0, sparc_compare_op1);
+}")
+
+;; Branch if unordered-or-less-or-equal; soft-float TFmode branches NE
+;; on the library comparison result.
+(define_expand "bunle"
+  [(set (pc)
+	(if_then_else (unle (match_dup 1) (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "
+{
+  if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+    {
+      sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNLE);
+      emit_jump_insn (gen_bne (operands[0]));
+      DONE;
+    }
+  operands[1] = gen_compare_reg (UNLE, sparc_compare_op0, sparc_compare_op1);
+}")
+
+;; Branch if less-or-greater (ordered and not equal); soft-float TFmode
+;; branches NE on the library comparison result.
+(define_expand "bltgt"
+  [(set (pc)
+	(if_then_else (ltgt (match_dup 1) (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "
+{
+  if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+    {
+      sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LTGT);
+      emit_jump_insn (gen_bne (operands[0]));
+      DONE;
+    }
+  operands[1] = gen_compare_reg (LTGT, sparc_compare_op0, sparc_compare_op1);
+}")
\f
;; Now match both normal and inverted jump.
/* Handle sets of MEM first. */
if (GET_CODE (operands[0]) == MEM)
{
- /* This checks TARGET_LIVE_G0 for us. */
if (reg_or_0_operand (operands[1], QImode))
goto movqi_is_ok;
/* Handle sets of MEM first. */
if (GET_CODE (operands[0]) == MEM)
{
- /* This checks TARGET_LIVE_G0 for us. */
if (reg_or_0_operand (operands[1], HImode))
goto movhi_is_ok;
/* Handle sets of MEM first. */
if (GET_CODE (operands[0]) == MEM)
{
- /* This checks TARGET_LIVE_G0 for us. */
if (reg_or_0_operand (operands[1], SImode))
goto movsi_is_ok;
;
}")
-;; Special LIVE_G0 pattern to obtain zero in a register.
-(define_insn "*movsi_zero_liveg0"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (match_operand:SI 1 "zero_operand" "J"))]
- "TARGET_LIVE_G0"
- "and\\t%0, 0, %0"
- [(set_attr "type" "binary")
- (set_attr "length" "1")])
-
;; This is needed to show CSE exactly which bits are set
;; in a 64-bit register by sethi instructions.
(define_insn "*movsi_const64_special"
{
current_function_uses_pic_offset_table = 1;
operands[2] = gen_rtx_SYMBOL_REF (Pmode, \"_GLOBAL_OFFSET_TABLE_\");
- operands[3] = gen_reg_rtx (SImode);
- operands[4] = gen_reg_rtx (SImode);
+ if (no_new_pseudos)
+ {
+ operands[3] = operands[0];
+ operands[4] = operands[0];
+ }
+ else
+ {
+ operands[3] = gen_reg_rtx (SImode);
+ operands[4] = gen_reg_rtx (SImode);
+ }
operands[5] = pic_offset_table_rtx;
}")
[(set_attr "type" "move")
(set_attr "length" "1")])
-(define_insn "*movdi_insn_sp64"
+;; DImode moves for 64-bit targets without VIS: integer reg/imm moves,
+;; loads/stores, and FP-register alternatives.  The VIS variant adds a
+;; fzero alternative for clearing an FP register.
+(define_insn "*movdi_insn_sp64_novis"
+  [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,r,m,?e,?e,?m")
+        (match_operand:DI 1 "input_operand" "rI,K,J,m,rJ,e,m,e"))]
+  "TARGET_ARCH64 && ! TARGET_VIS &&
+   (register_operand (operands[0], DImode)
+    || reg_or_0_operand (operands[1], DImode))"
+  "@
+  mov\\t%1, %0
+  sethi\\t%%hi(%a1), %0
+  clr\\t%0
+  ldx\\t%1, %0
+  stx\\t%r1, %0
+  fmovd\\t%1, %0
+  ldd\\t%1, %0
+  std\\t%1, %0"
+  [(set_attr "type" "move,move,move,load,store,fpmove,fpload,fpstore")
+  (set_attr "length" "1")])
+
+(define_insn "*movdi_insn_sp64_vis"
[(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,r,m,?e,?e,?m,b")
(match_operand:DI 1 "input_operand" "rI,K,J,m,rJ,e,m,e,J"))]
- "TARGET_ARCH64 &&
+ "TARGET_ARCH64 && TARGET_VIS &&
(register_operand (operands[0], DImode)
|| reg_or_0_operand (operands[1], DImode))"
"@
{
current_function_uses_pic_offset_table = 1;
operands[2] = gen_rtx_SYMBOL_REF (Pmode, \"_GLOBAL_OFFSET_TABLE_\");
- operands[3] = gen_reg_rtx (DImode);
- operands[4] = gen_reg_rtx (DImode);
+ if (no_new_pseudos)
+ {
+ operands[3] = operands[0];
+ operands[4] = operands[0];
+ }
+ else
+ {
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (DImode);
+ }
operands[5] = pic_offset_table_rtx;
}")
(define_insn "*sethi_di_medlow_embmedany_pic"
[(set (match_operand:DI 0 "register_operand" "=r")
- (high:DI (match_operand:DI 1 "sp64_medium_pic_operand" "")))
- ;; The clobber is here because emit_move_sequence assumes the worst case.
- (clobber (reg:DI 1))]
+ (high:DI (match_operand:DI 1 "sp64_medium_pic_operand" "")))]
"(TARGET_CM_MEDLOW || TARGET_CM_EMBMEDANY) && check_pic (1)"
"sethi\\t%%hi(%a1), %0"
[(set_attr "type" "move")
(define_insn "*sethi_di_medlow"
[(set (match_operand:DI 0 "register_operand" "=r")
- (high:DI (match_operand:DI 1 "symbolic_operand" "")))
- ;; The clobber is here because emit_move_sequence assumes the worst case.
- (clobber (reg:DI 1))]
+ (high:DI (match_operand:DI 1 "symbolic_operand" "")))]
"TARGET_CM_MEDLOW && check_pic (1)"
"sethi\\t%%hi(%a1), %0"
[(set_attr "type" "move")
[(clobber (const_int 0))]
"
{
+#if HOST_BITS_PER_WIDE_INT == 32
emit_insn (gen_movsi (gen_highpart (SImode, operands[0]),
(INTVAL (operands[1]) < 0) ?
constm1_rtx :
const0_rtx));
emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
operands[1]));
+#else
+ unsigned int low, high;
+
+ low = INTVAL (operands[1]) & 0xffffffff;
+ high = (INTVAL (operands[1]) >> 32) & 0xffffffff;
+ emit_insn (gen_movsi (gen_highpart (SImode, operands[0]), GEN_INT (high)));
+
+ /* Slick... but this trick loses if this subreg constant part
+ can be done in one insn. */
+ if (low == high && (low & 0x3ff) != 0 && low + 0x1000 >= 0x2000)
+ emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
+ gen_highpart (SImode, operands[0])));
+ else
+ emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]), GEN_INT (low)));
+#endif
DONE;
}")
\f
;; Floating point move insns
-(define_insn "*clear_sf"
- [(set (match_operand:SF 0 "register_operand" "=f")
- (match_operand:SF 1 "" ""))]
- "TARGET_VIS
- && GET_CODE (operands[1]) == CONST_DOUBLE
- && GET_CODE (operands[0]) == REG
- && fp_zero_operand (operands[1])"
- "fzeros\\t%0"
- [(set_attr "type" "fpmove")
- (set_attr "length" "1")])
-
-(define_insn "*movsf_const_intreg"
- [(set (match_operand:SF 0 "register_operand" "=f,r")
- (match_operand:SF 1 "const_double_operand" "m,F"))]
- "TARGET_FPU"
+(define_insn "*movsf_insn_novis"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,*r,*r,*r,*r,*r,f,m,m")
+ (match_operand:SF 1 "input_operand" "f,G,Q,*rR,S,m,m,f,*rG"))]
+ "(TARGET_FPU && ! TARGET_VIS)
+ && (register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)
+ || fp_zero_operand (operands[1], SFmode))"
"*
{
- REAL_VALUE_TYPE r;
- long i;
+ if (GET_CODE (operands[1]) == CONST_DOUBLE
+ && (which_alternative == 2
+ || which_alternative == 3
+ || which_alternative == 4))
+ {
+ REAL_VALUE_TYPE r;
+ long i;
- if (which_alternative == 0)
- return \"ld\\t%1, %0\";
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
+ REAL_VALUE_TO_TARGET_SINGLE (r, i);
+ operands[1] = GEN_INT (i);
+ }
- REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
- REAL_VALUE_TO_TARGET_SINGLE (r, i);
- if (SPARC_SIMM13_P (i) || SPARC_SETHI_P (i))
+ switch (which_alternative)
{
- operands[1] = GEN_INT (i);
- if (SPARC_SIMM13_P (INTVAL (operands[1])))
- return \"mov\\t%1, %0\";
- else if (SPARC_SETHI_P (INTVAL (operands[1])))
- return \"sethi\\t%%hi(%a1), %0\";
- else
- abort ();
+ case 0:
+ return \"fmovs\\t%1, %0\";
+ case 1:
+ return \"clr\\t%0\";
+ case 2:
+ return \"sethi\\t%%hi(%a1), %0\";
+ case 3:
+ return \"mov\\t%1, %0\";
+ case 4:
+ return \"#\";
+ case 5:
+ case 6:
+ return \"ld\\t%1, %0\";
+ case 7:
+ case 8:
+ return \"st\\t%r1, %0\";
+ default:
+ abort();
}
- else
- return \"#\";
}"
- [(set_attr "type" "move")
- (set_attr "length" "1,2")])
-
-;; There isn't much I can do about this, if I change the
-;; mode then flow info gets really confused because the
-;; destination no longer looks the same. Ho hum...
-(define_insn "*movsf_const_high"
- [(set (match_operand:SF 0 "register_operand" "=r")
- (unspec:SF [(match_operand 1 "const_int_operand" "")] 12))]
- ""
- "sethi\\t%%hi(%a1), %0"
- [(set_attr "type" "move")
+ [(set_attr "type" "fpmove,move,move,move,*,load,fpload,fpstore,store")
(set_attr "length" "1")])
-(define_insn "*movsf_const_lo"
- [(set (match_operand:SF 0 "register_operand" "=r")
- (unspec:SF [(match_operand:SF 1 "register_operand" "r")
- (match_operand 2 "const_int_operand" "")] 17))]
- ""
- "or\\t%1, %%lo(%a2), %0"
- [(set_attr "type" "move")
- (set_attr "length" "1")])
+(define_insn "*movsf_insn_vis"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,*r,*r,*r,*r,*r,f,m,m")
+ (match_operand:SF 1 "input_operand" "f,G,G,Q,*rR,S,m,m,f,*rG"))]
+ "(TARGET_FPU && TARGET_VIS)
+ && (register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)
+ || fp_zero_operand (operands[1], SFmode))"
+ "*
+{
+ if (GET_CODE (operands[1]) == CONST_DOUBLE
+ && (which_alternative == 3
+ || which_alternative == 4
+ || which_alternative == 5))
+ {
+ REAL_VALUE_TYPE r;
+ long i;
-(define_split
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
+ REAL_VALUE_TO_TARGET_SINGLE (r, i);
+ operands[1] = GEN_INT (i);
+ }
+
+ switch (which_alternative)
+ {
+ case 0:
+ return \"fmovs\\t%1, %0\";
+ case 1:
+ return \"fzeros\\t%0\";
+ case 2:
+ return \"clr\\t%0\";
+ case 3:
+ return \"sethi\\t%%hi(%a1), %0\";
+ case 4:
+ return \"mov\\t%1, %0\";
+ case 5:
+ return \"#\";
+ case 6:
+ case 7:
+ return \"ld\\t%1, %0\";
+ case 8:
+ case 9:
+ return \"st\\t%r1, %0\";
+ default:
+ abort();
+ }
+}"
+ [(set_attr "type" "fpmove,fpmove,move,move,move,*,load,fpload,fpstore,store")
+ (set_attr "length" "1")])
+
+;; Emit the %lo() half of an SF constant built in an integer register;
+;; only for constants that fp_high_losum_p accepts (sethi %hi / or %lo pair).
+(define_insn "*movsf_lo_sum"
  [(set (match_operand:SF 0 "register_operand" "")
-	(match_operand:SF 1 "const_double_operand" ""))]
-  "TARGET_FPU
-  && (GET_CODE (operands[0]) == REG
-  && REGNO (operands[0]) < 32)"
-  [(set (match_dup 0) (unspec:SF [(match_dup 1)] 12))
-  (set (match_dup 0) (unspec:SF [(match_dup 0) (match_dup 1)] 17))]
-  "
+  (lo_sum:SF (match_operand:SF 1 "register_operand" "")
+	     (match_operand:SF 2 "const_double_operand" "")))]
+  "TARGET_FPU && fp_high_losum_p (operands[2])"
+  "*
+{
+  REAL_VALUE_TYPE r;
+  long i;
+
+  /* Convert the SF constant to its 32-bit target image for %lo().  */
+  REAL_VALUE_FROM_CONST_DOUBLE (r, operands[2]);
+  REAL_VALUE_TO_TARGET_SINGLE (r, i);
+  operands[2] = GEN_INT (i);
+  return \"or\\t%1, %%lo(%a2), %0\";
+}"
+  [(set_attr "type" "ialu")
+  (set_attr "length" "1")])
+
+;; Emit the sethi %hi() half of an SF constant built in an integer register.
+(define_insn "*movsf_high"
+  [(set (match_operand:SF 0 "register_operand" "")
+	(high:SF (match_operand:SF 1 "const_double_operand" "")))]
+  "TARGET_FPU && fp_high_losum_p (operands[1])"
+  "*
{
  REAL_VALUE_TYPE r;
  long i;
  /* Convert the SF constant to its 32-bit target image for %hi().  */
  REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
  REAL_VALUE_TO_TARGET_SINGLE (r, i);
  operands[1] = GEN_INT (i);
-}")
+  return \"sethi\\t%%hi(%1), %0\";
+}"
+  [(set_attr "type" "move")
+  (set_attr "length" "1")])
+
+;; Split an SF constant destined for an integer register (REGNO < 32)
+;; into a high/lo_sum pair, matching the two insns above.
+(define_split
+  [(set (match_operand:SF 0 "register_operand" "")
+        (match_operand:SF 1 "const_double_operand" ""))]
+  "TARGET_FPU
+  && fp_high_losum_p (operands[1])
+  && (GET_CODE (operands[0]) == REG
+  && REGNO (operands[0]) < 32)"
+  [(set (match_dup 0) (high:SF (match_dup 1)))
+  (set (match_dup 0) (lo_sum:SF (match_dup 0) (match_dup 1)))])
+
+;; Exactly the same as above, except that all `f' cases are deleted.
+;; This is necessary to prevent reload from ever trying to use a `f' reg
+;; when -mno-fpu.
+
+;; SF moves with -mno-fpu: integer-register and memory alternatives only,
+;; so reload never tries to use an 'f' register.
+(define_insn "*movsf_no_f_insn"
+  [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
+  (match_operand:SF 1 "input_operand" "r,m,r"))]
+  "! TARGET_FPU
+  && (register_operand (operands[0], SFmode)
+  || register_operand (operands[1], SFmode))"
+  "@
+  mov\\t%1, %0
+  ld\\t%1, %0
+  st\\t%1, %0"
+  [(set_attr "type" "move,load,store")
+  (set_attr "length" "1")])
(define_expand "movsf"
[(set (match_operand:SF 0 "general_operand" "")
if (GET_CODE (operands[0]) == REG
&& CONSTANT_P (operands[1]))
{
- if (TARGET_VIS
- && GET_CODE (operands[1]) == CONST_DOUBLE
- && fp_zero_operand (operands[1]))
- goto movsf_is_ok;
-
/* emit_group_store will send such bogosity to us when it is
not storing directly into memory. So fix this up to avoid
crashes in output_constant_pool. */
if (operands [1] == const0_rtx)
operands[1] = CONST0_RTX (SFmode);
+
+ if (TARGET_VIS && fp_zero_operand (operands[1], SFmode))
+ goto movsf_is_ok;
+
+ /* We are able to build any SF constant in integer registers
+ with at most 2 instructions. */
+ if (REGNO (operands[0]) < 32)
+ goto movsf_is_ok;
+
operands[1] = validize_mem (force_const_mem (GET_MODE (operands[0]),
operands[1]));
}
/* Handle sets of MEM first. */
if (GET_CODE (operands[0]) == MEM)
{
- if (register_operand (operands[1], SFmode))
+ if (register_operand (operands[1], SFmode)
+ || fp_zero_operand (operands[1], SFmode))
goto movsf_is_ok;
if (! reload_in_progress)
;
}")
-(define_insn "*movsf_insn"
- [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,m,r,r,m")
- (match_operand:SF 1 "input_operand" "f,m,f,r,m,r"))]
- "TARGET_FPU
- && (register_operand (operands[0], SFmode)
- || register_operand (operands[1], SFmode))"
- "@
- fmovs\\t%1, %0
- ld\\t%1, %0
- st\\t%1, %0
- mov\\t%1, %0
- ld\\t%1, %0
- st\\t%1, %0"
- [(set_attr "type" "fpmove,fpload,fpstore,move,load,store")
- (set_attr "length" "1")])
-
-;; Exactly the same as above, except that all `f' cases are deleted.
-;; This is necessary to prevent reload from ever trying to use a `f' reg
-;; when -mno-fpu.
-
-(define_insn "*movsf_no_f_insn"
- [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
- (match_operand:SF 1 "input_operand" "r,m,r"))]
- "! TARGET_FPU
- && (register_operand (operands[0], SFmode)
- || register_operand (operands[1], SFmode))"
- "@
- mov\\t%1, %0
- ld\\t%1, %0
- st\\t%1, %0"
- [(set_attr "type" "move,load,store")
- (set_attr "length" "1")])
-
-(define_insn "*clear_df"
- [(set (match_operand:DF 0 "register_operand" "=e")
- (match_operand:DF 1 "const_double_operand" ""))]
- "TARGET_VIS
- && fp_zero_operand (operands[1])"
- "fzero\\t%0"
- [(set_attr "type" "fpmove")
- (set_attr "length" "1")])
-
-(define_insn "*movdf_const_intreg_sp32"
- [(set (match_operand:DF 0 "register_operand" "=e,e,r")
- (match_operand:DF 1 "const_double_operand" "T,o,F"))]
- "TARGET_FPU && ! TARGET_ARCH64"
- "@
- ldd\\t%1, %0
- #
- #"
- [(set_attr "type" "move")
- (set_attr "length" "1,2,2")])
-
-;; Now that we redo life analysis with a clean slate after
-;; instruction splitting for sched2 this can work.
-(define_insn "*movdf_const_intreg_sp64"
- [(set (match_operand:DF 0 "register_operand" "=e,r")
- (match_operand:DF 1 "const_double_operand" "m,F"))]
- "TARGET_FPU && TARGET_ARCH64"
- "@
- ldd\\t%1, %0
- #"
- [(set_attr "type" "move")
- (set_attr "length" "1,2")])
-
-(define_split
- [(set (match_operand:DF 0 "register_operand" "")
- (match_operand:DF 1 "const_double_operand" ""))]
- "TARGET_FPU
- && (GET_CODE (operands[0]) == REG
- && REGNO (operands[0]) < 32)
- && reload_completed"
- [(clobber (const_int 0))]
- "
-{
- REAL_VALUE_TYPE r;
- long l[2];
-
- REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
- REAL_VALUE_TO_TARGET_DOUBLE (r, l);
- if (GET_CODE (operands[0]) == SUBREG)
- operands[0] = alter_subreg (operands[0]);
- operands[0] = gen_rtx_raw_REG (DImode, REGNO (operands[0]));
-
- if (TARGET_ARCH64)
- {
-#if HOST_BITS_PER_WIDE_INT == 64
- HOST_WIDE_INT val;
-
- val = ((HOST_WIDE_INT)l[1] |
- ((HOST_WIDE_INT)l[0] << 32));
- emit_insn (gen_movdi (operands[0], GEN_INT (val)));
-#else
- emit_insn (gen_movdi (operands[0],
- gen_rtx_CONST_DOUBLE (VOIDmode, const0_rtx,
- l[1], l[0])));
-#endif
- }
- else
- {
- emit_insn (gen_movsi (gen_highpart (SImode, operands[0]),
- GEN_INT (l[0])));
-
- /* Slick... but this trick loses if this subreg constant part
- can be done in one insn. */
- if (l[1] == l[0]
- && !(SPARC_SETHI_P (l[0])
- || SPARC_SIMM13_P (l[0])))
- {
- emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
- gen_highpart (SImode, operands[0])));
- }
- else
- {
- emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
- GEN_INT (l[1])));
- }
- }
- DONE;
-}")
-
(define_expand "movdf"
[(set (match_operand:DF 0 "general_operand" "")
(match_operand:DF 1 "general_operand" ""))]
if (GET_CODE (operands[0]) == REG
&& CONSTANT_P (operands[1]))
{
- if (TARGET_VIS
- && GET_CODE (operands[1]) == CONST_DOUBLE
- && fp_zero_operand (operands[1]))
- goto movdf_is_ok;
-
/* emit_group_store will send such bogosity to us when it is
not storing directly into memory. So fix this up to avoid
crashes in output_constant_pool. */
if (operands [1] == const0_rtx)
operands[1] = CONST0_RTX (DFmode);
+
+ if ((TARGET_VIS || REGNO (operands[0]) < 32)
+ && fp_zero_operand (operands[1], DFmode))
+ goto movdf_is_ok;
+
operands[1] = validize_mem (force_const_mem (GET_MODE (operands[0]),
operands[1]));
}
/* Handle MEM cases first. */
if (GET_CODE (operands[0]) == MEM)
{
- if (register_operand (operands[1], DFmode))
+ if (register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))
goto movdf_is_ok;
if (! reload_in_progress)
;; Be careful, fmovd does not exist when !v9.
(define_insn "*movdf_insn_sp32"
- [(set (match_operand:DF 0 "nonimmediate_operand" "=e,T,U,T,e,r,r,o,e,o")
- (match_operand:DF 1 "input_operand" "T,e,T,U,e,r,o,r,o,e"))]
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,T,U,T,o,e,*r,o,e,o")
+ (match_operand:DF 1 "input_operand" "T#F,e,T,U,G,e,*rFo,*r,o#F,e"))]
"TARGET_FPU
&& ! TARGET_V9
&& (register_operand (operands[0], DFmode)
- || register_operand (operands[1], DFmode))"
+ || register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))"
"@
ldd\\t%1, %0
std\\t%1, %0
(set_attr "length" "1,1,1,1,2,2,2,2,2,2")])
(define_insn "*movdf_no_e_insn_sp32"
- [(set (match_operand:DF 0 "nonimmediate_operand" "=U,T,r,r,o")
- (match_operand:DF 1 "input_operand" "T,U,r,o,r"))]
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=U,T,o,r,o")
+ (match_operand:DF 1 "input_operand" "T,U,G,ro,r"))]
"! TARGET_FPU
+ && ! TARGET_V9
&& ! TARGET_ARCH64
&& (register_operand (operands[0], DFmode)
- || register_operand (operands[1], DFmode))"
+ || register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))"
"@
ldd\\t%1, %0
std\\t%1, %0
[(set_attr "type" "load,store,*,*,*")
(set_attr "length" "1,1,2,2,2")])
+;; DFmode moves with no FPU on a V9 (but 32-bit) target: only integer
+;; registers are available.  V9's stx allows an fp-zero constant ('G')
+;; to be stored in one insn via %r1 (prints %g0 for zero); the remaining
+;; reg/mem alternatives take two insns and are split later ("#").
+(define_insn "*movdf_no_e_insn_v9_sp32"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=U,T,T,r,o")
+ (match_operand:DF 1 "input_operand" "T,U,G,ro,rG"))]
+ "! TARGET_FPU
+ && TARGET_V9
+ && ! TARGET_ARCH64
+ && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))"
+ "@
+ ldd\\t%1, %0
+ std\\t%1, %0
+ stx\\t%r1, %0
+ #
+ #"
+ [(set_attr "type" "load,store,store,*,*")
+ (set_attr "length" "1,1,1,2,2")])
+
;; We have available v9 double floats but not 64-bit
-;; integer registers.
-(define_insn "*movdf_insn_v9only"
- [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,m,U,T,r,r,o")
- (match_operand:DF 1 "input_operand" "e,m,e,T,U,r,o,r"))]
+;; integer registers and no VIS.
+(define_insn "*movdf_insn_v9only_novis"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,T,T,U,T,e,*r,o")
+ (match_operand:DF 1 "input_operand" "e,T#F,G,e,T,U,o#F,*roF,*rGe"))]
"TARGET_FPU
&& TARGET_V9
+ && ! TARGET_VIS
&& ! TARGET_ARCH64
&& (register_operand (operands[0], DFmode)
- || register_operand (operands[1], DFmode))"
+ || register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))"
"@
fmovd\\t%1, %0
ldd\\t%1, %0
+ stx\\t%r1, %0
std\\t%1, %0
ldd\\t%1, %0
std\\t%1, %0
#
#
#"
- [(set_attr "type" "fpmove,load,store,load,store,*,*,*")
- (set_attr "length" "1,1,1,1,1,2,2,2")])
+ [(set_attr "type" "fpmove,load,store,store,load,store,*,*,*")
+ (set_attr "length" "1,1,1,1,1,1,2,2,2")])
+
+;; We have available v9 double floats but not 64-bit
+;; integer registers, but we do have VIS.
+;; DFmode moves with V9 double floats and VIS, but without 64-bit
+;; integer registers.  VIS contributes the fzero alternative, which
+;; materializes the fp-zero constant ('G') directly in an fp register;
+;; V9's stx stores fp-zero to memory in one insn via %r1 (%g0).
+;; Multi-insn integer-register alternatives emit "#" and are split.
+(define_insn "*movdf_insn_v9only_vis"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,e,T,T,U,T,e,*r,o")
+ (match_operand:DF 1 "input_operand" "G,e,T#F,G,e,T,U,o#F,*roGF,*rGe"))]
+ "TARGET_FPU
+ && TARGET_VIS
+ && ! TARGET_ARCH64
+ && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))"
+ "@
+ fzero\\t%0
+ fmovd\\t%1, %0
+ ldd\\t%1, %0
+ stx\\t%r1, %0
+ std\\t%1, %0
+ ldd\\t%1, %0
+ std\\t%1, %0
+ #
+ #
+ #"
+ [(set_attr "type" "fpmove,fpmove,load,store,store,load,store,*,*,*")
+ (set_attr "length" "1,1,1,1,1,1,1,2,2,2")])
;; We have available both v9 double floats and 64-bit
-;; integer registers.
-(define_insn "*movdf_insn_sp64"
- [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,m,r,r,m")
- (match_operand:DF 1 "input_operand" "e,m,e,r,m,r"))]
+;; integer registers. No VIS though.
+(define_insn "*movdf_insn_sp64_novis"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,m,*r,*r,m,*r")
+ (match_operand:DF 1 "input_operand" "e,m#F,e,*rG,m,*rG,F"))]
"TARGET_FPU
- && TARGET_V9
+ && ! TARGET_VIS
&& TARGET_ARCH64
&& (register_operand (operands[0], DFmode)
- || register_operand (operands[1], DFmode))"
+ || register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))"
"@
fmovd\\t%1, %0
ldd\\t%1, %0
std\\t%1, %0
- mov\\t%1, %0
+ mov\\t%r1, %0
ldx\\t%1, %0
- stx\\t%1, %0"
- [(set_attr "type" "fpmove,load,store,move,load,store")
- (set_attr "length" "1")])
+ stx\\t%r1, %0
+ #"
+ [(set_attr "type" "fpmove,load,store,move,load,store,*")
+ (set_attr "length" "1,1,1,1,1,1,2")])
+
+;; We have available both v9 double floats and 64-bit
+;; integer registers. And we have VIS.
+;; DFmode moves with both V9 double floats and 64-bit integer
+;; registers, plus VIS.  Differs from the novis variant only in the
+;; extra fzero alternative for the fp-zero constant ('G') in fp regs;
+;; integer-register zero uses %r1 (%g0) with mov/stx.  The 'F'
+;; constant-to-intreg case emits "#" and is split into integer moves.
+(define_insn "*movdf_insn_sp64_vis"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,e,m,*r,*r,m,*r")
+ (match_operand:DF 1 "input_operand" "G,e,m#F,e,*rG,m,*rG,F"))]
+ "TARGET_FPU
+ && TARGET_VIS
+ && TARGET_ARCH64
+ && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))"
+ "@
+ fzero\\t%0
+ fmovd\\t%1, %0
+ ldd\\t%1, %0
+ std\\t%1, %0
+ mov\\t%r1, %0
+ ldx\\t%1, %0
+ stx\\t%r1, %0
+ #"
+ [(set_attr "type" "fpmove,fpmove,load,store,move,load,store,*")
+ (set_attr "length" "1,1,1,1,1,1,1,2")])
(define_insn "*movdf_no_e_insn_sp64"
[(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m")
- (match_operand:DF 1 "input_operand" "r,m,r"))]
+ (match_operand:DF 1 "input_operand" "r,m,rG"))]
"! TARGET_FPU
&& TARGET_ARCH64
&& (register_operand (operands[0], DFmode)
- || register_operand (operands[1], DFmode))"
+ || register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))"
"@
mov\\t%1, %0
ldx\\t%1, %0
- stx\\t%1, %0"
+ stx\\t%r1, %0"
[(set_attr "type" "move,load,store")
(set_attr "length" "1")])
+;; After reload, split a non-zero DF constant headed for an integer
+;; register (REGNO < 32) into direct integer moves: one movdi on
+;; arch64, otherwise a movsi for each 32-bit half.  fp-zero is
+;; excluded because the move patterns above handle it in one insn.
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (match_operand:DF 1 "const_double_operand" ""))]
+ "TARGET_FPU
+ && (GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) < 32)
+ && ! fp_zero_operand(operands[1], DFmode)
+ && reload_completed"
+ [(clobber (const_int 0))]
+ "
+{
+ REAL_VALUE_TYPE r;
+ long l[2];
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
+ REAL_VALUE_TO_TARGET_DOUBLE (r, l);
+ if (GET_CODE (operands[0]) == SUBREG)
+ operands[0] = alter_subreg (operands[0]);
+ operands[0] = gen_rtx_raw_REG (DImode, REGNO (operands[0]));
+
+ if (TARGET_ARCH64)
+ {
+#if HOST_BITS_PER_WIDE_INT == 64
+ HOST_WIDE_INT val;
+
+ /* The (unsigned long) casts keep the low word from sign-extending
+ into the high half when combined into one 64-bit value.  */
+ val = ((HOST_WIDE_INT)(unsigned long)l[1] |
+ ((HOST_WIDE_INT)(unsigned long)l[0] << 32));
+ emit_insn (gen_movdi (operands[0], GEN_INT (val)));
+#else
+ emit_insn (gen_movdi (operands[0],
+ gen_rtx_CONST_DOUBLE (VOIDmode, const0_rtx,
+ l[1], l[0])));
+#endif
+ }
+ else
+ {
+ emit_insn (gen_movsi (gen_highpart (SImode, operands[0]),
+ GEN_INT (l[0])));
+
+ /* Slick... but this trick loses if this subreg constant part
+ can be done in one insn. */
+ if (l[1] == l[0]
+ && !(SPARC_SETHI_P (l[0])
+ || SPARC_SIMM13_P (l[0])))
+ {
+ emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
+ gen_highpart (SImode, operands[0])));
+ }
+ else
+ {
+ emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
+ GEN_INT (l[1])));
+ }
+ }
+ DONE;
+}")
+
;; Ok, now the splits to handle all the multi insn and
;; mis-aligned memory address cases.
;; In these splits please take note that we must be
(define_split
[(set (match_operand:DF 0 "register_operand" "")
(match_operand:DF 1 "memory_operand" ""))]
- "((! TARGET_V9
- || (! TARGET_ARCH64
- && ((GET_CODE (operands[0]) == REG
- && REGNO (operands[0]) < 32)
- || (GET_CODE (operands[0]) == SUBREG
- && GET_CODE (SUBREG_REG (operands[0])) == REG
- && REGNO (SUBREG_REG (operands[0])) < 32))))
- && (reload_completed
- && (((REGNO (operands[0])) % 2) != 0
- || ! mem_min_alignment (operands[1], 8))
- && offsettable_memref_p (operands[1])))"
+ "reload_completed
+ && ! TARGET_ARCH64
+ && (((REGNO (operands[0]) % 2) != 0)
+ || ! mem_min_alignment (operands[1], 8))
+ && offsettable_memref_p (operands[1])"
[(clobber (const_int 0))]
"
{
(define_split
[(set (match_operand:DF 0 "memory_operand" "")
(match_operand:DF 1 "register_operand" ""))]
- "((! TARGET_V9
- || (! TARGET_ARCH64
- && ((GET_CODE (operands[1]) == REG
- && REGNO (operands[1]) < 32)
- || (GET_CODE (operands[1]) == SUBREG
- && GET_CODE (SUBREG_REG (operands[1])) == REG
- && REGNO (SUBREG_REG (operands[1])) < 32))))
- && (reload_completed
- && (((REGNO (operands[1])) % 2) != 0
- || ! mem_min_alignment (operands[0], 8))
- && offsettable_memref_p (operands[0])))"
+ "reload_completed
+ && ! TARGET_ARCH64
+ && (((REGNO (operands[1]) % 2) != 0)
+ || ! mem_min_alignment (operands[0], 8))
+ && offsettable_memref_p (operands[0])"
[(clobber (const_int 0))]
"
{
DONE;
}")
+;; After reload, split a DF store of fp-zero into two SFmode zero
+;; stores at offsets 0 and 4, when we cannot use a single store:
+;; pre-V9 (no stx), or V9/32-bit with memory not 8-byte aligned.
+;; Requires an offsettable address to form the +4 word.
+(define_split
+ [(set (match_operand:DF 0 "memory_operand" "")
+ (match_operand:DF 1 "fp_zero_operand" ""))]
+ "reload_completed
+ && (! TARGET_V9
+ || (! TARGET_ARCH64
+ && ! mem_min_alignment (operands[0], 8)))
+ && offsettable_memref_p (operands[0])"
+ [(clobber (const_int 0))]
+ "
+{
+ rtx dest1, dest2;
+
+ dest1 = change_address (operands[0], SFmode, NULL_RTX);
+ dest2 = change_address (operands[0], SFmode,
+ plus_constant_for_output (XEXP (dest1, 0), 4));
+ emit_insn (gen_movsf (dest1, CONST0_RTX (SFmode)));
+ emit_insn (gen_movsf (dest2, CONST0_RTX (SFmode)));
+ DONE;
+}")
+
+;; After reload on 32-bit targets, split "DF integer-reg = fp-zero"
+;; (REGNO < 32, possibly via SUBREG) into two SFmode zero moves, one
+;; for each 32-bit half of the register pair.
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (match_operand:DF 1 "fp_zero_operand" ""))]
+ "reload_completed
+ && ! TARGET_ARCH64
+ && ((GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) < 32)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) < 32))"
+ [(clobber (const_int 0))]
+ "
+{
+ rtx set_dest = operands[0];
+ rtx dest1, dest2;
+
+ if (GET_CODE (set_dest) == SUBREG)
+ set_dest = alter_subreg (set_dest);
+ dest1 = gen_highpart (SFmode, set_dest);
+ dest2 = gen_lowpart (SFmode, set_dest);
+ emit_insn (gen_movsf (dest1, CONST0_RTX (SFmode)));
+ emit_insn (gen_movsf (dest2, CONST0_RTX (SFmode)));
+ DONE;
+}")
+
(define_expand "movtf"
[(set (match_operand:TF 0 "general_operand" "")
(match_operand:TF 1 "general_operand" ""))]
crashes in output_constant_pool. */
if (operands [1] == const0_rtx)
operands[1] = CONST0_RTX (TFmode);
+
+ if (TARGET_VIS && fp_zero_operand (operands[1], TFmode))
+ goto movtf_is_ok;
+
operands[1] = validize_mem (force_const_mem (GET_MODE (operands[0]),
operands[1]));
}
full 16-byte alignment for quads. */
if (GET_CODE (operands[0]) == MEM)
{
- if (register_operand (operands[1], TFmode))
- goto movtf_is_ok;
+ if (register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))
+ goto movtf_is_ok;
if (! reload_in_progress)
{
;; Be careful, fmovq and {st,ld}{x,q} do not exist when !arch64 so
;; we must split them all. :-(
(define_insn "*movtf_insn_sp32"
- [(set (match_operand:TF 0 "nonimmediate_operand" "=e,o,U,o,e,r,r,o")
- (match_operand:TF 1 "input_operand" "o,e,o,U,e,r,o,r"))]
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=e,o,U,r")
+ (match_operand:TF 1 "input_operand" "oe,GeUr,o,roG"))]
+ "TARGET_FPU
+ && ! TARGET_VIS
+ && ! TARGET_ARCH64
+ && (register_operand (operands[0], TFmode)
+ || register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))"
+ "#"
+ [(set_attr "length" "4")])
+
+;; TFmode moves on 32-bit FPU targets with VIS.  All alternatives are
+;; "#": every TF move is split into smaller pieces later.  With VIS the
+;; fp-zero constant ('G') is additionally allowed into fp registers,
+;; where the split can use fzero (cf. *movtf_insn_sp32 just above).
+(define_insn "*movtf_insn_vis_sp32"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=e,o,U,r")
+ (match_operand:TF 1 "input_operand" "Goe,GeUr,o,roG"))]
"TARGET_FPU
+ && TARGET_VIS
&& ! TARGET_ARCH64
&& (register_operand (operands[0], TFmode)
- || register_operand (operands[1], TFmode))"
+ || register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))"
"#"
[(set_attr "length" "4")])
;; when -mno-fpu.
(define_insn "*movtf_no_e_insn_sp32"
- [(set (match_operand:TF 0 "nonimmediate_operand" "=U,o,r,r,o")
- (match_operand:TF 1 "input_operand" "o,U,r,o,r"))]
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=o,U,o,r,o")
+ (match_operand:TF 1 "input_operand" "G,o,U,roG,r"))]
"! TARGET_FPU
&& ! TARGET_ARCH64
&& (register_operand (operands[0], TFmode)
- || register_operand (operands[1], TFmode))"
+ || register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))"
"#"
[(set_attr "length" "4")])
;; Now handle the float reg cases directly when arch64,
;; hard_quad, and proper reg number alignment are all true.
(define_insn "*movtf_insn_hq_sp64"
- [(set (match_operand:TF 0 "nonimmediate_operand" "=e,e,m,r,r,o")
- (match_operand:TF 1 "input_operand" "e,m,e,r,o,r"))]
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=e,e,m,o,r")
+ (match_operand:TF 1 "input_operand" "e,m,e,Gr,roG"))]
"TARGET_FPU
+ && ! TARGET_VIS
+ && TARGET_ARCH64
+ && TARGET_HARD_QUAD
+ && (register_operand (operands[0], TFmode)
+ || register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))"
+ "@
+ fmovq\\t%1, %0
+ ldq\\t%1, %0
+ stq\\t%1, %0
+ #
+ #"
+ [(set_attr "type" "fpmove,fpload,fpstore,*,*")
+ (set_attr "length" "1,1,1,2,2")])
+
+(define_insn "*movtf_insn_hq_vis_sp64"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=e,e,m,eo,r,o")
+ (match_operand:TF 1 "input_operand" "e,m,e,G,roG,r"))]
+ "TARGET_FPU
+ && TARGET_VIS
&& TARGET_ARCH64
- && TARGET_V9
&& TARGET_HARD_QUAD
&& (register_operand (operands[0], TFmode)
- || register_operand (operands[1], TFmode))"
+ || register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))"
"@
fmovq\\t%1, %0
ldq\\t%1, %0
;; Now we allow the integer register cases even when
;; only arch64 is true.
(define_insn "*movtf_insn_sp64"
- [(set (match_operand:TF 0 "nonimmediate_operand" "=e,o,r,o,e,r")
- (match_operand:TF 1 "input_operand" "o,e,o,r,e,r"))]
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=e,o,r")
+ (match_operand:TF 1 "input_operand" "oe,Ger,orG"))]
+ "TARGET_FPU
+ && ! TARGET_VIS
+ && TARGET_ARCH64
+ && ! TARGET_HARD_QUAD
+ && (register_operand (operands[0], TFmode)
+ || register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))"
+ "#"
+ [(set_attr "length" "2")])
+
+(define_insn "*movtf_insn_vis_sp64"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=e,o,r")
+ (match_operand:TF 1 "input_operand" "Goe,Ger,orG"))]
"TARGET_FPU
+ && TARGET_VIS
&& TARGET_ARCH64
&& ! TARGET_HARD_QUAD
&& (register_operand (operands[0], TFmode)
- || register_operand (operands[1], TFmode))"
+ || register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))"
"#"
[(set_attr "length" "2")])
(define_insn "*movtf_no_e_insn_sp64"
- [(set (match_operand:TF 0 "nonimmediate_operand" "=r,o,r")
- (match_operand:TF 1 "input_operand" "o,r,r"))]
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=r,o")
+ (match_operand:TF 1 "input_operand" "orG,rG"))]
"! TARGET_FPU
&& TARGET_ARCH64
&& (register_operand (operands[0], TFmode)
- || register_operand (operands[1], TFmode))"
+ || register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))"
"#"
[(set_attr "length" "2")])
if (GET_CODE (set_src) == SUBREG)
set_src = alter_subreg (set_src);
- /* Ugly, but gen_highpart will crap out here for 32-bit targets. */
- dest1 = gen_rtx_SUBREG (DFmode, set_dest, WORDS_BIG_ENDIAN == 0);
- dest2 = gen_rtx_SUBREG (DFmode, set_dest, WORDS_BIG_ENDIAN != 0);
- src1 = gen_rtx_SUBREG (DFmode, set_src, WORDS_BIG_ENDIAN == 0);
- src2 = gen_rtx_SUBREG (DFmode, set_src, WORDS_BIG_ENDIAN != 0);
+ dest1 = gen_df_reg (set_dest, 0);
+ dest2 = gen_df_reg (set_dest, 1);
+ src1 = gen_df_reg (set_src, 0);
+ src2 = gen_df_reg (set_src, 1);
/* Now emit using the real source and destination we found, swapping
the order if we detect overlap. */
}")
(define_split
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (match_operand:TF 1 "fp_zero_operand" ""))]
+ "reload_completed"
+ [(clobber (const_int 0))]
+ "
+{
+ rtx set_dest = operands[0];
+ rtx dest1, dest2;
+
+ switch (GET_CODE (set_dest))
+ {
+ case SUBREG:
+ set_dest = alter_subreg (set_dest);
+ /* FALLTHROUGH */
+ case REG:
+ dest1 = gen_df_reg (set_dest, 0);
+ dest2 = gen_df_reg (set_dest, 1);
+ break;
+ case MEM:
+ dest1 = change_address (set_dest, DFmode, NULL_RTX);
+ dest2 = change_address (set_dest, DFmode,
+ plus_constant_for_output (XEXP (dest1, 0), 8));
+ break;
+ default:
+ abort ();
+ }
+
+ emit_insn (gen_movdf (dest1, CONST0_RTX (DFmode)));
+ emit_insn (gen_movdf (dest2, CONST0_RTX (DFmode)));
+ DONE;
+}")
+
+(define_split
[(set (match_operand:TF 0 "register_operand" "")
(match_operand:TF 1 "memory_operand" ""))]
"(reload_completed
rtx word0 = change_address (operands[1], DFmode, NULL_RTX);
rtx word1 = change_address (operands[1], DFmode,
plus_constant_for_output (XEXP (word0, 0), 8));
- rtx dest1, dest2;
+ rtx set_dest, dest1, dest2;
- /* Ugly, but gen_highpart will crap out here for 32-bit targets. */
- dest1 = gen_rtx_SUBREG (DFmode, operands[0], WORDS_BIG_ENDIAN == 0);
- dest2 = gen_rtx_SUBREG (DFmode, operands[0], WORDS_BIG_ENDIAN != 0);
+ set_dest = operands[0];
+ if (GET_CODE (set_dest) == SUBREG)
+ set_dest = alter_subreg (set_dest);
+
+ dest1 = gen_df_reg (set_dest, 0);
+ dest2 = gen_df_reg (set_dest, 1);
/* Now output, ordering such that we don't clobber any registers
mentioned in the address. */
[(clobber (const_int 0))]
"
{
- rtx word0 = change_address (operands[0], DFmode, NULL_RTX);
- rtx word1 = change_address (operands[0], DFmode,
- plus_constant_for_output (XEXP (word0, 0), 8));
- rtx src1, src2;
+ rtx word1 = change_address (operands[0], DFmode, NULL_RTX);
+ rtx word2 = change_address (operands[0], DFmode,
+ plus_constant_for_output (XEXP (word1, 0), 8));
+ rtx set_src;
+
+ set_src = operands[1];
+ if (GET_CODE (set_src) == SUBREG)
+ set_src = alter_subreg (set_src);
- /* Ugly, but gen_highpart will crap out here for 32-bit targets. */
- src1 = gen_rtx_SUBREG (DFmode, operands[1], WORDS_BIG_ENDIAN == 0);
- src2 = gen_rtx_SUBREG (DFmode, operands[1], WORDS_BIG_ENDIAN != 0);
- emit_insn (gen_movdf (word0, src1));
- emit_insn (gen_movdf (word1, src2));
+ emit_insn (gen_movdf (word1, gen_df_reg (set_src, 0)));
+ emit_insn (gen_movdf (word2, gen_df_reg (set_src, 1)));
DONE;
}")
\f
[(set_attr "type" "fpcmove")
(set_attr "length" "1")])
-(define_insn "*movdf_cc_sp64"
+(define_insn "movdf_cc_sp64"
[(set (match_operand:DF 0 "register_operand" "=e,e")
(if_then_else:DF (match_operator 1 "comparison_operator"
[(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
[(set_attr "type" "fpcmove")
(set_attr "length" "1")])
-(define_insn "*movtf_cc_sp64"
+(define_insn "*movtf_cc_hq_sp64"
[(set (match_operand:TF 0 "register_operand" "=e,e")
(if_then_else:TF (match_operator 1 "comparison_operator"
[(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
[(set_attr "type" "fpcmove")
(set_attr "length" "1")])
+;; Conditional TFmode move on condition codes without hard quad
+;; support: no fmovq exists, so emit "#" and let the following split
+;; turn it into two DFmode conditional moves (length 2).
+(define_insn "*movtf_cc_sp64"
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (if_then_else:TF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:TF 3 "register_operand" "e,0")
+ (match_operand:TF 4 "register_operand" "0,e")))]
+ "TARGET_V9 && TARGET_FPU && !TARGET_HARD_QUAD"
+ "#"
+ [(set_attr "type" "fpcmove")
+ (set_attr "length" "2")])
+
+;; Split the !TARGET_HARD_QUAD conditional TF move above into two
+;; movdf_cc_sp64 insns, one per DF half, after reload.  The halves are
+;; emitted in whichever order avoids clobbering a source half that the
+;; first conditional move's destination overlaps.
(define_split
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (if_then_else:TF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:TF 3 "register_operand" "e,0")
+ (match_operand:TF 4 "register_operand" "0,e")))]
+ "reload_completed && TARGET_V9 && TARGET_FPU && !TARGET_HARD_QUAD"
+ [(clobber (const_int 0))]
+ "
+{
+ rtx set_dest = operands[0];
+ rtx set_srca = operands[3];
+ rtx set_srcb = operands[4];
+ int third = rtx_equal_p (set_dest, set_srca);
+ rtx dest1, dest2;
+ rtx srca1, srca2, srcb1, srcb2;
+
+ if (GET_CODE (set_dest) == SUBREG)
+ set_dest = alter_subreg (set_dest);
+ if (GET_CODE (set_srca) == SUBREG)
+ set_srca = alter_subreg (set_srca);
+ if (GET_CODE (set_srcb) == SUBREG)
+ set_srcb = alter_subreg (set_srcb);
+
+ dest1 = gen_df_reg (set_dest, 0);
+ dest2 = gen_df_reg (set_dest, 1);
+ srca1 = gen_df_reg (set_srca, 0);
+ srca2 = gen_df_reg (set_srca, 1);
+ srcb1 = gen_df_reg (set_srcb, 0);
+ srcb2 = gen_df_reg (set_srcb, 1);
+
+ /* Now emit using the real source and destination we found, swapping
+ the order if we detect overlap. */
+ if ((third && reg_overlap_mentioned_p (dest1, srcb2))
+ || (!third && reg_overlap_mentioned_p (dest1, srca2)))
+ {
+ emit_insn (gen_movdf_cc_sp64 (dest2, operands[1], operands[2], srca2, srcb2));
+ emit_insn (gen_movdf_cc_sp64 (dest1, operands[1], operands[2], srca1, srcb1));
+ }
+ else
+ {
+ emit_insn (gen_movdf_cc_sp64 (dest1, operands[1], operands[2], srca1, srcb1));
+ emit_insn (gen_movdf_cc_sp64 (dest2, operands[1], operands[2], srca2, srcb2));
+ }
+ DONE;
+}")
+
(define_insn "*movqi_cc_reg_sp64"
[(set (match_operand:QI 0 "register_operand" "=r,r")
(if_then_else:QI (match_operator 1 "v9_regcmp_op"
[(set_attr "type" "fpcmove")
(set_attr "length" "1")])
-(define_insn "*movdf_cc_reg_sp64"
+(define_insn "movdf_cc_reg_sp64"
[(set (match_operand:DF 0 "register_operand" "=e,e")
(if_then_else:DF (match_operator 1 "v9_regcmp_op"
[(match_operand:DI 2 "register_operand" "r,r")
[(set_attr "type" "fpcmove")
(set_attr "length" "1")])
-(define_insn "*movtf_cc_reg_sp64"
+(define_insn "*movtf_cc_reg_hq_sp64"
[(set (match_operand:TF 0 "register_operand" "=e,e")
(if_then_else:TF (match_operator 1 "v9_regcmp_op"
[(match_operand:DI 2 "register_operand" "r,r")
(const_int 0)])
(match_operand:TF 3 "register_operand" "e,0")
(match_operand:TF 4 "register_operand" "0,e")))]
- "TARGET_ARCH64 && TARGET_FPU"
+ "TARGET_ARCH64 && TARGET_FPU && TARGET_HARD_QUAD"
"@
fmovrq%D1\\t%2, %3, %0
fmovrq%d1\\t%2, %4, %0"
[(set_attr "type" "fpcmove")
(set_attr "length" "1")])
+
+;; Conditional TFmode move on an integer register comparison
+;; (fmovr-style) without hard quad: no fmovrq exists, so emit "#" for
+;; the split below to expand into two DF register-conditional moves.
+(define_insn "*movtf_cc_reg_sp64"
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (if_then_else:TF (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:TF 3 "register_operand" "e,0")
+ (match_operand:TF 4 "register_operand" "0,e")))]
+ "TARGET_ARCH64 && TARGET_FPU && ! TARGET_HARD_QUAD"
+ "#"
+ [(set_attr "type" "fpcmove")
+ (set_attr "length" "2")])
+
+;; Split the register-conditional TF move above into two
+;; movdf_cc_reg_sp64 insns after reload, mirroring the cc-based split:
+;; halves are ordered so the first destination half never overlaps a
+;; source half still needed by the second move.
+(define_split
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (if_then_else:TF (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:TF 3 "register_operand" "e,0")
+ (match_operand:TF 4 "register_operand" "0,e")))]
+ "reload_completed && TARGET_ARCH64 && TARGET_FPU && ! TARGET_HARD_QUAD"
+ [(clobber (const_int 0))]
+ "
+{
+ rtx set_dest = operands[0];
+ rtx set_srca = operands[3];
+ rtx set_srcb = operands[4];
+ int third = rtx_equal_p (set_dest, set_srca);
+ rtx dest1, dest2;
+ rtx srca1, srca2, srcb1, srcb2;
+
+ if (GET_CODE (set_dest) == SUBREG)
+ set_dest = alter_subreg (set_dest);
+ if (GET_CODE (set_srca) == SUBREG)
+ set_srca = alter_subreg (set_srca);
+ if (GET_CODE (set_srcb) == SUBREG)
+ set_srcb = alter_subreg (set_srcb);
+
+ dest1 = gen_df_reg (set_dest, 0);
+ dest2 = gen_df_reg (set_dest, 1);
+ srca1 = gen_df_reg (set_srca, 0);
+ srca2 = gen_df_reg (set_srca, 1);
+ srcb1 = gen_df_reg (set_srcb, 0);
+ srcb2 = gen_df_reg (set_srcb, 1);
+
+ /* Now emit using the real source and destination we found, swapping
+ the order if we detect overlap. */
+ if ((third && reg_overlap_mentioned_p (dest1, srcb2))
+ || (!third && reg_overlap_mentioned_p (dest1, srca2)))
+ {
+ emit_insn (gen_movdf_cc_reg_sp64 (dest2, operands[1], operands[2], srca2, srcb2));
+ emit_insn (gen_movdf_cc_reg_sp64 (dest1, operands[1], operands[2], srca1, srcb1));
+ }
+ else
+ {
+ emit_insn (gen_movdf_cc_reg_sp64 (dest1, operands[1], operands[2], srca1, srcb1));
+ emit_insn (gen_movdf_cc_reg_sp64 (dest2, operands[1], operands[2], srca2, srcb2));
+ }
+ DONE;
+}")
+
\f
;;- zero extension instructions
[(set (reg:CC 100)
(compare:CC (zero_extend:SI (match_operand:QI 0 "register_operand" "r"))
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
+ "andcc\\t%0, 0xff, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*cmp_zero_qi"
+ [(set (reg:CC 100)
+ (compare:CC (match_operand:QI 0 "register_operand" "r")
+ (const_int 0)))]
+ ""
"andcc\\t%0, 0xff, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
[(set_attr "type" "compare")
(set_attr "length" "1")])
+;; Combined pattern: "andcc %1, 0xff, %0" both zero-extends the low
+;; byte of operand 1 into operand 0 and sets CC from the masked value,
+;; so a separate compare-with-zero is unnecessary.
+(define_insn "*cmp_zero_extendqisi2_andcc_set"
+ [(set (reg:CC 100)
+ (compare:CC (and:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 255))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (subreg:QI (match_dup 1) 0)))]
+ ""
+ "andcc\\t%1, 0xff, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
(define_insn "*cmp_zero_extendqidi2"
[(set (reg:CCX 100)
(compare:CCX (zero_extend:DI (match_operand:QI 0 "register_operand" "r"))
[(set_attr "type" "compare")
(set_attr "length" "1")])
+;; Compare a QImode register against zero on arch64 (CCX) with a
+;; single andcc into %g0 — the 0xff mask discards the upper bits.
+(define_insn "*cmp_zero_qi_sp64"
+ [(set (reg:CCX 100)
+ (compare:CCX (match_operand:QI 0 "register_operand" "r")
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "andcc\\t%0, 0xff, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
(define_insn "*cmp_zero_extendqidi2_set"
[(set (reg:CCX 100)
(compare:CCX (zero_extend:DI (match_operand:QI 1 "register_operand" "r"))
[(set_attr "type" "compare")
(set_attr "length" "1")])
+;; 64-bit analogue of *cmp_zero_extendqisi2_andcc_set: one andcc both
+;; produces the zero-extended low byte in operand 0 and sets CCX.
+(define_insn "*cmp_zero_extendqidi2_andcc_set"
+ [(set (reg:CCX 100)
+ (compare:CCX (and:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 255))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (subreg:QI (match_dup 1) 0)))]
+ "TARGET_ARCH64"
+ "andcc\\t%1, 0xff, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
;; Similarly, handle {SI,DI}->QI mode truncation followed by a compare.
(define_insn "*cmp_siqi_trunc"
[(set (reg:CC 100)
(compare:CC (subreg:QI (match_operand:SI 0 "register_operand" "r") 0)
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"andcc\\t%0, 0xff, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
(match_operand:SI 1 "small_int_or_double" "n")
(match_operand:SI 2 "small_int_or_double" "n"))
(const_int 0)))]
- "! TARGET_LIVE_G0
- && ((GET_CODE (operands[2]) == CONST_INT
- && INTVAL (operands[2]) > 19)
- || (GET_CODE (operands[2]) == CONST_DOUBLE
- && CONST_DOUBLE_LOW (operands[2]) > 19))"
+ "(GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) > 19)
+ || (GET_CODE (operands[2]) == CONST_DOUBLE
+ && CONST_DOUBLE_LOW (operands[2]) > 19)"
"*
{
int len = (GET_CODE (operands[1]) == CONST_INT
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "extendsftf2"
+;; SFmode -> TFmode extension.  With hard quad this matches the
+;; *extendsftf2_hq insn; otherwise (64-bit soft quad ABI) expand to a
+;; call to the _Qp_stoq library routine, which writes the TFmode result
+;; through a pointer — to operand 0 directly when it is a MEM, else to
+;; a stack temporary that is then copied into operand 0.
+(define_expand "extendsftf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float_extend:TF
+ (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_stoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], SFmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*extendsftf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(float_extend:TF
(match_operand:SF 1 "register_operand" "f")))]
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "extenddftf2"
+(define_expand "extenddftf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float_extend:TF
+ (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_dtoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], DFmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*extenddftf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(float_extend:TF
(match_operand:DF 1 "register_operand" "e")))]
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "trunctfsf2"
+(define_expand "trunctfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtos\"),
+ operands[0], 0, SFmode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+ }
+}")
+
+(define_insn "*trunctfsf2_hq"
[(set (match_operand:SF 0 "register_operand" "=f")
(float_truncate:SF
(match_operand:TF 1 "register_operand" "e")))]
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "trunctfdf2"
+(define_expand "trunctfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (float_truncate:DF
+ (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtod\"),
+ operands[0], 0, DFmode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+ }
+}")
+
+(define_insn "*trunctfdf2_hq"
[(set (match_operand:DF 0 "register_operand" "=e")
(float_truncate:DF
(match_operand:TF 1 "register_operand" "e")))]
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "floatsitf2"
+;; SImode -> TFmode conversion.  With hard quad this matches the
+;; *floatsitf2_hq insn; otherwise (64-bit soft quad ABI) call the
+;; _Qp_itoq library routine, which stores the TFmode result through
+;; a pointer.
+(define_expand "floatsitf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float:TF (match_operand:SI 1 "register_operand" "f")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ /* slot0 receives the TFmode result, so it must be derived from
+ operand 0, not from the SImode source operand 1.  */
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_itoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], SImode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*floatsitf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(float:TF (match_operand:SI 1 "register_operand" "f")))]
"TARGET_FPU && TARGET_HARD_QUAD"
[(set_attr "type" "fp")
(set_attr "length" "1")])
+;; Unsigned SImode -> TFmode conversion via the _Qp_uitoq library
+;; routine (soft quad only; there is no hard-quad unsigned variant).
+(define_expand "floatunssitf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (unsigned_float:TF (match_operand:SI 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
+ "
+{
+ rtx slot0;
+
+ /* slot0 receives the TFmode result, so it must be derived from
+ operand 0, not from the SImode source operand 1.  */
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_uitoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], SImode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+}")
+
;; Now the same for 64 bit sources.
(define_insn "floatdisf2"
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "floatditf2"
+;; DImode -> TFmode conversion.  With hard quad this matches the
+;; *floatditf2_hq insn; otherwise call the _Qp_xtoq library routine,
+;; which stores the TFmode result through a pointer.
+(define_expand "floatditf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float:TF (match_operand:DI 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_V9 && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ /* slot0 receives the TFmode result, so it must be derived from
+ operand 0, not from the DImode source operand 1.  */
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_xtoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], DImode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*floatditf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(float:TF (match_operand:DI 1 "register_operand" "e")))]
"TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
[(set_attr "type" "fp")
(set_attr "length" "1")])
+;; Unsigned DImode -> TFmode conversion via the _Qp_uxtoq library
+;; routine (soft quad only; there is no hard-quad unsigned variant).
+(define_expand "floatunsditf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (unsigned_float:TF (match_operand:DI 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
+ "
+{
+ rtx slot0;
+
+ /* slot0 receives the TFmode result, so it must be derived from
+ operand 0, not from the DImode source operand 1.  */
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_uxtoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], DImode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+}")
+
;; Convert a float to an actual integer.
;; Truncation is performed as part of the conversion.
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "fix_truncdfsi2"
- [(set (match_operand:SI 0 "register_operand" "=f")
- (fix:SI (fix:DF (match_operand:DF 1 "register_operand" "e"))))]
- "TARGET_FPU"
- "fdtoi\\t%1, %0"
- [(set_attr "type" "fp")
- (set_attr "length" "1")])
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (fix:DF (match_operand:DF 1 "register_operand" "e"))))]
+ "TARGET_FPU"
+ "fdtoi\\t%1, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
+
+(define_expand "fix_trunctfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtoi\"),
+ operands[0], 0, SImode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+ }
+}")
-(define_insn "fix_trunctfsi2"
+(define_insn "*fix_trunctfsi2_hq"
[(set (match_operand:SI 0 "register_operand" "=f")
(fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
"TARGET_FPU && TARGET_HARD_QUAD"
[(set_attr "type" "fp")
(set_attr "length" "1")])
+(define_expand "fixuns_trunctfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (unsigned_fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ "TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
+ "
+{
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtoui\"),
+ operands[0], 0, SImode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+}")
+
;; Now the same, for V9 targets
(define_insn "fix_truncsfdi2"
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "fix_trunctfdi2"
+;; TFmode -> DImode fix conversion.  The RTL must use fix:DI (not
+;; fix:SI) to agree with the DImode destination and to match the
+;; *fix_trunctfdi2_hq insn this expands to under TARGET_HARD_QUAD.
+;; Otherwise, call _Qp_qtox with a pointer to a TFmode slot holding
+;; the input.
+(define_expand "fix_trunctfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=e")
+ (fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ "TARGET_V9 && TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtox\"),
+ operands[0], 0, DImode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+ }
+}")
+
+(define_insn "*fix_trunctfdi2_hq"
[(set (match_operand:DI 0 "register_operand" "=e")
(fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
"TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
"fqtox\\t%1, %0"
[(set_attr "type" "fp")
(set_attr "length" "1")])
+
+(define_expand "fixuns_trunctfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=f")
+ (unsigned_fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ "TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
+ "
+{
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtoux\"),
+ operands[0], 0, DImode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+}")
+
\f
;;- arithmetic instructions
""
"
{
+ HOST_WIDE_INT i;
+
if (! TARGET_ARCH64)
{
emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
}
if (arith_double_4096_operand(operands[2], DImode))
{
- emit_insn (gen_rtx_SET (VOIDmode, operands[0],
- gen_rtx_MINUS (DImode, operands[1],
- GEN_INT(-4096))));
+ switch (GET_CODE (operands[1]))
+ {
+ case CONST_INT: i = INTVAL (operands[1]); break;
+ case CONST_DOUBLE: i = CONST_DOUBLE_LOW (operands[1]); break;
+ default:
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_MINUS (DImode, operands[1],
+ GEN_INT(-4096))));
+ DONE;
+ }
+ emit_insn (gen_movdi (operands[0], GEN_INT (i + 4096)));
DONE;
}
}")
operands[5] = gen_lowpart (SImode, operands[2]);
operands[6] = gen_highpart (SImode, operands[0]);
operands[7] = gen_highpart (SImode, operands[1]);
+#if HOST_BITS_PER_WIDE_INT == 32
if (GET_CODE (operands[2]) == CONST_INT)
{
if (INTVAL (operands[2]) < 0)
operands[8] = const0_rtx;
}
else
+#endif
operands[8] = gen_highpart (SImode, operands[2]);
}")
operands[5] = gen_lowpart (SImode, operands[2]);
operands[6] = gen_highpart (SImode, operands[0]);
operands[7] = gen_highpart (SImode, operands[1]);
+#if HOST_BITS_PER_WIDE_INT == 32
if (GET_CODE (operands[2]) == CONST_INT)
{
if (INTVAL (operands[2]) < 0)
operands[8] = const0_rtx;
}
else
+#endif
operands[8] = gen_highpart (SImode, operands[2]);
}")
""
"
{
- if (arith_4096_operand(operands[2], DImode))
+ if (arith_4096_operand(operands[2], SImode))
{
- emit_insn (gen_rtx_SET (VOIDmode, operands[0],
- gen_rtx_MINUS (SImode, operands[1],
- GEN_INT(-4096))));
+ if (GET_CODE (operands[1]) == CONST_INT)
+ emit_insn (gen_movsi (operands[0],
+ GEN_INT (INTVAL (operands[1]) + 4096)));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_MINUS (SImode, operands[1],
+ GEN_INT(-4096))));
DONE;
}
}")
(compare:CC_NOOV (plus:SI (match_operand:SI 0 "arith_operand" "%r")
(match_operand:SI 1 "arith_operand" "rI"))
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"addcc\\t%0, %1, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
""
"
{
- if (arith_4096_operand(operands[2], DImode))
+ if (arith_4096_operand(operands[2], SImode))
{
emit_insn (gen_rtx_SET (VOIDmode, operands[0],
gen_rtx_PLUS (SImode, operands[1],
(compare:CC_NOOV (minus:SI (match_operand:SI 0 "reg_or_0_operand" "rJ")
(match_operand:SI 1 "arith_operand" "rI"))
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"subcc\\t%r0, %1, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
(clobber (match_scratch:SI 4 "=X,&h"))]
"TARGET_V8PLUS"
"@
- smul %1,%2,%0\;srlx %0,%3,%0
- smul %1,%2,%4\;srlx %4,%3,%0"
+ smul\\t%1, %2, %0\;srlx\\t%0, %3, %0
+ smul\\t%1, %2, %4\;srlx\\t%4, %3, %0"
[(set_attr "length" "2")])
;; The combiner changes TRUNCATE in the previous pattern to SUBREG.
(lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
(sign_extend:DI (match_operand:SI 2 "register_operand" "r")))
(const_int 32))))]
- "TARGET_HARD_MUL32 && ! TARGET_LIVE_G0"
+ "TARGET_HARD_MUL32"
"smul\\t%1, %2, %%g0\\n\\trd\\t%%y, %0"
[(set_attr "length" "2")])
(lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
(match_operand:SI 2 "register_operand" "r"))
(const_int 32))))]
- "TARGET_HARD_MUL32 && ! TARGET_LIVE_G0"
+ "TARGET_HARD_MUL32"
"smul\\t%1, %2, %%g0\\n\\trd\\t%%y, %0"
[(set_attr "length" "2")])
(lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
(zero_extend:DI (match_operand:SI 2 "register_operand" "r")))
(const_int 32))))]
- "TARGET_HARD_MUL32 && ! TARGET_LIVE_G0"
+ "TARGET_HARD_MUL32"
"umul\\t%1, %2, %%g0\\n\\trd\\t%%y, %0"
[(set_attr "length" "2")])
(lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
(match_operand:SI 2 "uns_small_int" ""))
(const_int 32))))]
- "TARGET_HARD_MUL32 && ! TARGET_LIVE_G0"
+ "TARGET_HARD_MUL32"
"umul\\t%1, %2, %%g0\\n\\trd\\t%%y, %0"
[(set_attr "length" "2")])
[(set (match_operand:SI 0 "register_operand" "")
(udiv:SI (match_operand:SI 1 "reg_or_nonsymb_mem_operand" "")
(match_operand:SI 2 "input_operand" "")))]
- "(TARGET_V8 || TARGET_DEPRECATED_V8_INSNS) && ! TARGET_LIVE_G0"
+ "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
"")
(define_insn "udivsi3_sp32"
(match_operand:SI 2 "input_operand" "rI,m,r")))]
"(TARGET_V8
|| TARGET_DEPRECATED_V8_INSNS)
- && TARGET_ARCH32 && ! TARGET_LIVE_G0"
+ && TARGET_ARCH32"
"*
{
output_asm_insn (\"wr\\t%%g0, %%g0, %%y\", operands);
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(udiv:SI (match_dup 1) (match_dup 2)))]
- "(TARGET_V8
- || TARGET_DEPRECATED_V8_INSNS)
- && ! TARGET_LIVE_G0"
+ "TARGET_V8
+ || TARGET_DEPRECATED_V8_INSNS"
"*
{
if (TARGET_V9)
operands[5] = gen_lowpart (SImode, operands[0]);
operands[6] = gen_highpart (SImode, operands[2]);
operands[7] = gen_lowpart (SImode, operands[2]);
+#if HOST_BITS_PER_WIDE_INT == 32
if (GET_CODE (operands[3]) == CONST_INT)
{
if (INTVAL (operands[3]) < 0)
operands[8] = const0_rtx;
}
else
+#endif
operands[8] = gen_highpart (SImode, operands[3]);
operands[9] = gen_lowpart (SImode, operands[3]);
}")
[(match_operand:SI 0 "arith_operand" "%r")
(match_operand:SI 1 "arith_operand" "rI")])
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"%A2cc\\t%0, %1, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
(not:SI (xor:SI (match_operand:SI 0 "reg_or_0_operand" "%rJ")
(match_operand:SI 1 "arith_operand" "rI")))
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"xnorcc\\t%r0, %1, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
[(not:SI (match_operand:SI 0 "arith_operand" "rI"))
(match_operand:SI 1 "reg_or_0_operand" "rJ")])
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"%B2cc\\t%r1, %0, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
[(set (match_operand:DI 0 "register_operand" "=r")
(neg:DI (match_operand:DI 1 "register_operand" "r")))
(clobber (reg:CC 100))]
- "! TARGET_ARCH64
- && ! TARGET_LIVE_G0"
+ "TARGET_ARCH32"
"#"
[(set_attr "type" "unary")
(set_attr "length" "2")])
[(set (match_operand:DI 0 "register_operand" "")
(neg:DI (match_operand:DI 1 "register_operand" "")))
(clobber (reg:CC 100))]
- "! TARGET_ARCH64
- && ! TARGET_LIVE_G0
+ "TARGET_ARCH32
&& reload_completed"
[(parallel [(set (reg:CC_NOOV 100)
(compare:CC_NOOV (minus:SI (const_int 0) (match_dup 5))
[(set_attr "type" "unary")
(set_attr "length" "1")])
-(define_expand "negsi2"
- [(set (match_operand:SI 0 "register_operand" "")
- (neg:SI (match_operand:SI 1 "arith_operand" "")))]
- ""
- "
-{
- if (TARGET_LIVE_G0)
- {
- rtx zero_reg = gen_reg_rtx (SImode);
-
- emit_insn (gen_rtx_SET (VOIDmode, zero_reg, const0_rtx));
- emit_insn (gen_rtx_SET (VOIDmode, operands[0],
- gen_rtx_MINUS (SImode, zero_reg,
- operands[1])));
- DONE;
- }
-}")
-
-(define_insn "*negsi2_not_liveg0"
+(define_insn "negsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
- (neg:SI (match_operand:SI 1 "arith_operand" "rI")))]
- "! TARGET_LIVE_G0"
+ (neg:SI (match_operand:SI 1 "arith_operand" "rI")))]
+ ""
"sub\\t%%g0, %1, %0"
[(set_attr "type" "unary")
(set_attr "length" "1")])
[(set (reg:CC_NOOV 100)
(compare:CC_NOOV (neg:SI (match_operand:SI 0 "arith_operand" "rI"))
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"subcc\\t%%g0, %0, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (match_dup 1)))]
- "! TARGET_LIVE_G0"
+ ""
"subcc\\t%%g0, %1, %0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
[(set_attr "type" "unary,fp")
(set_attr "length" "1")])
-(define_expand "one_cmplsi2"
- [(set (match_operand:SI 0 "register_operand" "")
- (not:SI (match_operand:SI 1 "arith_operand" "")))]
- ""
- "
-{
- if (TARGET_LIVE_G0
- && GET_CODE (operands[1]) == CONST_INT)
- {
- rtx zero_reg = gen_reg_rtx (SImode);
-
- emit_insn (gen_rtx_SET (VOIDmode, zero_reg, const0_rtx));
- emit_insn (gen_rtx_SET (VOIDmode,
- operands[0],
- gen_rtx_NOT (SImode,
- gen_rtx_XOR (SImode,
- zero_reg,
- operands[1]))));
- DONE;
- }
-}")
-
-(define_insn "*one_cmplsi2_not_liveg0"
+(define_insn "one_cmplsi2"
[(set (match_operand:SI 0 "register_operand" "=r,d")
(not:SI (match_operand:SI 1 "arith_operand" "rI,d")))]
- "! TARGET_LIVE_G0"
+ ""
"@
xnor\\t%%g0, %1, %0
fnot1s\\t%1, %0"
[(set_attr "type" "unary,fp")
(set_attr "length" "1,1")])
-(define_insn "*one_cmplsi2_liveg0"
- [(set (match_operand:SI 0 "register_operand" "=r,d")
- (not:SI (match_operand:SI 1 "arith_operand" "r,d")))]
- "TARGET_LIVE_G0"
- "@
- xnor\\t%1, 0, %0
- fnot1s\\t%1, %0"
- [(set_attr "type" "unary,fp")
- (set_attr "length" "1,1")])
-
(define_insn "*cmp_cc_not"
[(set (reg:CC 100)
(compare:CC (not:SI (match_operand:SI 0 "arith_operand" "rI"))
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"xnorcc\\t%%g0, %0, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(not:SI (match_dup 1)))]
- "! TARGET_LIVE_G0"
+ ""
"xnorcc\\t%%g0, %1, %0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
\f
;; Floating point arithmetic instructions.
-(define_insn "addtf3"
+;; TFmode add.  With hard quad this matches *addtf3_hq; otherwise
+;; (64-bit soft quad ABI) call _Qp_add with pointers to TFmode stack
+;; slots for the result and both inputs, copying register operands
+;; through temporaries as needed and copying the result back when
+;; operand 0 is not already a MEM.
+(define_expand "addtf3"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (plus:TF (match_operand:TF 1 "general_operand" "")
+ (match_operand:TF 2 "general_operand" "")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0, slot1, slot2;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
+ }
+ else
+ slot1 = operands[1];
+ if (GET_CODE (operands[2]) != MEM)
+ {
+ slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
+ }
+ else
+ slot2 = operands[2];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_add\"), 0,
+ VOIDmode, 3,
+ XEXP (slot0, 0), Pmode,
+ XEXP (slot1, 0), Pmode,
+ XEXP (slot2, 0), Pmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*addtf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(plus:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "subtf3"
+(define_expand "subtf3"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (minus:TF (match_operand:TF 1 "general_operand" "")
+ (match_operand:TF 2 "general_operand" "")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0, slot1, slot2;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
+ }
+ else
+ slot1 = operands[1];
+ if (GET_CODE (operands[2]) != MEM)
+ {
+ slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
+ }
+ else
+ slot2 = operands[2];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_sub\"), 0,
+ VOIDmode, 3,
+ XEXP (slot0, 0), Pmode,
+ XEXP (slot1, 0), Pmode,
+ XEXP (slot2, 0), Pmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*subtf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(minus:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "multf3"
+(define_expand "multf3"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (mult:TF (match_operand:TF 1 "general_operand" "")
+ (match_operand:TF 2 "general_operand" "")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0, slot1, slot2;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
+ }
+ else
+ slot1 = operands[1];
+ if (GET_CODE (operands[2]) != MEM)
+ {
+ slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
+ }
+ else
+ slot2 = operands[2];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_mul\"), 0,
+ VOIDmode, 3,
+ XEXP (slot0, 0), Pmode,
+ XEXP (slot1, 0), Pmode,
+ XEXP (slot2, 0), Pmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*multf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(mult:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
[(set_attr "type" "fpmul")
(set_attr "length" "1")])
+(define_expand "divtf3"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (div:TF (match_operand:TF 1 "general_operand" "")
+ (match_operand:TF 2 "general_operand" "")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0, slot1, slot2;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
+ }
+ else
+ slot1 = operands[1];
+ if (GET_CODE (operands[2]) != MEM)
+ {
+ slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
+ }
+ else
+ slot2 = operands[2];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_div\"), 0,
+ VOIDmode, 3,
+ XEXP (slot0, 0), Pmode,
+ XEXP (slot1, 0), Pmode,
+ XEXP (slot2, 0), Pmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
;; We don't have timing data for the quad-precision divide.
-(define_insn "divtf3"
+(define_insn "*divtf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(div:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
[(set_attr "type" "fpmove")
(set_attr "length" "1")])
-(define_insn "sqrttf2"
+(define_expand "sqrttf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (sqrt:TF (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0, slot1;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
+ }
+ else
+ slot1 = operands[1];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_sqrt\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ XEXP (slot1, 0), Pmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*sqrttf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(sqrt:TF (match_operand:TF 1 "register_operand" "e")))]
"TARGET_FPU && TARGET_HARD_QUAD"
"*
{
if (GET_CODE (operands[2]) == REG && REGNO (operands[2]) == REGNO (operands[0]))
- return \"mov 1,%L0\;sllx %L0,%2,%L0\;sub %L0,1,%L0\;srlx %L0,32,%H0\";
- return \"mov 1,%H0\;sllx %H0,%2,%L0\;sub %L0,1,%L0\;srlx %L0,32,%H0\";
+ return \"mov\\t1, %L0\;sllx\\t%L0, %2, %L0\;sub\\t%L0, 1, %L0\;srlx\\t%L0, 32, %H0\";
+ return \"mov\\t1, %H0\;sllx\\t%H0, %2, %L0\;sub\\t%L0, 1, %L0\;srlx\\t%L0, 32, %H0\";
}"
[(set_attr "length" "4")])
(compare:CC_NOOV (ashift:SI (match_operand:SI 0 "register_operand" "r")
(const_int 1))
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"addcc\\t%0, %0, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
instead. */
if (! TARGET_V9 && flag_delayed_branch
- && (insn_addresses[INSN_UID (operands[0])]
- == insn_addresses[INSN_UID (insn)]))
+ && (INSN_ADDRESSES (INSN_UID (operands[0]))
+ == INSN_ADDRESSES (INSN_UID (insn))))
return \"b\\t%l0%#\";
else
return TARGET_V9 ? \"ba,pt%*\\t%%xcc, %l0%(\" : \"b%*\\t%l0%(\";
/* Pass constm1 to indicate that it may expect a structure value, but
we don't know what size it is. */
- emit_call_insn (gen_call (operands[0], const0_rtx, NULL, constm1_rtx));
+ emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, constm1_rtx));
for (i = 0; i < XVECLEN (operands[2], 0); i++)
{
DONE;
}")
+;;- tail calls
+(define_expand "sibcall"
+ [(parallel [(call (match_operand 0 "call_operand" "") (const_int 0))
+ (return)])]
+ ""
+ "")
+
+(define_insn "*sibcall_symbolic_sp32"
+ [(call (mem:SI (match_operand:SI 0 "symbolic_operand" "s"))
+ (match_operand 1 "" ""))
+ (return)]
+ "! TARGET_PTR64"
+ "* return output_sibcall(insn, operands[0]);"
+ [(set_attr "type" "sibcall")])
+
+(define_insn "*sibcall_symbolic_sp64"
+ [(call (mem:SI (match_operand:DI 0 "symbolic_operand" "s"))
+ (match_operand 1 "" ""))
+ (return)]
+ "TARGET_PTR64"
+ "* return output_sibcall(insn, operands[0]);"
+ [(set_attr "type" "sibcall")])
+
+(define_expand "sibcall_value"
+ [(parallel [(set (match_operand 0 "register_operand" "=rf")
+ (call (match_operand:SI 1 "" "") (const_int 0)))
+ (return)])]
+ ""
+ "")
+
+(define_insn "*sibcall_value_symbolic_sp32"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand:SI 1 "symbolic_operand" "s"))
+ (match_operand 2 "" "")))
+ (return)]
+ "! TARGET_PTR64"
+ "* return output_sibcall(insn, operands[1]);"
+ [(set_attr "type" "sibcall")])
+
+(define_insn "*sibcall_value_symbolic_sp64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "symbolic_operand" "s"))
+ (match_operand 2 "" "")))
+ (return)]
+ "TARGET_PTR64"
+ "* return output_sibcall(insn, operands[1]);"
+ [(set_attr "type" "sibcall")])
+
+(define_expand "sibcall_epilogue"
+ [(const_int 0)]
+ ""
+ "DONE;")
+
;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
;; all of memory. This blocks insns from being moved across this point.
[(unspec:SI [(match_operand:SI 0 "register_operand" "r")
(match_operand:SI 1 "register_operand" "r")] 1)]
"! TARGET_ARCH64"
- "cmp %1,0\;be,a .+8\;add %0,4,%0"
+ "cmp\\t%1, 0\;be,a\\t.+8\;add\\t%0, 4, %0"
[(set_attr "type" "multi")])
\f
(define_insn "return"
(define_insn "flush"
[(unspec_volatile [(match_operand:SI 0 "memory_operand" "m")] 3)]
""
- "* return TARGET_V9 ? \"flush %f0\" : \"iflush %f0\";"
+ "* return TARGET_V9 ? \"flush\\t%f0\" : \"iflush\\t%f0\";"
[(set_attr "type" "misc")])
(define_insn "flushdi"
[(unspec_volatile [(match_operand:DI 0 "memory_operand" "m")] 3)]
""
- "* return TARGET_V9 ? \"flush %f0\" : \"iflush %f0\";"
+ "* return TARGET_V9 ? \"flush\\t%f0\" : \"iflush\\t%f0\";"
[(set_attr "type" "misc")])
\f
"TARGET_SPARCLITE || TARGET_SPARCLET"
"*
{
- if (TARGET_LIVE_G0)
- output_asm_insn (\"and %%g0,0,%%g0\", operands);
- return \"sub %%g0,%1,%0\;and %0,%1,%0\;scan %0,0,%0\;mov 32,%2\;sub %2,%0,%0\;sra %0,31,%2\;and %2,31,%2\;add %2,%0,%0\";
+ return \"sub\\t%%g0, %1, %0\;and\\t%0, %1, %0\;scan\\t%0, 0, %0\;mov\\t32, %2\;sub\\t%2, %0, %0\;sra\\t%0, 31, %2\;and\\t%2, 31, %2\;add\\t%2, %0, %0\";
}"
[(set_attr "type" "multi")
(set_attr "length" "8")])
; (ffs:DI (match_operand:DI 1 "register_operand" "r")))
; (clobber (match_scratch:DI 2 "=&r"))]
; "TARGET_ARCH64"
-; "neg %1,%2\;xnor %1,%2,%2\;popc %2,%0\;movzr %1,0,%0"
+; "neg\\t%1, %2\;xnor\\t%1, %2, %2\;popc\\t%2, %0\;movzr\\t%1, 0, %0"
; [(set_attr "type" "multi")
; (set_attr "length" "4")])
[(set (match_operand:QI 0 "restore_operand" "")
(match_operand:QI 1 "arith_operand" "rI"))
(return)]
- "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "! TARGET_EPILOGUE"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
[(set (match_operand:HI 0 "restore_operand" "")
(match_operand:HI 1 "arith_operand" "rI"))
(return)]
- "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "! TARGET_EPILOGUE"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
[(set (match_operand:SI 0 "restore_operand" "")
(match_operand:SI 1 "arith_operand" "rI"))
(return)]
- "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "! TARGET_EPILOGUE"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
[(set (match_operand:SF 0 "restore_operand" "=r")
(match_operand:SF 1 "register_operand" "r"))
(return)]
- "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "! TARGET_EPILOGUE"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
}"
[(set_attr "type" "multi")])
+(define_insn "*return_df_no_fpu"
+ [(set (match_operand:DF 0 "restore_operand" "=r")
+ (match_operand:DF 1 "register_operand" "r"))
+ (return)]
+ "! TARGET_EPILOGUE && TARGET_ARCH64"
+ "*
+{
+ if (IN_OR_GLOBAL_P (operands[1]))
+ return \"return\\t%%i7+8\\n\\tmov\\t%Y1, %Y0\";
+ else
+ return \"ret\;restore %%g0, %1, %Y0\";
+}"
+ [(set_attr "type" "multi")])
+
(define_insn "*return_addsi"
[(set (match_operand:SI 0 "restore_operand" "")
(plus:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "arith_operand" "rI")))
(return)]
- "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "! TARGET_EPILOGUE"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
(lo_sum:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "immediate_operand" "in")))
(return)]
- "! TARGET_EPILOGUE && ! TARGET_LIVE_G0 && ! TARGET_CM_MEDMID"
+ "! TARGET_EPILOGUE && ! TARGET_CM_MEDMID"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
(match_operand:DI 2 "immediate_operand" "in")))
(return)]
"TARGET_ARCH64 && ! TARGET_EPILOGUE && ! TARGET_CM_MEDMID"
- "ret\;restore %r1, %%lo(a2), %Y0"
+ "ret\;restore %r1, %%lo(%a2), %Y0"
[(set_attr "type" "multi")])
;; The following pattern is only generated by delayed-branch scheduling,
&& in_same_eh_region (insn, operands[2])
&& in_same_eh_region (insn, ins1)"
"call\\t%a0, %1\\n\\tadd\\t%%o7, (%l2-.-4), %%o7")
+\f
+(define_expand "prologue"
+ [(const_int 1)]
+ "flag_pic && current_function_uses_pic_offset_table"
+ "
+{
+ load_pic_register ();
+ DONE;
+}")
-;; After a nonlocal goto, we need to restore the PIC register, but only
-;; if we need it. So do nothing much here, but we'll check for this in
-;; finalize_pic.
+;; For -mflat -fpic we must explicitly reload the PIC register (%l7);
+;; otherwise %l7 is preserved automatically, since restoring the
+;; function's register window restores it as well.
+(define_expand "exception_receiver"
+ [(const_int 0)]
+ "TARGET_FLAT && flag_pic"
+ "
+{
+ load_pic_register ();
+ DONE;
+}")
-;; Make sure this unspec_volatile number agrees with finalize_pic.
-(define_insn "nonlocal_goto_receiver"
- [(unspec_volatile [(const_int 0)] 5)]
- "flag_pic"
- ""
- [(set_attr "length" "0")])
+;; Likewise
+(define_expand "builtin_setjmp_receiver"
+ [(label_ref (match_operand 0 "" ""))]
+ "TARGET_FLAT && flag_pic"
+ "
+{
+ load_pic_register ();
+ DONE;
+}")
\f
(define_insn "trap"
[(trap_if (const_int 1) (const_int 5))]