;;- Machine description for SPARC chip for GNU C compiler
-;; Copyright (C) 1987, 88, 89, 92-96, 1997 Free Software Foundation, Inc.
+;; Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
+;; 1999, 2000 Free Software Foundation, Inc.
;; Contributed by Michael Tiemann (tiemann@cygnus.com)
;; 64 bit SPARC V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
;; at Cygnus Support.
;; 9 sethh
;; 10 setlm
;; 11 embmedany_sethi, embmedany_brsum
-;; 12 movsf_const_high
;; 13 embmedany_textuhi
;; 14 embmedany_texthi
;; 15 embmedany_textulo
;; 16 embmedany_textlo
-;; 17 movsf_const_lo
;; 18 sethm
;; 19 setlo
;;
;; 2 goto_handler_and_restore
;; 3 goto_handler_and_restore_v9*
;; 4 flush
-;; 5 nonlocal_goto_receiver
;;
;; The upper 32 fp regs on the v9 can't hold SFmode values. To deal with this
;; is a bit of a misnomer as it covers all 64 fp regs. The corresponding
;; constraint letter is 'e'. To avoid any confusion, 'e' is used instead of
;; 'f' for all DF/TFmode values, including those that are specific to the v8.
-;;
-;; -mlive-g0 is *not* supported for TARGET_ARCH64, so we don't bother to
-;; test TARGET_LIVE_G0 if we have TARGET_ARCH64.
;; Attribute for cpu type.
;; These must match the values for enum processor_type in sparc.h.
-(define_attr "cpu" "v7,cypress,v8,supersparc,sparclite,f930,f934,sparclet,tsc701,v9,ultrasparc"
+(define_attr "cpu" "v7,cypress,v8,supersparc,sparclite,f930,f934,hypersparc,sparclite86x,sparclet,tsc701,v9,ultrasparc"
(const (symbol_ref "sparc_cpu_attr")))
;; Attribute for the instruction set.
(cond [(symbol_ref "TARGET_ARCH64") (const_string "arch64bit")]
(const_string "arch32bit"))))
-;; Whether -mlive-g0 is in effect.
-(define_attr "live_g0" "no,yes"
- (const
- (cond [(symbol_ref "TARGET_LIVE_G0") (const_string "yes")]
- (const_string "no"))))
-
;; Insn type. Used to default other attribute values.
;; type "unary" insns have one input operand (1) and one output operand (0)
;; type "call_no_delay_slot" is a call followed by an unimp instruction.
(define_attr "type"
- "move,unary,binary,compare,load,sload,store,ialu,shift,uncond_branch,branch,call,call_no_delay_slot,return,address,imul,fpload,fpstore,fp,fpmove,fpcmove,fpcmp,fpmul,fpdivs,fpdivd,fpsqrt,cmove,multi,misc"
+ "move,unary,binary,compare,load,sload,store,ialu,shift,uncond_branch,branch,call,sibcall,call_no_delay_slot,return,address,imul,fpload,fpstore,fp,fpmove,fpcmove,fpcmp,fpmul,fpdivs,fpdivd,fpsqrts,fpsqrtd,cmove,multi,misc"
(const_string "binary"))
;; Set true if insn uses call-clobbered intermediate register.
;; Attributes for instruction and branch scheduling
(define_attr "in_call_delay" "false,true"
- (cond [(eq_attr "type" "uncond_branch,branch,call,call_no_delay_slot,return,multi")
+ (cond [(eq_attr "type" "uncond_branch,branch,call,sibcall,call_no_delay_slot,return,multi")
(const_string "false")
(eq_attr "type" "load,fpload,store,fpstore")
(if_then_else (eq_attr "length" "1")
(define_delay (eq_attr "type" "call")
[(eq_attr "in_call_delay" "true") (nil) (nil)])
+;; True if this insn may be placed in the delay slot of a sibling (tail)
+;; call; computed by the C function eligible_for_sibcall_delay().
+(define_attr "eligible_for_sibcall_delay" "false,true"
+  (symbol_ref "eligible_for_sibcall_delay(insn)"))
+
+;; Sibling calls get one delay slot, filled only by an eligible insn;
+;; no annulled alternatives.
+(define_delay (eq_attr "type" "sibcall")
+  [(eq_attr "eligible_for_sibcall_delay" "true") (nil) (nil)])
+
(define_attr "leaf_function" "false,true"
- (const (symbol_ref "leaf_function")))
+ (const (symbol_ref "current_function_uses_only_leaf_regs")))
+
+;; Delegates the per-insn return-delay-slot decision to the C function
+;; eligible_for_return_delay(); used by the in_return_delay attribute below.
+(define_attr "eligible_for_return_delay" "false,true"
+  (symbol_ref "eligible_for_return_delay(insn)"))
(define_attr "in_return_delay" "false,true"
(if_then_else (and (and (and (eq_attr "type" "move,load,sload,store,binary,ialu")
(eq_attr "length" "1"))
(eq_attr "leaf_function" "false"))
- (match_insn "eligible_for_return_delay"))
+ (eq_attr "eligible_for_return_delay" "false"))
(const_string "true")
(const_string "false")))
;; because it prevents us from moving back the final store of inner loops.
(define_attr "in_branch_delay" "false,true"
- (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
+ (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,sibcall,call_no_delay_slot,multi")
(eq_attr "length" "1"))
(const_string "true")
(const_string "false")))
(define_attr "in_uncond_branch_delay" "false,true"
- (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
+ (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,sibcall,call_no_delay_slot,multi")
(eq_attr "length" "1"))
(const_string "true")
(const_string "false")))
(define_attr "in_annul_branch_delay" "false,true"
- (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
+ (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,sibcall,call_no_delay_slot,multi")
(eq_attr "length" "1"))
(const_string "true")
(const_string "false")))
(define_function_unit "fp_mds" 1 0
(and (eq_attr "cpu" "cypress")
- (eq_attr "type" "fpsqrt"))
+ (eq_attr "type" "fpsqrts,fpsqrtd"))
63 63)
;; ----- The TMS390Z55 scheduling
(define_function_unit "fp_mds" 1 0
(and (eq_attr "cpu" "supersparc")
- (eq_attr "type" "fpsqrt"))
+ (eq_attr "type" "fpsqrts,fpsqrtd"))
12 10)
(define_function_unit "fp_mds" 1 0
(eq_attr "type" "imul"))
4 4)
+;; ----- hypersparc/sparclite86x scheduling
+;; The Hypersparc can issue 1 - 2 insns per cycle. The dual issue cases are:
+;; L-Ld/St I-Int F-Float B-Branch LI/LF/LB/II/IF/IB/FF/FB
+;; II/FF case is only when loading a 32 bit hi/lo constant
+;; Single issue insns include call, jmpl, u/smul, u/sdiv, lda, sta, fcmp
+;; Memory delivers its result in one cycle to IU
+
+;; Loads (integer, sign-extending and fp) are ready after one cycle.
+(define_function_unit "memory" 1 0
+  (and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
+       (eq_attr "type" "load,sload,fpload"))
+  1 1)
+
+;; Stores complete after two cycles but the unit can accept one per cycle.
+(define_function_unit "memory" 1 0
+  (and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
+       (eq_attr "type" "store,fpstore"))
+  2 1)
+
+;; Branch unit modeled only for sparclite86x.
+(define_function_unit "sparclite86x_branch" 1 0
+  (and (eq_attr "cpu" "sparclite86x")
+       (eq_attr "type" "branch"))
+  1 1)
+
+;; Shift unit modeled only for sparclite86x.  (Integer multiply is modeled
+;; on the fp_mds unit at the end of this section.)
+(define_function_unit "sparclite86x_shift" 1 0
+  (and (eq_attr "cpu" "sparclite86x")
+       (eq_attr "type" "shift"))
+  1 1)
+
+;; Single-cycle fp add/move/compare unit.
+(define_function_unit "fp_alu" 1 0
+  (and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
+       (eq_attr "type" "fp,fpmove,fpcmp"))
+  1 1)
+
+;; fp multiply/divide/sqrt unit: multiply is fully pipelined ...
+(define_function_unit "fp_mds" 1 0
+  (and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
+       (eq_attr "type" "fpmul"))
+  1 1)
+
+;; ... single-precision divide: ready after 8 cycles, busy for 6 ...
+(define_function_unit "fp_mds" 1 0
+  (and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
+       (eq_attr "type" "fpdivs"))
+  8 6)
+
+;; ... double-precision divide: ready after 12 cycles, busy for 10 ...
+(define_function_unit "fp_mds" 1 0
+  (and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
+       (eq_attr "type" "fpdivd"))
+  12 10)
+
+;; ... square root (either precision): ready after 17 cycles, busy for 15.
+(define_function_unit "fp_mds" 1 0
+  (and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
+       (eq_attr "type" "fpsqrts,fpsqrtd"))
+  17 15)
+
+;; Integer multiply also runs on the fp multiply/divide/sqrt unit.
+(define_function_unit "fp_mds" 1 0
+  (and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
+       (eq_attr "type" "imul"))
+  17 15)
+
;; ----- sparclet tsc701 scheduling
;; The tsc701 issues 1 insn per cycle.
;; Results may be written back out of order.
(define_function_unit "ieuN" 2 0
(and (eq_attr "cpu" "ultrasparc")
- (eq_attr "type" "ialu,binary,move,unary,shift,compare,call,call_no_delay_slot,uncond_branch"))
+ (eq_attr "type" "ialu,binary,move,unary,shift,compare,call,sibcall,call_no_delay_slot,uncond_branch"))
1 1)
(define_function_unit "ieu0" 1 0
(define_function_unit "ieu1" 1 0
(and (eq_attr "cpu" "ultrasparc")
- (eq_attr "type" "compare,call,call_no_delay_slot,uncond_branch"))
+ (eq_attr "type" "compare,call,sibcall,call_no_delay_slot,uncond_branch"))
1 1)
(define_function_unit "cti" 1 0
;; Timings; throughput/latency
;; FMOV 1/1 fmov, fabs, fneg
;; FMOVcc 1/2
-;; FADD 1/4 add/sub, format conv, compar
-;; FMUL 1/4
+;; FADD	1/3    add/sub, format conv, compare
+;; FMUL 1/3
;; FDIVs 12/12
;; FDIVd 22/22
;; FSQRTs 12/12
;; FSQRTd 22/22
;; FCMP takes 1 cycle to branch, 2 cycles to conditional move.
;;
+;; FDIV{s,d}/FSQRT{s,d} are given their own unit since they only
+;; use the FPM multiplier for final rounding 3 cycles before the
+;; end of their latency and we have no real way to model that.
+;;
;; ??? This is really bogus because the timings really depend upon
;; who uses the result. We should record who the user is with
;; more descriptive 'type' attribute names and account for these
(define_function_unit "fadd" 1 0
(and (eq_attr "cpu" "ultrasparc")
(eq_attr "type" "fp"))
- 4 1)
+ 3 1)
(define_function_unit "fadd" 1 0
(and (eq_attr "cpu" "ultrasparc")
(define_function_unit "fmul" 1 0
(and (eq_attr "cpu" "ultrasparc")
(eq_attr "type" "fpmul"))
- 4 1)
+ 3 1)
(define_function_unit "fadd" 1 0
(and (eq_attr "cpu" "ultrasparc")
(eq_attr "type" "fpcmove"))
2 1)
-(define_function_unit "fmul" 1 0
+(define_function_unit "fdiv" 1 0
(and (eq_attr "cpu" "ultrasparc")
(eq_attr "type" "fpdivs"))
12 12)
-(define_function_unit "fmul" 1 0
+(define_function_unit "fdiv" 1 0
(and (eq_attr "cpu" "ultrasparc")
(eq_attr "type" "fpdivd"))
22 22)
-(define_function_unit "fmul" 1 0
+(define_function_unit "fdiv" 1 0
(and (eq_attr "cpu" "ultrasparc")
- (eq_attr "type" "fpsqrt"))
+ (eq_attr "type" "fpsqrts"))
12 12)
+
+;; Double-precision square root: 22/22 per the timing table above; modeled
+;; on the separate "fdiv" unit (see the FDIV/FSQRT note above).
+(define_function_unit "fdiv" 1 0
+  (and (eq_attr "cpu" "ultrasparc")
+       (eq_attr "type" "fpsqrtd"))
+  22 22)
\f
;; Compare instructions.
;; This controls RTL generation and register allocation.
(parallel [(set (match_operand:SI 0 "register_operand" "")
(eq:SI (match_dup 3) (const_int 0)))
(clobber (reg:CC 100))])]
- "! TARGET_LIVE_G0"
+ ""
"{ operands[3] = gen_reg_rtx (SImode); }")
(define_expand "seqdi_special"
(parallel [(set (match_operand:SI 0 "register_operand" "")
(ne:SI (match_dup 3) (const_int 0)))
(clobber (reg:CC 100))])]
- "! TARGET_LIVE_G0"
+ ""
"{ operands[3] = gen_reg_rtx (SImode); }")
(define_expand "snedi_special"
(define_expand "seq"
[(set (match_operand:SI 0 "intreg_operand" "")
(eq:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
if (GET_MODE (sparc_compare_op0) == SImode)
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
- emit_insn (gen_sne (operands[0]));
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
+ emit_jump_insn (gen_sne (operands[0]));
DONE;
- }
+ }
else if (TARGET_V9)
{
if (gen_v9_scc (EQ, operands))
(define_expand "sne"
[(set (match_operand:SI 0 "intreg_operand" "")
(ne:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
if (GET_MODE (sparc_compare_op0) == SImode)
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
- emit_insn (gen_sne (operands[0]));
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
+ emit_jump_insn (gen_sne (operands[0]));
DONE;
- }
+ }
else if (TARGET_V9)
{
if (gen_v9_scc (NE, operands))
(define_expand "sgt"
[(set (match_operand:SI 0 "intreg_operand" "")
(gt:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
- emit_insn (gen_sne (operands[0]));
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
+ emit_jump_insn (gen_sne (operands[0]));
DONE;
}
else if (TARGET_V9)
(define_expand "slt"
[(set (match_operand:SI 0 "intreg_operand" "")
(lt:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
- emit_insn (gen_sne (operands[0]));
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
+ emit_jump_insn (gen_sne (operands[0]));
DONE;
}
else if (TARGET_V9)
(define_expand "sge"
[(set (match_operand:SI 0 "intreg_operand" "")
(ge:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
- emit_insn (gen_sne (operands[0]));
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
+ emit_jump_insn (gen_sne (operands[0]));
DONE;
}
else if (TARGET_V9)
(define_expand "sle"
[(set (match_operand:SI 0 "intreg_operand" "")
(le:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
- emit_insn (gen_sne (operands[0]));
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
+ emit_jump_insn (gen_sne (operands[0]));
DONE;
}
else if (TARGET_V9)
(define_expand "sgtu"
[(set (match_operand:SI 0 "intreg_operand" "")
(gtu:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
if (! TARGET_V9)
(define_expand "sltu"
[(set (match_operand:SI 0 "intreg_operand" "")
(ltu:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
if (TARGET_V9)
(define_expand "sgeu"
[(set (match_operand:SI 0 "intreg_operand" "")
(geu:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
if (TARGET_V9)
(define_expand "sleu"
[(set (match_operand:SI 0 "intreg_operand" "")
(leu:SI (match_dup 1) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"
{
if (! TARGET_V9)
(ne:SI (match_operand:SI 1 "register_operand" "r")
(const_int 0)))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
"#"
[(set_attr "length" "2")])
(neg:SI (ne:SI (match_operand:SI 1 "register_operand" "r")
(const_int 0))))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
"#"
[(set_attr "length" "2")])
[(set (match_operand:DI 0 "register_operand" "")
(ne:DI (match_operand:DI 1 "register_operand" "")
(const_int 0)))]
- "TARGET_ARCH64"
+ "TARGET_ARCH64
+ && ! reg_overlap_mentioned_p (operands[1], operands[0])"
[(set (match_dup 0) (const_int 0))
(set (match_dup 0) (if_then_else:DI (ne:DI (match_dup 1)
(const_int 0))
[(set (match_operand:DI 0 "register_operand" "")
(neg:DI (ne:DI (match_operand:DI 1 "register_operand" "")
(const_int 0))))]
- "TARGET_ARCH64"
+ "TARGET_ARCH64
+ && ! reg_overlap_mentioned_p (operands[1], operands[0])"
[(set (match_dup 0) (const_int 0))
(set (match_dup 0) (if_then_else:DI (ne:DI (match_dup 1)
(const_int 0))
[(set (match_operand:SI 0 "register_operand" "")
(ne:SI (match_operand:DI 1 "register_operand" "")
(const_int 0)))]
- "TARGET_ARCH64"
+ "TARGET_ARCH64
+ && ! reg_overlap_mentioned_p (operands[1], operands[0])"
[(set (match_dup 0) (const_int 0))
(set (match_dup 0) (if_then_else:SI (ne:DI (match_dup 1)
(const_int 0))
(eq:SI (match_operand:SI 1 "register_operand" "r")
(const_int 0)))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
"#"
[(set_attr "length" "2")])
(neg:SI (eq:SI (match_operand:SI 1 "register_operand" "r")
(const_int 0))))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
"#"
[(set_attr "length" "2")])
[(set (match_operand:DI 0 "register_operand" "")
(eq:DI (match_operand:DI 1 "register_operand" "")
(const_int 0)))]
- "TARGET_ARCH64"
+ "TARGET_ARCH64
+ && ! reg_overlap_mentioned_p (operands[1], operands[0])"
[(set (match_dup 0) (const_int 0))
(set (match_dup 0) (if_then_else:DI (eq:DI (match_dup 1)
(const_int 0))
[(set (match_operand:DI 0 "register_operand" "")
(neg:DI (eq:DI (match_operand:DI 1 "register_operand" "")
(const_int 0))))]
- "TARGET_ARCH64"
+ "TARGET_ARCH64
+ && ! reg_overlap_mentioned_p (operands[1], operands[0])"
[(set (match_dup 0) (const_int 0))
(set (match_dup 0) (if_then_else:DI (eq:DI (match_dup 1)
(const_int 0))
[(set (match_operand:SI 0 "register_operand" "")
(eq:SI (match_operand:DI 1 "register_operand" "")
(const_int 0)))]
- "TARGET_ARCH64"
+ "TARGET_ARCH64
+ && ! reg_overlap_mentioned_p (operands[1], operands[0])"
[(set (match_dup 0) (const_int 0))
(set (match_dup 0) (if_then_else:SI (eq:DI (match_dup 1)
(const_int 0))
(const_int 0))
(match_operand:SI 2 "register_operand" "r")))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
"#"
[(set_attr "length" "2")])
(const_int 0))
(match_operand:SI 2 "register_operand" "")))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
[(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
(const_int 0)))
(set (match_dup 0) (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
(ne:SI (match_operand:SI 1 "register_operand" "r")
(const_int 0))))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
"#"
[(set_attr "length" "2")])
(ne:SI (match_operand:SI 1 "register_operand" "")
(const_int 0))))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
[(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
(const_int 0)))
(set (match_dup 0) (minus:SI (match_dup 2)
(const_int 0))
(match_operand:SI 2 "register_operand" "r")))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
"#"
[(set_attr "length" "2")])
(const_int 0))
(match_operand:SI 2 "register_operand" "")))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
[(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
(const_int 0)))
(set (match_dup 0) (plus:SI (geu:SI (reg:CC 100) (const_int 0))
(eq:SI (match_operand:SI 1 "register_operand" "r")
(const_int 0))))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
"#"
[(set_attr "length" "2")])
(eq:SI (match_operand:SI 1 "register_operand" "")
(const_int 0))))
(clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
+ ""
[(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
(const_int 0)))
(set (match_dup 0) (minus:SI (match_dup 2)
(define_insn "*sltu_insn"
[(set (match_operand:SI 0 "register_operand" "=r")
(ltu:SI (reg:CC 100) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"addx\\t%%g0, 0, %0"
[(set_attr "type" "misc")
(set_attr "length" "1")])
(define_insn "*neg_sltu_insn"
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (ltu:SI (reg:CC 100) (const_int 0))))]
- "! TARGET_LIVE_G0"
+ ""
"subx\\t%%g0, 0, %0"
[(set_attr "type" "misc")
(set_attr "length" "1")])
[(set (match_operand:SI 0 "register_operand" "=r")
(minus:SI (neg:SI (ltu:SI (reg:CC 100) (const_int 0)))
(match_operand:SI 1 "arith_operand" "rI")))]
- "! TARGET_LIVE_G0"
+ ""
"subx\\t%%g0, %1, %0"
[(set_attr "type" "misc")
(set_attr "length" "1")])
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
(match_operand:SI 1 "arith_operand" "rI"))))]
- "! TARGET_LIVE_G0"
+ ""
"subx\\t%%g0, %1, %0"
[(set_attr "type" "misc")
(set_attr "length" "1")])
(define_insn "*sgeu_insn"
[(set (match_operand:SI 0 "register_operand" "=r")
(geu:SI (reg:CC 100) (const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"subx\\t%%g0, -1, %0"
[(set_attr "type" "misc")
(set_attr "length" "1")])
(define_insn "*neg_sgeu_insn"
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (geu:SI (reg:CC 100) (const_int 0))))]
- "! TARGET_LIVE_G0"
+ ""
"addx\\t%%g0, -1, %0"
[(set_attr "type" "misc")
(set_attr "length" "1")])
[(set (match_operand:SI 0 "register_operand" "=r")
(plus:SI (ltu:SI (reg:CC 100) (const_int 0))
(match_operand:SI 1 "arith_operand" "rI")))]
- "! TARGET_LIVE_G0"
+ ""
"addx\\t%%g0, %1, %0"
[(set_attr "type" "misc")
(set_attr "length" "1")])
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
emit_jump_insn (gen_bne (operands[0]));
DONE;
- }
+ }
operands[1] = gen_compare_reg (EQ, sparc_compare_op0, sparc_compare_op1);
}")
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
emit_jump_insn (gen_bne (operands[0]));
DONE;
- }
+ }
operands[1] = gen_compare_reg (NE, sparc_compare_op0, sparc_compare_op1);
}")
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
emit_jump_insn (gen_bne (operands[0]));
DONE;
- }
+ }
operands[1] = gen_compare_reg (GT, sparc_compare_op0, sparc_compare_op1);
}")
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
emit_jump_insn (gen_bne (operands[0]));
DONE;
- }
+ }
operands[1] = gen_compare_reg (LT, sparc_compare_op0, sparc_compare_op1);
}")
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
emit_jump_insn (gen_bne (operands[0]));
DONE;
- }
+ }
operands[1] = gen_compare_reg (GE, sparc_compare_op0, sparc_compare_op1);
}")
}
else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
{
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
+ sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
emit_jump_insn (gen_bne (operands[0]));
DONE;
- }
+ }
operands[1] = gen_compare_reg (LE, sparc_compare_op0, sparc_compare_op1);
}")
"
{ operands[1] = gen_compare_reg (LEU, sparc_compare_op0, sparc_compare_op1);
}")
+
+;; Branches on IEEE unordered (NaN-aware) comparisons.  Each expander sets
+;; up the compare with gen_compare_reg and lets the generic conditional
+;; branch patterns match.  For TFmode without hardware quad support, the
+;; comparison is done by a library call via sparc_emit_float_lib_cmp, and
+;; the branch emitted (beq/bne/bgt) tests the library result accordingly.
+(define_expand "bunordered"
+  [(set (pc)
+    (if_then_else (unordered (match_dup 1) (const_int 0))
+        (label_ref (match_operand 0 "" ""))
+        (pc)))]
+  ""
+  "
+{
+  if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+    {
+      sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1,
+                                UNORDERED);
+      emit_jump_insn (gen_beq (operands[0]));
+      DONE;
+    }
+  operands[1] = gen_compare_reg (UNORDERED, sparc_compare_op0,
+                                 sparc_compare_op1);
+}")
+
+(define_expand "bordered"
+  [(set (pc)
+    (if_then_else (ordered (match_dup 1) (const_int 0))
+        (label_ref (match_operand 0 "" ""))
+        (pc)))]
+  ""
+  "
+{
+  if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+    {
+      sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, ORDERED);
+      emit_jump_insn (gen_bne (operands[0]));
+      DONE;
+    }
+  operands[1] = gen_compare_reg (ORDERED, sparc_compare_op0,
+                                 sparc_compare_op1);
+}")
+
+(define_expand "bungt"
+  [(set (pc)
+    (if_then_else (ungt (match_dup 1) (const_int 0))
+        (label_ref (match_operand 0 "" ""))
+        (pc)))]
+  ""
+  "
+{
+  if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+    {
+      sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNGT);
+      emit_jump_insn (gen_bgt (operands[0]));
+      DONE;
+    }
+  operands[1] = gen_compare_reg (UNGT, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "bunlt"
+  [(set (pc)
+    (if_then_else (unlt (match_dup 1) (const_int 0))
+        (label_ref (match_operand 0 "" ""))
+        (pc)))]
+  ""
+  "
+{
+  if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+    {
+      sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNLT);
+      emit_jump_insn (gen_bne (operands[0]));
+      DONE;
+    }
+  operands[1] = gen_compare_reg (UNLT, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "buneq"
+  [(set (pc)
+    (if_then_else (uneq (match_dup 1) (const_int 0))
+        (label_ref (match_operand 0 "" ""))
+        (pc)))]
+  ""
+  "
+{
+  if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+    {
+      sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNEQ);
+      emit_jump_insn (gen_beq (operands[0]));
+      DONE;
+    }
+  operands[1] = gen_compare_reg (UNEQ, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "bunge"
+  [(set (pc)
+    (if_then_else (unge (match_dup 1) (const_int 0))
+        (label_ref (match_operand 0 "" ""))
+        (pc)))]
+  ""
+  "
+{
+  if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+    {
+      sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNGE);
+      emit_jump_insn (gen_bne (operands[0]));
+      DONE;
+    }
+  operands[1] = gen_compare_reg (UNGE, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "bunle"
+  [(set (pc)
+    (if_then_else (unle (match_dup 1) (const_int 0))
+        (label_ref (match_operand 0 "" ""))
+        (pc)))]
+  ""
+  "
+{
+  if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+    {
+      sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, UNLE);
+      emit_jump_insn (gen_bne (operands[0]));
+      DONE;
+    }
+  operands[1] = gen_compare_reg (UNLE, sparc_compare_op0, sparc_compare_op1);
+}")
+
+;; LTGT: less-than or greater-than, i.e. ordered and not equal.
+(define_expand "bltgt"
+  [(set (pc)
+    (if_then_else (ltgt (match_dup 1) (const_int 0))
+        (label_ref (match_operand 0 "" ""))
+        (pc)))]
+  ""
+  "
+{
+  if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+    {
+      sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LTGT);
+      emit_jump_insn (gen_bne (operands[0]));
+      DONE;
+    }
+  operands[1] = gen_compare_reg (LTGT, sparc_compare_op0, sparc_compare_op1);
+}")
\f
;; Now match both normal and inverted jump.
/* Handle sets of MEM first. */
if (GET_CODE (operands[0]) == MEM)
{
- /* This checks TARGET_LIVE_G0 for us. */
if (reg_or_0_operand (operands[1], QImode))
goto movqi_is_ok;
}")
(define_insn "*movqi_insn"
- [(set (match_operand:QI 0 "general_operand" "=r,r,m")
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m")
(match_operand:QI 1 "input_operand" "rI,m,rJ"))]
"(register_operand (operands[0], QImode)
|| reg_or_0_operand (operands[1], QImode))"
/* Handle sets of MEM first. */
if (GET_CODE (operands[0]) == MEM)
{
- /* This checks TARGET_LIVE_G0 for us. */
if (reg_or_0_operand (operands[1], HImode))
goto movhi_is_ok;
(set_attr "length" "1")])
(define_insn "*movhi_insn"
- [(set (match_operand:HI 0 "general_operand" "=r,r,r,m")
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m")
(match_operand:HI 1 "input_operand" "rI,K,m,rJ"))]
"(register_operand (operands[0], HImode)
|| reg_or_0_operand (operands[1], HImode))"
/* Handle sets of MEM first. */
if (GET_CODE (operands[0]) == MEM)
{
- /* This checks TARGET_LIVE_G0 for us. */
if (reg_or_0_operand (operands[1], SImode))
goto movsi_is_ok;
;
}")
-;; Special LIVE_G0 pattern to obtain zero in a register.
-(define_insn "*movsi_zero_liveg0"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (match_operand:SI 1 "zero_operand" "J"))]
- "TARGET_LIVE_G0"
- "and\\t%0, 0, %0"
- [(set_attr "type" "binary")
- (set_attr "length" "1")])
-
;; This is needed to show CSE exactly which bits are set
;; in a 64-bit register by sethi instructions.
(define_insn "*movsi_const64_special"
(set_attr "length" "1")])
(define_insn "*movsi_insn"
- [(set (match_operand:SI 0 "general_operand" "=r,f,r,r,r,f,m,m,d")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,f,r,r,r,f,m,m,d")
(match_operand:SI 1 "input_operand" "rI,!f,K,J,m,!m,rJ,!f,J"))]
"(register_operand (operands[0], SImode)
|| reg_or_0_operand (operands[1], SImode))"
{
current_function_uses_pic_offset_table = 1;
operands[2] = gen_rtx_SYMBOL_REF (Pmode, \"_GLOBAL_OFFSET_TABLE_\");
- operands[3] = gen_reg_rtx (SImode);
- operands[4] = gen_reg_rtx (SImode);
+ if (no_new_pseudos)
+ {
+ operands[3] = operands[0];
+ operands[4] = operands[0];
+ }
+ else
+ {
+ operands[3] = gen_reg_rtx (SImode);
+ operands[4] = gen_reg_rtx (SImode);
+ }
operands[5] = pic_offset_table_rtx;
}")
{
/* Where possible, convert CONST_DOUBLE into a CONST_INT. */
if (GET_CODE (operands[1]) == CONST_DOUBLE
-#if HOST_BITS_PER_WIDE_INT != 64
+#if HOST_BITS_PER_WIDE_INT == 32
&& ((CONST_DOUBLE_HIGH (operands[1]) == 0
&& (CONST_DOUBLE_LOW (operands[1]) & 0x80000000) == 0)
- || (CONST_DOUBLE_HIGH (operands[1]) == 0xffffffff
+ || (CONST_DOUBLE_HIGH (operands[1]) == (HOST_WIDE_INT) 0xffffffff
&& (CONST_DOUBLE_LOW (operands[1]) & 0x80000000) != 0))
#endif
)
}
}
- if (GET_CODE (operands[1]) == LABEL_REF)
- {
- if (! TARGET_ARCH64)
- abort ();
- /* ??? revisit this... */
- emit_insn (gen_move_label_di (operands[0], operands[1]));
- DONE;
- }
-
if (flag_pic)
{
if (CONSTANT_P (operands[1])
&& pic_address_needs_scratch (operands[1]))
operands[1] = legitimize_pic_address (operands[1], DImode, 0);
+ if (GET_CODE (operands[1]) == LABEL_REF)
+ {
+ if (! TARGET_ARCH64)
+ abort ();
+ emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
+ DONE;
+ }
+
if (symbolic_operand (operands[1], DImode))
{
operands[1] = legitimize_pic_address (operands[1],
;; (reg:DI 2 %g2))
;;
(define_insn "*movdi_insn_sp32"
- [(set (match_operand:DI 0 "general_operand" "=T,U,o,r,r,r,?T,?f,?f,?o,?f")
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=T,U,o,r,r,r,?T,?f,?f,?o,?f")
(match_operand:DI 1 "input_operand" "U,T,r,o,i,r,f,T,o,f,f"))]
"! TARGET_ARCH64 &&
(register_operand (operands[0], DImode)
[(set_attr "type" "move")
(set_attr "length" "1")])
-(define_insn "*movdi_insn_sp64"
- [(set (match_operand:DI 0 "general_operand" "=r,r,r,r,m,?e,?e,?m,b")
+;; DImode register/memory moves for 64-bit SPARC without VIS.
+;; Alternatives: reg<-reg/simm13 (mov), reg<-sethi-able constant,
+;; reg<-0 (clr), integer load/store (ldx/stx), and FP-register
+;; copies/loads/stores (fmovd/ldd/std).  The store uses %r1 with the
+;; rJ constraint -- presumably the r modifier prints %g0 for a
+;; constant-zero source; confirm against sparc.c's operand printer.
+(define_insn "*movdi_insn_sp64_novis"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,r,m,?e,?e,?m")
+ (match_operand:DI 1 "input_operand" "rI,K,J,m,rJ,e,m,e"))]
+ "TARGET_ARCH64 && ! TARGET_VIS &&
+ (register_operand (operands[0], DImode)
+ || reg_or_0_operand (operands[1], DImode))"
+ "@
+ mov\\t%1, %0
+ sethi\\t%%hi(%a1), %0
+ clr\\t%0
+ ldx\\t%1, %0
+ stx\\t%r1, %0
+ fmovd\\t%1, %0
+ ldd\\t%1, %0
+ std\\t%1, %0"
+ [(set_attr "type" "move,move,move,load,store,fpmove,fpload,fpstore")
+ (set_attr "length" "1")])
+
+(define_insn "*movdi_insn_sp64_vis"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,r,m,?e,?e,?m,b")
(match_operand:DI 1 "input_operand" "rI,K,J,m,rJ,e,m,e,J"))]
- "TARGET_ARCH64 &&
+ "TARGET_ARCH64 && TARGET_VIS &&
(register_operand (operands[0], DImode)
|| reg_or_0_operand (operands[1], DImode))"
"@
[(set_attr "type" "move,move,move,load,store,fpmove,fpload,fpstore,fpmove")
(set_attr "length" "1")])
-;; ??? revisit this...
-(define_insn "move_label_di"
- [(set (match_operand:DI 0 "register_operand" "=r")
- ; This was previously (label_ref:DI (match_operand 1 "" "")) but that
- ; loses the volatil and other flags of the original label_ref.
- (match_operand:DI 1 "label_ref_operand" ""))
- (set (reg:DI 15) (pc))]
- "TARGET_ARCH64"
- "*
+(define_expand "movdi_pic_label_ref"
+ [(set (match_dup 3) (high:DI
+ (unspec:DI [(match_operand:DI 1 "label_ref_operand" "")
+ (match_dup 2)] 5)))
+ (set (match_dup 4) (lo_sum:DI (match_dup 3)
+ (unspec:DI [(match_dup 1) (match_dup 2)] 5)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_dup 5) (match_dup 4)))]
+ "TARGET_ARCH64 && flag_pic"
+ "
{
- return \"\\n1:\\trd\\t%%pc, %%o7\\n\\tsethi\\t%%hi(%l1-1b), %0\\n\\tadd\\t%0, %%lo(%l1-1b), %0\\n\\tsra\\t%0, 0, %0\\n\\tadd\\t%0, %%o7, %0\";
-}"
- [(set_attr "type" "multi")
- (set_attr "length" "5")])
-
-;; Sparc-v9 code model support insns. See sparc_emit_set_symbolic_const64
-;; in sparc.c to see what is going on here... PIC stuff comes first.
+ current_function_uses_pic_offset_table = 1;
+ operands[2] = gen_rtx_SYMBOL_REF (Pmode, \"_GLOBAL_OFFSET_TABLE_\");
+ if (no_new_pseudos)
+ {
+ operands[3] = operands[0];
+ operands[4] = operands[0];
+ }
+ else
+ {
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (DImode);
+ }
+ operands[5] = pic_offset_table_rtx;
+}")
-(define_insn "*pic_lo_sum_di"
+(define_insn "*movdi_high_pic_label_ref"
[(set (match_operand:DI 0 "register_operand" "=r")
- (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
- (unspec:DI [(match_operand:DI 2 "immediate_operand" "in")] 0)))]
+ (high:DI
+ (unspec:DI [(match_operand:DI 1 "label_ref_operand" "")
+ (match_operand:DI 2 "" "")] 5)))]
"TARGET_ARCH64 && flag_pic"
- "or\\t%1, %%lo(%a2), %0"
+ "sethi\\t%%hi(%a2-(%a1-.)), %0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+;; Low half of the PC-relative label expression built by
+;; movdi_pic_label_ref (unspec 5): or in %lo(op3 - (label - .)).
+(define_insn "*movdi_lo_sum_pic_label_ref"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (unspec:DI [(match_operand:DI 2 "label_ref_operand" "")
+ (match_operand:DI 3 "" "")] 5)))]
+ "TARGET_ARCH64 && flag_pic"
+ "or\\t%1, %%lo(%a3-(%a2-.)), %0"
[(set_attr "type" "ialu")
(set_attr "length" "1")])
-(define_insn "*pic_sethi_di"
+;; Sparc-v9 code model support insns. See sparc_emit_set_symbolic_const64
+;; in sparc.c to see what is going on here... PIC stuff comes first.
+
+;; PIC DImode address formation, low part: or the %lo() bits of the
+;; unspec-wrapped immediate (unspec 0) into the base register.
+(define_insn "movdi_lo_sum_pic"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (unspec:DI [(match_operand:DI 2 "immediate_operand" "in")] 0)))]
+ "TARGET_ARCH64 && flag_pic"
+ "or\\t%1, %%lo(%a2), %0"
+ [(set_attr "type" "ialu")
+ (set_attr "length" "1")])
+
+(define_insn "movdi_high_pic"
[(set (match_operand:DI 0 "register_operand" "=r")
(high:DI (unspec:DI [(match_operand 1 "" "")] 0)))]
"TARGET_ARCH64 && flag_pic && check_pic (1)"
[(set (match_operand:DI 0 "register_operand" "=r")
(high:DI (match_operand:DI 1 "sp64_medium_pic_operand" "")))]
"(TARGET_CM_MEDLOW || TARGET_CM_EMBMEDANY) && check_pic (1)"
- "sethi\\t%%lo(%a1), %0"
+ "sethi\\t%%hi(%a1), %0"
[(set_attr "type" "move")
(set_attr "length" "1")])
[(clobber (const_int 0))]
"
{
+#if HOST_BITS_PER_WIDE_INT == 32
emit_insn (gen_movsi (gen_highpart (SImode, operands[0]),
(INTVAL (operands[1]) < 0) ?
constm1_rtx :
const0_rtx));
emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
operands[1]));
+#else
+ unsigned int low, high;
+
+ low = INTVAL (operands[1]) & 0xffffffff;
+ high = (INTVAL (operands[1]) >> 32) & 0xffffffff;
+ emit_insn (gen_movsi (gen_highpart (SImode, operands[0]), GEN_INT (high)));
+
+ /* Slick... but this trick loses if this subreg constant part
+ can be done in one insn. */
+ if (low == high && (low & 0x3ff) != 0 && low + 0x1000 >= 0x2000)
+ emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
+ gen_highpart (SImode, operands[0])));
+ else
+ emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]), GEN_INT (low)));
+#endif
DONE;
}")
\f
;; Floating point move insns
-(define_insn "*clear_sf"
- [(set (match_operand:SF 0 "general_operand" "=f")
- (match_operand:SF 1 "" ""))]
- "TARGET_VIS
- && GET_CODE (operands[1]) == CONST_DOUBLE
- && GET_CODE (operands[0]) == REG
- && fp_zero_operand (operands[1])"
- "fzeros\\t%0"
- [(set_attr "type" "fpmove")
+;; SFmode moves without VIS.  A CONST_DOUBLE source headed for an
+;; integer register (alternatives 2-4) is first converted to its IEEE
+;; single-precision bit image so it can be materialized with
+;; sethi/mov; alternative 4 (the "S" source constraint) emits "#" and
+;; is split later into a sethi/or pair.  Stores use %r1 so a
+;; constant-zero source can presumably be emitted as %g0 -- confirm
+;; against the operand-print code in sparc.c.
+(define_insn "*movsf_insn_novis"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,*r,*r,*r,*r,*r,f,m,m")
+ (match_operand:SF 1 "input_operand" "f,G,Q,*rR,S,m,m,f,*rG"))]
+ "(TARGET_FPU && ! TARGET_VIS)
+ && (register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)
+ || fp_zero_operand (operands[1], SFmode))"
+ "*
+{
+ if (GET_CODE (operands[1]) == CONST_DOUBLE
+ && (which_alternative == 2
+ || which_alternative == 3
+ || which_alternative == 4))
+ {
+ REAL_VALUE_TYPE r;
+ long i;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
+ REAL_VALUE_TO_TARGET_SINGLE (r, i);
+ operands[1] = GEN_INT (i);
+ }
+
+ switch (which_alternative)
+ {
+ case 0:
+ return \"fmovs\\t%1, %0\";
+ case 1:
+ return \"clr\\t%0\";
+ case 2:
+ return \"sethi\\t%%hi(%a1), %0\";
+ case 3:
+ return \"mov\\t%1, %0\";
+ case 4:
+ return \"#\";
+ case 5:
+ case 6:
+ return \"ld\\t%1, %0\";
+ case 7:
+ case 8:
+ return \"st\\t%r1, %0\";
+ default:
+ abort();
+ }
+}"
+ [(set_attr "type" "fpmove,move,move,move,*,load,fpload,fpstore,store")
(set_attr "length" "1")])
-(define_insn "*movsf_const_intreg"
- [(set (match_operand:SF 0 "general_operand" "=f,r")
- (match_operand:SF 1 "" "m,F"))]
- "TARGET_FPU
- && GET_CODE (operands[1]) == CONST_DOUBLE
- && GET_CODE (operands[0]) == REG"
+(define_insn "*movsf_insn_vis"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,*r,*r,*r,*r,*r,f,m,m")
+ (match_operand:SF 1 "input_operand" "f,G,G,Q,*rR,S,m,m,f,*rG"))]
+ "(TARGET_FPU && TARGET_VIS)
+ && (register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)
+ || fp_zero_operand (operands[1], SFmode))"
"*
{
- REAL_VALUE_TYPE r;
- long i;
+ if (GET_CODE (operands[1]) == CONST_DOUBLE
+ && (which_alternative == 3
+ || which_alternative == 4
+ || which_alternative == 5))
+ {
+ REAL_VALUE_TYPE r;
+ long i;
- if (which_alternative == 0)
- return \"ld\\t%1, %0\";
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
+ REAL_VALUE_TO_TARGET_SINGLE (r, i);
+ operands[1] = GEN_INT (i);
+ }
- REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
- REAL_VALUE_TO_TARGET_SINGLE (r, i);
- if (SPARC_SIMM13_P (i) || SPARC_SETHI_P (i))
+ switch (which_alternative)
{
- operands[1] = GEN_INT (i);
- if (SPARC_SIMM13_P (INTVAL (operands[1])))
- return \"mov\\t%1, %0\";
- else if (SPARC_SETHI_P (INTVAL (operands[1])))
- return \"sethi\\t%%hi(%a1), %0\";
+ case 0:
+ return \"fmovs\\t%1, %0\";
+ case 1:
+ return \"fzeros\\t%0\";
+ case 2:
+ return \"clr\\t%0\";
+ case 3:
+ return \"sethi\\t%%hi(%a1), %0\";
+ case 4:
+ return \"mov\\t%1, %0\";
+ case 5:
+ return \"#\";
+ case 6:
+ case 7:
+ return \"ld\\t%1, %0\";
+ case 8:
+ case 9:
+ return \"st\\t%r1, %0\";
+ default:
+ abort();
}
- else
- return \"#\";
}"
- [(set_attr "type" "move")
+ [(set_attr "type" "fpmove,fpmove,move,move,move,*,load,fpload,fpstore,store")
(set_attr "length" "1")])
-;; There isn't much I can do about this, if I change the
-;; mode then flow info gets really confused because the
-;; destination no longer looks the same. Ho hum...
-(define_insn "*movsf_const_high"
- [(set (match_operand:SF 0 "register_operand" "=r")
- (unspec:SF [(match_operand 1 "const_int_operand" "")] 12))]
- ""
- "sethi\\t%%hi(%a1), %0"
- [(set_attr "type" "move")
+;; Low part of an SFmode constant built in an integer register:
+;; convert the CONST_DOUBLE to its single-precision bit pattern and
+;; or in the %lo() bits.  Complements *movsf_high in the two-insn
+;; sethi/or sequence; only used when fp_high_losum_p accepts the
+;; constant.
+(define_insn "*movsf_lo_sum"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (lo_sum:SF (match_operand:SF 1 "register_operand" "")
+ (match_operand:SF 2 "const_double_operand" "")))]
+ "TARGET_FPU && fp_high_losum_p (operands[2])"
+ "*
+{
+ REAL_VALUE_TYPE r;
+ long i;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[2]);
+ REAL_VALUE_TO_TARGET_SINGLE (r, i);
+ operands[2] = GEN_INT (i);
+ return \"or\\t%1, %%lo(%a2), %0\";
+}"
+ [(set_attr "type" "ialu")
(set_attr "length" "1")])
-(define_insn "*movsf_const_lo"
- [(set (match_operand:SF 0 "register_operand" "=r")
- (unspec:SF [(match_operand 1 "register_operand" "r")
- (match_operand 2 "const_int_operand" "")] 17))]
- ""
- "or\\t%1, %%lo(%a2), %0"
+;; High part of an SFmode constant in an integer register: sethi the
+;; %hi() bits of the constant's single-precision bit image.  Guarded
+;; by fp_high_losum_p, like *movsf_lo_sum.
+(define_insn "*movsf_high"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (high:SF (match_operand:SF 1 "const_double_operand" "")))]
+ "TARGET_FPU && fp_high_losum_p (operands[1])"
+ "*
+{
+ REAL_VALUE_TYPE r;
+ long i;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
+ REAL_VALUE_TO_TARGET_SINGLE (r, i);
+ operands[1] = GEN_INT (i);
+ return \"sethi\\t%%hi(%1), %0\";
+}"
[(set_attr "type" "move")
(set_attr "length" "1")])
[(set (match_operand:SF 0 "register_operand" "")
(match_operand:SF 1 "const_double_operand" ""))]
"TARGET_FPU
+ && fp_high_losum_p (operands[1])
&& (GET_CODE (operands[0]) == REG
&& REGNO (operands[0]) < 32)"
- [(set (match_dup 0) (unspec:SF [(match_dup 1)] 12))
- (set (match_dup 0) (unspec:SF [(match_dup 0) (match_dup 1)] 17))]
- "
-{
- REAL_VALUE_TYPE r;
- long i;
+ [(set (match_dup 0) (high:SF (match_dup 1)))
+ (set (match_dup 0) (lo_sum:SF (match_dup 0) (match_dup 1)))])
- REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
- REAL_VALUE_TO_TARGET_SINGLE (r, i);
- operands[1] = GEN_INT (i);
-}")
+;; Exactly the same as above, except that all `f' cases are deleted.
+;; This is necessary to prevent reload from ever trying to use a `f' reg
+;; when -mno-fpu.
+
+(define_insn "*movsf_no_f_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
+ (match_operand:SF 1 "input_operand" "r,m,r"))]
+ "! TARGET_FPU
+ && (register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode))"
+ "@
+ mov\\t%1, %0
+ ld\\t%1, %0
+ st\\t%1, %0"
+ [(set_attr "type" "move,load,store")
+ (set_attr "length" "1")])
(define_expand "movsf"
[(set (match_operand:SF 0 "general_operand" "")
if (GET_CODE (operands[0]) == REG
&& CONSTANT_P (operands[1]))
{
- if (TARGET_VIS
- && GET_CODE (operands[1]) == CONST_DOUBLE
- && fp_zero_operand (operands[1]))
- goto movsf_is_ok;
-
/* emit_group_store will send such bogosity to us when it is
not storing directly into memory. So fix this up to avoid
crashes in output_constant_pool. */
if (operands [1] == const0_rtx)
operands[1] = CONST0_RTX (SFmode);
+
+ if (TARGET_VIS && fp_zero_operand (operands[1], SFmode))
+ goto movsf_is_ok;
+
+ /* We are able to build any SF constant in integer registers
+ with at most 2 instructions. */
+ if (REGNO (operands[0]) < 32)
+ goto movsf_is_ok;
+
operands[1] = validize_mem (force_const_mem (GET_MODE (operands[0]),
operands[1]));
}
/* Handle sets of MEM first. */
if (GET_CODE (operands[0]) == MEM)
{
- if (register_operand (operands[1], SFmode))
+ if (register_operand (operands[1], SFmode)
+ || fp_zero_operand (operands[1], SFmode))
goto movsf_is_ok;
if (! reload_in_progress)
;
}")
-(define_insn "*movsf_insn"
- [(set (match_operand:SF 0 "general_operand" "=f,f,m,r,r,m")
- (match_operand:SF 1 "input_operand" "f,m,f,r,m,r"))]
- "TARGET_FPU
- && (register_operand (operands[0], SFmode)
- || register_operand (operands[1], SFmode))"
- "@
- fmovs\\t%1, %0
- ld\\t%1, %0
- st\\t%1, %0
- mov\\t%1, %0
- ld\\t%1, %0
- st\\t%1, %0"
- [(set_attr "type" "fpmove,fpload,fpstore,move,load,store")
- (set_attr "length" "1")])
-
-;; Exactly the same as above, except that all `f' cases are deleted.
-;; This is necessary to prevent reload from ever trying to use a `f' reg
-;; when -mno-fpu.
-
-(define_insn "*movsf_no_f_insn"
- [(set (match_operand:SF 0 "general_operand" "=r,r,m")
- (match_operand:SF 1 "input_operand" "r,m,r"))]
- "! TARGET_FPU
- && (register_operand (operands[0], SFmode)
- || register_operand (operands[1], SFmode))"
- "@
- mov\\t%1, %0
- ld\\t%1, %0
- st\\t%1, %0"
- [(set_attr "type" "move,load,store")
- (set_attr "length" "1")])
-
-(define_insn "*clear_df"
- [(set (match_operand:DF 0 "general_operand" "=e")
- (match_operand:DF 1 "" ""))]
- "TARGET_VIS
- && GET_CODE (operands[1]) == CONST_DOUBLE
- && GET_CODE (operands[0]) == REG
- && fp_zero_operand (operands[1])"
- "fzero\\t%0"
- [(set_attr "type" "fpmove")
- (set_attr "length" "1")])
-
-(define_insn "*movdf_const_intreg_sp32"
- [(set (match_operand:DF 0 "general_operand" "=e,e,r")
- (match_operand:DF 1 "" "T,o,F"))]
- "TARGET_FPU && ! TARGET_ARCH64
- && GET_CODE (operands[1]) == CONST_DOUBLE
- && GET_CODE (operands[0]) == REG"
- "*
-{
- if (which_alternative == 0)
- return \"ldd\\t%1, %0\";
- else
- return \"#\";
-}"
- [(set_attr "type" "move")
- (set_attr "length" "1")])
-
-(define_insn "*movdf_const_intreg_sp64"
- [(set (match_operand:DF 0 "general_operand" "=e,e,r")
- (match_operand:DF 1 "" "m,o,F"))]
- "TARGET_FPU && TARGET_ARCH64
- && GET_CODE (operands[1]) == CONST_DOUBLE
- && GET_CODE (operands[0]) == REG"
- "*
-{
- if (which_alternative == 0)
- return \"ldd\\t%1, %0\";
- else
- return \"#\";
-}"
- [(set_attr "type" "move")
- (set_attr "length" "1")])
-
-(define_split
- [(set (match_operand:DF 0 "register_operand" "")
- (match_operand:DF 1 "const_double_operand" ""))]
- "TARGET_FPU
- && GET_CODE (operands[1]) == CONST_DOUBLE
- && (GET_CODE (operands[0]) == REG
- && REGNO (operands[0]) < 32)
- && reload_completed"
- [(clobber (const_int 0))]
- "
-{
- REAL_VALUE_TYPE r;
- long l[2];
-
- REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
- REAL_VALUE_TO_TARGET_DOUBLE (r, l);
- if (GET_CODE (operands[0]) == SUBREG)
- operands[0] = alter_subreg (operands[0]);
- operands[0] = gen_rtx_raw_REG (DImode, REGNO (operands[0]));
-
- emit_insn (gen_movsi (gen_highpart (SImode, operands[0]),
- GEN_INT (l[0])));
-
- /* Slick... but this trick loses if this subreg constant part
- can be done in one insn. */
- if (l[1] == l[0]
- && !(SPARC_SETHI_P (l[0])
- || SPARC_SIMM13_P (l[0])))
- {
- emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
- gen_highpart (SImode, operands[0])));
- }
- else
- {
- emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
- GEN_INT (l[1])));
- }
- DONE;
-}")
-
(define_expand "movdf"
[(set (match_operand:DF 0 "general_operand" "")
(match_operand:DF 1 "general_operand" ""))]
if (GET_CODE (operands[0]) == REG
&& CONSTANT_P (operands[1]))
{
- if (TARGET_VIS
- && GET_CODE (operands[1]) == CONST_DOUBLE
- && fp_zero_operand (operands[1]))
- goto movdf_is_ok;
-
/* emit_group_store will send such bogosity to us when it is
not storing directly into memory. So fix this up to avoid
crashes in output_constant_pool. */
if (operands [1] == const0_rtx)
operands[1] = CONST0_RTX (DFmode);
+
+ if ((TARGET_VIS || REGNO (operands[0]) < 32)
+ && fp_zero_operand (operands[1], DFmode))
+ goto movdf_is_ok;
+
operands[1] = validize_mem (force_const_mem (GET_MODE (operands[0]),
operands[1]));
}
/* Handle MEM cases first. */
if (GET_CODE (operands[0]) == MEM)
{
- if (register_operand (operands[1], DFmode))
+ if (register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))
goto movdf_is_ok;
if (! reload_in_progress)
;; Be careful, fmovd does not exist when !v9.
(define_insn "*movdf_insn_sp32"
- [(set (match_operand:DF 0 "general_operand" "=e,T,U,T,e,r,r,o,e,o")
- (match_operand:DF 1 "input_operand" "T,e,T,U,e,r,o,r,o,e"))]
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,T,U,T,o,e,*r,o,e,o")
+ (match_operand:DF 1 "input_operand" "T#F,e,T,U,G,e,*rFo,*r,o#F,e"))]
"TARGET_FPU
&& ! TARGET_V9
&& (register_operand (operands[0], DFmode)
- || register_operand (operands[1], DFmode))"
+ || register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))"
"@
ldd\\t%1, %0
std\\t%1, %0
(set_attr "length" "1,1,1,1,2,2,2,2,2,2")])
(define_insn "*movdf_no_e_insn_sp32"
- [(set (match_operand:DF 0 "general_operand" "=U,T,r,r,o")
- (match_operand:DF 1 "input_operand" "T,U,r,o,r"))]
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=U,T,o,r,o")
+ (match_operand:DF 1 "input_operand" "T,U,G,ro,r"))]
"! TARGET_FPU
+ && ! TARGET_V9
&& ! TARGET_ARCH64
&& (register_operand (operands[0], DFmode)
- || register_operand (operands[1], DFmode))"
+ || register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))"
"@
ldd\\t%1, %0
std\\t%1, %0
[(set_attr "type" "load,store,*,*,*")
(set_attr "length" "1,1,2,2,2")])
+;; DFmode moves with -mno-fpu on 32-bit V9: integer ldd/std for the
+;; register-pair (U) and aligned-memory (T) cases, a single stx of %r1
+;; (i.e. %g0) to store fp zero (G source), plus "#" alternatives that
+;; are split after reload into word-sized moves.
+(define_insn "*movdf_no_e_insn_v9_sp32"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=U,T,T,r,o")
+ (match_operand:DF 1 "input_operand" "T,U,G,ro,rG"))]
+ "! TARGET_FPU
+ && TARGET_V9
+ && ! TARGET_ARCH64
+ && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))"
+ "@
+ ldd\\t%1, %0
+ std\\t%1, %0
+ stx\\t%r1, %0
+ #
+ #"
+ [(set_attr "type" "load,store,store,*,*")
+ (set_attr "length" "1,1,1,2,2")])
+
;; We have available v9 double floats but not 64-bit
-;; integer registers.
-(define_insn "*movdf_insn_v9only"
- [(set (match_operand:DF 0 "general_operand" "=e,e,m,U,T,r,r,o")
- (match_operand:DF 1 "input_operand" "e,m,e,T,U,r,o,r"))]
+;; integer registers and no VIS.
+(define_insn "*movdf_insn_v9only_novis"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,T,T,U,T,e,*r,o")
+ (match_operand:DF 1 "input_operand" "e,T#F,G,e,T,U,o#F,*roF,*rGe"))]
"TARGET_FPU
&& TARGET_V9
+ && ! TARGET_VIS
+ && ! TARGET_ARCH64
+ && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))"
+ "@
+ fmovd\\t%1, %0
+ ldd\\t%1, %0
+ stx\\t%r1, %0
+ std\\t%1, %0
+ ldd\\t%1, %0
+ std\\t%1, %0
+ #
+ #
+ #"
+ [(set_attr "type" "fpmove,load,store,store,load,store,*,*,*")
+ (set_attr "length" "1,1,1,1,1,1,2,2,2")])
+
+;; We have available v9 double floats but not 64-bit
+;; integer registers but we have VIS.
+(define_insn "*movdf_insn_v9only_vis"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,e,T,T,U,T,e,*r,o")
+ (match_operand:DF 1 "input_operand" "G,e,T#F,G,e,T,U,o#F,*roGF,*rGe"))]
+ "TARGET_FPU
+ && TARGET_VIS
&& ! TARGET_ARCH64
&& (register_operand (operands[0], DFmode)
- || register_operand (operands[1], DFmode))"
+ || register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))"
"@
+ fzero\\t%0
fmovd\\t%1, %0
ldd\\t%1, %0
+ stx\\t%r1, %0
std\\t%1, %0
ldd\\t%1, %0
std\\t%1, %0
#
#
#"
- [(set_attr "type" "fpmove,load,store,load,store,*,*,*")
- (set_attr "length" "1,1,1,1,1,2,2,2")])
+ [(set_attr "type" "fpmove,fpmove,load,store,store,load,store,*,*,*")
+ (set_attr "length" "1,1,1,1,1,1,1,2,2,2")])
;; We have available both v9 double floats and 64-bit
-;; integer registers.
-(define_insn "*movdf_insn_sp64"
- [(set (match_operand:DF 0 "general_operand" "=e,e,m,r,r,m")
- (match_operand:DF 1 "input_operand" "e,m,e,r,m,r"))]
+;; integer registers. No VIS though.
+(define_insn "*movdf_insn_sp64_novis"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,m,*r,*r,m,*r")
+ (match_operand:DF 1 "input_operand" "e,m#F,e,*rG,m,*rG,F"))]
"TARGET_FPU
- && TARGET_V9
+ && ! TARGET_VIS
&& TARGET_ARCH64
&& (register_operand (operands[0], DFmode)
- || register_operand (operands[1], DFmode))"
+ || register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))"
"@
fmovd\\t%1, %0
ldd\\t%1, %0
std\\t%1, %0
- mov\\t%1, %0
+ mov\\t%r1, %0
ldx\\t%1, %0
- stx\\t%1, %0"
- [(set_attr "type" "fpmove,load,store,move,load,store")
- (set_attr "length" "1")])
+ stx\\t%r1, %0
+ #"
+ [(set_attr "type" "fpmove,load,store,move,load,store,*")
+ (set_attr "length" "1,1,1,1,1,1,2")])
+
+;; We have available both v9 double floats and 64-bit
+;; integer registers. And we have VIS.
+;; DFmode moves on 64-bit SPARC with VIS: fzero materializes fp zero
+;; directly in an FP register (G source); otherwise fmovd/ldd/std for
+;; FP regs, mov/ldx/stx for integer regs (%r1 prints a zero source,
+;; presumably as %g0), and "#" for an F constant into an integer
+;; register, which is split after reload.
+(define_insn "*movdf_insn_sp64_vis"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,e,m,*r,*r,m,*r")
+ (match_operand:DF 1 "input_operand" "G,e,m#F,e,*rG,m,*rG,F"))]
+ "TARGET_FPU
+ && TARGET_VIS
+ && TARGET_ARCH64
+ && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))"
+ "@
+ fzero\\t%0
+ fmovd\\t%1, %0
+ ldd\\t%1, %0
+ std\\t%1, %0
+ mov\\t%r1, %0
+ ldx\\t%1, %0
+ stx\\t%r1, %0
+ #"
+ [(set_attr "type" "fpmove,fpmove,load,store,move,load,store,*")
+ (set_attr "length" "1,1,1,1,1,1,1,2")])
(define_insn "*movdf_no_e_insn_sp64"
- [(set (match_operand:DF 0 "general_operand" "=r,r,m")
- (match_operand:DF 1 "input_operand" "r,m,r"))]
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m")
+ (match_operand:DF 1 "input_operand" "r,m,rG"))]
"! TARGET_FPU
&& TARGET_ARCH64
&& (register_operand (operands[0], DFmode)
- || register_operand (operands[1], DFmode))"
+ || register_operand (operands[1], DFmode)
+ || fp_zero_operand (operands[1], DFmode))"
"@
mov\\t%1, %0
ldx\\t%1, %0
- stx\\t%1, %0"
+ stx\\t%r1, %0"
[(set_attr "type" "move,load,store")
(set_attr "length" "1")])
+;; After reload, split a non-zero DFmode constant destined for an
+;; integer register (regno < 32).  The constant is converted to its
+;; target double bit image; on arch64 it is rebuilt as a single 64-bit
+;; movdi constant (with separate paths for 64-bit and narrower
+;; HOST_WIDE_INT hosts), while on 32-bit targets two movsi's are
+;; emitted -- reusing the already-loaded high word for the low word
+;; when both halves are equal and not representable by one sethi/mov.
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (match_operand:DF 1 "const_double_operand" ""))]
+ "TARGET_FPU
+ && (GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) < 32)
+ && ! fp_zero_operand(operands[1], DFmode)
+ && reload_completed"
+ [(clobber (const_int 0))]
+ "
+{
+ REAL_VALUE_TYPE r;
+ long l[2];
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
+ REAL_VALUE_TO_TARGET_DOUBLE (r, l);
+ if (GET_CODE (operands[0]) == SUBREG)
+ operands[0] = alter_subreg (operands[0]);
+ operands[0] = gen_rtx_raw_REG (DImode, REGNO (operands[0]));
+
+ if (TARGET_ARCH64)
+ {
+#if HOST_BITS_PER_WIDE_INT == 64
+ HOST_WIDE_INT val;
+
+ val = ((HOST_WIDE_INT)(unsigned long)l[1] |
+ ((HOST_WIDE_INT)(unsigned long)l[0] << 32));
+ emit_insn (gen_movdi (operands[0], GEN_INT (val)));
+#else
+ emit_insn (gen_movdi (operands[0],
+ gen_rtx_CONST_DOUBLE (VOIDmode, const0_rtx,
+ l[1], l[0])));
+#endif
+ }
+ else
+ {
+ emit_insn (gen_movsi (gen_highpart (SImode, operands[0]),
+ GEN_INT (l[0])));
+
+ /* Slick... but this trick loses if this subreg constant part
+ can be done in one insn. */
+ if (l[1] == l[0]
+ && !(SPARC_SETHI_P (l[0])
+ || SPARC_SIMM13_P (l[0])))
+ {
+ emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
+ gen_highpart (SImode, operands[0])));
+ }
+ else
+ {
+ emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
+ GEN_INT (l[1])));
+ }
+ }
+ DONE;
+}")
+
;; Ok, now the splits to handle all the multi insn and
;; mis-aligned memory address cases.
;; In these splits please take note that we must be
(define_split
[(set (match_operand:DF 0 "register_operand" "")
(match_operand:DF 1 "memory_operand" ""))]
- "((! TARGET_V9
- || (! TARGET_ARCH64
- && ((GET_CODE (operands[0]) == REG
- && REGNO (operands[0]) < 32)
- || (GET_CODE (operands[0]) == SUBREG
- && GET_CODE (SUBREG_REG (operands[0])) == REG
- && REGNO (SUBREG_REG (operands[0])) < 32))))
- && (reload_completed
- && (((REGNO (operands[0])) % 2) != 0
- || ! mem_min_alignment (operands[1], 8))
- && offsettable_memref_p (operands[1])))"
+ "reload_completed
+ && ! TARGET_ARCH64
+ && (((REGNO (operands[0]) % 2) != 0)
+ || ! mem_min_alignment (operands[1], 8))
+ && offsettable_memref_p (operands[1])"
[(clobber (const_int 0))]
"
{
(define_split
[(set (match_operand:DF 0 "memory_operand" "")
(match_operand:DF 1 "register_operand" ""))]
- "((! TARGET_V9
- || (! TARGET_ARCH64
- && ((GET_CODE (operands[1]) == REG
- && REGNO (operands[1]) < 32)
- || (GET_CODE (operands[1]) == SUBREG
- && GET_CODE (SUBREG_REG (operands[1])) == REG
- && REGNO (SUBREG_REG (operands[1])) < 32))))
- && (reload_completed
- && (((REGNO (operands[1])) % 2) != 0
- || ! mem_min_alignment (operands[0], 8))
- && offsettable_memref_p (operands[0])))"
+ "reload_completed
+ && ! TARGET_ARCH64
+ && (((REGNO (operands[1]) % 2) != 0)
+ || ! mem_min_alignment (operands[0], 8))
+ && offsettable_memref_p (operands[0])"
[(clobber (const_int 0))]
"
{
DONE;
}")
+;; Split a store of fp zero to memory into two SFmode zero stores
+;; (words at offsets 0 and 4) whenever a single 64-bit store is not
+;; possible: no V9 stx, or a 32-bit target whose memory operand is
+;; not known to be 8-byte aligned.  Requires an offsettable address.
+(define_split
+ [(set (match_operand:DF 0 "memory_operand" "")
+ (match_operand:DF 1 "fp_zero_operand" ""))]
+ "reload_completed
+ && (! TARGET_V9
+ || (! TARGET_ARCH64
+ && ! mem_min_alignment (operands[0], 8)))
+ && offsettable_memref_p (operands[0])"
+ [(clobber (const_int 0))]
+ "
+{
+ rtx dest1, dest2;
+
+ dest1 = change_address (operands[0], SFmode, NULL_RTX);
+ dest2 = change_address (operands[0], SFmode,
+ plus_constant_for_output (XEXP (dest1, 0), 4));
+ emit_insn (gen_movsf (dest1, CONST0_RTX (SFmode)));
+ emit_insn (gen_movsf (dest2, CONST0_RTX (SFmode)));
+ DONE;
+}")
+
+;; Split fp zero headed for a 32-bit integer register pair
+;; (regno < 32, REG or SUBREG-of-REG): zero the high and low SFmode
+;; halves with two separate movsf's.
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (match_operand:DF 1 "fp_zero_operand" ""))]
+ "reload_completed
+ && ! TARGET_ARCH64
+ && ((GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) < 32)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) < 32))"
+ [(clobber (const_int 0))]
+ "
+{
+ rtx set_dest = operands[0];
+ rtx dest1, dest2;
+
+ if (GET_CODE (set_dest) == SUBREG)
+ set_dest = alter_subreg (set_dest);
+ dest1 = gen_highpart (SFmode, set_dest);
+ dest2 = gen_lowpart (SFmode, set_dest);
+ emit_insn (gen_movsf (dest1, CONST0_RTX (SFmode)));
+ emit_insn (gen_movsf (dest2, CONST0_RTX (SFmode)));
+ DONE;
+}")
+
(define_expand "movtf"
[(set (match_operand:TF 0 "general_operand" "")
(match_operand:TF 1 "general_operand" ""))]
crashes in output_constant_pool. */
if (operands [1] == const0_rtx)
operands[1] = CONST0_RTX (TFmode);
+
+ if (TARGET_VIS && fp_zero_operand (operands[1], TFmode))
+ goto movtf_is_ok;
+
operands[1] = validize_mem (force_const_mem (GET_MODE (operands[0]),
operands[1]));
}
full 16-byte alignment for quads. */
if (GET_CODE (operands[0]) == MEM)
{
- if (register_operand (operands[1], TFmode))
- goto movtf_is_ok;
+ if (register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))
+ goto movtf_is_ok;
if (! reload_in_progress)
{
;; Be careful, fmovq and {st,ld}{x,q} do not exist when !arch64 so
;; we must split them all. :-(
(define_insn "*movtf_insn_sp32"
- [(set (match_operand:TF 0 "general_operand" "=e,o,U,o,e,r,r,o")
- (match_operand:TF 1 "input_operand" "o,e,o,U,e,r,o,r"))]
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=e,o,U,r")
+ (match_operand:TF 1 "input_operand" "oe,GeUr,o,roG"))]
+ "TARGET_FPU
+ && ! TARGET_VIS
+ && ! TARGET_ARCH64
+ && (register_operand (operands[0], TFmode)
+ || register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))"
+ "#"
+ [(set_attr "length" "4")])
+
+(define_insn "*movtf_insn_vis_sp32"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=e,o,U,r")
+ (match_operand:TF 1 "input_operand" "Goe,GeUr,o,roG"))]
"TARGET_FPU
+ && TARGET_VIS
&& ! TARGET_ARCH64
&& (register_operand (operands[0], TFmode)
- || register_operand (operands[1], TFmode))"
+ || register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))"
"#"
[(set_attr "length" "4")])
;; when -mno-fpu.
(define_insn "*movtf_no_e_insn_sp32"
- [(set (match_operand:TF 0 "general_operand" "=U,o,r,r,o")
- (match_operand:TF 1 "input_operand" "o,U,r,o,r"))]
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=o,U,o,r,o")
+ (match_operand:TF 1 "input_operand" "G,o,U,roG,r"))]
"! TARGET_FPU
&& ! TARGET_ARCH64
&& (register_operand (operands[0], TFmode)
- || register_operand (operands[1], TFmode))"
+ || register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))"
"#"
[(set_attr "length" "4")])
-;; Now handle the float reg cases directly when arch64,
-;; hard_quad, and proper reg number alignment are all true.
-(define_insn "*movtf_insn_hq_sp64"
- [(set (match_operand:TF 0 "general_operand" "=e,e,m,r,r,o")
- (match_operand:TF 1 "input_operand" "e,m,e,r,o,r"))]
+;; Now handle the float reg cases directly when arch64,
+;; hard_quad, and proper reg number alignment are all true.
+(define_insn "*movtf_insn_hq_sp64"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=e,e,m,o,r")
+ (match_operand:TF 1 "input_operand" "e,m,e,Gr,roG"))]
+ "TARGET_FPU
+ && ! TARGET_VIS
+ && TARGET_ARCH64
+ && TARGET_HARD_QUAD
+ && (register_operand (operands[0], TFmode)
+ || register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))"
+ "@
+ fmovq\\t%1, %0
+ ldq\\t%1, %0
+ stq\\t%1, %0
+ #
+ #"
+ [(set_attr "type" "fpmove,fpload,fpstore,*,*")
+ (set_attr "length" "1,1,1,2,2")])
+
+(define_insn "*movtf_insn_hq_vis_sp64"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=e,e,m,eo,r,o")
+ (match_operand:TF 1 "input_operand" "e,m,e,G,roG,r"))]
"TARGET_FPU
+ && TARGET_VIS
&& TARGET_ARCH64
- && TARGET_V9
&& TARGET_HARD_QUAD
&& (register_operand (operands[0], TFmode)
- || register_operand (operands[1], TFmode))"
+ || register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))"
"@
fmovq\\t%1, %0
ldq\\t%1, %0
;; Now we allow the integer register cases even when
;; only arch64 is true.
(define_insn "*movtf_insn_sp64"
- [(set (match_operand:TF 0 "general_operand" "=e,e,o,r,r,o")
- (match_operand:TF 1 "input_operand" "e,o,e,r,o,r"))]
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=e,o,r")
+ (match_operand:TF 1 "input_operand" "oe,Ger,orG"))]
+ "TARGET_FPU
+ && ! TARGET_VIS
+ && TARGET_ARCH64
+ && ! TARGET_HARD_QUAD
+ && (register_operand (operands[0], TFmode)
+ || register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))"
+ "#"
+ [(set_attr "length" "2")])
+
+(define_insn "*movtf_insn_vis_sp64"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=e,o,r")
+ (match_operand:TF 1 "input_operand" "Goe,Ger,orG"))]
"TARGET_FPU
+ && TARGET_VIS
&& TARGET_ARCH64
&& ! TARGET_HARD_QUAD
&& (register_operand (operands[0], TFmode)
- || register_operand (operands[1], TFmode))"
+ || register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))"
"#"
[(set_attr "length" "2")])
+;; TFmode move, 64-bit, no FPU: integer registers / memory only; the
+;; fp-zero (G) source is now allowed and is split into two DF moves.
(define_insn "*movtf_no_e_insn_sp64"
- [(set (match_operand:TF 0 "general_operand" "=r,r,o")
- (match_operand:TF 1 "input_operand" "r,o,r"))]
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=r,o")
+ (match_operand:TF 1 "input_operand" "orG,rG"))]
"! TARGET_FPU
&& TARGET_ARCH64
&& (register_operand (operands[0], TFmode)
- || register_operand (operands[1], TFmode))"
+ || register_operand (operands[1], TFmode)
+ || fp_zero_operand (operands[1], TFmode))"
"#"
[(set_attr "length" "2")])
if (GET_CODE (set_src) == SUBREG)
set_src = alter_subreg (set_src);
- /* Ugly, but gen_highpart will crap out here for 32-bit targets. */
- dest1 = gen_rtx_SUBREG (DFmode, set_dest, WORDS_BIG_ENDIAN == 0);
- dest2 = gen_rtx_SUBREG (DFmode, set_dest, WORDS_BIG_ENDIAN != 0);
- src1 = gen_rtx_SUBREG (DFmode, set_src, WORDS_BIG_ENDIAN == 0);
- src2 = gen_rtx_SUBREG (DFmode, set_src, WORDS_BIG_ENDIAN != 0);
+ dest1 = gen_df_reg (set_dest, 0);
+ dest2 = gen_df_reg (set_dest, 1);
+ src1 = gen_df_reg (set_src, 0);
+ src2 = gen_df_reg (set_src, 1);
/* Now emit using the real source and destination we found, swapping
the order if we detect overlap. */
}")
+;; Split a TFmode store of floating-point zero (after reload) into two
+;; DFmode zero moves, handling register, subreg and memory destinations.
(define_split
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (match_operand:TF 1 "fp_zero_operand" ""))]
+ "reload_completed"
+ [(clobber (const_int 0))]
+ "
+{
+ rtx set_dest = operands[0];
+ rtx dest1, dest2;
+
+ switch (GET_CODE (set_dest))
+ {
+ case SUBREG:
+ set_dest = alter_subreg (set_dest);
+ /* FALLTHROUGH */
+ case REG:
+ dest1 = gen_df_reg (set_dest, 0);
+ dest2 = gen_df_reg (set_dest, 1);
+ break;
+ case MEM:
+ /* A TF store is two adjacent DF stores, 8 bytes apart.  */
+ dest1 = change_address (set_dest, DFmode, NULL_RTX);
+ dest2 = change_address (set_dest, DFmode,
+ plus_constant_for_output (XEXP (dest1, 0), 8));
+ break;
+ default:
+ abort ();
+ }
+
+ emit_insn (gen_movdf (dest1, CONST0_RTX (DFmode)));
+ emit_insn (gen_movdf (dest2, CONST0_RTX (DFmode)));
+ DONE;
+}")
+
+(define_split
[(set (match_operand:TF 0 "register_operand" "")
(match_operand:TF 1 "memory_operand" ""))]
"(reload_completed
rtx word0 = change_address (operands[1], DFmode, NULL_RTX);
rtx word1 = change_address (operands[1], DFmode,
plus_constant_for_output (XEXP (word0, 0), 8));
- rtx dest1, dest2;
+ rtx set_dest, dest1, dest2;
+
+ set_dest = operands[0];
+ if (GET_CODE (set_dest) == SUBREG)
+ set_dest = alter_subreg (set_dest);
- /* Ugly, but gen_highpart will crap out here for 32-bit targets. */
- dest1 = gen_rtx_SUBREG (DFmode, operands[0], WORDS_BIG_ENDIAN == 0);
- dest2 = gen_rtx_SUBREG (DFmode, operands[0], WORDS_BIG_ENDIAN != 0);
+ dest1 = gen_df_reg (set_dest, 0);
+ dest2 = gen_df_reg (set_dest, 1);
/* Now output, ordering such that we don't clobber any registers
mentioned in the address. */
[(clobber (const_int 0))]
"
{
- rtx word0 = change_address (operands[0], DFmode, NULL_RTX);
- rtx word1 = change_address (operands[0], DFmode,
- plus_constant_for_output (XEXP (word0, 0), 8));
- rtx src1, src2;
+ rtx word1 = change_address (operands[0], DFmode, NULL_RTX);
+ rtx word2 = change_address (operands[0], DFmode,
+ plus_constant_for_output (XEXP (word1, 0), 8));
+ rtx set_src;
+
+ set_src = operands[1];
+ if (GET_CODE (set_src) == SUBREG)
+ set_src = alter_subreg (set_src);
- /* Ugly, but gen_highpart will crap out here for 32-bit targets. */
- src1 = gen_rtx_SUBREG (DFmode, operands[1], WORDS_BIG_ENDIAN == 0);
- src2 = gen_rtx_SUBREG (DFmode, operands[1], WORDS_BIG_ENDIAN != 0);
- emit_insn (gen_movdf (word0, src1));
- emit_insn (gen_movdf (word1, src2));
+ emit_insn (gen_movdf (word1, gen_df_reg (set_src, 0)));
+ emit_insn (gen_movdf (word2, gen_df_reg (set_src, 1)));
DONE;
}")
\f
[(set_attr "type" "fpcmove")
(set_attr "length" "1")])
-(define_insn "*movdf_cc_sp64"
+(define_insn "movdf_cc_sp64"
[(set (match_operand:DF 0 "register_operand" "=e,e")
(if_then_else:DF (match_operator 1 "comparison_operator"
[(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
[(set_attr "type" "fpcmove")
(set_attr "length" "1")])
-(define_insn "*movtf_cc_sp64"
+(define_insn "*movtf_cc_hq_sp64"
[(set (match_operand:TF 0 "register_operand" "=e,e")
(if_then_else:TF (match_operator 1 "comparison_operator"
[(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
[(set_attr "type" "fpcmove")
(set_attr "length" "1")])
+;; Conditional TFmode move on condition codes without hard quad: no
+;; fmovq is available, so emit "#" and split into two fmovd (see the
+;; following define_split).
+(define_insn "*movtf_cc_sp64"
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (if_then_else:TF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:TF 3 "register_operand" "e,0")
+ (match_operand:TF 4 "register_operand" "0,e")))]
+ "TARGET_V9 && TARGET_FPU && !TARGET_HARD_QUAD"
+ "#"
+ [(set_attr "type" "fpcmove")
+ (set_attr "length" "2")])
+
+;; Split the soft-quad TF conditional move into two DF conditional
+;; moves.  "third" means the destination is tied to the true-arm source
+;; (operand 3), i.e. the value actually being moved in is operand 4;
+;; order the two halves so the first fmovd does not clobber a half that
+;; the second one still reads.
+(define_split
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (if_then_else:TF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:TF 3 "register_operand" "e,0")
+ (match_operand:TF 4 "register_operand" "0,e")))]
+ "reload_completed && TARGET_V9 && TARGET_FPU && !TARGET_HARD_QUAD"
+ [(clobber (const_int 0))]
+ "
+{
+ rtx set_dest = operands[0];
+ rtx set_srca = operands[3];
+ rtx set_srcb = operands[4];
+ int third = rtx_equal_p (set_dest, set_srca);
+ rtx dest1, dest2;
+ rtx srca1, srca2, srcb1, srcb2;
+
+ if (GET_CODE (set_dest) == SUBREG)
+ set_dest = alter_subreg (set_dest);
+ if (GET_CODE (set_srca) == SUBREG)
+ set_srca = alter_subreg (set_srca);
+ if (GET_CODE (set_srcb) == SUBREG)
+ set_srcb = alter_subreg (set_srcb);
+
+ dest1 = gen_df_reg (set_dest, 0);
+ dest2 = gen_df_reg (set_dest, 1);
+ srca1 = gen_df_reg (set_srca, 0);
+ srca2 = gen_df_reg (set_srca, 1);
+ srcb1 = gen_df_reg (set_srcb, 0);
+ srcb2 = gen_df_reg (set_srcb, 1);
+
+ /* Now emit using the real source and destination we found, swapping
+ the order if we detect overlap.  */
+ if ((third && reg_overlap_mentioned_p (dest1, srcb2))
+ || (!third && reg_overlap_mentioned_p (dest1, srca2)))
+ {
+ emit_insn (gen_movdf_cc_sp64 (dest2, operands[1], operands[2], srca2, srcb2));
+ emit_insn (gen_movdf_cc_sp64 (dest1, operands[1], operands[2], srca1, srcb1));
+ }
+ else
+ {
+ emit_insn (gen_movdf_cc_sp64 (dest1, operands[1], operands[2], srca1, srcb1));
+ emit_insn (gen_movdf_cc_sp64 (dest2, operands[1], operands[2], srca2, srcb2));
+ }
+ DONE;
+}")
+
(define_insn "*movqi_cc_reg_sp64"
[(set (match_operand:QI 0 "register_operand" "=r,r")
(if_then_else:QI (match_operator 1 "v9_regcmp_op"
[(set_attr "type" "fpcmove")
(set_attr "length" "1")])
-(define_insn "*movdf_cc_reg_sp64"
+(define_insn "movdf_cc_reg_sp64"
[(set (match_operand:DF 0 "register_operand" "=e,e")
(if_then_else:DF (match_operator 1 "v9_regcmp_op"
[(match_operand:DI 2 "register_operand" "r,r")
[(set_attr "type" "fpcmove")
(set_attr "length" "1")])
-(define_insn "*movtf_cc_reg_sp64"
+;; Conditional TFmode move on an integer register condition: with hard
+;; quad this is a single fmovrq; renamed _hq so the soft-quad "#"
+;; variant below can take the generic name.
+(define_insn "*movtf_cc_reg_hq_sp64"
[(set (match_operand:TF 0 "register_operand" "=e,e")
(if_then_else:TF (match_operator 1 "v9_regcmp_op"
[(match_operand:DI 2 "register_operand" "r,r")
(const_int 0)])
(match_operand:TF 3 "register_operand" "e,0")
(match_operand:TF 4 "register_operand" "0,e")))]
- "TARGET_ARCH64 && TARGET_FPU"
+ "TARGET_ARCH64 && TARGET_FPU && TARGET_HARD_QUAD"
"@
fmovrq%D1\\t%2, %3, %0
fmovrq%d1\\t%2, %4, %0"
+
+;; Soft-quad variant of the above: no fmovrq available, emit "#" and
+;; split into two fmovrd (see the following define_split).
+(define_insn "*movtf_cc_reg_sp64"
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (if_then_else:TF (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:TF 3 "register_operand" "e,0")
+ (match_operand:TF 4 "register_operand" "0,e")))]
+ "TARGET_ARCH64 && TARGET_FPU && ! TARGET_HARD_QUAD"
+ "#"
+ [(set_attr "type" "fpcmove")
+ (set_attr "length" "2")])
+
+;; Split the soft-quad register-conditional TF move into two DF
+;; conditional moves, same overlap-avoiding ordering as the icc/fcc
+;; split above ("third" = destination tied to the true-arm source).
+(define_split
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (if_then_else:TF (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:TF 3 "register_operand" "e,0")
+ (match_operand:TF 4 "register_operand" "0,e")))]
+ "reload_completed && TARGET_ARCH64 && TARGET_FPU && ! TARGET_HARD_QUAD"
+ [(clobber (const_int 0))]
+ "
+{
+ rtx set_dest = operands[0];
+ rtx set_srca = operands[3];
+ rtx set_srcb = operands[4];
+ int third = rtx_equal_p (set_dest, set_srca);
+ rtx dest1, dest2;
+ rtx srca1, srca2, srcb1, srcb2;
+
+ if (GET_CODE (set_dest) == SUBREG)
+ set_dest = alter_subreg (set_dest);
+ if (GET_CODE (set_srca) == SUBREG)
+ set_srca = alter_subreg (set_srca);
+ if (GET_CODE (set_srcb) == SUBREG)
+ set_srcb = alter_subreg (set_srcb);
+
+ dest1 = gen_df_reg (set_dest, 0);
+ dest2 = gen_df_reg (set_dest, 1);
+ srca1 = gen_df_reg (set_srca, 0);
+ srca2 = gen_df_reg (set_srca, 1);
+ srcb1 = gen_df_reg (set_srcb, 0);
+ srcb2 = gen_df_reg (set_srcb, 1);
+
+ /* Now emit using the real source and destination we found, swapping
+ the order if we detect overlap.  */
+ if ((third && reg_overlap_mentioned_p (dest1, srcb2))
+ || (!third && reg_overlap_mentioned_p (dest1, srca2)))
+ {
+ emit_insn (gen_movdf_cc_reg_sp64 (dest2, operands[1], operands[2], srca2, srcb2));
+ emit_insn (gen_movdf_cc_reg_sp64 (dest1, operands[1], operands[2], srca1, srcb1));
+ }
+ else
+ {
+ emit_insn (gen_movdf_cc_reg_sp64 (dest1, operands[1], operands[2], srca1, srcb1));
+ emit_insn (gen_movdf_cc_reg_sp64 (dest2, operands[1], operands[2], srca2, srcb2));
+ }
+ DONE;
+}")
+
\f
;;- zero extension instructions
operand1 = XEXP (operand1, 0);
}
- emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1,
- op1_subword),
+ emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1, op1_subword),
shift_16));
emit_insn (gen_lshrsi3 (operand0, temp, shift_16));
DONE;
operand1 = XEXP (operand1, 0);
}
- emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1,
- op1_subword),
+ emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1, op1_subword),
shift_48));
emit_insn (gen_lshrdi3 (operand0, temp, shift_48));
DONE;
[(set (reg:CC 100)
(compare:CC (zero_extend:SI (match_operand:QI 0 "register_operand" "r"))
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
+ "andcc\\t%0, 0xff, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+;; Compare a QImode register against zero (32-bit CC).  An andcc with
+;; 0xff both narrows to QImode and sets the condition codes.
+(define_insn "*cmp_zero_qi"
+ [(set (reg:CC 100)
+ (compare:CC (match_operand:QI 0 "register_operand" "r")
+ (const_int 0)))]
+ ""
"andcc\\t%0, 0xff, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
[(set_attr "type" "compare")
(set_attr "length" "1")])
+;; Combined compare-and-zero-extend, SImode form written with an
+;; explicit (and ... 255) as combine may present it.
+(define_insn "*cmp_zero_extendqisi2_andcc_set"
+ [(set (reg:CC 100)
+ (compare:CC (and:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 255))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (subreg:QI (match_dup 1) 0)))]
+ ""
+ "andcc\\t%1, 0xff, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
(define_insn "*cmp_zero_extendqidi2"
[(set (reg:CCX 100)
(compare:CCX (zero_extend:DI (match_operand:QI 0 "register_operand" "r"))
[(set_attr "type" "compare")
(set_attr "length" "1")])
+;; Same QI-against-zero compare, but setting the 64-bit (xcc) codes.
+(define_insn "*cmp_zero_qi_sp64"
+ [(set (reg:CCX 100)
+ (compare:CCX (match_operand:QI 0 "register_operand" "r")
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "andcc\\t%0, 0xff, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
(define_insn "*cmp_zero_extendqidi2_set"
[(set (reg:CCX 100)
(compare:CCX (zero_extend:DI (match_operand:QI 1 "register_operand" "r"))
[(set_attr "type" "compare")
(set_attr "length" "1")])
+;; DImode counterpart of *cmp_zero_extendqisi2_andcc_set.
+(define_insn "*cmp_zero_extendqidi2_andcc_set"
+ [(set (reg:CCX 100)
+ (compare:CCX (and:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 255))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (subreg:QI (match_dup 1) 0)))]
+ "TARGET_ARCH64"
+ "andcc\\t%1, 0xff, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+
;; Similarly, handle {SI,DI}->QI mode truncation followed by a compare.
(define_insn "*cmp_siqi_trunc"
[(set (reg:CC 100)
(compare:CC (subreg:QI (match_operand:SI 0 "register_operand" "r") 0)
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"andcc\\t%0, 0xff, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
operand1 = XEXP (operand1, 0);
}
- emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1,
- op1_subword),
+ emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1, op1_subword),
shift_16));
emit_insn (gen_ashrsi3 (operand0, temp, shift_16));
DONE;
op0_subword = SUBREG_WORD (operand0);
operand0 = XEXP (operand0, 0);
}
- emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1,
- op1_subword),
+ emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1, op1_subword),
shift_24));
if (GET_MODE (operand0) != SImode)
operand0 = gen_rtx_SUBREG (SImode, operand0, op0_subword);
operand1 = XEXP (operand1, 0);
}
- emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1,
- op1_subword),
+ emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1, op1_subword),
shift_24));
emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
DONE;
operand1 = XEXP (operand1, 0);
}
- emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1,
- op1_subword),
+ emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1, op1_subword),
shift_56));
emit_insn (gen_ashrdi3 (operand0, temp, shift_56));
DONE;
operand1 = XEXP (operand1, 0);
}
- emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1,
- op1_subword),
+ emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1, op1_subword),
shift_48));
emit_insn (gen_ashrdi3 (operand0, temp, shift_48));
DONE;
(match_operand:SI 1 "small_int_or_double" "n")
(match_operand:SI 2 "small_int_or_double" "n"))
(const_int 0)))]
- "! TARGET_LIVE_G0
- && ((GET_CODE (operands[2]) == CONST_INT
- && INTVAL (operands[2]) > 19)
- || (GET_CODE (operands[2]) == CONST_DOUBLE
- && CONST_DOUBLE_LOW (operands[2]) > 19))"
+ "(GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) > 19)
+ || (GET_CODE (operands[2]) == CONST_DOUBLE
+ && CONST_DOUBLE_LOW (operands[2]) > 19)"
"*
{
int len = (GET_CODE (operands[1]) == CONST_INT
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "extendsftf2"
+;; SF -> TF extension.  Without hard quad, call _Qp_stoq (&result, op1):
+;; the library routine writes the TFmode result through its first
+;; argument, so the slot tracks the destination (operands[0]).
+(define_expand "extendsftf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float_extend:TF
+ (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_stoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], SFmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*extendsftf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(float_extend:TF
(match_operand:SF 1 "register_operand" "f")))]
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "extenddftf2"
+;; DF -> TF extension.  Same scheme as extendsftf2: without hard quad,
+;; call _Qp_dtoq with the destination slot's address.
+(define_expand "extenddftf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float_extend:TF
+ (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_dtoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], DFmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*extenddftf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(float_extend:TF
(match_operand:DF 1 "register_operand" "e")))]
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "trunctfsf2"
+;; TF -> SF truncation.  Without hard quad, the TF input is passed by
+;; address, so spill it to a stack slot first unless it is already in
+;; memory; _Qp_qtos returns the SFmode result by value.
+(define_expand "trunctfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtos\"),
+ operands[0], 0, SFmode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+ }
+}")
+
+(define_insn "*trunctfsf2_hq"
[(set (match_operand:SF 0 "register_operand" "=f")
(float_truncate:SF
(match_operand:TF 1 "register_operand" "e")))]
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "trunctfdf2"
+;; TF -> DF truncation.  Same scheme as trunctfsf2, using _Qp_qtod.
+(define_expand "trunctfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (float_truncate:DF
+ (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtod\"),
+ operands[0], 0, DFmode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+ }
+}")
+
+(define_insn "*trunctfdf2_hq"
[(set (match_operand:DF 0 "register_operand" "=e")
(float_truncate:DF
(match_operand:TF 1 "register_operand" "e")))]
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "floatsitf2"
+;; SI -> TF conversion.  Without hard quad, call _Qp_itoq (&result, op1):
+;; the library routine stores the TFmode result through its first
+;; argument, so the memory slot must track the destination.
+(define_expand "floatsitf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float:TF (match_operand:SI 1 "register_operand" "f")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ /* The slot receives the result, so it is based on operands[0],
+ not on the SImode input.  */
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_itoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], SImode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*floatsitf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(float:TF (match_operand:SI 1 "register_operand" "f")))]
"TARGET_FPU && TARGET_HARD_QUAD"
[(set_attr "type" "fp")
(set_attr "length" "1")])
+;; Unsigned SI -> TF conversion via _Qp_uitoq (&result, op1); the result
+;; is written through the address of the destination slot.
+(define_expand "floatunssitf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (unsigned_float:TF (match_operand:SI 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
+ "
+{
+ rtx slot0;
+
+ /* The slot receives the result, so it is based on operands[0].  */
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_uitoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], SImode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+}")
+
;; Now the same for 64 bit sources.
(define_insn "floatdisf2"
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "floatditf2"
- [(set (match_operand:TF 0 "register_operand" "=e")
- (float:TF (match_operand:DI 1 "register_operand" "e")))]
- "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
- "fxtoq\\t%1, %0"
- [(set_attr "type" "fp")
- (set_attr "length" "1")])
+;; DI -> TF conversion.  With hard quad this matches *floatditf2_hq
+;; below; otherwise call _Qp_xtoq (&result, op1), the result being
+;; written through the destination slot's address.
+(define_expand "floatditf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float:TF (match_operand:DI 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_V9 && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ /* The slot receives the result, so it is based on operands[0].  */
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_xtoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], DImode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*floatditf2_hq"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float:TF (match_operand:DI 1 "register_operand" "e")))]
+ "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
+ "fxtoq\\t%1, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
+
+;; Unsigned DI -> TF conversion via _Qp_uxtoq (&result, op1); the result
+;; is written through the address of the destination slot.
+(define_expand "floatunsditf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (unsigned_float:TF (match_operand:DI 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
+ "
+{
+ rtx slot0;
+
+ /* The slot receives the result, so it is based on operands[0].  */
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_uxtoq\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ operands[1], DImode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+}")
;; Convert a float to an actual integer.
;; Truncation is performed as part of the conversion.
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "fix_trunctfsi2"
+;; TF -> SI conversion.  Without hard quad, pass the TF input by
+;; address (spilling it to a stack slot if necessary) to _Qp_qtoi,
+;; which returns the SImode result by value.
+(define_expand "fix_trunctfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtoi\"),
+ operands[0], 0, SImode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+ }
+}")
+
+(define_insn "*fix_trunctfsi2_hq"
[(set (match_operand:SI 0 "register_operand" "=f")
(fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
"TARGET_FPU && TARGET_HARD_QUAD"
[(set_attr "type" "fp")
(set_attr "length" "1")])
+;; TF -> unsigned SI conversion via _Qp_qtoui; TF input passed by
+;; address, result returned by value.
+(define_expand "fixuns_trunctfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (unsigned_fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ "TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
+ "
+{
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtoui\"),
+ operands[0], 0, SImode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+}")
+
;; Now the same, for V9 targets
(define_insn "fix_truncsfdi2"
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "fix_trunctfdi2"
+;; TF -> DI conversion.  With hard quad this must match the
+;; *fix_trunctfdi2_hq insn below, so the fix must be in DImode to
+;; agree with the DImode destination; otherwise pass the TF input by
+;; address to _Qp_qtox, which returns the DImode result by value.
+(define_expand "fix_trunctfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=e")
+ (fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ "TARGET_V9 && TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtox\"),
+ operands[0], 0, DImode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+ }
+}")
+
+(define_insn "*fix_trunctfdi2_hq"
[(set (match_operand:DI 0 "register_operand" "=e")
(fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
"TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
"fqtox\\t%1, %0"
[(set_attr "type" "fp")
(set_attr "length" "1")])
+
+;; TF -> unsigned DI conversion via _Qp_qtoux; TF input passed by
+;; address, result returned by value.
+(define_expand "fixuns_trunctfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=f")
+ (unsigned_fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ "TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
+ "
+{
+ rtx slot0;
+
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
+ }
+ else
+ slot0 = operands[1];
+
+ emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtoux\"),
+ operands[0], 0, DImode, 1,
+ XEXP (slot0, 0), Pmode);
+ DONE;
+}")
+
\f
;;- arithmetic instructions
""
"
{
+ HOST_WIDE_INT i;
+
if (! TARGET_ARCH64)
{
emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
}
if (arith_double_4096_operand(operands[2], DImode))
{
- emit_insn (gen_rtx_SET (VOIDmode, operands[0],
- gen_rtx_MINUS (DImode, operands[1],
- GEN_INT(-4096))));
+ switch (GET_CODE (operands[1]))
+ {
+ case CONST_INT: i = INTVAL (operands[1]); break;
+ case CONST_DOUBLE: i = CONST_DOUBLE_LOW (operands[1]); break;
+ default:
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_MINUS (DImode, operands[1],
+ GEN_INT(-4096))));
+ DONE;
+ }
+ emit_insn (gen_movdi (operands[0], GEN_INT (i + 4096)));
DONE;
}
}")
operands[5] = gen_lowpart (SImode, operands[2]);
operands[6] = gen_highpart (SImode, operands[0]);
operands[7] = gen_highpart (SImode, operands[1]);
+#if HOST_BITS_PER_WIDE_INT == 32
if (GET_CODE (operands[2]) == CONST_INT)
{
if (INTVAL (operands[2]) < 0)
operands[8] = const0_rtx;
}
else
+#endif
operands[8] = gen_highpart (SImode, operands[2]);
}")
operands[5] = gen_lowpart (SImode, operands[2]);
operands[6] = gen_highpart (SImode, operands[0]);
operands[7] = gen_highpart (SImode, operands[1]);
+#if HOST_BITS_PER_WIDE_INT == 32
if (GET_CODE (operands[2]) == CONST_INT)
{
if (INTVAL (operands[2]) < 0)
operands[8] = const0_rtx;
}
else
+#endif
operands[8] = gen_highpart (SImode, operands[2]);
}")
""
"
{
- if (arith_4096_operand(operands[2], DImode))
+ if (arith_4096_operand(operands[2], SImode))
{
- emit_insn (gen_rtx_SET (VOIDmode, operands[0],
- gen_rtx_MINUS (SImode, operands[1],
- GEN_INT(-4096))));
+ if (GET_CODE (operands[1]) == CONST_INT)
+ emit_insn (gen_movsi (operands[0],
+ GEN_INT (INTVAL (operands[1]) + 4096)));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_MINUS (SImode, operands[1],
+ GEN_INT(-4096))));
DONE;
}
}")
(compare:CC_NOOV (plus:SI (match_operand:SI 0 "arith_operand" "%r")
(match_operand:SI 1 "arith_operand" "rI"))
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"addcc\\t%0, %1, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
""
"
{
- if (arith_4096_operand(operands[2], DImode))
+ if (arith_4096_operand(operands[2], SImode))
{
emit_insn (gen_rtx_SET (VOIDmode, operands[0],
gen_rtx_PLUS (SImode, operands[1],
(compare:CC_NOOV (minus:SI (match_operand:SI 0 "reg_or_0_operand" "rJ")
(match_operand:SI 1 "arith_operand" "rI"))
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"subcc\\t%r0, %1, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
\f
;; Integer Multiply/Divide.
-;; The 32 bit multiply/divide instructions are deprecated on v9 and shouldn't
-;; we used. We still use them in 32 bit v9 compilers.
-;; The 64 bit v9 compiler will (/should) widen the args and use muldi3.
+;; The 32 bit multiply/divide instructions are deprecated on v9, but at
+;; least in UltraSPARC I, II and IIi it is a win tick-wise.
(define_insn "mulsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
}"
[(set_attr "length" "9,8")])
-;; It is not known whether this will match.
-
(define_insn "*cmp_mul_set"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (mult:SI (match_operand:SI 1 "arith_operand" "%r")
- (match_operand:SI 2 "arith_operand" "rI")))
- (set (reg:CC_NOOV 100)
- (compare:CC_NOOV (mult:SI (match_dup 1) (match_dup 2))
- (const_int 0)))]
+ [(set (reg:CC 100)
+ (compare:CC (mult:SI (match_operand:SI 1 "arith_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_dup 1) (match_dup 2)))]
"TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS"
"smulcc\\t%1, %2, %0"
[(set_attr "type" "imul")
if (CONSTANT_P (operands[2]))
{
if (TARGET_V8PLUS)
- {
- emit_insn (gen_const_mulsidi3_v8plus (operands[0], operands[1],
- operands[2]));
- DONE;
- }
- emit_insn (gen_const_mulsidi3 (operands[0], operands[1], operands[2]));
+ emit_insn (gen_const_mulsidi3_v8plus (operands[0], operands[1],
+ operands[2]));
+ else
+ emit_insn (gen_const_mulsidi3_sp32 (operands[0], operands[1],
+ operands[2]));
DONE;
}
if (TARGET_V8PLUS)
(if_then_else (eq_attr "isa" "sparclet")
(const_int 1) (const_int 2)))])
+;; On 64-bit, smul already produces the full 64-bit product in one
+;; register, so widening 32x32->64 multiply needs no %y read.
+(define_insn "*mulsidi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "smul\\t%1, %2, %0"
+ [(set_attr "length" "1")])
+
;; Extra pattern, because sign_extend of a constant isn't valid.
;; XXX
-(define_insn "const_mulsidi3"
+(define_insn "const_mulsidi3_sp32"
[(set (match_operand:DI 0 "register_operand" "=r")
(mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
(match_operand:SI 2 "small_int" "I")))]
- "TARGET_HARD_MUL"
+ "TARGET_HARD_MUL32"
"*
{
return TARGET_SPARCLET ? \"smuld\\t%1, %2, %L0\" : \"smul\\t%1, %2, %L0\\n\\trd\\t%%y, %H0\";
(if_then_else (eq_attr "isa" "sparclet")
(const_int 1) (const_int 2)))])
+;; Constant-operand form of the sp64 widening multiply (sign_extend of
+;; a constant is not valid RTL, hence the separate pattern).
+(define_insn "const_mulsidi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "small_int" "I")))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "smul\\t%1, %2, %0"
+ [(set_attr "length" "1")])
+
(define_expand "smulsi3_highpart"
[(set (match_operand:SI 0 "register_operand" "")
(truncate:SI
(lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
(sign_extend:DI (match_operand:SI 2 "arith_operand" "")))
(const_int 32))))]
- "TARGET_HARD_MUL"
+ "TARGET_HARD_MUL && TARGET_ARCH32"
"
{
if (CONSTANT_P (operands[2]))
(clobber (match_scratch:SI 4 "=X,&h"))]
"TARGET_V8PLUS"
"@
- smul %1,%2,%0\;srlx %0,%3,%0
- smul %1,%2,%4\;srlx %4,%3,%0"
+ smul\\t%1, %2, %0\;srlx\\t%0, %3, %0
+ smul\\t%1, %2, %4\;srlx\\t%4, %3, %0"
[(set_attr "length" "2")])
;; The combiner changes TRUNCATE in the previous pattern to SUBREG.
(lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
(sign_extend:DI (match_operand:SI 2 "register_operand" "r")))
(const_int 32))))]
- "TARGET_HARD_MUL32
- && ! TARGET_LIVE_G0"
+ "TARGET_HARD_MUL32"
"smul\\t%1, %2, %%g0\\n\\trd\\t%%y, %0"
[(set_attr "length" "2")])
(lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
(match_operand:SI 2 "register_operand" "r"))
(const_int 32))))]
- "TARGET_HARD_MUL32
- && ! TARGET_LIVE_G0"
+ "TARGET_HARD_MUL32"
"smul\\t%1, %2, %%g0\\n\\trd\\t%%y, %0"
[(set_attr "length" "2")])
if (CONSTANT_P (operands[2]))
{
if (TARGET_V8PLUS)
- {
- emit_insn (gen_const_umulsidi3_v8plus (operands[0], operands[1],
- operands[2]));
- DONE;
- }
- emit_insn (gen_const_umulsidi3 (operands[0], operands[1], operands[2]));
+ emit_insn (gen_const_umulsidi3_v8plus (operands[0], operands[1],
+ operands[2]));
+ else
+ emit_insn (gen_const_umulsidi3_sp32 (operands[0], operands[1],
+ operands[2]));
DONE;
}
if (TARGET_V8PLUS)
(if_then_else (eq_attr "isa" "sparclet")
(const_int 1) (const_int 2)))])
+;; Unsigned counterpart of *mulsidi3_sp64: umul gives the full 64-bit
+;; product directly on 64-bit.
+(define_insn "*umulsidi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "umul\\t%1, %2, %0"
+ [(set_attr "length" "1")])
+
;; Extra pattern, because sign_extend of a constant isn't valid.
;; XXX
-(define_insn "const_umulsidi3"
+(define_insn "const_umulsidi3_sp32"
[(set (match_operand:DI 0 "register_operand" "=r")
(mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
(match_operand:SI 2 "uns_small_int" "")))]
(if_then_else (eq_attr "isa" "sparclet")
(const_int 1) (const_int 2)))])
+;; Constant-operand form of the sp64 unsigned widening multiply.
+(define_insn "const_umulsidi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "uns_small_int" "")))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "umul\\t%1, %2, %0"
+ [(set_attr "length" "1")])
+
;; XXX
(define_insn "const_umulsidi3_v8plus"
[(set (match_operand:DI 0 "register_operand" "=h,r")
(lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
(zero_extend:DI (match_operand:SI 2 "uns_arith_operand" "")))
(const_int 32))))]
- "TARGET_HARD_MUL"
+ "TARGET_HARD_MUL && TARGET_ARCH32"
"
{
if (CONSTANT_P (operands[2]))
(lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
(zero_extend:DI (match_operand:SI 2 "register_operand" "r")))
(const_int 32))))]
- "TARGET_HARD_MUL32
- && ! TARGET_LIVE_G0"
+ "TARGET_HARD_MUL32"
"umul\\t%1, %2, %%g0\\n\\trd\\t%%y, %0"
[(set_attr "length" "2")])
(lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
(match_operand:SI 2 "uns_small_int" ""))
(const_int 32))))]
- "TARGET_HARD_MUL32
- && ! TARGET_LIVE_G0"
+ "TARGET_HARD_MUL32"
"umul\\t%1, %2, %%g0\\n\\trd\\t%%y, %0"
[(set_attr "length" "2")])
;; The v8 architecture specifies that there must be 3 instructions between
;; a y register write and a use of it for correct results.
-;; XXX SHEESH
-(define_insn "divsi3"
+;; 32-bit signed division.  On arch32 the parallel below matches
+;; divsi3_sp32, which computes the %y value (sign word of the dividend)
+;; itself.  On arch64 we compute the sign word explicitly with an
+;; arithmetic shift and pass it to divsi3_sp64 as operand 3.
+(define_expand "divsi3"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (div:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "input_operand" "rI,m")))
+ (clobber (match_scratch:SI 3 "=&r,&r"))])]
+ "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
+ "
+{
+ if (TARGET_ARCH64)
+ {
+ operands[3] = gen_reg_rtx (SImode);
+ emit_insn (gen_ashrsi3 (operands[3], operands[1], GEN_INT (31)));
+ emit_insn (gen_divsi3_sp64 (operands[0], operands[1], operands[2],
+ operands[3]));
+ DONE;
+ }
+}")
+
+;; 32-bit signed divide for arch32.  %y must hold the sign extension of
+;; the dividend; it is computed with sra and written with wr.  Pre-v9
+;; targets get three nops between the %y write and the sdiv, per the
+;; v8 requirement noted above (three instructions between a Y-register
+;; write and its use).  The second alternative loads operand 2 from
+;; memory into the scratch first.
+(define_insn "divsi3_sp32"
 [(set (match_operand:SI 0 "register_operand" "=r,r")
 (div:SI (match_operand:SI 1 "register_operand" "r,r")
 (match_operand:SI 2 "input_operand" "rI,m")))
 (clobber (match_scratch:SI 3 "=&r,&r"))]
- "(TARGET_V8
- || TARGET_DEPRECATED_V8_INSNS)
- && ! TARGET_LIVE_G0"
+ "(TARGET_V8 || TARGET_DEPRECATED_V8_INSNS)
+ && TARGET_ARCH32"
 "*
{
 if (which_alternative == 0)
- if (TARGET_V9)
- return \"sra\\t%1, 31, %3\\n\\twr\\t%%g0, %3, %%y\\n\\tsdiv\\t%1, %2, %0\";
- else
- return \"sra\\t%1, 31, %3\\n\\twr\\t%%g0, %3, %%y\\n\\tnop\\n\\tnop\\n\\tnop\\n\\tsdiv\\t%1, %2, %0\";
+ if (TARGET_V9)
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tsdiv\\t%1, %2, %0\";
+ else
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tnop\\n\\tnop\\n\\tnop\\n\\tsdiv\\t%1, %2, %0\";
 else
 if (TARGET_V9)
- return \"sra\\t%1, 31, %3\\n\\twr\\t%%g0, %3, %%y\\n\\tld\\t%2, %3\\n\\tsdiv\\t%1, %3, %0\";
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tld\\t%2, %3\\n\\tsdiv\\t%1, %3, %0\";
 else
- return \"sra\\t%1, 31, %3\\n\\twr\\t%%g0, %3, %%y\\n\\tld\\t%2, %3\\n\\tnop\\n\\tnop\\n\\tsdiv\\t%1, %3, %0\";
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tld\\t%2, %3\\n\\tnop\\n\\tnop\\n\\tsdiv\\t%1, %3, %0\";
 }"
 [(set (attr "length")
 (if_then_else (eq_attr "isa" "v9")
 (const_int 4) (const_int 7)))])
+;; arch64 signed divide.  The sign word of the dividend is supplied by
+;; the divsi3 expander in operand 3 (a "use", not written here), so the
+;; insn only has to write it into %y and divide.
+(define_insn "divsi3_sp64"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (div:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "input_operand" "rI")))
+ (use (match_operand:SI 3 "register_operand" "r"))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "wr\\t%%g0, %3, %%y\\n\\tsdiv\\t%1, %2, %0"
+ [(set_attr "length" "2")])
+
(define_insn "divdi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(div:DI (match_operand:DI 1 "register_operand" "r")
"TARGET_ARCH64"
"sdivx\\t%1, %2, %0")
-;; It is not known whether this will match.
-
-;; XXX I hope it doesn't fucking match...
;; Signed divide that also sets the integer condition codes, using
;; sdivcc.  Same %y setup (sign word via sra + wr) and the same pre-v9
;; nop padding as divsi3_sp32.
(define_insn "*cmp_sdiv_cc_set"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (div:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "arith_operand" "rI")))
- (set (reg:CC 100)
- (compare:CC (div:SI (match_dup 1) (match_dup 2))
+ [(set (reg:CC 100)
+ (compare:CC (div:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI"))
 (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (div:SI (match_dup 1) (match_dup 2)))
 (clobber (match_scratch:SI 3 "=&r"))]
- "(TARGET_V8
- || TARGET_DEPRECATED_V8_INSNS)
- && ! TARGET_LIVE_G0"
+ "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
 "*
{
 if (TARGET_V9)
- return \"sra\\t%1, 31, %3\\n\\twr\\t%%g0, %3, %%y\\n\\tsdivcc\\t%1, %2, %0\";
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tsdivcc\\t%1, %2, %0\";
 else
- return \"sra\\t%1, 31, %3\\n\\twr\\t%%g0, %3, %%y\\n\\tnop\\n\\tnop\\n\\tnop\\n\\tsdivcc\\t%1, %2, %0\";
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tnop\\n\\tnop\\n\\tnop\\n\\tsdivcc\\t%1, %2, %0\";
 }"
 [(set (attr "length")
 (if_then_else (eq_attr "isa" "v9")
 (const_int 3) (const_int 6)))])
;; XXX
-(define_insn "udivsi3"
+;; 32-bit unsigned division.  The empty expander body just emits the
+;; pattern, which then matches udivsi3_sp32 or udivsi3_sp64 below
+;; depending on the target.
+(define_expand "udivsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (udiv:SI (match_operand:SI 1 "reg_or_nonsymb_mem_operand" "")
+ (match_operand:SI 2 "input_operand" "")))]
+ "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
+ "")
+
+;; Unsigned 32-bit divide for arch32: zero %y with wr, pad with nops for
+;; the Y-register write-to-use delay, then udiv.  The memory
+;; alternatives load the memory operand into the (earlyclobber)
+;; destination register first.
+;; NOTE(review): in this hunk the third alternative appears to have lost
+;; its "case 2:" label (two consecutive returns under "case 1:");
+;; confirm against the full file.
+(define_insn "udivsi3_sp32"
 [(set (match_operand:SI 0 "register_operand" "=r,&r,&r")
 (udiv:SI (match_operand:SI 1 "reg_or_nonsymb_mem_operand" "r,r,m")
 (match_operand:SI 2 "input_operand" "rI,m,r")))]
 "(TARGET_V8
 || TARGET_DEPRECATED_V8_INSNS)
- && ! TARGET_LIVE_G0"
+ && TARGET_ARCH32"
 "*
{
 output_asm_insn (\"wr\\t%%g0, %%g0, %%y\", operands);
 switch (which_alternative)
 {
 default:
- if (TARGET_V9)
- return \"udiv\\t%1, %2, %0\";
 return \"nop\\n\\tnop\\n\\tnop\\n\\tudiv\\t%1, %2, %0\";
 case 1:
 return \"ld\\t%2, %0\\n\\tnop\\n\\tnop\\n\\tudiv\\t%1, %0, %0\";
 return \"ld\\t%1, %0\\n\\tnop\\n\\tnop\\n\\tudiv\\t%0, %2, %0\";
 }
}"
- [(set (attr "length")
- (if_then_else (and (eq_attr "isa" "v9")
- (eq_attr "alternative" "0"))
- (const_int 2) (const_int 5)))])
+ [(set_attr "length" "5")])
+
+;; arch64 unsigned divide: zero %y, then udiv — no nop padding here.
+(define_insn "udivsi3_sp64"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "reg_or_nonsymb_mem_operand" "r")
+ (match_operand:SI 2 "input_operand" "rI")))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "wr\\t%%g0, 0, %%y\\n\\tudiv\\t%1, %2, %0"
+ [(set_attr "length" "2")])
(define_insn "udivdi3"
[(set (match_operand:DI 0 "register_operand" "=r")
"TARGET_ARCH64"
"udivx\\t%1, %2, %0")
-;; It is not known whether this will match.
-
-;; XXX I hope it doesn't fucking match...
(define_insn "*cmp_udiv_cc_set"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (udiv:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "arith_operand" "rI")))
- (set (reg:CC 100)
- (compare:CC (udiv:SI (match_dup 1) (match_dup 2))
- (const_int 0)))]
- "(TARGET_V8
- || TARGET_DEPRECATED_V8_INSNS)
- && ! TARGET_LIVE_G0"
+ [(set (reg:CC 100)
+ (compare:CC (udiv:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (udiv:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_V8
+ || TARGET_DEPRECATED_V8_INSNS"
"*
{
if (TARGET_V9)
operands[5] = gen_lowpart (SImode, operands[0]);
operands[6] = gen_highpart (SImode, operands[2]);
operands[7] = gen_lowpart (SImode, operands[2]);
+#if HOST_BITS_PER_WIDE_INT == 32
if (GET_CODE (operands[3]) == CONST_INT)
{
if (INTVAL (operands[3]) < 0)
operands[8] = const0_rtx;
}
else
+#endif
operands[8] = gen_highpart (SImode, operands[3]);
operands[9] = gen_lowpart (SImode, operands[3]);
}")
[(match_operand:SI 0 "arith_operand" "%r")
(match_operand:SI 1 "arith_operand" "rI")])
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"%A2cc\\t%0, %1, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
(not:SI (xor:SI (match_operand:SI 0 "reg_or_0_operand" "%rJ")
(match_operand:SI 1 "arith_operand" "rI")))
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"xnorcc\\t%r0, %1, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
[(not:SI (match_operand:SI 0 "arith_operand" "rI"))
(match_operand:SI 1 "reg_or_0_operand" "rJ")])
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"%B2cc\\t%r1, %0, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
{
if (! TARGET_ARCH64)
{
- emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
- gen_rtx_SET (VOIDmode, operand0,
- gen_rtx_NEG (DImode, operand1)),
- gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_REG (CCmode, SPARC_ICC_REG)))));
+ emit_insn (gen_rtx_PARALLEL
+ (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, operand0,
+ gen_rtx_NEG (DImode, operand1)),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (CCmode,
+ SPARC_ICC_REG)))));
DONE;
}
}")
[(set (match_operand:DI 0 "register_operand" "=r")
(neg:DI (match_operand:DI 1 "register_operand" "r")))
(clobber (reg:CC 100))]
- "! TARGET_ARCH64
- && ! TARGET_LIVE_G0"
+ "TARGET_ARCH32"
"#"
[(set_attr "type" "unary")
(set_attr "length" "2")])
[(set (match_operand:DI 0 "register_operand" "")
(neg:DI (match_operand:DI 1 "register_operand" "")))
(clobber (reg:CC 100))]
- "! TARGET_ARCH64
- && ! TARGET_LIVE_G0
+ "TARGET_ARCH32
&& reload_completed"
[(parallel [(set (reg:CC_NOOV 100)
(compare:CC_NOOV (minus:SI (const_int 0) (match_dup 5))
[(set_attr "type" "unary")
(set_attr "length" "1")])
-(define_expand "negsi2"
- [(set (match_operand:SI 0 "register_operand" "")
- (neg:SI (match_operand:SI 1 "arith_operand" "")))]
- ""
- "
-{
- if (TARGET_LIVE_G0)
- {
- rtx zero_reg = gen_reg_rtx (SImode);
-
- emit_insn (gen_rtx_SET (VOIDmode, zero_reg, const0_rtx));
- emit_insn (gen_rtx_SET (VOIDmode, operands[0],
- gen_rtx_MINUS (SImode, zero_reg,
- operands[1])));
- DONE;
- }
-}")
-
-(define_insn "*negsi2_not_liveg0"
+;; 32-bit negate: subtract from %g0.
+(define_insn "negsi2"
 [(set (match_operand:SI 0 "register_operand" "=r")
- (neg:SI (match_operand:SI 1 "arith_operand" "rI")))]
- "! TARGET_LIVE_G0"
+ (neg:SI (match_operand:SI 1 "arith_operand" "rI")))]
+ ""
 "sub\\t%%g0, %1, %0"
 [(set_attr "type" "unary")
 (set_attr "length" "1")])
[(set (reg:CC_NOOV 100)
(compare:CC_NOOV (neg:SI (match_operand:SI 0 "arith_operand" "rI"))
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"subcc\\t%%g0, %0, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (match_dup 1)))]
- "! TARGET_LIVE_G0"
+ ""
"subcc\\t%%g0, %1, %0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
[(set_attr "type" "unary,fp")
(set_attr "length" "1")])
-(define_expand "one_cmplsi2"
- [(set (match_operand:SI 0 "register_operand" "")
- (not:SI (match_operand:SI 1 "arith_operand" "")))]
- ""
- "
-{
- if (TARGET_LIVE_G0
- && GET_CODE (operands[1]) == CONST_INT)
- {
- rtx zero_reg = gen_reg_rtx (SImode);
-
- emit_insn (gen_rtx_SET (VOIDmode, zero_reg, const0_rtx));
- emit_insn (gen_rtx_SET (VOIDmode,
- operands[0],
- gen_rtx_NOT (SImode,
- gen_rtx_XOR (SImode,
- zero_reg,
- operands[1]))));
- DONE;
- }
-}")
-
-(define_insn "*one_cmplsi2_not_liveg0"
+;; 32-bit one's complement: xnor against %g0 in the integer registers,
+;; or fnot1s in the FP registers (constraint "d").
+(define_insn "one_cmplsi2"
 [(set (match_operand:SI 0 "register_operand" "=r,d")
 (not:SI (match_operand:SI 1 "arith_operand" "rI,d")))]
- "! TARGET_LIVE_G0"
+ ""
 "@
 xnor\\t%%g0, %1, %0
 fnot1s\\t%1, %0"
 [(set_attr "type" "unary,fp")
 (set_attr "length" "1,1")])
-(define_insn "*one_cmplsi2_liveg0"
- [(set (match_operand:SI 0 "register_operand" "=r,d")
- (not:SI (match_operand:SI 1 "arith_operand" "r,d")))]
- "TARGET_LIVE_G0"
- "@
- xnor\\t%1, 0, %0
- fnot1s\\t%1, %0"
- [(set_attr "type" "unary,fp")
- (set_attr "length" "1,1")])
-
(define_insn "*cmp_cc_not"
[(set (reg:CC 100)
(compare:CC (not:SI (match_operand:SI 0 "arith_operand" "rI"))
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"xnorcc\\t%%g0, %0, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(not:SI (match_dup 1)))]
- "! TARGET_LIVE_G0"
+ ""
"xnorcc\\t%%g0, %1, %0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
\f
;; Floating point arithmetic instructions.
-(define_insn "addtf3"
+;; TFmode add.  With hardware quad support this matches the register
+;; insn below.  Otherwise (soft quad on arch64) each non-MEM operand is
+;; spilled to a stack temporary and _Qp_add is called with the three
+;; addresses; the result is copied back if the destination was not
+;; already memory.
+(define_expand "addtf3"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (plus:TF (match_operand:TF 1 "general_operand" "")
+ (match_operand:TF 2 "general_operand" "")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0, slot1, slot2;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
+ }
+ else
+ slot1 = operands[1];
+ if (GET_CODE (operands[2]) != MEM)
+ {
+ slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
+ }
+ else
+ slot2 = operands[2];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_add\"), 0,
+ VOIDmode, 3,
+ XEXP (slot0, 0), Pmode,
+ XEXP (slot1, 0), Pmode,
+ XEXP (slot2, 0), Pmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*addtf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(plus:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "subtf3"
+;; TFmode subtract; same stack-temp + libcall strategy as the addtf3
+;; expander, calling _Qp_sub in the soft-quad case.
+(define_expand "subtf3"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (minus:TF (match_operand:TF 1 "general_operand" "")
+ (match_operand:TF 2 "general_operand" "")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0, slot1, slot2;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
+ }
+ else
+ slot1 = operands[1];
+ if (GET_CODE (operands[2]) != MEM)
+ {
+ slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
+ }
+ else
+ slot2 = operands[2];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_sub\"), 0,
+ VOIDmode, 3,
+ XEXP (slot0, 0), Pmode,
+ XEXP (slot1, 0), Pmode,
+ XEXP (slot2, 0), Pmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+(define_insn "*subtf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(minus:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
[(set_attr "type" "fp")
(set_attr "length" "1")])
-(define_insn "subsf3"
- [(set (match_operand:SF 0 "register_operand" "=f")
- (minus:SF (match_operand:SF 1 "register_operand" "f")
- (match_operand:SF 2 "register_operand" "f")))]
- "TARGET_FPU"
- "fsubs\\t%1, %2, %0"
- [(set_attr "type" "fp")
- (set_attr "length" "1")])
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (minus:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fsubs\\t%1, %2, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
+
+;; TFmode multiply; same stack-temp + libcall strategy as the addtf3
+;; expander, calling _Qp_mul in the soft-quad case.
+(define_expand "multf3"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (mult:TF (match_operand:TF 1 "general_operand" "")
+ (match_operand:TF 2 "general_operand" "")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0, slot1, slot2;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
+ }
+ else
+ slot1 = operands[1];
+ if (GET_CODE (operands[2]) != MEM)
+ {
+ slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
+ }
+ else
+ slot2 = operands[2];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_mul\"), 0,
+ VOIDmode, 3,
+ XEXP (slot0, 0), Pmode,
+ XEXP (slot1, 0), Pmode,
+ XEXP (slot2, 0), Pmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
-(define_insn "multf3"
+(define_insn "*multf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(mult:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
[(set_attr "type" "fpmul")
(set_attr "length" "1")])
+;; TFmode divide; same stack-temp + libcall strategy as the addtf3
+;; expander, calling _Qp_div in the soft-quad case.
+(define_expand "divtf3"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (div:TF (match_operand:TF 1 "general_operand" "")
+ (match_operand:TF 2 "general_operand" "")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0, slot1, slot2;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
+ }
+ else
+ slot1 = operands[1];
+ if (GET_CODE (operands[2]) != MEM)
+ {
+ slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
+ }
+ else
+ slot2 = operands[2];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_div\"), 0,
+ VOIDmode, 3,
+ XEXP (slot0, 0), Pmode,
+ XEXP (slot1, 0), Pmode,
+ XEXP (slot2, 0), Pmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
;; We don't have timing information for the quad-precision divide.
-(define_insn "divtf3"
+(define_insn "*divtf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
(div:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
[(set_attr "type" "fpmove")
(set_attr "length" "1")])
-(define_insn "abstf2"
+(define_expand "abstf2"
[(set (match_operand:TF 0 "register_operand" "")
(abs:TF (match_operand:TF 1 "register_operand" "")))]
"TARGET_FPU"
operands[3] = gen_rtx_raw_REG (SFmode, REGNO (operands[1]));
operands[4] = gen_rtx_raw_REG (SFmode, REGNO (operands[0]) + 1);
operands[5] = gen_rtx_raw_REG (SFmode, REGNO (operands[1]) + 1);
- operands[6] = gen_rtx_raw_REG (SFmode, REGNO (operands[0]) + 2);
- operands[7] = gen_rtx_raw_REG (SFmode, REGNO (operands[1]) + 2);")
+ operands[6] = gen_rtx_raw_REG (DFmode, REGNO (operands[0]) + 2);
+ operands[7] = gen_rtx_raw_REG (DFmode, REGNO (operands[1]) + 2);")
+
+;; Quad absolute value on v9 with hardware quad: fabsq in general, but
+;; the in-place alternative (constraint "0") uses fabsd on the operand.
+(define_insn "*abstf2_hq_v9"
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (abs:TF (match_operand:TF 1 "register_operand" "0,e")))]
+ "TARGET_FPU && TARGET_V9 && TARGET_HARD_QUAD"
+ "@
+ fabsd\\t%0, %0
+ fabsq\\t%1, %0"
+ [(set_attr "type" "fpmove")
+ (set_attr "length" "1")])
(define_insn "*abstf2_v9"
[(set (match_operand:TF 0 "register_operand" "=e,e")
(abs:TF (match_operand:TF 1 "register_operand" "0,e")))]
- ; We don't use quad float insns here so we don't need TARGET_HARD_QUAD.
- "TARGET_FPU && TARGET_V9"
+ "TARGET_FPU && TARGET_V9 && !TARGET_HARD_QUAD"
"@
fabsd\\t%0, %0
#"
[(set (match_operand:DF 0 "register_operand" "=e")
(abs:DF (match_operand:DF 1 "register_operand" "e")))]
"TARGET_FPU && TARGET_V9"
- "fabsd\\t%0, %0"
+ "fabsd\\t%1, %0"
[(set_attr "type" "fpmove")
(set_attr "length" "1")])
[(set_attr "type" "fpmove")
(set_attr "length" "1")])
-(define_insn "sqrttf2"
+;; TFmode square root; same stack-temp + libcall strategy as the addtf3
+;; expander but with two operands, calling _Qp_sqrt in the soft-quad
+;; case.
+(define_expand "sqrttf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (sqrt:TF (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "
+{
+ if (! TARGET_HARD_QUAD)
+ {
+ rtx slot0, slot1;
+
+ if (GET_CODE (operands[0]) != MEM)
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ else
+ slot0 = operands[0];
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
+ }
+ else
+ slot1 = operands[1];
+
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_sqrt\"), 0,
+ VOIDmode, 2,
+ XEXP (slot0, 0), Pmode,
+ XEXP (slot1, 0), Pmode);
+
+ if (GET_CODE (operands[0]) != MEM)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
+ DONE;
+ }
+}")
+
+;; Hardware-quad square root.
+(define_insn "*sqrttf2_hq"
 [(set (match_operand:TF 0 "register_operand" "=e")
 (sqrt:TF (match_operand:TF 1 "register_operand" "e")))]
 "TARGET_FPU && TARGET_HARD_QUAD"
 "fsqrtq\\t%1, %0"
- [(set_attr "type" "fpsqrt")
+ [(set_attr "type" "fpsqrtd")
 (set_attr "length" "1")])
(define_insn "sqrtdf2"
(sqrt:DF (match_operand:DF 1 "register_operand" "e")))]
"TARGET_FPU"
"fsqrtd\\t%1, %0"
- [(set_attr "type" "fpsqrt")
+ [(set_attr "type" "fpsqrtd")
(set_attr "length" "1")])
(define_insn "sqrtsf2"
(sqrt:SF (match_operand:SF 1 "register_operand" "f")))]
"TARGET_FPU"
"fsqrts\\t%1, %0"
- [(set_attr "type" "fpsqrt")
+ [(set_attr "type" "fpsqrts")
(set_attr "length" "1")])
\f
;;- arithmetic shift instructions
[(set_attr "type" "shift")
(set_attr "length" "1")])
+;; We special-case multiplication by two: an add can execute in either
+;; ALU, while a shift can only issue to IEU0 on UltraSPARC.
+;; SI shift left by one implemented as an add of the operand to itself.
+(define_insn "*ashlsi3_const1"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashift:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 1)))]
+ ""
+ "add\\t%1, %1, %0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "1")])
+
(define_expand "ashldi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(ashift:DI (match_operand:DI 1 "register_operand" "r")
}
}")
-(define_insn ""
+;; We special-case multiplication by two: an add can execute in either
+;; ALU, while a shift can only issue to IEU0 on UltraSPARC.
+;; DI shift left by one implemented as an add of the operand to itself
+;; (arch64 only, where add operates on the full 64 bits).
+(define_insn "*ashldi3_const1"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 1)))]
+ "TARGET_ARCH64"
+ "add\\t%1, %1, %0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "1")])
+
+(define_insn "*ashldi3_sp64"
[(set (match_operand:DI 0 "register_operand" "=r")
(ashift:DI (match_operand:DI 1 "register_operand" "r")
(match_operand:SI 2 "arith_operand" "rI")))]
"*
{
if (GET_CODE (operands[2]) == REG && REGNO (operands[2]) == REGNO (operands[0]))
- return \"mov 1,%L0\;sllx %L0,%2,%L0\;sub %L0,1,%L0\;srlx %L0,32,%H0\";
- return \"mov 1,%H0\;sllx %H0,%2,%L0\;sub %L0,1,%L0\;srlx %L0,32,%H0\";
+ return \"mov\\t1, %L0\;sllx\\t%L0, %2, %L0\;sub\\t%L0, 1, %L0\;srlx\\t%L0, 32, %H0\";
+ return \"mov\\t1, %H0\;sllx\\t%H0, %2, %L0\;sub\\t%L0, 1, %L0\;srlx\\t%L0, 32, %H0\";
}"
[(set_attr "length" "4")])
(compare:CC_NOOV (ashift:SI (match_operand:SI 0 "register_operand" "r")
(const_int 1))
(const_int 0)))]
- "! TARGET_LIVE_G0"
+ ""
"addcc\\t%0, %0, %%g0"
[(set_attr "type" "compare")
(set_attr "length" "1")])
[(set_attr "type" "shift")
(set_attr "length" "1")])
+;; On arch64 a single sra implements both the SI arithmetic shift and
+;; the sign extension to DImode.
+(define_insn "*ashrsi3_extend"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (ashiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "r"))))]
+ "TARGET_ARCH64"
+ "sra\\t%1, %2, %0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+;; This handles the same case as above, but with a constant shift count
+;; instead of a register.  The combiner has already folded it into the
+;; (ashiftrt (ashift (subreg ...) 32) n) form, so we emit sra by n - 32.
+(define_insn "*ashrsi3_extend2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashiftrt:DI (ashift:DI (subreg:DI (match_operand:SI 1 "register_operand" "r") 0)
+ (const_int 32))
+ (match_operand:SI 2 "small_int_or_double" "n")))]
+ "TARGET_ARCH64
+ && ((GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) >= 32 && INTVAL (operands[2]) < 64)
+ || (GET_CODE (operands[2]) == CONST_DOUBLE
+ && !CONST_DOUBLE_HIGH (operands[2])
+ && CONST_DOUBLE_LOW (operands[2]) >= 32
+ && CONST_DOUBLE_LOW (operands[2]) < 64))"
+ "*
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) - 32);
+
+ return \"sra\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
(define_expand "ashrdi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
[(set_attr "type" "shift")
(set_attr "length" "1")])
+;; This handles the case of
+;; (zero_extend:DI (lshiftrt:SI (match_operand:SI) (match_operand:SI))),
+;; which the combiner has already "simplified" into the AND form below.
+;; A single srl implements both the SI logical shift and the zero
+;; extension here; the AND mask must be exactly 0xffffffff.
+(define_insn "*lshrsi3_extend"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (subreg:DI (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "r")) 0)
+ (match_operand 3 "" "")))]
+ "TARGET_ARCH64
+ && ((GET_CODE (operands[3]) == CONST_DOUBLE
+ && CONST_DOUBLE_HIGH (operands[3]) == 0
+ && CONST_DOUBLE_LOW (operands[3]) == 0xffffffff)
+#if HOST_BITS_PER_WIDE_INT >= 64
+ || (GET_CODE (operands[3]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[3]) == 0xffffffff)
+#endif
+ )"
+ "srl\\t%1, %2, %0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+;; This handles the case of
+;; (lshiftrt:DI (zero_extend:DI (match_operand:SI)) (const_int 0..31)),
+;; which the combiner has already "simplified" into a zero_extract;
+;; we recover the shift count as 32 - length and emit srl.
+(define_insn "*lshrsi3_extend2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extract:DI (subreg:DI (match_operand:SI 1 "register_operand" "r") 0)
+ (match_operand 2 "small_int_or_double" "n")
+ (const_int 32)))]
+ "TARGET_ARCH64
+ && ((GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) < 32)
+ || (GET_CODE (operands[2]) == CONST_DOUBLE
+ && CONST_DOUBLE_HIGH (operands[2]) == 0
+ && (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (operands[2]) < 32))"
+ "*
+{
+ operands[2] = GEN_INT (32 - INTVAL (operands[2]));
+
+ return \"srl\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
(define_expand "lshrdi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
"TARGET_V8PLUS"
"*return sparc_v8plus_shift (operands, insn, \"srlx\");"
[(set_attr "length" "5,5,6")])
+
+;; The next four patterns match SI shifts of the subreg of a 64-bit
+;; shift result, as produced by the combiner; each folds the pair into
+;; a single srax/srlx with an adjusted shift count.
+
+;; Arithmetic SI shift of the low word of (x >> 32): one srax by n + 32.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI (subreg:SI (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 32)) 0)
+ (match_operand:SI 2 "small_int_or_double" "n")))]
+ "TARGET_ARCH64
+ && ((GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) < 32)
+ || (GET_CODE (operands[2]) == CONST_DOUBLE
+ && !CONST_DOUBLE_HIGH (operands[2])
+ && (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (operands[2]) < 32))"
+ "*
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + 32);
+
+ return \"srax\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+;; Logical SI shift of the low word of (x >>a 32): one srlx by n + 32.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI (subreg:SI (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 32)) 0)
+ (match_operand:SI 2 "small_int_or_double" "n")))]
+ "TARGET_ARCH64
+ && ((GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) < 32)
+ || (GET_CODE (operands[2]) == CONST_DOUBLE
+ && !CONST_DOUBLE_HIGH (operands[2])
+ && (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (operands[2]) < 32))"
+ "*
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + 32);
+
+ return \"srlx\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+;; Two stacked arithmetic shifts (outer count >= 32): one srax by the
+;; sum of the counts, which must stay below 64.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI (subreg:SI (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "small_int_or_double" "n")) 0)
+ (match_operand:SI 3 "small_int_or_double" "n")))]
+ "TARGET_ARCH64
+ && GET_CODE (operands[2]) == CONST_INT && GET_CODE (operands[3]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) >= 32
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[3]) < 32
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[2]) + INTVAL (operands[3])) < 64"
+ "*
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + INTVAL (operands[3]));
+
+ return \"srax\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+;; Two stacked logical shifts (outer count >= 32): one srlx by the sum
+;; of the counts, which must stay below 64.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI (subreg:SI (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "small_int_or_double" "n")) 0)
+ (match_operand:SI 3 "small_int_or_double" "n")))]
+ "TARGET_ARCH64
+ && GET_CODE (operands[2]) == CONST_INT && GET_CODE (operands[3]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) >= 32
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[3]) < 32
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[2]) + INTVAL (operands[3])) < 64"
+ "*
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + INTVAL (operands[3]));
+
+ return \"srlx\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
\f
;; Unconditional and other jump instructions
;; On the Sparc, by setting the annul bit on an unconditional branch, the
instead. */
if (! TARGET_V9 && flag_delayed_branch
- && (insn_addresses[INSN_UID (operands[0])]
- == insn_addresses[INSN_UID (insn)]))
+ && (INSN_ADDRESSES (INSN_UID (operands[0]))
+ == INSN_ADDRESSES (INSN_UID (insn))))
return \"b\\t%l0%#\";
else
return TARGET_V9 ? \"ba,pt%*\\t%%xcc, %l0%(\" : \"b%*\\t%l0%(\";
""
"
{
- if (GET_MODE (operands[0]) != Pmode)
+ if (GET_MODE (operands[0]) != CASE_VECTOR_MODE)
abort ();
/* In pic mode, our address differences are against the base of the
the two address loads. */
if (flag_pic)
{
- rtx tmp;
+ rtx tmp, tmp2;
tmp = gen_rtx_LABEL_REF (Pmode, operands[1]);
- tmp = gen_rtx_PLUS (Pmode, operands[0], tmp);
+ tmp2 = operands[0];
+ if (CASE_VECTOR_MODE != Pmode)
+ tmp2 = gen_rtx_SIGN_EXTEND (Pmode, tmp2);
+ tmp = gen_rtx_PLUS (Pmode, tmp2, tmp);
operands[0] = memory_address (Pmode, tmp);
}
}")
if (! TARGET_ARCH64 && INTVAL (operands[3]) != 0)
emit_jump_insn
- (gen_rtx_PARALLEL (VOIDmode,
- gen_rtvec (3,
- gen_rtx_SET (VOIDmode, pc_rtx,
- XEXP (operands[0], 0)),
- GEN_INT (INTVAL (operands[3]) & 0xfff),
- gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_REG (Pmode, 15)))));
+ (gen_rtx_PARALLEL
+ (VOIDmode,
+ gen_rtvec (3,
+ gen_rtx_SET (VOIDmode, pc_rtx, XEXP (operands[0], 0)),
+ operands[3],
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 15)))));
else
emit_jump_insn
- (gen_rtx_PARALLEL (VOIDmode,
- gen_rtvec (2,
- gen_rtx_SET (VOIDmode, pc_rtx,
- XEXP (operands[0], 0)),
- gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_REG (Pmode, 15)))));
+ (gen_rtx_PARALLEL
+ (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, pc_rtx, XEXP (operands[0], 0)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 15)))));
goto finish_call;
}
if (! TARGET_ARCH64 && INTVAL (operands[3]) != 0)
emit_call_insn
- (gen_rtx_PARALLEL (VOIDmode,
- gen_rtvec (3, gen_rtx_CALL (VOIDmode, fn_rtx, nregs_rtx),
- GEN_INT (INTVAL (operands[3]) & 0xfff),
- gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_REG (Pmode, 15)))));
+ (gen_rtx_PARALLEL
+ (VOIDmode,
+ gen_rtvec (3, gen_rtx_CALL (VOIDmode, fn_rtx, nregs_rtx),
+ operands[3],
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 15)))));
else
emit_call_insn
- (gen_rtx_PARALLEL (VOIDmode,
- gen_rtvec (2, gen_rtx_CALL (VOIDmode, fn_rtx, nregs_rtx),
- gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_REG (Pmode, 15)))));
+ (gen_rtx_PARALLEL
+ (VOIDmode,
+ gen_rtvec (2, gen_rtx_CALL (VOIDmode, fn_rtx, nregs_rtx),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 15)))));
finish_call:
#if 0
vec = gen_rtvec (2,
gen_rtx_SET (VOIDmode, operands[0],
- gen_rtx_CALL (VOIDmode, fn_rtx, nregs_rtx)),
+ gen_rtx_CALL (VOIDmode, fn_rtx, nregs_rtx)),
gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 15)));
emit_call_insn (gen_rtx_PARALLEL (VOIDmode, vec));
/* Pass constm1 to indicate that it may expect a structure value, but
we don't know what size it is. */
- emit_call_insn (gen_call (operands[0], const0_rtx, NULL, constm1_rtx));
+ emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, constm1_rtx));
for (i = 0; i < XVECLEN (operands[2], 0); i++)
{
DONE;
}")
+;;- tail calls
+
+;; Named pattern used by the middle end to emit a sibling (tail) call.
+;; The call is wrapped in a parallel with (return) so that the
+;; sibcall insn patterns below can recognize it.
+(define_expand "sibcall"
+ [(parallel [(call (match_operand 0 "call_operand" "") (const_int 0))
+ (return)])]
+ ""
+ "")
+
+;; Direct (symbolic-address) sibling call, 32-bit pointer targets.
+;; The assembly sequence is produced by output_sibcall.
+(define_insn "*sibcall_symbolic_sp32"
+ [(call (mem:SI (match_operand:SI 0 "symbolic_operand" "s"))
+ (match_operand 1 "" ""))
+ (return)]
+ "! TARGET_PTR64"
+ "* return output_sibcall(insn, operands[0]);"
+ [(set_attr "type" "sibcall")])
+
+;; Direct (symbolic-address) sibling call, 64-bit pointer targets
+;; (the address operand is DImode).  Output via output_sibcall.
+(define_insn "*sibcall_symbolic_sp64"
+ [(call (mem:SI (match_operand:DI 0 "symbolic_operand" "s"))
+ (match_operand 1 "" ""))
+ (return)]
+ "TARGET_PTR64"
+ "* return output_sibcall(insn, operands[0]);"
+ [(set_attr "type" "sibcall")])
+
+;; Named pattern for a value-returning sibling (tail) call; operand 0
+;; receives the call's result.
+(define_expand "sibcall_value"
+ [(parallel [(set (match_operand 0 "register_operand" "=rf")
+ (call (match_operand:SI 1 "" "") (const_int 0)))
+ (return)])]
+ ""
+ "")
+
+;; Value-returning direct sibling call, 32-bit pointer targets.
+(define_insn "*sibcall_value_symbolic_sp32"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand:SI 1 "symbolic_operand" "s"))
+ (match_operand 2 "" "")))
+ (return)]
+ "! TARGET_PTR64"
+ "* return output_sibcall(insn, operands[1]);"
+ [(set_attr "type" "sibcall")])
+
+;; Value-returning direct sibling call, 64-bit pointer targets.
+(define_insn "*sibcall_value_symbolic_sp64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "symbolic_operand" "s"))
+ (match_operand 2 "" "")))
+ (return)]
+ "TARGET_PTR64"
+ "* return output_sibcall(insn, operands[1]);"
+ [(set_attr "type" "sibcall")])
+
+;; Sibcall epilogue expands to nothing: the epilogue work for a tail
+;; call is emitted together with the call itself (the sibcall insn
+;; templates call output_sibcall), so there is nothing to do here.
+;; NOTE(review): that division of labor is inferred from the templates
+;; above -- confirm against the back end's output_sibcall.
+(define_expand "sibcall_epilogue"
+ [(const_int 0)]
+ ""
+ "DONE;")
+
;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
;; all of memory. This blocks insns from being moved across this point.
(define_insn "blockage"
[(unspec_volatile [(const_int 0)] 0)]
""
- "")
+ ""
+ ;; The output template is empty -- no code is emitted -- so give the
+ ;; insn an explicit length of 0.
+ [(set_attr "length" "0")])
;; Prepare to return any type including a structure value.
if (! TARGET_ARCH64)
{
- rtx rtnreg = gen_rtx_REG (SImode, (leaf_function ? 15 : 31));
+ rtx rtnreg = gen_rtx_REG (SImode, (current_function_uses_only_leaf_regs
+ ? 15 : 31));
rtx value = gen_reg_rtx (SImode);
/* Fetch the instruction where we will return to and see if it's an unimp
[(unspec:SI [(match_operand:SI 0 "register_operand" "r")
(match_operand:SI 1 "register_operand" "r")] 1)]
"! TARGET_ARCH64"
- "cmp %1,0\;be,a .+8\;add %0,4,%0"
+ "cmp\\t%1, 0\;be,a\\t.+8\;add\\t%0, 4, %0"
[(set_attr "type" "multi")])
\f
(define_insn "return"
""
"
{
+#if 0
rtx chain = operands[0];
+#endif
rtx fp = operands[1];
rtx stack = operands[2];
rtx lab = operands[3];
really needed. */
/*emit_insn (gen_rtx_USE (VOIDmode, frame_pointer_rtx));*/
emit_insn (gen_rtx_USE (VOIDmode, stack_pointer_rtx));
+
+#if 0
/* Return, restoring reg window and jumping to goto handler. */
if (TARGET_V9 && GET_CODE (chain) == CONST_INT
&& ! (INTVAL (chain) & ~(HOST_WIDE_INT)0xffffffff))
}
/* Put in the static chain register the nonlocal label address. */
emit_move_insn (static_chain_rtx, chain);
+#endif
+
emit_insn (gen_rtx_USE (VOIDmode, static_chain_rtx));
emit_insn (gen_goto_handler_and_restore (labreg));
emit_barrier ();
(set_attr "length" "1")])
+;; Jump to a nonlocal-goto handler and restore the caller's register
+;; window ("jmp ...; restore").  The operand is left modeless and the
+;; insn condition checks for Pmode instead, so this single pattern
+;; serves both 32-bit and 64-bit targets.
(define_insn "goto_handler_and_restore"
- [(unspec_volatile [(match_operand:SI 0 "register_operand" "=r")] 2)]
- ""
+ [(unspec_volatile [(match_operand 0 "register_operand" "=r")] 2)]
+ "GET_MODE (operands[0]) == Pmode"
"jmp\\t%0+0\\n\\trestore"
[(set_attr "type" "misc")
(set_attr "length" "2")])
-(define_insn "goto_handler_and_restore_v9"
- [(unspec_volatile [(match_operand:SI 0 "register_operand" "=r,r")
- (match_operand:SI 1 "register_operand" "=r,r")
- (match_operand:SI 2 "const_int_operand" "I,n")] 3)]
- "TARGET_V9 && ! TARGET_ARCH64"
- "@
- return\\t%0+0\\n\\tmov\\t%2, %Y1
- sethi\\t%%hi(%2), %1\\n\\treturn\\t%0+0\\n\\tor\\t%Y1, %%lo(%2), %Y1"
- [(set_attr "type" "misc")
- (set_attr "length" "2,3")])
-
-(define_insn "*goto_handler_and_restore_v9_sp64"
- [(unspec_volatile [(match_operand:DI 0 "register_operand" "=r,r")
- (match_operand:DI 1 "register_operand" "=r,r")
- (match_operand:SI 2 "const_int_operand" "I,n")] 3)]
- "TARGET_V9 && TARGET_ARCH64"
- "@
- return\\t%0+0\\n\\tmov\\t%2, %Y1
- sethi\\t%%hi(%2), %1\\n\\treturn\\t%0+0\\n\\tor\\t%Y1, %%lo(%2), %Y1"
- [(set_attr "type" "misc")
- (set_attr "length" "2,3")])
+;;(define_insn "goto_handler_and_restore_v9"
+;; [(unspec_volatile [(match_operand:SI 0 "register_operand" "=r,r")
+;; (match_operand:SI 1 "register_operand" "=r,r")
+;; (match_operand:SI 2 "const_int_operand" "I,n")] 3)]
+;; "TARGET_V9 && ! TARGET_ARCH64"
+;; "@
+;; return\\t%0+0\\n\\tmov\\t%2, %Y1
+;; sethi\\t%%hi(%2), %1\\n\\treturn\\t%0+0\\n\\tor\\t%Y1, %%lo(%2), %Y1"
+;; [(set_attr "type" "misc")
+;; (set_attr "length" "2,3")])
+;;
+;;(define_insn "*goto_handler_and_restore_v9_sp64"
+;; [(unspec_volatile [(match_operand:DI 0 "register_operand" "=r,r")
+;; (match_operand:DI 1 "register_operand" "=r,r")
+;; (match_operand:SI 2 "const_int_operand" "I,n")] 3)]
+;; "TARGET_V9 && TARGET_ARCH64"
+;; "@
+;; return\\t%0+0\\n\\tmov\\t%2, %Y1
+;; sethi\\t%%hi(%2), %1\\n\\treturn\\t%0+0\\n\\tor\\t%Y1, %%lo(%2), %Y1"
+;; [(set_attr "type" "misc")
+;; (set_attr "length" "2,3")])
;; Pattern for use after a setjmp to store FP and the return register
;; into the stack area.
;; Special pattern for the FLUSH instruction.
+; We do SImode and DImode versions of this to quiet down genrecog's complaints
+; of the define_insn otherwise missing a mode. We make "flush", aka
+; gen_flush, the default one since sparc_initialize_trampoline uses
+; it on SImode mem values.
+
(define_insn "flush"
- [(unspec_volatile [(match_operand 0 "memory_operand" "m")] 4)]
+ [(unspec_volatile [(match_operand:SI 0 "memory_operand" "m")] 3)]
""
"* return TARGET_V9 ? \"flush\\t%f0\" : \"iflush\\t%f0\";"
- [(set_attr "type" "misc")
- (set_attr "length" "1")])
+ [(set_attr "type" "misc")])
+
+;; DImode variant of "flush" (see the comment above); presumably used
+;; when flushing 8-byte slots on 64-bit targets -- TODO confirm against
+;; the trampoline-initialization code in the back end.
+(define_insn "flushdi"
+ [(unspec_volatile [(match_operand:DI 0 "memory_operand" "m")] 3)]
+ ""
+ "* return TARGET_V9 ? \"flush\\t%f0\" : \"iflush\\t%f0\";"
+ [(set_attr "type" "misc")])
+
\f
;; find first set.
"TARGET_SPARCLITE || TARGET_SPARCLET"
"*
{
- if (TARGET_LIVE_G0)
- output_asm_insn (\"and %%g0,0,%%g0\", operands);
- return \"sub %%g0,%1,%0\;and %0,%1,%0\;scan %0,0,%0\;mov 32,%2\;sub %2,%0,%0\;sra %0,31,%2\;and %2,31,%2\;add %2,%0,%0\";
+ return \"sub\\t%%g0, %1, %0\;and\\t%0, %1, %0\;scan\\t%0, 0, %0\;mov\\t32, %2\;sub\\t%2, %0, %0\;sra\\t%0, 31, %2\;and\\t%2, 31, %2\;add\\t%2, %0, %0\";
}"
[(set_attr "type" "multi")
(set_attr "length" "8")])
; (ffs:DI (match_operand:DI 1 "register_operand" "r")))
; (clobber (match_scratch:DI 2 "=&r"))]
; "TARGET_ARCH64"
-; "neg %1,%2\;xnor %1,%2,%2\;popc %2,%0\;movzr %1,0,%0"
+; "neg\\t%1, %2\;xnor\\t%1, %2, %2\;popc\\t%2, %0\;movzr\\t%1, 0, %0"
; [(set_attr "type" "multi")
; (set_attr "length" "4")])
+
\f
;; Peepholes go at the end.
[(set (match_operand:QI 0 "restore_operand" "")
(match_operand:QI 1 "arith_operand" "rI"))
(return)]
- "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "! TARGET_EPILOGUE"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
[(set (match_operand:HI 0 "restore_operand" "")
(match_operand:HI 1 "arith_operand" "rI"))
(return)]
- "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "! TARGET_EPILOGUE"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
[(set (match_operand:SI 0 "restore_operand" "")
(match_operand:SI 1 "arith_operand" "rI"))
(return)]
- "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "! TARGET_EPILOGUE"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
;; ! TARGET_FPU because we move complex types around by parts using
;; SF mode SUBREGs.
(define_insn "*return_sf_no_fpu"
- [(set (match_operand:SF 0 "restore_operand" "r")
+ [(set (match_operand:SF 0 "restore_operand" "=r")
(match_operand:SF 1 "register_operand" "r"))
(return)]
- "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "! TARGET_EPILOGUE"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
}"
[(set_attr "type" "multi")])
+;; Return a DFmode value held in integer registers (the no-FPU case).
+;; Only on ARCH64, where the whole DF fits in one 64-bit register and
+;; can be moved by the return/restore delay-slot instruction.
+(define_insn "*return_df_no_fpu"
+ [(set (match_operand:DF 0 "restore_operand" "=r")
+ (match_operand:DF 1 "register_operand" "r"))
+ (return)]
+ "! TARGET_EPILOGUE && TARGET_ARCH64"
+ "*
+{
+ if (IN_OR_GLOBAL_P (operands[1]))
+ return \"return\\t%%i7+8\\n\\tmov\\t%Y1, %Y0\";
+ else
+ return \"ret\;restore %%g0, %1, %Y0\";
+}"
+ [(set_attr "type" "multi")])
+
(define_insn "*return_addsi"
[(set (match_operand:SI 0 "restore_operand" "")
(plus:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "arith_operand" "rI")))
(return)]
- "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "! TARGET_EPILOGUE"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
}"
[(set_attr "type" "multi")])
+;; Return with a lo_sum computed into the result register in the
+;; return/restore delay slot.  NOTE(review): excluded for
+;; -mcmodel=medmid, presumably because %lo() relocation semantics
+;; differ in that code model -- confirm.
+(define_insn "*return_losum_si"
+ [(set (match_operand:SI 0 "restore_operand" "")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "in")))
+ (return)]
+ "! TARGET_EPILOGUE && ! TARGET_CM_MEDMID"
+ "*
+{
+ if (! TARGET_ARCH64 && current_function_returns_struct)
+ return \"jmp\\t%%i7+12\\n\\trestore %r1, %%lo(%a2), %Y0\";
+ /* If the operands are global or in registers, we can use the V9
+ return instruction.  */
+ else if (TARGET_V9 && IN_OR_GLOBAL_P (operands[1]))
+ return \"return\\t%%i7+8\\n\\tor\\t%Y1, %%lo(%a2), %Y0\";
+ else
+ return \"ret\;restore %r1, %%lo(%a2), %Y0\";
+}"
+ [(set_attr "type" "multi")])
+
(define_insn "*return_di"
[(set (match_operand:DI 0 "restore_operand" "")
(match_operand:DI 1 "arith_double_operand" "rHI"))
"ret\;restore %r1, %2, %Y0"
[(set_attr "type" "multi")])
+;; DImode counterpart of the lo_sum return pattern above; ARCH64 only,
+;; and likewise excluded for -mcmodel=medmid.
+(define_insn "*return_losum_di"
+ [(set (match_operand:DI 0 "restore_operand" "")
+ (lo_sum:DI (match_operand:DI 1 "arith_operand" "%r")
+ (match_operand:DI 2 "immediate_operand" "in")))
+ (return)]
+ "TARGET_ARCH64 && ! TARGET_EPILOGUE && ! TARGET_CM_MEDMID"
+ "ret\;restore %r1, %%lo(%a2), %Y0"
+ [(set_attr "type" "multi")])
+
;; The following pattern is only generated by delayed-branch scheduling,
;; when the insn winds up in the epilogue.
(define_insn "*return_sf"
&& in_same_eh_region (insn, operands[2])
&& in_same_eh_region (insn, ins1)"
"call\\t%a0, %1\\n\\tadd\\t%%o7, (%l2-.-4), %%o7")
+\f
+;; Named prologue pattern.  Only enabled when generating PIC and the
+;; function actually uses the PIC offset table; the only work done here
+;; is loading the PIC register.
+(define_expand "prologue"
+ [(const_int 1)]
+ "flag_pic && current_function_uses_pic_offset_table"
+ "
+{
+ load_pic_register ();
+ DONE;
+}")
-;; After a nonlocal goto, we need to restore the PIC register, but only
-;; if we need it. So do nothing much here, but we'll check for this in
-;; finalize_pic.
+;; We need to reload %l7 (the PIC register) for -mflat -fpic; otherwise
+;; %l7 is preserved simply by the function's register window being
+;; reloaded on return.
+(define_expand "exception_receiver"
+ [(const_int 0)]
+ "TARGET_FLAT && flag_pic"
+ "
+{
+ load_pic_register ();
+ DONE;
+}")
-;; Make sure this unspec_volatile number agrees with finalize_pic.
-(define_insn "nonlocal_goto_receiver"
- [(unspec_volatile [(const_int 0)] 5)]
- "flag_pic"
- "")
+;; Likewise: reload the PIC register (%l7) at a builtin-setjmp receiver
+;; under -mflat -fpic.
+(define_expand "builtin_setjmp_receiver"
+ [(label_ref (match_operand 0 "" ""))]
+ "TARGET_FLAT && flag_pic"
+ "
+{
+ load_pic_register ();
+ DONE;
+}")
\f
(define_insn "trap"
[(trap_if (const_int 1) (const_int 5))]
"t%C0\\t%%xcc, %1"
[(set_attr "type" "misc")
(set_attr "length" "1")])
-