;;- Machine description for the SPARC chip for GNU C compiler
-;; Copyright (C) 1987, 88, 89, 92-95, 1996 Free Software Foundation, Inc.
+;; Copyright (C) 1987, 88, 89, 92-98, 1999 Free Software Foundation, Inc.
;; Contributed by Michael Tiemann (tiemann@cygnus.com)
;; 64 bit SPARC V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
;; at Cygnus Support.
;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+;; Uses of UNSPEC and UNSPEC_VOLATILE in this file:
+;;
+;; UNSPEC: 0 movsi_{lo_sum,high}_pic
+;; pic_lo_sum_di
+;; pic_sethi_di
+;; 1 update_return
+;; 2 get_pc
+;; 5 movsi_{,lo_sum_,high_}pic_label_ref
+;; 6 seth44
+;; 7 setm44
+;; 8 setl44
+;; 9 sethh
+;; 10 setlm
+;; 11 embmedany_sethi, embmedany_brsum
+;; 12 movsf_const_high
+;; 13 embmedany_textuhi
+;; 14 embmedany_texthi
+;; 15 embmedany_textulo
+;; 16 embmedany_textlo
+;; 17 movsf_const_lo
+;; 18 sethm
+;; 19 setlo
+;;
+;; UNSPEC_VOLATILE: 0 blockage
+;; 1 flush_register_windows
+;; 2 goto_handler_and_restore
+;; 3 goto_handler_and_restore_v9*
+;; 4 flush
+;; 5 nonlocal_goto_receiver
+;;
+
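+;; For illustration, the UNSPEC number is the last operand of an
+;; (unspec ...) rtx.  A sketch in the shape of the pic sethi pattern
+;; found later in this file (not a new pattern):
+;;
+;;   (set (match_operand:SI 0 "register_operand" "=r")
+;;        (high:SI (unspec:SI [(match_operand 1 "" "")] 0)))
+;;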
;; The upper 32 fp regs on the v9 can't hold SFmode values. To deal with this
;; a second register class, EXTRA_FP_REGS, exists for the v9 chip. The name
;; is a bit of a misnomer as it covers all 64 fp regs. The corresponding
;; constraint letter is 'e'.
;; Attribute for cpu type.
;; These must match the values for enum processor_type in sparc.h.
-(define_attr "cpu" "v7,cypress,v8,supersparc,sparclite,f930,f934,sparclet,tsc701,v8plus,v9,ultrasparc"
+(define_attr "cpu" "v7,cypress,v8,supersparc,sparclite,f930,f934,hypersparc,sparclite86x,sparclet,tsc701,v9,ultrasparc"
(const (symbol_ref "sparc_cpu_attr")))
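
;; For reference, the matching members of enum processor_type are
;; expected to be, in this order (a sketch; sparc.h is authoritative):
;;
;;   enum processor_type { PROCESSOR_V7, PROCESSOR_CYPRESS, PROCESSOR_V8,
;;     PROCESSOR_SUPERSPARC, PROCESSOR_SPARCLITE, PROCESSOR_F930,
;;     PROCESSOR_F934, PROCESSOR_HYPERSPARC, PROCESSOR_SPARCLITE86X,
;;     PROCESSOR_SPARCLET, PROCESSOR_TSC701, PROCESSOR_V9,
;;     PROCESSOR_ULTRASPARC };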
;; Attribute for the instruction set.
;; type "call_no_delay_slot" is a call followed by an unimp instruction.
(define_attr "type"
- "move,unary,binary,compare,load,store,ialu,shift,uncond_branch,branch,call,call_no_delay_slot,address,imul,fpload,fpstore,fp,fpcmp,fpmul,fpdivs,fpdivd,fpsqrt,cmove,multi,misc"
+ "move,unary,binary,compare,load,sload,store,ialu,shift,uncond_branch,branch,call,call_no_delay_slot,return,address,imul,fpload,fpstore,fp,fpmove,fpcmove,fpcmp,fpmul,fpdivs,fpdivd,fpsqrts,fpsqrtd,cmove,multi,misc"
(const_string "binary"))
;; Set true if insn uses call-clobbered intermediate register.
;; Length (in # of insns).
(define_attr "length" ""
- (cond [(eq_attr "type" "load,fpload")
+ (cond [(eq_attr "type" "load,sload,fpload")
(if_then_else (match_operand 1 "symbolic_memory_operand" "")
(const_int 2) (const_int 1))
;; Attributes for instruction and branch scheduling
(define_attr "in_call_delay" "false,true"
- (cond [(eq_attr "type" "uncond_branch,branch,call,call_no_delay_slot,multi")
+ (cond [(eq_attr "type" "uncond_branch,branch,call,call_no_delay_slot,return,multi")
(const_string "false")
(eq_attr "type" "load,fpload,store,fpstore")
(if_then_else (eq_attr "length" "1")
(define_delay (eq_attr "type" "call")
[(eq_attr "in_call_delay" "true") (nil) (nil)])
+(define_attr "leaf_function" "false,true"
+ (const (symbol_ref "current_function_uses_only_leaf_regs")))
+
+(define_attr "eligible_for_return_delay" "false,true"
+ (symbol_ref "eligible_for_return_delay(insn)"))
+
+(define_attr "in_return_delay" "false,true"
+ (if_then_else (and (and (and (eq_attr "type" "move,load,sload,store,binary,ialu")
+ (eq_attr "length" "1"))
+ (eq_attr "leaf_function" "false"))
+ (eq_attr "eligible_for_return_delay" "false"))
+ (const_string "true")
+ (const_string "false")))
+
+(define_delay (and (eq_attr "type" "return")
+ (eq_attr "isa" "v9"))
+ [(eq_attr "in_return_delay" "true") (nil) (nil)])
+
;; ??? Should implement the notion of predelay slots for floating point
;; branches. This would allow us to remove the nop always inserted before
;; a floating point branch.
;; ---- cypress CY7C602 scheduling:
;; Memory with load-delay of 1 (i.e., 2 cycle load).
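;; (Reminder of the syntax used throughout these blocks, paraphrasing
;; the GCC internals manual:
;;    (define_function_unit NAME MULTIPLICITY SIMULTANEITY
;;       TEST READY-DELAY ISSUE-DELAY [CONFLICT-LIST])
;; so the "2 2" below means the result is ready 2 cycles after issue
;; and the unit cannot accept another such insn for 2 cycles.)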
+
(define_function_unit "memory" 1 0
- (and (eq_attr "type" "load,fpload") (eq_attr "cpu" "cypress")) 2 2)
+ (and (eq_attr "cpu" "cypress")
+ (eq_attr "type" "load,sload,fpload"))
+ 2 2)
;; SPARC has two floating-point units: the FP ALU,
;; and the FP MUL/DIV/SQRT unit.
;; More insns cause the chip to stall.
(define_function_unit "fp_alu" 1 0
- (and (eq_attr "type" "fp") (eq_attr "cpu" "cypress")) 5 5)
+ (and (eq_attr "cpu" "cypress")
+ (eq_attr "type" "fp,fpmove"))
+ 5 5)
+
(define_function_unit "fp_mds" 1 0
- (and (eq_attr "type" "fpmul") (eq_attr "cpu" "cypress")) 7 7)
+ (and (eq_attr "cpu" "cypress")
+ (eq_attr "type" "fpmul"))
+ 7 7)
+
(define_function_unit "fp_mds" 1 0
- (and (eq_attr "type" "fpdivs,fpdivd") (eq_attr "cpu" "cypress")) 37 37)
+ (and (eq_attr "cpu" "cypress")
+ (eq_attr "type" "fpdivs,fpdivd"))
+ 37 37)
+
(define_function_unit "fp_mds" 1 0
- (and (eq_attr "type" "fpsqrt") (eq_attr "cpu" "cypress")) 63 63)
+ (and (eq_attr "cpu" "cypress")
+ (eq_attr "type" "fpsqrts,fpsqrtd"))
+ 63 63)
;; ----- The TMS390Z55 scheduling
-;; The Supersparc can issue 1 - 3 insns per cycle; here we assume
-;; three insns/cycle, and hence multiply all costs by three.
-;; Combinations up to two integer, one ld/st, one fp.
+;; The Supersparc can issue 1 - 3 insns per cycle: up to two integer,
+;; one ld/st, one fp.
;; Memory delivers its result in one cycle to IU, zero cycles to FP
+
(define_function_unit "memory" 1 0
- (and (eq_attr "type" "load") (eq_attr "cpu" "supersparc")) 3 3)
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "load,sload"))
+ 1 1)
+
(define_function_unit "memory" 1 0
- (and (eq_attr "type" "fpload") (eq_attr "cpu" "supersparc")) 1 3)
-;; at least one in three instructions can be a mem opt.
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fpload"))
+ 0 1)
+
(define_function_unit "memory" 1 0
- (and (eq_attr "type" "store,fpstore") (eq_attr "cpu" "supersparc")) 1 3)
-;; at least one in three instructions can be a shift op.
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "store,fpstore"))
+ 1 1)
+
(define_function_unit "shift" 1 0
- (and (eq_attr "type" "shift") (eq_attr "cpu" "supersparc")) 1 3)
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "shift"))
+ 1 1)
;; There are only two write ports to the integer register file.
;; A store also uses a write port.
+
(define_function_unit "iwport" 2 0
- (and (eq_attr "type" "load,store,shift,ialu") (eq_attr "cpu" "supersparc")) 1 3)
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "load,sload,store,shift,ialu"))
+ 1 1)
;; Timings; throughput/latency
;; FADD 1/3 add/sub, format conv, compare, abs, neg
;; IMUL 4/4
(define_function_unit "fp_alu" 1 0
- (and (eq_attr "type" "fp,fpcmp") (eq_attr "cpu" "supersparc")) 9 3)
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fp,fpmove,fpcmp"))
+ 3 1)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fpmul"))
+ 3 1)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fpdivs"))
+ 6 4)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fpdivd"))
+ 9 7)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fpsqrts,fpsqrtd"))
+ 12 10)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "imul"))
+ 4 4)
+
+;; ----- hypersparc/sparclite86x scheduling
+;; The Hypersparc can issue 1 - 2 insns per cycle.  The dual issue cases
+;; are (L = ld/st, I = integer, F = float, B = branch):
+;;   LI/LF/LB/II/IF/IB/FF/FB
+;; The II/FF case applies only when loading a 32 bit hi/lo constant.
+;; Single issue insns include call, jmpl, u/smul, u/sdiv, lda, sta, fcmp.
+;; Memory delivers its result in one cycle to IU
+
+(define_function_unit "memory" 1 0
+ (and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
+ (eq_attr "type" "load,sload,fpload"))
+ 1 1)
+
+(define_function_unit "memory" 1 0
+ (and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
+ (eq_attr "type" "store,fpstore"))
+ 2 1)
+
+(define_function_unit "sparclite86x_branch" 1 0
+ (and (eq_attr "cpu" "sparclite86x")
+ (eq_attr "type" "branch"))
+ 1 1)
+
+;; shift insns
+(define_function_unit "sparclite86x_shift" 1 0
+ (and (eq_attr "cpu" "sparclite86x")
+ (eq_attr "type" "shift"))
+ 1 1)
+
+(define_function_unit "fp_alu" 1 0
+ (and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
+ (eq_attr "type" "fp,fpmove,fpcmp"))
+ 1 1)
+
(define_function_unit "fp_mds" 1 0
- (and (eq_attr "type" "fpmul") (eq_attr "cpu" "supersparc")) 9 3)
+ (and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
+ (eq_attr "type" "fpmul"))
+ 1 1)
+
(define_function_unit "fp_mds" 1 0
- (and (eq_attr "type" "fpdivs") (eq_attr "cpu" "supersparc")) 18 12)
+ (and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
+ (eq_attr "type" "fpdivs"))
+ 8 6)
+
(define_function_unit "fp_mds" 1 0
- (and (eq_attr "type" "fpdivd") (eq_attr "cpu" "supersparc")) 27 21)
+ (and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
+ (eq_attr "type" "fpdivd"))
+ 12 10)
+
(define_function_unit "fp_mds" 1 0
- (and (eq_attr "type" "fpsqrt") (eq_attr "cpu" "supersparc")) 36 30)
+ (and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
+ (eq_attr "type" "fpsqrts,fpsqrtd"))
+ 17 15)
+
(define_function_unit "fp_mds" 1 0
- (and (eq_attr "type" "imul") (eq_attr "cpu" "supersparc")) 12 12)
+ (and (ior (eq_attr "cpu" "hypersparc") (eq_attr "cpu" "sparclite86x"))
+ (eq_attr "type" "imul"))
+ 17 15)
;; ----- sparclet tsc701 scheduling
;; The tsc701 issues 1 insn per cycle.
;; Results may be written back out of order.
;; Loads take 2 extra cycles to complete and 4 can be buffered at a time.
+
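+;; (In the unit below, multiplicity 4 with simultaneity 1 is meant to
+;; model the four-entry load buffer, and "3 1" gives the 1 + 2 cycle
+;; result latency.)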
(define_function_unit "tsc701_load" 4 1
- (and (eq_attr "type" "load") (eq_attr "cpu" "tsc701")) 3 1)
+ (and (eq_attr "cpu" "tsc701")
+ (eq_attr "type" "load,sload"))
+ 3 1)
+
;; Stores take 2(?) extra cycles to complete.
;; It is desirable to not have any memory operation in the following 2 cycles.
;; (??? or 2 memory ops in the case of std).
+
(define_function_unit "tsc701_store" 1 0
- (and (eq_attr "type" "store") (eq_attr "cpu" "tsc701")) 3 3
- [(eq_attr "type" "load,store")])
+ (and (eq_attr "cpu" "tsc701")
+ (eq_attr "type" "store"))
+ 3 3
+ [(eq_attr "type" "load,sload,store")])
+
;; The multiply unit has a latency of 5.
(define_function_unit "tsc701_mul" 1 0
- (and (eq_attr "type" "imul") (eq_attr "cpu" "tsc701")) 5 5)
+ (and (eq_attr "cpu" "tsc701")
+ (eq_attr "type" "imul"))
+ 5 5)
+
+;; ----- The UltraSPARC-1 scheduling
+;; UltraSPARC has two integer units. Shift instructions can only execute
+;; on IE0. Condition code setting instructions, call, and jmpl (including
+;; the ret and retl pseudo-instructions) can only execute on IE1.
+;; Branch on register uses IE1, but branch on condition code does not.
+;; Conditional moves take 2 cycles. No other instruction can issue in the
+;; same cycle as a conditional move.
+;; Multiply and divide take many cycles during which no other instructions
+;; can issue.
+;; Memory delivers its result in two cycles (except for signed loads,
+;; which take one cycle more). One memory instruction can be issued per
+;; cycle.
+
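+;; E.g. given the "2 1" unit below, a load issued in cycle N has its
+;; result available in cycle N+2 (N+3 for sload via the "3 1" unit),
+;; and only one memory insn can issue per cycle.
+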
+(define_function_unit "memory" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "load,fpload"))
+ 2 1)
+
+(define_function_unit "memory" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "sload"))
+ 3 1)
+
+(define_function_unit "memory" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "store,fpstore"))
+ 1 1)
+
+(define_function_unit "ieuN" 2 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "ialu,binary,move,unary,shift,compare,call,call_no_delay_slot,uncond_branch"))
+ 1 1)
+
+(define_function_unit "ieu0" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "shift"))
+ 1 1)
+
+(define_function_unit "ieu0" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "cmove"))
+ 2 1)
+
+(define_function_unit "ieu1" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "compare,call,call_no_delay_slot,uncond_branch"))
+ 1 1)
+
+(define_function_unit "cti" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "branch"))
+ 1 1)
+
+;; Timings; throughput/latency
+;; FMOV 1/1 fmov, fabs, fneg
+;; FMOVcc 1/2
+;; FADD 1/3 add/sub, format conv, compare
+;; FMUL 1/3
+;; FDIVs 12/12
+;; FDIVd 22/22
+;; FSQRTs 12/12
+;; FSQRTd 22/22
+;; FCMP takes 1 cycle to branch, 2 cycles to conditional move.
+;;
+;; FDIV{s,d}/FSQRT{s,d} are given their own unit since they only
+;; use the FPM multiplier for final rounding 3 cycles before the
+;; end of their latency and we have no real way to model that.
+;;
+;; ??? This is really bogus because the timings really depend upon
+;; who uses the result. We should record who the user is with
+;; more descriptive 'type' attribute names and account for these
+;; issues in ultrasparc_adjust_cost.
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpmove"))
+ 1 1)
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpcmove"))
+ 2 1)
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fp"))
+ 3 1)
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpcmp"))
+ 2 1)
+
+(define_function_unit "fmul" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpmul"))
+ 3 1)
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpcmove"))
+ 2 1)
+
+(define_function_unit "fdiv" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpdivs"))
+ 12 12)
+
+(define_function_unit "fdiv" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpdivd"))
+ 22 22)
+
+(define_function_unit "fdiv" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpsqrts"))
+ 12 12)
+
+(define_function_unit "fdiv" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpsqrtd"))
+ 22 22)
\f
;; Compare instructions.
;; This controls RTL generation and register allocation.
[(set (reg:CCFP 96)
(compare:CCFP (match_operand:TF 0 "register_operand" "")
(match_operand:TF 1 "register_operand" "")))]
- "TARGET_FPU"
+ "TARGET_FPU && TARGET_HARD_QUAD"
"
{
sparc_compare_op0 = operands[0];
(compare:CC (match_operand:SI 0 "register_operand" "r")
(match_operand:SI 1 "arith_operand" "rI")))]
""
- "cmp %0,%1"
+ "cmp\\t%0, %1"
[(set_attr "type" "compare")])
(define_insn "*cmpdi_sp64"
(compare:CCX (match_operand:DI 0 "register_operand" "r")
(match_operand:DI 1 "arith_double_operand" "rHI")))]
"TARGET_ARCH64"
- "cmp %0,%1"
+ "cmp\\t%0, %1"
[(set_attr "type" "compare")])
(define_insn "*cmpsf_fpe"
"*
{
if (TARGET_V9)
- return \"fcmpes %0,%1,%2\";
- return \"fcmpes %1,%2\";
+ return \"fcmpes\\t%0, %1, %2\";
+ return \"fcmpes\\t%1, %2\";
}"
[(set_attr "type" "fpcmp")])
"*
{
if (TARGET_V9)
- return \"fcmped %0,%1,%2\";
- return \"fcmped %1,%2\";
+ return \"fcmped\\t%0, %1, %2\";
+ return \"fcmped\\t%1, %2\";
}"
[(set_attr "type" "fpcmp")])
"*
{
if (TARGET_V9)
- return \"fcmpeq %0,%1,%2\";
- return \"fcmpeq %1,%2\";
+ return \"fcmpeq\\t%0, %1, %2\";
+ return \"fcmpeq\\t%1, %2\";
}"
[(set_attr "type" "fpcmp")])
"*
{
if (TARGET_V9)
- return \"fcmps %0,%1,%2\";
- return \"fcmps %1,%2\";
+ return \"fcmps\\t%0, %1, %2\";
+ return \"fcmps\\t%1, %2\";
}"
[(set_attr "type" "fpcmp")])
"*
{
if (TARGET_V9)
- return \"fcmpd %0,%1,%2\";
- return \"fcmpd %1,%2\";
+ return \"fcmpd\\t%0, %1, %2\";
+ return \"fcmpd\\t%1, %2\";
}"
[(set_attr "type" "fpcmp")])
"*
{
if (TARGET_V9)
- return \"fcmpq %0,%1,%2\";
- return \"fcmpq %1,%2\";
+ return \"fcmpq\\t%0, %1, %2\";
+ return \"fcmpq\\t%1, %2\";
}"
[(set_attr "type" "fpcmp")])
\f
[(set (match_dup 3)
(xor:DI (match_operand:DI 1 "register_operand" "")
(match_operand:DI 2 "register_operand" "")))
- (parallel [(set (match_operand:DI 0 "register_operand" "")
- (eq:DI (match_dup 3) (const_int 0)))
- (clobber (reg:CCX 100))])]
+ (set (match_operand:DI 0 "register_operand" "")
+ (eq:DI (match_dup 3) (const_int 0)))]
"TARGET_ARCH64"
"{ operands[3] = gen_reg_rtx (DImode); }")
[(set (match_dup 3)
(xor:DI (match_operand:DI 1 "register_operand" "")
(match_operand:DI 2 "register_operand" "")))
- (parallel [(set (match_operand:DI 0 "register_operand" "")
- (ne:DI (match_dup 3) (const_int 0)))
- (clobber (reg:CCX 100))])]
+ (set (match_operand:DI 0 "register_operand" "")
+ (ne:DI (match_dup 3) (const_int 0)))]
"TARGET_ARCH64"
"{ operands[3] = gen_reg_rtx (DImode); }")
[(set (match_dup 3)
(xor:DI (match_operand:DI 1 "register_operand" "")
(match_operand:DI 2 "register_operand" "")))
- (parallel [(set (match_operand:SI 0 "register_operand" "")
- (eq:DI (match_dup 3) (const_int 0)))
- (clobber (reg:CCX 100))])]
+ (set (match_operand:SI 0 "register_operand" "")
+ (eq:SI (match_dup 3) (const_int 0)))]
"TARGET_ARCH64"
"{ operands[3] = gen_reg_rtx (DImode); }")
[(set (match_dup 3)
(xor:DI (match_operand:DI 1 "register_operand" "")
(match_operand:DI 2 "register_operand" "")))
- (parallel [(set (match_operand:SI 0 "register_operand" "")
- (ne:DI (match_dup 3) (const_int 0)))
- (clobber (reg:CCX 100))])]
+ (set (match_operand:SI 0 "register_operand" "")
+ (ne:SI (match_dup 3) (const_int 0)))]
"TARGET_ARCH64"
"{ operands[3] = gen_reg_rtx (DImode); }")
(xor:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "register_operand" "")))
(parallel [(set (match_operand:DI 0 "register_operand" "")
- (eq:SI (match_dup 3) (const_int 0)))
+ (eq:DI (match_dup 3) (const_int 0)))
(clobber (reg:CC 100))])]
"TARGET_ARCH64"
"{ operands[3] = gen_reg_rtx (SImode); }")
(xor:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "register_operand" "")))
(parallel [(set (match_operand:DI 0 "register_operand" "")
- (ne:SI (match_dup 3) (const_int 0)))
+ (ne:DI (match_dup 3) (const_int 0)))
(clobber (reg:CC 100))])]
"TARGET_ARCH64"
"{ operands[3] = gen_reg_rtx (SImode); }")
emit_insn (pat);
DONE;
}
- else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
- {
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
- emit_insn (gen_sne (operands[0]));
- DONE;
- }
else if (TARGET_V9)
{
if (gen_v9_scc (EQ, operands))
DONE;
/* fall through */
}
- operands[1] = gen_compare_reg (EQ, sparc_compare_op0, sparc_compare_op1);
+ FAIL;
}")
;; ??? v9: Operand 0 needs a mode, so SImode was chosen.
emit_insn (pat);
DONE;
}
- else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
- {
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
- emit_insn (gen_sne (operands[0]));
- DONE;
- }
else if (TARGET_V9)
{
if (gen_v9_scc (NE, operands))
DONE;
/* fall through */
}
- operands[1] = gen_compare_reg (NE, sparc_compare_op0, sparc_compare_op1);
+ FAIL;
}")
(define_expand "sgt"
"! TARGET_LIVE_G0"
"
{
- if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
- {
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
- emit_insn (gen_sne (operands[0]));
- DONE;
- }
- else if (TARGET_V9)
+ if (TARGET_V9)
{
if (gen_v9_scc (GT, operands))
DONE;
/* fall through */
}
- operands[1] = gen_compare_reg (GT, sparc_compare_op0, sparc_compare_op1);
+ FAIL;
}")
(define_expand "slt"
"! TARGET_LIVE_G0"
"
{
- if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
- {
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
- emit_insn (gen_sne (operands[0]));
- DONE;
- }
- else if (TARGET_V9)
+ if (TARGET_V9)
{
if (gen_v9_scc (LT, operands))
DONE;
/* fall through */
}
- operands[1] = gen_compare_reg (LT, sparc_compare_op0, sparc_compare_op1);
+ FAIL;
}")
(define_expand "sge"
"! TARGET_LIVE_G0"
"
{
- if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
- {
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
- emit_insn (gen_sne (operands[0]));
- DONE;
- }
- else if (TARGET_V9)
+ if (TARGET_V9)
{
if (gen_v9_scc (GE, operands))
DONE;
/* fall through */
}
- operands[1] = gen_compare_reg (GE, sparc_compare_op0, sparc_compare_op1);
+ FAIL;
}")
(define_expand "sle"
"! TARGET_LIVE_G0"
"
{
- if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
- {
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
- emit_insn (gen_sne (operands[0]));
- DONE;
- }
- else if (TARGET_V9)
+ if (TARGET_V9)
{
if (gen_v9_scc (LE, operands))
DONE;
/* fall through */
}
- operands[1] = gen_compare_reg (LE, sparc_compare_op0, sparc_compare_op1);
+ FAIL;
}")
(define_expand "sgtu"
{
if (! TARGET_V9)
{
- rtx tem;
+ rtx tem, pat;
/* We can do ltu easily, so if both operands are registers, swap them and
do a LTU. */
tem = sparc_compare_op0;
sparc_compare_op0 = sparc_compare_op1;
sparc_compare_op1 = tem;
- emit_insn (gen_sltu (operands[0]));
+ pat = gen_sltu (operands[0]);
+ if (pat == NULL_RTX)
+ FAIL;
+ emit_insn (pat);
DONE;
}
}
if (gen_v9_scc (GTU, operands))
DONE;
}
- operands[1] = gen_compare_reg (GTU, sparc_compare_op0, sparc_compare_op1);
+ FAIL;
}")
(define_expand "sltu"
{
if (! TARGET_V9)
{
- rtx tem;
+ rtx tem, pat;
/* We can do geu easily, so if both operands are registers, swap them and
do a GEU. */
tem = sparc_compare_op0;
sparc_compare_op0 = sparc_compare_op1;
sparc_compare_op1 = tem;
- emit_insn (gen_sgeu (operands[0]));
+ pat = gen_sgeu (operands[0]);
+ if (pat == NULL_RTX)
+ FAIL;
+ emit_insn (pat);
DONE;
}
}
if (gen_v9_scc (LEU, operands))
DONE;
}
- operands[1] = gen_compare_reg (LEU, sparc_compare_op0, sparc_compare_op1);
+ FAIL;
}")
;; Now the DEFINE_INSNs for the scc cases.
;; The SEQ and SNE patterns are special because they can be done
-;; without any branching and do not involve a COMPARE.
+;; without any branching and do not involve a COMPARE. We want
+;; them to always use the splits below so the results can be
+;; scheduled.
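+;; (The "#" output templates below mean the insn has no assembly form
+;; of its own and must be split before final output; the accompanying
+;; define_splits supply the real sequences.)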
(define_insn "*snesi_zero"
[(set (match_operand:SI 0 "register_operand" "=r")
(const_int 0)))
(clobber (reg:CC 100))]
"! TARGET_LIVE_G0"
- "subcc %%g0,%1,%%g0\;addx %%g0,0,%0"
- [(set_attr "type" "unary")
- (set_attr "length" "2")])
+ "#"
+ [(set_attr "length" "2")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ne:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0)))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (ltu:SI (reg:CC 100) (const_int 0)))]
+ "")
(define_insn "*neg_snesi_zero"
[(set (match_operand:SI 0 "register_operand" "=r")
(const_int 0))))
(clobber (reg:CC 100))]
"! TARGET_LIVE_G0"
- "subcc %%g0,%1,%%g0\;subx %%g0,0,%0"
- [(set_attr "type" "unary")
- (set_attr "length" "2")])
+ "#"
+ [(set_attr "length" "2")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (neg:SI (ne:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (neg:SI (ltu:SI (reg:CC 100) (const_int 0))))]
+ "")
(define_insn "*snesi_zero_extend"
[(set (match_operand:DI 0 "register_operand" "=r")
- (ne:SI (match_operand:SI 1 "register_operand" "r")
+ (ne:DI (match_operand:SI 1 "register_operand" "r")
(const_int 0)))
(clobber (reg:CC 100))]
"TARGET_ARCH64"
- "subcc %%g0,%1,%%g0\;addx %%g0,0,%0"
+ "#"
[(set_attr "type" "unary")
(set_attr "length" "2")])
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ne:DI (match_operand:SI 1 "register_operand" "")
+ (const_int 0)))
+ (clobber (reg:CC 100))]
+ "TARGET_ARCH64"
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (minus:SI (const_int 0) (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (zero_extend:DI (plus:SI (plus:SI (const_int 0)
+ (const_int 0))
+ (ltu:SI (reg:CC_NOOV 100)
+ (const_int 0)))))]
+ "")
+
(define_insn "*snedi_zero"
- [(set (match_operand:DI 0 "register_operand" "=r")
+ [(set (match_operand:DI 0 "register_operand" "=&r")
(ne:DI (match_operand:DI 1 "register_operand" "r")
- (const_int 0)))
- (clobber (reg:CCX 100))]
+ (const_int 0)))]
"TARGET_ARCH64"
- "mov 0,%0\;movrnz %1,1,%0"
- [(set_attr "type" "unary")
+ "#"
+ [(set_attr "type" "cmove")
(set_attr "length" "2")])
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ne:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ [(set (match_dup 0) (const_int 0))
+ (set (match_dup 0) (if_then_else:DI (ne:DI (match_dup 1)
+ (const_int 0))
+ (const_int 1)
+ (match_dup 0)))]
+ "")
+
(define_insn "*neg_snedi_zero"
- [(set (match_operand:DI 0 "register_operand" "=r")
+ [(set (match_operand:DI 0 "register_operand" "=&r")
(neg:DI (ne:DI (match_operand:DI 1 "register_operand" "r")
- (const_int 0))))
- (clobber (reg:CCX 100))]
+ (const_int 0))))]
"TARGET_ARCH64"
- "mov 0,%0\;movrnz %1,-1,%0"
- [(set_attr "type" "unary")
+ "#"
+ [(set_attr "type" "cmove")
(set_attr "length" "2")])
-(define_insn "*snedi_zero_trunc_sp32"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (ne:DI (match_operand:DI 1 "register_operand" "r")
- (const_int 0)))
- (clobber (reg:CCX 100))]
- "! TARGET_ARCH64 && ! TARGET_LIVE_G0"
- "xor %1,%R1,%0\;subcc %%g0,%0,%%g0\;addx %%g0,0,%0"
- [(set_attr "type" "unary")
- (set_attr "length" "3")])
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (neg:DI (ne:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 0))))]
+ "TARGET_ARCH64"
+ [(set (match_dup 0) (const_int 0))
+ (set (match_dup 0) (if_then_else:DI (ne:DI (match_dup 1)
+ (const_int 0))
+ (const_int -1)
+ (match_dup 0)))]
+ "")
-(define_insn "*snedi_zero_trunc_sp64"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (ne:DI (match_operand:DI 1 "register_operand" "r")
- (const_int 0)))
- (clobber (reg:CCX 100))]
+(define_insn "*snedi_zero_trunc"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (ne:SI (match_operand:DI 1 "register_operand" "r")
+ (const_int 0)))]
"TARGET_ARCH64"
- "mov 0,%0\;movrnz %1,1,%0"
- [(set_attr "type" "unary")
+ "#"
+ [(set_attr "type" "cmove")
(set_attr "length" "2")])
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ne:SI (match_operand:DI 1 "register_operand" "")
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ [(set (match_dup 0) (const_int 0))
+ (set (match_dup 0) (if_then_else:SI (ne:DI (match_dup 1)
+ (const_int 0))
+ (const_int 1)
+ (match_dup 0)))]
+ "")
+
(define_insn "*seqsi_zero"
[(set (match_operand:SI 0 "register_operand" "=r")
(eq:SI (match_operand:SI 1 "register_operand" "r")
(const_int 0)))
(clobber (reg:CC 100))]
"! TARGET_LIVE_G0"
- "subcc %%g0,%1,%%g0\;subx %%g0,-1,%0"
- [(set_attr "type" "unary")
- (set_attr "length" "2")])
+ "#"
+ [(set_attr "length" "2")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (eq:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0)))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (geu:SI (reg:CC 100) (const_int 0)))]
+ "")
(define_insn "*neg_seqsi_zero"
[(set (match_operand:SI 0 "register_operand" "=r")
(const_int 0))))
(clobber (reg:CC 100))]
"! TARGET_LIVE_G0"
- "subcc %%g0,%1,%%g0\;addx %%g0,-1,%0"
- [(set_attr "type" "unary")
- (set_attr "length" "2")])
+ "#"
+ [(set_attr "length" "2")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (neg:SI (eq:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (neg:SI (geu:SI (reg:CC 100) (const_int 0))))]
+ "")
(define_insn "*seqsi_zero_extend"
[(set (match_operand:DI 0 "register_operand" "=r")
- (eq:SI (match_operand:SI 1 "register_operand" "r")
+ (eq:DI (match_operand:SI 1 "register_operand" "r")
(const_int 0)))
(clobber (reg:CC 100))]
"TARGET_ARCH64"
- "subcc %%g0,%1,%%g0\;subx %%g0,-1,%0"
+ "#"
[(set_attr "type" "unary")
(set_attr "length" "2")])
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (eq:DI (match_operand:SI 1 "register_operand" "")
+ (const_int 0)))
+ (clobber (reg:CC 100))]
+ "TARGET_ARCH64"
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (minus:SI (const_int 0) (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (zero_extend:DI (minus:SI (minus:SI (const_int 0)
+ (const_int -1))
+ (ltu:SI (reg:CC_NOOV 100)
+ (const_int 0)))))]
+ "")
+
(define_insn "*seqdi_zero"
- [(set (match_operand:DI 0 "register_operand" "=r")
+ [(set (match_operand:DI 0 "register_operand" "=&r")
(eq:DI (match_operand:DI 1 "register_operand" "r")
- (const_int 0)))
- (clobber (reg:CCX 100))]
+ (const_int 0)))]
"TARGET_ARCH64"
- "mov 0,%0\;movrz %1,1,%0"
- [(set_attr "type" "unary")
+ "#"
+ [(set_attr "type" "cmove")
(set_attr "length" "2")])
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (eq:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ [(set (match_dup 0) (const_int 0))
+ (set (match_dup 0) (if_then_else:DI (eq:DI (match_dup 1)
+ (const_int 0))
+ (const_int 1)
+ (match_dup 0)))]
+ "")
+
(define_insn "*neg_seqdi_zero"
- [(set (match_operand:DI 0 "register_operand" "=r")
+ [(set (match_operand:DI 0 "register_operand" "=&r")
(neg:DI (eq:DI (match_operand:DI 1 "register_operand" "r")
- (const_int 0))))
- (clobber (reg:CCX 100))]
+ (const_int 0))))]
"TARGET_ARCH64"
- "mov 0,%0\;movrz %1,-1,%0"
- [(set_attr "type" "unary")
+ "#"
+ [(set_attr "type" "cmove")
(set_attr "length" "2")])
-(define_insn "*seqdi_zero_trunc_sp32"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (eq:DI (match_operand:DI 1 "register_operand" "r")
- (const_int 0)))
- (clobber (reg:CCX 100))]
- "! TARGET_ARCH64 && ! TARGET_LIVE_G0"
- "xor %1,%R1,%0\;subcc %%g0,%0,%%g0\;subx %%g0,-1,%0"
- [(set_attr "type" "unary")
- (set_attr "length" "3")])
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (neg:DI (eq:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 0))))]
+ "TARGET_ARCH64"
+ [(set (match_dup 0) (const_int 0))
+ (set (match_dup 0) (if_then_else:DI (eq:DI (match_dup 1)
+ (const_int 0))
+ (const_int -1)
+ (match_dup 0)))]
+ "")
-(define_insn "*seqdi_zero_trunc_sp64"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (eq:DI (match_operand:DI 1 "register_operand" "r")
- (const_int 0)))
- (clobber (reg:CCX 100))]
+(define_insn "*seqdi_zero_trunc"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (eq:SI (match_operand:DI 1 "register_operand" "r")
+ (const_int 0)))]
"TARGET_ARCH64"
- "mov 0,%0\;movrz %1,1,%0"
- [(set_attr "type" "unary")
+ "#"
+ [(set_attr "type" "cmove")
(set_attr "length" "2")])
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (eq:SI (match_operand:DI 1 "register_operand" "")
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ [(set (match_dup 0) (const_int 0))
+ (set (match_dup 0) (if_then_else:SI (eq:DI (match_dup 1)
+ (const_int 0))
+ (const_int 1)
+ (match_dup 0)))]
+ "")
+
;; We can also do (x + (i == 0)) and related, so put them in.
;; ??? The addx/subx insns use the 32 bit carry flag so there are no DImode
;; versions for v9.
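;; E.g. x + (i != 0) becomes the two-insn sequence (a sketch matching
;; the split below; register names are placeholders):
;;      subcc   %g0, i, %g0     ! carry = (i != 0)
;;      addx    x, 0, dst       ! dst = x + carry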
(match_operand:SI 2 "register_operand" "r")))
(clobber (reg:CC 100))]
"! TARGET_LIVE_G0"
- "subcc %%g0,%1,%%g0\;addx %2,0,%0"
+ "#"
[(set_attr "length" "2")])
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (ne:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))
+ (match_operand:SI 2 "register_operand" "")))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
+ (match_dup 2)))]
+ "")
+
(define_insn "*x_minus_i_ne_0"
[(set (match_operand:SI 0 "register_operand" "=r")
(minus:SI (match_operand:SI 2 "register_operand" "r")
(const_int 0))))
(clobber (reg:CC 100))]
"! TARGET_LIVE_G0"
- "subcc %%g0,%1,%%g0\;subx %2,0,%0"
- [(set_attr "length" "2")])
-
-(define_insn "*x_plus_i_eq_0"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (plus:SI (eq:SI (match_operand:SI 1 "register_operand" "r")
- (const_int 0))
- (match_operand:SI 2 "register_operand" "r")))
- (clobber (reg:CC 100))]
- "! TARGET_LIVE_G0"
- "subcc %%g0,%1,%%g0\;subx %2,-1,%0"
+ "#"
[(set_attr "length" "2")])
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (minus:SI (match_operand:SI 2 "register_operand" "")
+ (ne:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (minus:SI (match_dup 2)
+ (ltu:SI (reg:CC 100) (const_int 0))))]
+ "")
+
+(define_insn "*x_plus_i_eq_0"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (eq:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))
+ (match_operand:SI 2 "register_operand" "r")))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ "#"
+ [(set_attr "length" "2")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (eq:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))
+ (match_operand:SI 2 "register_operand" "")))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (plus:SI (geu:SI (reg:CC 100) (const_int 0))
+ (match_dup 2)))]
+ "")
+
(define_insn "*x_minus_i_eq_0"
[(set (match_operand:SI 0 "register_operand" "=r")
(minus:SI (match_operand:SI 2 "register_operand" "r")
(const_int 0))))
(clobber (reg:CC 100))]
"! TARGET_LIVE_G0"
- "subcc %%g0,%1,%%g0\;addx %2,-1,%0"
+ "#"
[(set_attr "length" "2")])
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (minus:SI (match_operand:SI 2 "register_operand" "")
+ (eq:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (minus:SI (match_dup 2)
+ (geu:SI (reg:CC 100) (const_int 0))))]
+ "")
+
;; We can also do GEU and LTU directly, but these operate after a compare.
;; ??? The addx/subx insns use the 32 bit carry flag so there are no DImode
;; versions for v9.
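;; E.g. unsigned x < y is a compare followed by a carry read (a sketch;
;; register names are placeholders):
;;      subcc   x, y, %g0       ! carry = (x < y) unsigned
;;      addx    %g0, 0, dst     ! dst = carry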
[(set (match_operand:SI 0 "register_operand" "=r")
(ltu:SI (reg:CC 100) (const_int 0)))]
"! TARGET_LIVE_G0"
- "addx %%g0,0,%0"
- [(set_attr "type" "misc")])
+ "addx\\t%%g0, 0, %0"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
(define_insn "*neg_sltu_insn"
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (ltu:SI (reg:CC 100) (const_int 0))))]
"! TARGET_LIVE_G0"
- "subx %%g0,0,%0"
- [(set_attr "type" "misc")])
+ "subx\\t%%g0, 0, %0"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
;; ??? Combine should canonicalize these next two to the same pattern.
(define_insn "*neg_sltu_minus_x"
(minus:SI (neg:SI (ltu:SI (reg:CC 100) (const_int 0)))
(match_operand:SI 1 "arith_operand" "rI")))]
"! TARGET_LIVE_G0"
- "subx %%g0,%1,%0"
- [(set_attr "type" "unary")])
+ "subx\\t%%g0, %1, %0"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
(define_insn "*neg_sltu_plus_x"
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
(match_operand:SI 1 "arith_operand" "rI"))))]
"! TARGET_LIVE_G0"
- "subx %%g0,%1,%0"
- [(set_attr "type" "unary")])
+ "subx\\t%%g0, %1, %0"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
(define_insn "*sgeu_insn"
[(set (match_operand:SI 0 "register_operand" "=r")
(geu:SI (reg:CC 100) (const_int 0)))]
"! TARGET_LIVE_G0"
- "subx %%g0,-1,%0"
- [(set_attr "type" "misc")])
+ "subx\\t%%g0, -1, %0"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
(define_insn "*neg_sgeu_insn"
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (geu:SI (reg:CC 100) (const_int 0))))]
"! TARGET_LIVE_G0"
- "addx %%g0,-1,%0"
- [(set_attr "type" "misc")])
+ "addx\\t%%g0, -1, %0"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
;; We can also do (x + ((unsigned) i >= 0)) and related, so put them in.
;; ??? The addx/subx insns use the 32 bit carry flag so there are no DImode
(plus:SI (ltu:SI (reg:CC 100) (const_int 0))
(match_operand:SI 1 "arith_operand" "rI")))]
"! TARGET_LIVE_G0"
- "addx %%g0,%1,%0"
- [(set_attr "type" "unary")])
+ "addx\\t%%g0, %1, %0"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
(define_insn "*sltu_plus_x_plus_y"
[(set (match_operand:SI 0 "register_operand" "=r")
(plus:SI (match_operand:SI 1 "arith_operand" "%r")
(match_operand:SI 2 "arith_operand" "rI"))))]
""
- "addx %1,%2,%0")
+ "addx\\t%1, %2, %0"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
(define_insn "*x_minus_sltu"
[(set (match_operand:SI 0 "register_operand" "=r")
(minus:SI (match_operand:SI 1 "register_operand" "r")
(ltu:SI (reg:CC 100) (const_int 0))))]
""
- "subx %1,0,%0"
- [(set_attr "type" "unary")])
+ "subx\\t%1, 0, %0"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
;; ??? Combine should canonicalize these next two to the same pattern.
(define_insn "*x_minus_y_minus_sltu"
[(set (match_operand:SI 0 "register_operand" "=r")
- (minus:SI (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (minus:SI (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
(match_operand:SI 2 "arith_operand" "rI"))
(ltu:SI (reg:CC 100) (const_int 0))))]
""
- "subx %1,%2,%0")
+ "subx\\t%r1, %2, %0"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
(define_insn "*x_minus_sltu_plus_y"
[(set (match_operand:SI 0 "register_operand" "=r")
- (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
(plus:SI (ltu:SI (reg:CC 100) (const_int 0))
(match_operand:SI 2 "arith_operand" "rI"))))]
""
- "subx %1,%2,%0")
+ "subx\\t%r1, %2, %0"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
(define_insn "*sgeu_plus_x"
[(set (match_operand:SI 0 "register_operand" "=r")
(plus:SI (geu:SI (reg:CC 100) (const_int 0))
(match_operand:SI 1 "register_operand" "r")))]
""
- "subx %1,-1,%0"
- [(set_attr "type" "unary")])
+ "subx\\t%1, -1, %0"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
(define_insn "*x_minus_sgeu"
[(set (match_operand:SI 0 "register_operand" "=r")
(minus:SI (match_operand:SI 1 "register_operand" "r")
(geu:SI (reg:CC 100) (const_int 0))))]
""
- "addx %1,-1,%0"
- [(set_attr "type" "unary")])
-
-;; Now we have the generic scc insns.
-;; !v9: These will be done using a jump.
-;; v9: Use conditional moves which are defined elsewhere.
-;; We have to exclude the cases above, since we will not want combine to
-;; turn something that does not require a jump into something that does.
+ "addx\\t%1, -1, %0"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
-(define_insn "*scc_si"
+(define_split
[(set (match_operand:SI 0 "register_operand" "=r")
(match_operator:SI 2 "noov_compare_op"
[(match_operand 1 "icc_or_fcc_reg_operand" "")
(const_int 0)]))]
- ""
- "* return output_scc_insn (operands, insn); "
- [(set_attr "type" "multi")
- (set_attr "length" "3")])
+ ;; 32 bit LTU/GEU are better implemented using addx/subx
+ "TARGET_V9 && REGNO (operands[1]) == SPARC_ICC_REG
+ && (GET_MODE (operands[1]) == CCXmode
+ || (GET_CODE (operands[2]) != LTU && GET_CODE (operands[2]) != GEU))"
+ [(set (match_dup 0) (const_int 0))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup:SI 2 [(match_dup 1) (const_int 0)])
+ (const_int 1)
+ (match_dup 0)))]
+ "")
-(define_insn "*scc_di"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (match_operator:DI 2 "noov_compare_op"
- [(match_operand 1 "icc_or_fcc_reg_operand" "")
- (const_int 0)]))]
- "TARGET_ARCH64"
- "* return output_scc_insn (operands, insn); "
- [(set_attr "type" "multi")
- (set_attr "length" "3")])
\f
;; These control RTL generation for conditional jump insns
emit_v9_brxx_insn (EQ, sparc_compare_op0, operands[0]);
DONE;
}
- else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
- {
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
- emit_jump_insn (gen_bne (operands[0]));
- DONE;
- }
operands[1] = gen_compare_reg (EQ, sparc_compare_op0, sparc_compare_op1);
}")
emit_v9_brxx_insn (NE, sparc_compare_op0, operands[0]);
DONE;
}
- else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
- {
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
- emit_jump_insn (gen_bne (operands[0]));
- DONE;
- }
operands[1] = gen_compare_reg (NE, sparc_compare_op0, sparc_compare_op1);
}")
emit_v9_brxx_insn (GT, sparc_compare_op0, operands[0]);
DONE;
}
- else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
- {
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
- emit_jump_insn (gen_bne (operands[0]));
- DONE;
- }
operands[1] = gen_compare_reg (GT, sparc_compare_op0, sparc_compare_op1);
}")
emit_v9_brxx_insn (LT, sparc_compare_op0, operands[0]);
DONE;
}
- else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
- {
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
- emit_jump_insn (gen_bne (operands[0]));
- DONE;
- }
operands[1] = gen_compare_reg (LT, sparc_compare_op0, sparc_compare_op1);
}")
emit_v9_brxx_insn (GE, sparc_compare_op0, operands[0]);
DONE;
}
- else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
- {
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
- emit_jump_insn (gen_bne (operands[0]));
- DONE;
- }
operands[1] = gen_compare_reg (GE, sparc_compare_op0, sparc_compare_op1);
}")
emit_v9_brxx_insn (LE, sparc_compare_op0, operands[0]);
DONE;
}
- else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
- {
- emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
- emit_jump_insn (gen_bne (operands[0]));
- DONE;
- }
operands[1] = gen_compare_reg (LE, sparc_compare_op0, sparc_compare_op1);
}")
\f
;; Now match both normal and inverted jump.
+;; XXX fpcmp nop braindamage
(define_insn "*normal_branch"
[(set (pc)
(if_then_else (match_operator 0 "noov_compare_op"
{
return output_cbranch (operands[0], 1, 0,
final_sequence && INSN_ANNULLED_BRANCH_P (insn),
- ! final_sequence);
+ ! final_sequence, insn);
}"
[(set_attr "type" "branch")])
+;; XXX fpcmp nop braindamage
(define_insn "*inverted_branch"
[(set (pc)
(if_then_else (match_operator 0 "noov_compare_op"
{
return output_cbranch (operands[0], 1, 1,
final_sequence && INSN_ANNULLED_BRANCH_P (insn),
- ! final_sequence);
+ ! final_sequence, insn);
}"
[(set_attr "type" "branch")])
+;; XXX fpcmp nop braindamage
(define_insn "*normal_fp_branch"
[(set (pc)
(if_then_else (match_operator 1 "comparison_operator"
{
return output_cbranch (operands[1], 2, 0,
final_sequence && INSN_ANNULLED_BRANCH_P (insn),
- ! final_sequence);
+ ! final_sequence, insn);
}"
[(set_attr "type" "branch")])
+;; XXX fpcmp nop braindamage
(define_insn "*inverted_fp_branch"
[(set (pc)
(if_then_else (match_operator 1 "comparison_operator"
{
return output_cbranch (operands[1], 2, 1,
final_sequence && INSN_ANNULLED_BRANCH_P (insn),
- ! final_sequence);
+ ! final_sequence, insn);
}"
[(set_attr "type" "branch")])
+;; XXX fpcmp nop braindamage
(define_insn "*normal_fpe_branch"
[(set (pc)
(if_then_else (match_operator 1 "comparison_operator"
{
return output_cbranch (operands[1], 2, 0,
final_sequence && INSN_ANNULLED_BRANCH_P (insn),
- ! final_sequence);
+ ! final_sequence, insn);
}"
[(set_attr "type" "branch")])
+;; XXX fpcmp nop braindamage
(define_insn "*inverted_fpe_branch"
[(set (pc)
(if_then_else (match_operator 1 "comparison_operator"
{
return output_cbranch (operands[1], 2, 1,
final_sequence && INSN_ANNULLED_BRANCH_P (insn),
- ! final_sequence);
+ ! final_sequence, insn);
}"
[(set_attr "type" "branch")])
;; There are no 32 bit brreg insns.
+;; XXX
(define_insn "*normal_int_branch_sp64"
[(set (pc)
(if_then_else (match_operator 0 "v9_regcmp_op"
{
return output_v9branch (operands[0], 1, 2, 0,
final_sequence && INSN_ANNULLED_BRANCH_P (insn),
- ! final_sequence);
+ ! final_sequence, insn);
}"
[(set_attr "type" "branch")])
+;; XXX
(define_insn "*inverted_int_branch_sp64"
[(set (pc)
(if_then_else (match_operator 0 "v9_regcmp_op"
{
return output_v9branch (operands[0], 1, 2, 1,
final_sequence && INSN_ANNULLED_BRANCH_P (insn),
- ! final_sequence);
+ ! final_sequence, insn);
}"
[(set_attr "type" "branch")])
\f
-;; Esoteric move insns (lo_sum, high, pic).
-
-(define_insn "*lo_sum_si"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "immediate_operand" "in")))]
- ""
- ;; V9 needs "add" because of the code models. We still use "or" for v8
- ;; so we can compare the old compiler with the new.
- "* return TARGET_ARCH64 ? \"add %1,%%lo(%a2),%0\" : \"or %1,%%lo(%a2),%0\";"
- ;; Need to set length for this arith insn because operand2
- ;; is not an "arith_operand".
- [(set_attr "length" "1")])
+;; Load program counter insns.
+
+(define_insn "get_pc"
+ [(clobber (reg:SI 15))
+ (set (match_operand 0 "register_operand" "=r")
+ (unspec [(match_operand 1 "" "") (match_operand 2 "" "")] 2))]
+ "flag_pic && REGNO (operands[0]) == 23"
+ "sethi\\t%%hi(%a1-4), %0\\n\\tcall\\t%a2\\n\\tadd\\t%0, %%lo(%a1+4), %0"
+ [(set_attr "length" "3")])
+
+;; Currently unused...
+;; (define_insn "get_pc_via_rdpc"
+;; [(set (match_operand 0 "register_operand" "=r") (pc))]
+;; "TARGET_V9"
+;; "rd\\t%%pc, %0"
+;; [(set_attr "type" "move")])
-;; For PIC, symbol_refs are put inside unspec so that the optimizer will not
-;; confuse them with real addresses.
-(define_insn "pic_lo_sum_si"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
- (unspec:SI [(match_operand:SI 2 "immediate_operand" "in")] 0)))]
- "flag_pic"
- ;; V9 needs "add" because of the code models. We still use "or" for v8
- ;; so we can compare the old compiler with the new.
- "* return TARGET_ARCH64 ? \"add %1,%%lo(%a2),%0\" : \"or %1,%%lo(%a2),%0\";"
- ;; Need to set length for this arith insn because operand2
- ;; is not an "arith_operand".
- [(set_attr "length" "1")])
+\f
+;; Move instructions
-;; The PIC version of sethi must appear before the non-pic case so that
-;; the unspec will not be matched as part of the operand.
-;; For PIC, symbol_refs are put inside unspec so that the optimizer will not
-;; confuse them with real addresses.
-(define_insn "pic_sethi_si"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (high:SI (unspec:SI [(match_operand 1 "" "")] 0)))]
- "flag_pic && check_pic (1)"
- "sethi %%hi(%a1),%0"
- [(set_attr "type" "move")
- (set_attr "length" "1")])
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* Working with CONST_INTs is easier, so convert
+ a double if needed. */
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]) & 0xff);
+ }
+ else if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ /* And further, we know for all QI cases that only the
+ low byte is significant, which we can always process
+ in a single insn. So mask it now. */
+ operands[1] = GEN_INT (INTVAL (operands[1]) & 0xff);
+ }
-(define_insn "*sethi_si"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (high:SI (match_operand 1 "" "")))]
- "check_pic (1)"
- "sethi %%hi(%a1),%0"
- [(set_attr "type" "move")
- (set_attr "length" "1")])
+ /* Handle sets of MEM first. */
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ /* This checks TARGET_LIVE_G0 for us. */
+ if (reg_or_0_operand (operands[1], QImode))
+ goto movqi_is_ok;
-(define_insn "*sethi_hi"
- [(set (match_operand:HI 0 "register_operand" "=r")
- (high:HI (match_operand 1 "" "")))]
- "check_pic (1)"
- "sethi %%hi(%a1),%0"
- [(set_attr "type" "move")
- (set_attr "length" "1")])
+ if (! reload_in_progress)
+ {
+ operands[0] = validize_mem (operands[0]);
+ operands[1] = force_reg (QImode, operands[1]);
+ }
+ }
-(define_insn "get_pc_sp32"
- [(set (pc) (label_ref (match_operand 0 "" "")))
- (set (reg:SI 15) (label_ref (match_dup 0)))]
- "! TARGET_PTR64"
- "call %l0%#"
- [(set_attr "type" "uncond_branch")])
+ /* Fixup PIC cases. */
+ if (flag_pic)
+ {
+ if (CONSTANT_P (operands[1])
+ && pic_address_needs_scratch (operands[1]))
+ operands[1] = legitimize_pic_address (operands[1], QImode, 0);
-(define_insn "get_pc_sp64"
- [(set (match_operand:DI 0 "register_operand" "=r") (pc))]
- "TARGET_PTR64"
- "rd %%pc,%0"
- [(set_attr "type" "move")])
+ if (symbolic_operand (operands[1], QImode))
+ {
+ operands[1] = legitimize_pic_address (operands[1],
+ QImode,
+ (reload_in_progress ?
+ operands[0] :
+ NULL_RTX));
+ goto movqi_is_ok;
+ }
+ }
-;; Special pic pattern, for loading the address of a label into a register.
-;; It clobbers o7 because the call puts the return address (i.e. pc value)
-;; there. The pic tablejump pattern also uses this.
+ /* All QI constants require only one insn, so proceed. */
-(define_insn "move_pic_label_si"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (label_ref:SI (match_operand 1 "" "")))
- (set (reg:SI 15) (pc))]
- "flag_pic"
- "*
-{
- if (get_attr_length (insn) == 2)
- return \"\\n1:\;call 2f\;add %%o7,%%lo(%l1-1b),%0\\n2:\";
- else
- return \"\\n1:\;call 2f\;sethi %%hi(%l1-1b),%0\\n2:\\tor %0,%%lo(%l1-1b),%0\;add %0,%%o7,%0\";
-}"
- [(set_attr "type" "multi")
- ; 960 = 4096 bytes / 4 bytes/insn - 64 (for not always perfect length calcs)
- (set (attr "length") (if_then_else (ltu (minus (match_dup 1) (pc))
- (const_int 960))
- (const_int 2)
- (const_int 4)))])
-
-;; Special sparc64 pattern for loading the address of a label into a register.
-;; The pic and non-pic cases are the same since it's the most efficient way.
-;;
-;; ??? The non-pic case doesn't need to use %o7, we could use a scratch
-;; instead. But the pic case doesn't need to use %o7 either. We handle them
-;; both here so that when this is fixed, they can both be fixed together.
-;; Don't forget that the pic jump table stuff uses %o7 (that will need to be
-;; changed too).
+ movqi_is_ok:
+ ;
+}")
-(define_insn "move_label_di"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (label_ref:DI (match_operand 1 "" "")))
- (set (reg:DI 15) (pc))]
- "TARGET_ARCH64"
- "*
-{
- if (get_attr_length (insn) == 2)
- return \"\\n1:\;rd %%pc,%%o7\;add %%o7,%l1-1b,%0\";
- else
- return \"\\n1:\;rd %%pc,%%o7\;sethi %%hi(%l1-1b),%0\;add %0,%%lo(%l1-1b),%0\;sra %0,0,%0\;add %0,%%o7,%0\";
-}"
- [(set_attr "type" "multi")
- ; 960 = 4096 bytes / 4 bytes/insn - 64 (for not always perfect length calcs)
- (set (attr "length") (if_then_else (ltu (minus (match_dup 1) (pc))
- (const_int 960))
- (const_int 2)
- (const_int 5)))])
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m")
+ (match_operand:QI 1 "input_operand" "rI,m,rJ"))]
+ "(register_operand (operands[0], QImode)
+ || reg_or_0_operand (operands[1], QImode))"
+ "@
+ mov\\t%1, %0
+ ldub\\t%1, %0
+ stb\\t%r1, %0"
+ [(set_attr "type" "move,load,store")
+ (set_attr "length" "1")])
-(define_insn "*lo_sum_di_sp32"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (lo_sum:DI (match_operand:DI 1 "register_operand" "0")
- (match_operand:DI 2 "immediate_operand" "in")))]
- "! TARGET_ARCH64"
- "*
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
{
- /* Don't output a 64 bit constant, since we can't trust the assembler to
- handle it correctly. */
- if (GET_CODE (operands[2]) == CONST_DOUBLE)
- operands[2] = gen_rtx (CONST_INT, VOIDmode, CONST_DOUBLE_LOW (operands[2]));
- return \"or %R1,%%lo(%a2),%R0\";
-}"
- ;; Need to set length for this arith insn because operand2
- ;; is not an "arith_operand".
- [(set_attr "length" "1")])
-
-;; ??? Optimizer does not handle "or %o1,%lo(0),%o1". How about add?
+ /* Working with CONST_INTs is easier, so convert
+ a double if needed. */
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
-(define_insn "*lo_sum_di_sp64"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (lo_sum:DI (match_operand:DI 1 "register_operand" "0")
- (match_operand:DI 2 "immediate_operand" "in")))]
- "TARGET_ARCH64"
- "*
-{
- /* Don't output a 64 bit constant, since we can't trust the assembler to
- handle it correctly. */
- if (GET_CODE (operands[2]) == CONST_DOUBLE)
- operands[2] = gen_rtx (CONST_INT, VOIDmode, CONST_DOUBLE_LOW (operands[2]));
- /* Note that we use add here. This is important because Medium/Anywhere
- code model support depends on it. */
- return \"add %1,%%lo(%a2),%0\";
-}"
- ;; Need to set length for this arith insn because operand2
- ;; is not an "arith_operand".
- [(set_attr "length" "1")])
+ /* Handle sets of MEM first. */
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ /* This checks TARGET_LIVE_G0 for us. */
+ if (reg_or_0_operand (operands[1], HImode))
+ goto movhi_is_ok;
-(define_insn "*sethi_di_sp32"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (high:DI (match_operand 1 "" "")))]
- "! TARGET_ARCH64 && check_pic (1)"
- "*
-{
- rtx op0 = operands[0];
- rtx op1 = operands[1];
+ if (! reload_in_progress)
+ {
+ operands[0] = validize_mem (operands[0]);
+ operands[1] = force_reg (HImode, operands[1]);
+ }
+ }
- if (GET_CODE (op1) == CONST_INT)
+ /* Fixup PIC cases. */
+ if (flag_pic)
{
- operands[0] = operand_subword (op0, 1, 0, DImode);
- output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
+ if (CONSTANT_P (operands[1])
+ && pic_address_needs_scratch (operands[1]))
+ operands[1] = legitimize_pic_address (operands[1], HImode, 0);
- operands[0] = operand_subword (op0, 0, 0, DImode);
- if (INTVAL (op1) < 0)
- return \"mov -1,%0\";
- else
- return \"mov 0,%0\";
+ if (symbolic_operand (operands[1], HImode))
+ {
+ operands[1] = legitimize_pic_address (operands[1],
+ HImode,
+ (reload_in_progress ?
+ operands[0] :
+ NULL_RTX));
+ goto movhi_is_ok;
+ }
}
- else if (GET_CODE (op1) == CONST_DOUBLE)
- {
- operands[0] = operand_subword (op0, 1, 0, DImode);
- operands[1] = gen_rtx (CONST_INT, VOIDmode, CONST_DOUBLE_LOW (op1));
- output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
- operands[0] = operand_subword (op0, 0, 0, DImode);
- operands[1] = gen_rtx (CONST_INT, VOIDmode, CONST_DOUBLE_HIGH (op1));
- return singlemove_string (operands);
+  /* This makes sure we will not get rematched due to splitting.  */
+ if (! CONSTANT_P (operands[1]) || input_operand (operands[1], HImode))
+ ;
+ else if (CONSTANT_P (operands[1])
+ && GET_CODE (operands[1]) != HIGH
+ && GET_CODE (operands[1]) != LO_SUM)
+ {
+ sparc_emit_set_const32 (operands[0], operands[1]);
+ DONE;
}
- else
- abort ();
- return \"\";
-}"
- [(set_attr "type" "move")
- (set_attr "length" "2")])
+ movhi_is_ok:
+ ;
+}")
-;;; ??? This pattern originally clobbered a scratch register. However, this
-;;; is invalid, the movdi pattern may not use a temp register because it
-;;; may be called from reload to reload a DImode value. In that case, we
-;;; end up with a scratch register that never gets allocated. To avoid this,
-;;; we use global register 1 which is never otherwise used by gcc as a temp.
-;;; The correct solution here might be to force DImode constants to memory,
-;;; e.g. by using a toc like the romp and rs6000 ports do for addresses, reg
-;;; 1 will then no longer need to be considered a fixed reg.
-
-(define_expand "sethi_di_sp64"
- [(parallel
- [(set (match_operand:DI 0 "register_operand" "")
- (high:DI (match_operand 1 "general_operand" "")))
- (clobber (reg:DI 1))])]
+(define_insn "*movhi_const64_special"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (match_operand:HI 1 "const64_high_operand" ""))]
"TARGET_ARCH64"
- "")
+ "sethi\\t%%hi(%a1), %0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
-(define_insn "*sethi_di_sp64_const"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (high:DI (match_operand 1 "const_double_operand" "")))
- (clobber (reg:DI 1))]
- "TARGET_ARCH64 && check_pic (1)"
- "*
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m")
+ (match_operand:HI 1 "input_operand" "rI,K,m,rJ"))]
+ "(register_operand (operands[0], HImode)
+ || reg_or_0_operand (operands[1], HImode))"
+ "@
+ mov\\t%1, %0
+ sethi\\t%%hi(%a1), %0
+ lduh\\t%1, %0
+ sth\\t%r1, %0"
+ [(set_attr "type" "move,move,load,store")
+ (set_attr "length" "1")])
+
+;; We always work with constants here.
+(define_insn "*movhi_lo_sum"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (ior:HI (match_operand:HI 1 "arith_operand" "%r")
+ (match_operand:HI 2 "arith_operand" "I")))]
+ ""
+ "or\\t%1, %2, %0"
+ [(set_attr "type" "ialu")
+ (set_attr "length" "1")])
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
{
- rtx high, low;
-
- split_double (operands[1], &high, &low);
+ /* Working with CONST_INTs is easier, so convert
+ a double if needed. */
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
- if (high == const0_rtx)
+ /* Handle sets of MEM first. */
+ if (GET_CODE (operands[0]) == MEM)
{
- operands[1] = low;
- output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
+ /* This checks TARGET_LIVE_G0 for us. */
+ if (reg_or_0_operand (operands[1], SImode))
+ goto movsi_is_ok;
+
+ if (! reload_in_progress)
+ {
+ operands[0] = validize_mem (operands[0]);
+ operands[1] = force_reg (SImode, operands[1]);
+ }
}
- else
+
+ /* Fixup PIC cases. */
+ if (flag_pic)
{
- operands[1] = high;
- output_asm_insn (singlemove_string (operands), operands);
+ if (CONSTANT_P (operands[1])
+ && pic_address_needs_scratch (operands[1]))
+ operands[1] = legitimize_pic_address (operands[1], SImode, 0);
- operands[1] = low;
- output_asm_insn (\"sllx %0,32,%0\", operands);
- if (low != const0_rtx)
- output_asm_insn (\"sethi %%hi(%a1),%%g1; or %0,%%g1,%0\", operands);
- }
+ if (GET_CODE (operands[1]) == LABEL_REF)
+ {
+	  /* Label references must go through the special PIC
+	     label-ref expander below.  */
+ emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
+ DONE;
+ }
- return \"\";
-}"
- [(set_attr "type" "move")
- (set_attr "length" "5")])
+ if (symbolic_operand (operands[1], SImode))
+ {
+ operands[1] = legitimize_pic_address (operands[1],
+ SImode,
+ (reload_in_progress ?
+ operands[0] :
+ NULL_RTX));
+ goto movsi_is_ok;
+ }
+ }
-;; Most of the required support for the various code models is here.
-;; We can do this because sparcs need the high insn to load the address. We
-;; just need to get high to do the right thing for each code model. Then each
-;; uses the same "%X+%lo(...)" in the load/store insn.
+ /* If we are trying to toss an integer constant into the
+ FPU registers, force it into memory. */
+ if (GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) >= SPARC_FIRST_FP_REG
+ && REGNO (operands[0]) <= SPARC_LAST_V9_FP_REG
+ && CONSTANT_P (operands[1]))
+ operands[1] = validize_mem (force_const_mem (GET_MODE (operands[0]),
+ operands[1]));
+
+ /* This makes sure we will not get rematched due to splittage. */
+ if (! CONSTANT_P (operands[1]) || input_operand (operands[1], SImode))
+ ;
+ else if (CONSTANT_P (operands[1])
+ && GET_CODE (operands[1]) != HIGH
+ && GET_CODE (operands[1]) != LO_SUM)
+ {
+ sparc_emit_set_const32 (operands[0], operands[1]);
+ DONE;
+ }
+ movsi_is_ok:
+ ;
+}")
-;; When TARGET_MEDLOW, assume that the upper 32 bits of symbol addresses are
-;; always 0.
-;; When TARGET_MEDANY, the text and data segments have a maximum size of 32
-;; bits and may be located anywhere. MEDANY_BASE_REG contains the start
-;; address of the data segment, currently %g4.
-;; When TARGET_FULLANY, symbolic addresses are 64 bits.
+;; Special LIVE_G0 pattern to obtain zero in a register.
+(define_insn "*movsi_zero_liveg0"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "zero_operand" "J"))]
+ "TARGET_LIVE_G0"
+ "and\\t%0, 0, %0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "1")])
-(define_insn "*sethi_di_medlow"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (high:DI (match_operand 1 "" "")))
- ;; The clobber is here because emit_move_sequence assumes the worst case.
- (clobber (reg:DI 1))]
- "TARGET_MEDLOW && check_pic (1)"
- "sethi %%hi(%a1),%0"
+;; This is needed to show CSE exactly which bits are set
+;; in a 64-bit register by sethi instructions.
+(define_insn "*movsi_const64_special"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "const64_high_operand" ""))]
+ "TARGET_ARCH64"
+ "sethi\\t%%hi(%a1), %0"
[(set_attr "type" "move")
(set_attr "length" "1")])
-(define_insn "*sethi_di_medium_pic"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (high:DI (match_operand 1 "sp64_medium_pic_operand" "")))]
- "(TARGET_MEDLOW || TARGET_MEDANY) && check_pic (1)"
- "sethi %%hi(%a1),%0"
- [(set_attr "type" "move")
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,f,r,r,r,f,m,m,d")
+ (match_operand:SI 1 "input_operand" "rI,!f,K,J,m,!m,rJ,!f,J"))]
+ "(register_operand (operands[0], SImode)
+ || reg_or_0_operand (operands[1], SImode))"
+ "@
+ mov\\t%1, %0
+ fmovs\\t%1, %0
+ sethi\\t%%hi(%a1), %0
+ clr\\t%0
+ ld\\t%1, %0
+ ld\\t%1, %0
+ st\\t%r1, %0
+ st\\t%1, %0
+ fzeros\\t%0"
+ [(set_attr "type" "move,fpmove,move,move,load,fpload,store,fpstore,fpmove")
(set_attr "length" "1")])
-;; WARNING: %0 gets %hi(%1)+%g4.
-;; You cannot OR in %lo(%1), it must be added in.
+(define_insn "*movsi_lo_sum"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "in")))]
+ ""
+ "or\\t%1, %%lo(%a2), %0"
+ [(set_attr "type" "ialu")
+ (set_attr "length" "1")])
-(define_insn "*sethi_di_medany_data"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (high:DI (match_operand 1 "data_segment_operand" "")))
- ;; The clobber is here because emit_move_sequence assumes the worst case.
- (clobber (reg:DI 1))]
- "TARGET_MEDANY && check_pic (1)"
- "sethi %%hi(%a1),%0; add %0,%%g4,%0"
+(define_insn "*movsi_high"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (high:SI (match_operand:SI 1 "immediate_operand" "in")))]
+ ""
+ "sethi\\t%%hi(%a1), %0"
[(set_attr "type" "move")
- (set_attr "length" "2")])
+ (set_attr "length" "1")])
-(define_insn "*sethi_di_medany_text"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (high:DI (match_operand 1 "text_segment_operand" "")))
- ;; The clobber is here because emit_move_sequence assumes the worst case.
- (clobber (reg:DI 1))]
- "TARGET_MEDANY && check_pic (1)"
- "sethi %%uhi(%a1),%%g1; or %%g1,%%ulo(%a1),%%g1; sllx %%g1,32,%%g1; sethi %%hi(%a1),%0; or %0,%%g1,%0"
+;; The next two patterns must wrap the SYMBOL_REF in an UNSPEC
+;; so that CSE won't optimize the address computation away.
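+;; As an illustrative sketch (registers hypothetical), when the GOT
+;; offset of a symbol does not fit in 13 bits, legitimize_pic_address
+;; uses these patterns, followed by a load through the GOT:
+;;
+;;	sethi	%hi(sym), %o1
+;;	or	%o1, %lo(sym), %o1
+;;	ld	[%l7 + %o1], %o0	! %l7 is the PIC register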
+(define_insn "movsi_lo_sum_pic"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (unspec:SI [(match_operand:SI 2 "immediate_operand" "in")] 0)))]
+ "flag_pic"
+ "or\\t%1, %%lo(%a2), %0"
+ [(set_attr "type" "ialu")
+ (set_attr "length" "1")])
+
+(define_insn "movsi_high_pic"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (high:SI (unspec:SI [(match_operand 1 "" "")] 0)))]
+ "flag_pic && check_pic (1)"
+ "sethi\\t%%hi(%a1), %0"
[(set_attr "type" "move")
- (set_attr "length" "5")])
+ (set_attr "length" "1")])
-(define_insn "*sethi_di_fullany"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (high:DI (match_operand 1 "" "")))
- (clobber (reg:DI 1))]
- "TARGET_FULLANY && check_pic (1)"
- "sethi %%uhi(%a1),%%g1; or %%g1,%%ulo(%a1),%%g1; sllx %%g1,32,%%g1; sethi %%hi(%a1),%0; or %0,%%g1,%0"
+(define_expand "movsi_pic_label_ref"
+ [(set (match_dup 3) (high:SI
+ (unspec:SI [(match_operand:SI 1 "label_ref_operand" "")
+ (match_dup 2)] 5)))
+ (set (match_dup 4) (lo_sum:SI (match_dup 3)
+ (unspec:SI [(match_dup 1) (match_dup 2)] 5)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_dup 5) (match_dup 4)))]
+ "flag_pic"
+ "
+{
+ current_function_uses_pic_offset_table = 1;
+ operands[2] = gen_rtx_SYMBOL_REF (Pmode, \"_GLOBAL_OFFSET_TABLE_\");
+ operands[3] = gen_reg_rtx (SImode);
+ operands[4] = gen_reg_rtx (SImode);
+ operands[5] = pic_offset_table_rtx;
+}")
+
+(define_insn "*movsi_high_pic_label_ref"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (high:SI
+ (unspec:SI [(match_operand:SI 1 "label_ref_operand" "")
+ (match_operand:SI 2 "" "")] 5)))]
+ "flag_pic"
+ "sethi\\t%%hi(%a2-(%a1-.)), %0"
[(set_attr "type" "move")
- (set_attr "length" "5")])
-\f
-;; Move instructions
+ (set_attr "length" "1")])
-(define_expand "movqi"
- [(set (match_operand:QI 0 "general_operand" "")
- (match_operand:QI 1 "general_operand" ""))]
+(define_insn "*movsi_lo_sum_pic_label_ref"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (unspec:SI [(match_operand:SI 2 "label_ref_operand" "")
+ (match_operand:SI 3 "" "")] 5)))]
+ "flag_pic"
+ "or\\t%1, %%lo(%a3-(%a2-.)), %0"
+ [(set_attr "type" "ialu")
+ (set_attr "length" "1")])
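+
+;; Taken together (an illustrative sketch; the label and registers are
+;; hypothetical), the insns above materialize a label address as the
+;; PIC register minus a GOT-relative displacement:
+;;
+;;	sethi	%hi(_GLOBAL_OFFSET_TABLE_-(.LL1-.)), %o1
+;;	or	%o1, %lo(_GLOBAL_OFFSET_TABLE_-(.LL1-.)), %o1
+;;	sub	%l7, %o1, %o0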
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
""
"
{
- if (emit_move_sequence (operands, QImode))
- DONE;
+ /* Where possible, convert CONST_DOUBLE into a CONST_INT. */
+ if (GET_CODE (operands[1]) == CONST_DOUBLE
+#if HOST_BITS_PER_WIDE_INT == 32
+ && ((CONST_DOUBLE_HIGH (operands[1]) == 0
+ && (CONST_DOUBLE_LOW (operands[1]) & 0x80000000) == 0)
+ || (CONST_DOUBLE_HIGH (operands[1]) == (HOST_WIDE_INT) 0xffffffff
+ && (CONST_DOUBLE_LOW (operands[1]) & 0x80000000) != 0))
+#endif
+ )
+ operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
+
+ /* Handle MEM cases first. */
+ if (GET_CODE (operands[0]) == MEM)
+ {
+      /* If the source is a REG, the store is always possible.
+	 Storing constant zero directly needs TARGET_ARCH64,
+	 where %g0 provides a full 64-bit zero.  */
+ if (register_operand (operands[1], DImode)
+ || (TARGET_ARCH64
+ && (operands[1] == const0_rtx)))
+ goto movdi_is_ok;
+
+ if (! reload_in_progress)
+ {
+ operands[0] = validize_mem (operands[0]);
+ operands[1] = force_reg (DImode, operands[1]);
+ }
+ }
+
+ if (flag_pic)
+ {
+ if (CONSTANT_P (operands[1])
+ && pic_address_needs_scratch (operands[1]))
+ operands[1] = legitimize_pic_address (operands[1], DImode, 0);
+
+ if (GET_CODE (operands[1]) == LABEL_REF)
+ {
+ if (! TARGET_ARCH64)
+ abort ();
+ emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
+ DONE;
+ }
+
+ if (symbolic_operand (operands[1], DImode))
+ {
+ operands[1] = legitimize_pic_address (operands[1],
+ DImode,
+ (reload_in_progress ?
+ operands[0] :
+ NULL_RTX));
+ goto movdi_is_ok;
+ }
+ }
+
+ /* If we are trying to toss an integer constant into the
+ FPU registers, force it into memory. */
+ if (GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) >= SPARC_FIRST_FP_REG
+ && REGNO (operands[0]) <= SPARC_LAST_V9_FP_REG
+ && CONSTANT_P (operands[1]))
+ operands[1] = validize_mem (force_const_mem (GET_MODE (operands[0]),
+ operands[1]));
+
+ /* This makes sure we will not get rematched due to splittage. */
+ if (! CONSTANT_P (operands[1]) || input_operand (operands[1], DImode))
+ ;
+ else if (TARGET_ARCH64
+ && CONSTANT_P (operands[1])
+ && GET_CODE (operands[1]) != HIGH
+ && GET_CODE (operands[1]) != LO_SUM)
+ {
+ sparc_emit_set_const64 (operands[0], operands[1]);
+ DONE;
+ }
+
+ movdi_is_ok:
+ ;
}")
-(define_insn "*movqi_insn"
- [(set (match_operand:QI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,Q")
- (match_operand:QI 1 "move_operand" "rI,K,Q,rJ"))]
- "! TARGET_LIVE_G0
- && (register_operand (operands[0], QImode)
- || register_operand (operands[1], QImode)
- || operands[1] == const0_rtx)"
+;; Be careful, fmovd does not exist when !arch64.
+;; We match MEM moves directly when the registers are
+;; properly even-numbered, but fall into splits otherwise.
+;; The constraint ordering here is really important to
+;; avoid insane problems in reload, especially for patterns
+;; of the form:
+;;
+;; (set (mem:DI (plus:SI (reg:SI 30 %fp)
+;; (const_int -5016)))
+;; (reg:DI 2 %g2))
+;;
+(define_insn "*movdi_insn_sp32"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=T,U,o,r,r,r,?T,?f,?f,?o,?f")
+ (match_operand:DI 1 "input_operand" "U,T,r,o,i,r,f,T,o,f,f"))]
+ "! TARGET_ARCH64 &&
+ (register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode))"
"@
- mov %1,%0
- sethi %%hi(%a1),%0
- ldub %1,%0
- stb %r1,%0"
- [(set_attr "type" "move,move,load,store")
+ std\\t%1, %0
+ ldd\\t%1, %0
+ #
+ #
+ #
+ #
+ std\\t%1, %0
+ ldd\\t%1, %0
+ #
+ #
+ #"
+ [(set_attr "type" "store,load,*,*,*,*,fpstore,fpload,*,*,*")
+ (set_attr "length" "1,1,2,2,2,2,1,1,2,2,2")])
+
+;; The following are generated by sparc_emit_set_const64
+(define_insn "*movdi_sp64_dbl"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "const64_operand" ""))]
+ "(TARGET_ARCH64
+ && HOST_BITS_PER_WIDE_INT != 64)"
+ "mov\\t%1, %0"
+ [(set_attr "type" "move")
(set_attr "length" "1")])
-(define_insn "*movqi_insn_liveg0"
- [(set (match_operand:QI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,r,r,Q")
- (match_operand:QI 1 "move_operand" "r,J,I,K,Q,r"))]
- "TARGET_LIVE_G0
- && (register_operand (operands[0], QImode)
- || register_operand (operands[1], QImode))"
- "@
- mov %1,%0
- and %0,0,%0
- and %0,0,%0\;or %0,%1,%0
- sethi %%hi(%a1),%0
- ldub %1,%0
- stb %1,%0"
- [(set_attr "type" "move,move,move,move,load,store")
- (set_attr "length" "1,1,2,1,1,1")])
-
-(define_insn "*lo_sum_qi"
- [(set (match_operand:QI 0 "register_operand" "=r")
- (subreg:QI (lo_sum:SI (match_operand:QI 1 "register_operand" "r")
- (match_operand 2 "immediate_operand" "in")) 0))]
- ""
- "or %1,%%lo(%a2),%0"
- [(set_attr "length" "1")])
+;; This is needed to show CSE exactly which bits are set
+;; in a 64-bit register by sethi instructions.
+(define_insn "*movdi_const64_special"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "const64_high_operand" ""))]
+ "TARGET_ARCH64"
+ "sethi\\t%%hi(%a1), %0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
-(define_insn "*store_qi"
- [(set (mem:QI (match_operand:SI 0 "symbolic_operand" ""))
- (match_operand:QI 1 "reg_or_0_operand" "rJ"))
- (clobber (match_scratch:SI 2 "=&r"))]
- "(reload_completed || reload_in_progress)
- && ! TARGET_PTR64"
- "sethi %%hi(%a0),%2\;stb %r1,[%2+%%lo(%a0)]"
- [(set_attr "type" "store")
- (set_attr "length" "2")])
+(define_insn "*movdi_insn_sp64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,r,m,?e,?e,?m,b")
+ (match_operand:DI 1 "input_operand" "rI,K,J,m,rJ,e,m,e,J"))]
+ "TARGET_ARCH64 &&
+ (register_operand (operands[0], DImode)
+ || reg_or_0_operand (operands[1], DImode))"
+ "@
+ mov\\t%1, %0
+ sethi\\t%%hi(%a1), %0
+ clr\\t%0
+ ldx\\t%1, %0
+ stx\\t%r1, %0
+ fmovd\\t%1, %0
+ ldd\\t%1, %0
+ std\\t%1, %0
+ fzero\\t%0"
+ [(set_attr "type" "move,move,move,load,store,fpmove,fpload,fpstore,fpmove")
+ (set_attr "length" "1")])
-(define_expand "movhi"
- [(set (match_operand:HI 0 "general_operand" "")
- (match_operand:HI 1 "general_operand" ""))]
- ""
+(define_expand "movdi_pic_label_ref"
+ [(set (match_dup 3) (high:DI
+ (unspec:DI [(match_operand:DI 1 "label_ref_operand" "")
+ (match_dup 2)] 5)))
+ (set (match_dup 4) (lo_sum:DI (match_dup 3)
+ (unspec:DI [(match_dup 1) (match_dup 2)] 5)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_dup 5) (match_dup 4)))]
+ "TARGET_ARCH64 && flag_pic"
"
{
- if (emit_move_sequence (operands, HImode))
- DONE;
+ current_function_uses_pic_offset_table = 1;
+ operands[2] = gen_rtx_SYMBOL_REF (Pmode, \"_GLOBAL_OFFSET_TABLE_\");
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (DImode);
+ operands[5] = pic_offset_table_rtx;
}")
-(define_insn "*movhi_insn"
- [(set (match_operand:HI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,Q")
- (match_operand:HI 1 "move_operand" "rI,K,Q,rJ"))]
- "! TARGET_LIVE_G0
- && (register_operand (operands[0], HImode)
- || register_operand (operands[1], HImode)
- || operands[1] == const0_rtx)"
- "@
- mov %1,%0
- sethi %%hi(%a1),%0
- lduh %1,%0
- sth %r1,%0"
- [(set_attr "type" "move,move,load,store")
+(define_insn "*movdi_high_pic_label_ref"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI
+ (unspec:DI [(match_operand:DI 1 "label_ref_operand" "")
+ (match_operand:DI 2 "" "")] 5)))]
+ "TARGET_ARCH64 && flag_pic"
+ "sethi\\t%%hi(%a2-(%a1-.)), %0"
+ [(set_attr "type" "move")
(set_attr "length" "1")])
-(define_insn "*movhi_insn_liveg0"
- [(set (match_operand:HI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,r,r,Q")
- (match_operand:HI 1 "move_operand" "r,J,I,K,Q,r"))]
- "TARGET_LIVE_G0
- && (register_operand (operands[0], HImode)
- || register_operand (operands[1], HImode))"
- "@
- mov %1,%0
- and %0,0,%0
- and %0,0,%0\;or %0,%1,%0
- sethi %%hi(%a1),%0
- lduh %1,%0
- sth %1,%0"
- [(set_attr "type" "move,move,move,move,load,store")
- (set_attr "length" "1,1,2,1,1,1")])
-
-(define_insn "*lo_sum_hi"
- [(set (match_operand:HI 0 "register_operand" "=r")
- (lo_sum:HI (match_operand:HI 1 "register_operand" "r")
- (match_operand 2 "immediate_operand" "in")))]
- ""
- "or %1,%%lo(%a2),%0"
+(define_insn "*movdi_lo_sum_pic_label_ref"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (unspec:DI [(match_operand:DI 2 "label_ref_operand" "")
+ (match_operand:DI 3 "" "")] 5)))]
+ "TARGET_ARCH64 && flag_pic"
+ "or\\t%1, %%lo(%a3-(%a2-.)), %0"
+ [(set_attr "type" "ialu")
+ (set_attr "length" "1")])
+
+;; SPARC-V9 code model support insns.  See sparc_emit_set_symbolic_const64
+;; in sparc.c for how these are used.  PIC patterns come first.
+
+(define_insn "movdi_lo_sum_pic"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (unspec:DI [(match_operand:DI 2 "immediate_operand" "in")] 0)))]
+ "TARGET_ARCH64 && flag_pic"
+ "or\\t%1, %%lo(%a2), %0"
+ [(set_attr "type" "ialu")
+ (set_attr "length" "1")])
+
+(define_insn "movdi_high_pic"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (unspec:DI [(match_operand 1 "" "")] 0)))]
+ "TARGET_ARCH64 && flag_pic && check_pic (1)"
+ "sethi\\t%%hi(%a1), %0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "*sethi_di_medlow_embmedany_pic"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (match_operand:DI 1 "sp64_medium_pic_operand" "")))
+ ;; The clobber is here because emit_move_sequence assumes the worst case.
+ (clobber (reg:DI 1))]
+ "(TARGET_CM_MEDLOW || TARGET_CM_EMBMEDANY) && check_pic (1)"
+ "sethi\\t%%hi(%a1), %0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "*sethi_di_medlow"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (match_operand:DI 1 "symbolic_operand" "")))
+ ;; The clobber is here because emit_move_sequence assumes the worst case.
+ (clobber (reg:DI 1))]
+ "TARGET_CM_MEDLOW && check_pic (1)"
+ "sethi\\t%%hi(%a1), %0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "*losum_di_medlow"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "symbolic_operand" "")))]
+ "TARGET_CM_MEDLOW"
+ "or\\t%1, %%lo(%a2), %0"
+ [(set_attr "type" "ialu")
+ (set_attr "length" "1")])
+
+(define_insn "seth44"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (unspec:DI [(match_operand:DI 1 "symbolic_operand" "")] 6)))]
+ "TARGET_CM_MEDMID"
+ "sethi\\t%%h44(%a1), %0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "setm44"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (unspec:DI [(match_operand:DI 2 "symbolic_operand" "")] 7)))]
+ "TARGET_CM_MEDMID"
+ "or\\t%1, %%m44(%a2), %0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "setl44"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "symbolic_operand" "")))]
+ "TARGET_CM_MEDMID"
+ "or\\t%1, %%l44(%a2), %0"
+ [(set_attr "type" "ialu")
+ (set_attr "length" "1")])
+
+(define_insn "sethh"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (unspec:DI [(match_operand:DI 1 "symbolic_operand" "")] 9)))]
+ "TARGET_CM_MEDANY"
+ "sethi\\t%%hh(%a1), %0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "setlm"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (unspec:DI [(match_operand:DI 1 "symbolic_operand" "")] 10)))]
+ "TARGET_CM_MEDANY"
+ "sethi\\t%%lm(%a1), %0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "sethm"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (unspec:DI [(match_operand:DI 2 "symbolic_operand" "")] 18)))]
+ "TARGET_CM_MEDANY"
+ "or\\t%1, %%hm(%a2), %0"
+ [(set_attr "type" "ialu")
+ (set_attr "length" "1")])
+
+(define_insn "setlo"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "symbolic_operand" "")))]
+ "TARGET_CM_MEDANY"
+ "or\\t%1, %%lo(%a2), %0"
+ [(set_attr "type" "ialu")
+ (set_attr "length" "1")])
+
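+;; As an illustrative sketch (register choices hypothetical) of what
+;; sparc_emit_set_symbolic_const64 builds from the patterns above:
+;; for TARGET_CM_MEDMID a 44-bit address is composed as
+;;
+;;	sethi	%h44(sym), %o0
+;;	or	%o0, %m44(sym), %o0
+;;	sllx	%o0, 12, %o0
+;;	or	%o0, %l44(sym), %o0
+;;
+;; and for TARGET_CM_MEDANY a full 64-bit address roughly as
+;;
+;;	sethi	%hh(sym), %g1
+;;	or	%g1, %hm(sym), %g1
+;;	sllx	%g1, 32, %g1
+;;	sethi	%lm(sym), %o0
+;;	or	%o0, %lo(sym), %o0
+;;	or	%o0, %g1, %o0
+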
+(define_insn "embmedany_sethi"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (unspec:DI [(match_operand:DI 1 "data_segment_operand" "")] 11)))]
+ "TARGET_CM_EMBMEDANY && check_pic (1)"
+ "sethi\\t%%hi(%a1), %0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "embmedany_losum"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "data_segment_operand" "")))]
+ "TARGET_CM_EMBMEDANY"
+ "add\\t%1, %%lo(%a2), %0"
+ [(set_attr "type" "ialu")
+ (set_attr "length" "1")])
+
+(define_insn "embmedany_brsum"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "r")] 11))]
+ "TARGET_CM_EMBMEDANY"
+ "add\\t%1, %_, %0"
[(set_attr "length" "1")])
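+
+;; An illustrative EMBMEDANY data-segment address computation
+;; (registers hypothetical; the %_ output modifier prints the data
+;; segment base register, typically %g4):
+;;
+;;	sethi	%hi(sym), %o0
+;;	add	%o0, %g4, %o0		! embmedany_brsum
+;;	add	%o0, %lo(sym), %o0	! embmedany_losum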
-(define_insn "*store_hi"
- [(set (mem:HI (match_operand:SI 0 "symbolic_operand" ""))
- (match_operand:HI 1 "reg_or_0_operand" "rJ"))
- (clobber (match_scratch:SI 2 "=&r"))]
- "(reload_completed || reload_in_progress)
- && ! TARGET_PTR64"
- "sethi %%hi(%a0),%2\;sth %r1,[%2+%%lo(%a0)]"
- [(set_attr "type" "store")
- (set_attr "length" "2")])
+(define_insn "embmedany_textuhi"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (unspec:DI [(match_operand:DI 1 "text_segment_operand" "")] 13)))]
+ "TARGET_CM_EMBMEDANY && check_pic (1)"
+ "sethi\\t%%uhi(%a1), %0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
-(define_expand "movsi"
- [(set (match_operand:SI 0 "general_operand" "")
- (match_operand:SI 1 "general_operand" ""))]
- ""
+(define_insn "embmedany_texthi"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (unspec:DI [(match_operand:DI 1 "text_segment_operand" "")] 14)))]
+ "TARGET_CM_EMBMEDANY && check_pic (1)"
+ "sethi\\t%%hi(%a1), %0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "embmedany_textulo"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (unspec:DI [(match_operand:DI 2 "text_segment_operand" "")] 15)))]
+ "TARGET_CM_EMBMEDANY"
+ "or\\t%1, %%ulo(%a2), %0"
+ [(set_attr "type" "ialu")
+ (set_attr "length" "1")])
+
+(define_insn "embmedany_textlo"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "text_segment_operand" "")))]
+ "TARGET_CM_EMBMEDANY"
+ "or\\t%1, %%lo(%a2), %0"
+ [(set_attr "type" "ialu")
+ (set_attr "length" "1")])
+
+;; Now some patterns to help out reload a bit.
+(define_expand "reload_indi"
+ [(parallel [(match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "immediate_operand" "")
+ (match_operand:TI 2 "register_operand" "=&r")])]
+ "(TARGET_CM_MEDANY
+ || TARGET_CM_EMBMEDANY)
+ && ! flag_pic"
"
{
- if (emit_move_sequence (operands, SImode))
- DONE;
+ sparc_emit_set_symbolic_const64 (operands[0], operands[1],
+ gen_rtx_REG (DImode, REGNO (operands[2])));
+ DONE;
}")
-;; We must support both 'r' and 'f' registers here, because combine may
-;; convert SFmode hard registers to SImode hard registers when simplifying
-;; subreg sets.
-
-;; We cannot combine the similar 'r' and 'f' constraints, because it causes
-;; problems with register allocation. Reload might try to put an integer
-;; in an fp register, or an fp number is an integer register.
+(define_expand "reload_outdi"
+ [(parallel [(match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "immediate_operand" "")
+ (match_operand:TI 2 "register_operand" "=&r")])]
+ "(TARGET_CM_MEDANY
+ || TARGET_CM_EMBMEDANY)
+ && ! flag_pic"
+ "
+{
+ sparc_emit_set_symbolic_const64 (operands[0], operands[1],
+ gen_rtx_REG (DImode, REGNO (operands[2])));
+ DONE;
+}")
-(define_insn "*movsi_insn"
- [(set (match_operand:SI 0 "reg_or_nonsymb_mem_operand" "=r,f,r,r,f,Q,Q")
- (match_operand:SI 1 "move_operand" "rI,!f,K,Q,!Q,rJ,!f"))]
- "! TARGET_LIVE_G0
- && (register_operand (operands[0], SImode)
- || register_operand (operands[1], SImode)
- || operands[1] == const0_rtx)"
- "@
- mov %1,%0
- fmovs %1,%0
- sethi %%hi(%a1),%0
- ld %1,%0
- ld %1,%0
- st %r1,%0
- st %1,%0"
- [(set_attr "type" "move,fp,move,load,fpload,store,fpstore")
- (set_attr "length" "1")])
-
-(define_insn "*movsi_insn_liveg0"
- [(set (match_operand:SI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,f,r,r,f,Q,Q")
- (match_operand:SI 1 "move_operand" "r,J,I,!f,K,Q,!Q,r,!f"))]
- "TARGET_LIVE_G0
- && (register_operand (operands[0], SImode)
- || register_operand (operands[1], SImode))"
- "@
- mov %1,%0
- and %0,0,%0
- and %0,0,%0\;or %0,%1,%0
- fmovs %1,%0
- sethi %%hi(%a1),%0
- ld %1,%0
- ld %1,%0
- st %1,%0
- st %1,%0"
- [(set_attr "type" "move,move,move,fp,move,load,fpload,store,fpstore")
- (set_attr "length" "1,1,2,1,1,1,1,1,1")])
-
-(define_insn "*store_si"
- [(set (mem:SI (match_operand:SI 0 "symbolic_operand" ""))
- (match_operand:SI 1 "reg_or_0_operand" "rJ"))
- (clobber (match_scratch:SI 2 "=&r"))]
- "(reload_completed || reload_in_progress)
- && ! TARGET_PTR64"
- "sethi %%hi(%a0),%2\;st %r1,[%2+%%lo(%a0)]"
- [(set_attr "type" "store")
- (set_attr "length" "2")])
+;; Split up putting CONSTs and REGs into DI regs when !arch64
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "const_int_operand" ""))]
+ "! TARGET_ARCH64 && reload_completed"
+ [(clobber (const_int 0))]
+ "
+{
+ emit_insn (gen_movsi (gen_highpart (SImode, operands[0]),
+ (INTVAL (operands[1]) < 0) ?
+ constm1_rtx :
+ const0_rtx));
+ emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
+ operands[1]));
+ DONE;
+}")
-(define_expand "movdi"
- [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "")
- (match_operand:DI 1 "general_operand" ""))]
- ""
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "const_double_operand" ""))]
+ "! TARGET_ARCH64 && reload_completed"
+ [(clobber (const_int 0))]
"
{
- if (emit_move_sequence (operands, DImode))
- DONE;
+ emit_insn (gen_movsi (gen_highpart (SImode, operands[0]),
+ GEN_INT (CONST_DOUBLE_HIGH (operands[1]))));
+
+ /* Slick... but this trick loses if this subreg constant part
+ can be done in one insn. */
+ if (CONST_DOUBLE_LOW (operands[1]) == CONST_DOUBLE_HIGH (operands[1])
+ && !(SPARC_SETHI_P (CONST_DOUBLE_HIGH (operands[1]))
+ || SPARC_SIMM13_P (CONST_DOUBLE_HIGH (operands[1]))))
+ {
+ emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
+ gen_highpart (SImode, operands[0])));
+ }
+ else
+ {
+ emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
+ GEN_INT (CONST_DOUBLE_LOW (operands[1]))));
+ }
+ DONE;
}")
-(define_insn "*movdi_sp32_insn"
- [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "=r,T,U,Q,r,r,?f,?f,?Q")
- (match_operand:DI 1 "general_operand" "r,U,T,r,Q,i,f,Q,f"))]
- "! TARGET_ARCH64
- && (register_operand (operands[0], DImode)
- || register_operand (operands[1], DImode)
- || operands[1] == const0_rtx)"
- "*
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "register_operand" ""))]
+ "! TARGET_ARCH64 && reload_completed"
+ [(clobber (const_int 0))]
+ "
{
- if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
- return output_fp_move_double (operands);
- return output_move_double (operands);
-}"
- [(set_attr "type" "move,store,load,store,load,multi,fp,fpload,fpstore")
- (set_attr "length" "2,1,1,3,3,3,2,3,3")])
-
-;;; ??? The trick used below can be extended to load any negative 32 bit
-;;; constant in two instructions. Currently the compiler will use HIGH/LO_SUM
-;;; for anything not matching the HIK constraints, which results in 5
-;;; instructions. Positive 32 bit constants can be loaded in the obvious way
-;;; with sethi/ori. To extend the trick, in the xor instruction, use
-;;; xor %o0, ((op1 & 0x3ff) | -0x400), %o0
-;;; This needs the original value of operands[1], not the inverted value.
-
-(define_insn "*movdi_sp64_insn"
- [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,Q,?f,?f,?Q")
- (match_operand:DI 1 "move_operand" "rI,K,Q,rJ,f,Q,f"))]
- "TARGET_ARCH64
- && (register_operand (operands[0], DImode)
- || register_operand (operands[1], DImode)
- || operands[1] == const0_rtx)"
- "*
+ rtx set_dest = operands[0];
+ rtx set_src = operands[1];
+ rtx dest1, dest2;
+ rtx src1, src2;
+
+ if (GET_CODE (set_dest) == SUBREG)
+ set_dest = alter_subreg (set_dest);
+ if (GET_CODE (set_src) == SUBREG)
+ set_src = alter_subreg (set_src);
+
+ dest1 = gen_highpart (SImode, set_dest);
+ dest2 = gen_lowpart (SImode, set_dest);
+ src1 = gen_highpart (SImode, set_src);
+ src2 = gen_lowpart (SImode, set_src);
+
+ /* Now emit using the real source and destination we found, swapping
+ the order if we detect overlap. */
+ if (reg_overlap_mentioned_p (dest1, src2))
+ {
+ emit_insn (gen_movsi (dest2, src2));
+ emit_insn (gen_movsi (dest1, src1));
+ }
+ else
+ {
+ emit_insn (gen_movsi (dest1, src1));
+ emit_insn (gen_movsi (dest2, src2));
+ }
+ DONE;
+}")
+
+;; Now handle the cases of memory moves from/to non-even
+;; DI mode register pairs.
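+;; For instance (illustrative), ldd/std require an even-numbered
+;; register pair, so a DImode value in the odd pair %o1/%o2 must be
+;; moved as two word loads:
+;;
+;;	ld	[%fp-8], %o1	! high word
+;;	ld	[%fp-4], %o2	! low word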
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "memory_operand" ""))]
+ "(! TARGET_ARCH64
+ && reload_completed
+ && sparc_splitdi_legitimate (operands[0], operands[1]))"
+ [(clobber (const_int 0))]
+ "
{
- switch (which_alternative)
+ rtx word0 = change_address (operands[1], SImode, NULL_RTX);
+ rtx word1 = change_address (operands[1], SImode,
+ plus_constant_for_output (XEXP (word0, 0), 4));
+ rtx high_part = gen_highpart (SImode, operands[0]);
+ rtx low_part = gen_lowpart (SImode, operands[0]);
+
+ if (reg_overlap_mentioned_p (high_part, word1))
{
- case 0:
- return \"mov %1,%0\";
- case 1:
- /* Sethi does not sign extend, so we must use a little trickery
- to use it for negative numbers. Invert the constant before
- loading it in, then use a xor immediate to invert the loaded bits
- (along with the upper 32 bits) to the desired constant. This
- works because the sethi and immediate fields overlap. */
-
- if ((INTVAL (operands[1]) & 0x80000000) == 0)
- return \"sethi %%hi(%a1),%0\";
- else
- {
- operands[1] = gen_rtx (CONST_INT, VOIDmode,
- ~ INTVAL (operands[1]));
- output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
- /* The low 10 bits are already zero, but invert the rest.
- Assemblers don't accept 0x1c00, so use -0x400 instead. */
- return \"xor %0,-0x400,%0\";
- }
- case 2:
- return \"ldx %1,%0\";
- case 3:
- return \"stx %r1,%0\";
- case 4:
- return \"mov %1,%0\";
- case 5:
- return \"ldd %1,%0\";
- case 6:
- return \"std %1,%0\";
+ emit_insn (gen_movsi (low_part, word1));
+ emit_insn (gen_movsi (high_part, word0));
}
-}"
- [(set_attr "type" "move,move,load,store,fp,fpload,fpstore")
- (set_attr "length" "1,2,1,1,1,1,1")])
+ else
+ {
+ emit_insn (gen_movsi (high_part, word0));
+ emit_insn (gen_movsi (low_part, word1));
+ }
+ DONE;
+}")
+
+(define_split
+ [(set (match_operand:DI 0 "memory_operand" "")
+ (match_operand:DI 1 "register_operand" ""))]
+ "(! TARGET_ARCH64
+ && reload_completed
+ && sparc_splitdi_legitimate (operands[1], operands[0]))"
+ [(clobber (const_int 0))]
+ "
+{
+ rtx word0 = change_address (operands[0], SImode, NULL_RTX);
+ rtx word1 = change_address (operands[0], SImode,
+ plus_constant_for_output (XEXP (word0, 0), 4));
+ rtx high_part = gen_highpart (SImode, operands[1]);
+ rtx low_part = gen_lowpart (SImode, operands[1]);
+
+ emit_insn (gen_movsi (word0, high_part));
+ emit_insn (gen_movsi (word1, low_part));
+ DONE;
+}")
-;; ??? There's no symbolic (set (mem:DI ...) ...).
-;; Experimentation with v9 suggested one isn't needed.
-\f
-;; Block move insns.
-
-;; ??? We get better code without it. See output_block_move in sparc.c.
-
-;; The definition of this insn does not really explain what it does,
-;; but it should suffice
-;; that anything generated as this insn will be recognized as one
-;; and that it will not successfully combine with anything.
-;(define_expand "movstrsi"
-; [(parallel [(set (mem:BLK (match_operand:BLK 0 "general_operand" ""))
-; (mem:BLK (match_operand:BLK 1 "general_operand" "")))
-; (use (match_operand:SI 2 "nonmemory_operand" ""))
-; (use (match_operand:SI 3 "immediate_operand" ""))
-; (clobber (match_dup 0))
-; (clobber (match_dup 1))
-; (clobber (match_scratch:SI 4 ""))
-; (clobber (reg:SI 100))
-; (clobber (reg:SI 1))])]
-; ""
-; "
-;{
-; /* If the size isn't known, don't emit inline code. output_block_move
-; would output code that's much slower than the library function.
-; Also don't output code for large blocks. */
-; if (GET_CODE (operands[2]) != CONST_INT
-; || GET_CODE (operands[3]) != CONST_INT
-; || INTVAL (operands[2]) / INTVAL (operands[3]) > 16)
-; FAIL;
-;
-; operands[0] = copy_to_mode_reg (Pmode, XEXP (operands[0], 0));
-; operands[1] = copy_to_mode_reg (Pmode, XEXP (operands[1], 0));
-; operands[2] = force_not_mem (operands[2]);
-;}")
-
-;(define_insn "*block_move_insn"
-; [(set (mem:BLK (match_operand:SI 0 "register_operand" "+r"))
-; (mem:BLK (match_operand:SI 1 "register_operand" "+r")))
-; (use (match_operand:SI 2 "nonmemory_operand" "rn"))
-; (use (match_operand:SI 3 "immediate_operand" "i"))
-; (clobber (match_dup 0))
-; (clobber (match_dup 1))
-; (clobber (match_scratch:SI 4 "=&r"))
-; (clobber (reg:SI 100))
-; (clobber (reg:SI 1))]
-; ""
-; "* return output_block_move (operands);"
-; [(set_attr "type" "multi")
-; (set_attr "length" "6")])
\f
;; Floating point move insns
-;; This pattern forces (set (reg:SF ...) (const_double ...))
-;; to be reloaded by putting the constant into memory.
-;; It must come before the more general movsf pattern.
-(define_insn "*movsf_const_insn"
- [(set (match_operand:SF 0 "general_operand" "=?r,f,m")
- (match_operand:SF 1 "" "?F,m,G"))]
- "TARGET_FPU && GET_CODE (operands[1]) == CONST_DOUBLE"
+(define_insn "*clear_sf"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (match_operand:SF 1 "" ""))]
+ "TARGET_VIS
+ && GET_CODE (operands[1]) == CONST_DOUBLE
+ && GET_CODE (operands[0]) == REG
+ && fp_zero_operand (operands[1])"
+ "fzeros\\t%0"
+ [(set_attr "type" "fpmove")
+ (set_attr "length" "1")])
+
+(define_insn "*movsf_const_intreg"
+ [(set (match_operand:SF 0 "register_operand" "=f,r")
+ (match_operand:SF 1 "const_double_operand" "m,F"))]
+ "TARGET_FPU"
"*
{
- switch (which_alternative)
+ REAL_VALUE_TYPE r;
+ long i;
+
+ if (which_alternative == 0)
+ return \"ld\\t%1, %0\";
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
+ REAL_VALUE_TO_TARGET_SINGLE (r, i);
+ if (SPARC_SIMM13_P (i) || SPARC_SETHI_P (i))
{
- case 0:
- return singlemove_string (operands);
- case 1:
- return \"ld %1,%0\";
- case 2:
- return \"st %%g0,%0\";
+ operands[1] = GEN_INT (i);
+ if (SPARC_SIMM13_P (INTVAL (operands[1])))
+ return \"mov\\t%1, %0\";
+ else if (SPARC_SETHI_P (INTVAL (operands[1])))
+ return \"sethi\\t%%hi(%a1), %0\";
+ else
+ abort ();
}
+ else
+ return \"#\";
}"
- [(set_attr "type" "load,fpload,store")
- (set_attr "length" "2,1,1")])
+ [(set_attr "type" "move")
+ (set_attr "length" "1,2")])
+
+;; There isn't much we can do about this: if we change the
+;; mode, flow info gets confused because the destination no
+;; longer looks the same.  Ho hum...
+(define_insn "*movsf_const_high"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (unspec:SF [(match_operand 1 "const_int_operand" "")] 12))]
+ ""
+ "sethi\\t%%hi(%a1), %0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "*movsf_const_lo"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (unspec:SF [(match_operand:SF 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "")] 17))]
+ ""
+ "or\\t%1, %%lo(%a2), %0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_split
+ [(set (match_operand:SF 0 "register_operand" "")
+ (match_operand:SF 1 "const_double_operand" ""))]
+ "TARGET_FPU
+ && (GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) < 32)"
+ [(set (match_dup 0) (unspec:SF [(match_dup 1)] 12))
+ (set (match_dup 0) (unspec:SF [(match_dup 0) (match_dup 1)] 17))]
+ "
+{
+ REAL_VALUE_TYPE r;
+ long i;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
+ REAL_VALUE_TO_TARGET_SINGLE (r, i);
+ operands[1] = GEN_INT (i);
+}")
(define_expand "movsf"
  [(set (match_operand:SF 0 "general_operand" "")
	(match_operand:SF 1 "general_operand" ""))]
  ""
"
{
- if (emit_move_sequence (operands, SFmode))
- DONE;
+ /* Force SFmode constants into memory. */
+ if (GET_CODE (operands[0]) == REG
+ && CONSTANT_P (operands[1]))
+ {
+ if (TARGET_VIS
+ && GET_CODE (operands[1]) == CONST_DOUBLE
+ && fp_zero_operand (operands[1]))
+ goto movsf_is_ok;
+
+ /* emit_group_store will send such bogosity to us when it is
+ not storing directly into memory. So fix this up to avoid
+ crashes in output_constant_pool. */
+ if (operands [1] == const0_rtx)
+ operands[1] = CONST0_RTX (SFmode);
+ operands[1] = validize_mem (force_const_mem (GET_MODE (operands[0]),
+ operands[1]));
+ }
+
+ /* Handle sets of MEM first. */
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (register_operand (operands[1], SFmode))
+ goto movsf_is_ok;
+
+ if (! reload_in_progress)
+ {
+ operands[0] = validize_mem (operands[0]);
+ operands[1] = force_reg (SFmode, operands[1]);
+ }
+ }
+
+ /* Fixup PIC cases. */
+ if (flag_pic)
+ {
+ if (CONSTANT_P (operands[1])
+ && pic_address_needs_scratch (operands[1]))
+ operands[1] = legitimize_pic_address (operands[1], SFmode, 0);
+
+ if (symbolic_operand (operands[1], SFmode))
+ {
+ operands[1] = legitimize_pic_address (operands[1],
+ SFmode,
+ (reload_in_progress ?
+ operands[0] :
+ NULL_RTX));
+ }
+ }
+
+ movsf_is_ok:
+ ;
}")
(define_insn "*movsf_insn"
- [(set (match_operand:SF 0 "reg_or_nonsymb_mem_operand" "=f,r,f,r,Q,Q")
- (match_operand:SF 1 "reg_or_nonsymb_mem_operand" "f,r,Q,Q,f,r"))]
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,m,r,r,m")
+ (match_operand:SF 1 "input_operand" "f,m,f,r,m,r"))]
"TARGET_FPU
&& (register_operand (operands[0], SFmode)
|| register_operand (operands[1], SFmode))"
"@
- fmovs %1,%0
- mov %1,%0
- ld %1,%0
- ld %1,%0
- st %1,%0
- st %1,%0"
- [(set_attr "type" "fp,move,fpload,load,fpstore,store")])
+ fmovs\\t%1, %0
+ ld\\t%1, %0
+ st\\t%1, %0
+ mov\\t%1, %0
+ ld\\t%1, %0
+ st\\t%1, %0"
+ [(set_attr "type" "fpmove,fpload,fpstore,move,load,store")
+ (set_attr "length" "1")])
;; Exactly the same as above, except that all `f' cases are deleted.
;; This is necessary to prevent reload from ever trying to use a `f' reg
;; when -mno-fpu.
(define_insn "*movsf_no_f_insn"
- [(set (match_operand:SF 0 "reg_or_nonsymb_mem_operand" "=r,r,Q")
- (match_operand:SF 1 "reg_or_nonsymb_mem_operand" "r,Q,r"))]
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
+ (match_operand:SF 1 "input_operand" "r,m,r"))]
"! TARGET_FPU
&& (register_operand (operands[0], SFmode)
|| register_operand (operands[1], SFmode))"
"@
- mov %1,%0
- ld %1,%0
- st %1,%0"
- [(set_attr "type" "move,load,store")])
-
-(define_insn "*store_sf"
- [(set (mem:SF (match_operand:SI 0 "symbolic_operand" "i"))
- (match_operand:SF 1 "reg_or_0_operand" "rfG"))
- (clobber (match_scratch:SI 2 "=&r"))]
- "(reload_completed || reload_in_progress)
- && ! TARGET_PTR64"
- "sethi %%hi(%a0),%2\;st %r1,[%2+%%lo(%a0)]"
- [(set_attr "type" "store")
- (set_attr "length" "2")])
+ mov\\t%1, %0
+ ld\\t%1, %0
+ st\\t%1, %0"
+ [(set_attr "type" "move,load,store")
+ (set_attr "length" "1")])
-;; This pattern forces (set (reg:DF ...) (const_double ...))
-;; to be reloaded by putting the constant into memory.
-;; It must come before the more general movdf pattern.
+(define_insn "*clear_df"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (match_operand:DF 1 "const_double_operand" ""))]
+ "TARGET_VIS
+ && fp_zero_operand (operands[1])"
+ "fzero\\t%0"
+ [(set_attr "type" "fpmove")
+ (set_attr "length" "1")])
-(define_insn "*movdf_const_insn"
- [(set (match_operand:DF 0 "general_operand" "=?r,e,o")
- (match_operand:DF 1 "" "?F,m,G"))]
- "TARGET_FPU && GET_CODE (operands[1]) == CONST_DOUBLE"
- "*
+(define_insn "*movdf_const_intreg_sp32"
+ [(set (match_operand:DF 0 "register_operand" "=e,e,r")
+ (match_operand:DF 1 "const_double_operand" "T,o,F"))]
+ "TARGET_FPU && ! TARGET_ARCH64"
+ "@
+ ldd\\t%1, %0
+ #
+ #"
+ [(set_attr "type" "move")
+ (set_attr "length" "1,2,2")])
+
+;; Now that we redo life analysis with a clean slate after
+;; instruction splitting for sched2 this can work.
+(define_insn "*movdf_const_intreg_sp64"
+ [(set (match_operand:DF 0 "register_operand" "=e,r")
+ (match_operand:DF 1 "const_double_operand" "m,F"))]
+ "TARGET_FPU && TARGET_ARCH64"
+ "@
+ ldd\\t%1, %0
+ #"
+ [(set_attr "type" "move")
+ (set_attr "length" "1,2")])
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (match_operand:DF 1 "const_double_operand" ""))]
+ "TARGET_FPU
+ && (GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) < 32)
+ && reload_completed"
+ [(clobber (const_int 0))]
+ "
{
- switch (which_alternative)
+ REAL_VALUE_TYPE r;
+ long l[2];
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
+ REAL_VALUE_TO_TARGET_DOUBLE (r, l);
+ if (GET_CODE (operands[0]) == SUBREG)
+ operands[0] = alter_subreg (operands[0]);
+ operands[0] = gen_rtx_raw_REG (DImode, REGNO (operands[0]));
+
+ if (TARGET_ARCH64)
{
- case 0:
- return output_move_double (operands);
- case 1:
- return output_fp_move_double (operands);
- case 2:
- if (TARGET_ARCH64)
- {
- return \"stx %%g0,%0\";
- }
+#if HOST_BITS_PER_WIDE_INT == 64
+ HOST_WIDE_INT val;
+
+ val = ((HOST_WIDE_INT)l[1] |
+ ((HOST_WIDE_INT)l[0] << 32));
+ emit_insn (gen_movdi (operands[0], GEN_INT (val)));
+#else
+ emit_insn (gen_movdi (operands[0],
+ gen_rtx_CONST_DOUBLE (VOIDmode, const0_rtx,
+ l[1], l[0])));
+#endif
+ }
+ else
+ {
+ emit_insn (gen_movsi (gen_highpart (SImode, operands[0]),
+ GEN_INT (l[0])));
+
+ /* Slick... but this trick loses if this subreg constant part
+ can be done in one insn. */
+ if (l[1] == l[0]
+ && !(SPARC_SETHI_P (l[0])
+ || SPARC_SIMM13_P (l[0])))
+ {
+ emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
+ gen_highpart (SImode, operands[0])));
+ }
else
- {
- operands[1] = adj_offsettable_operand (operands[0], 4);
- return \"st %%g0,%0\;st %%g0,%1\";
- }
+ {
+ emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
+ GEN_INT (l[1])));
+ }
}
-}"
- [(set_attr "type" "load,fpload,store")
- (set_attr "length" "3,3,3")])
+ DONE;
+}")
(define_expand "movdf"
  [(set (match_operand:DF 0 "general_operand" "")
	(match_operand:DF 1 "general_operand" ""))]
  ""
"
{
- if (emit_move_sequence (operands, DFmode))
- DONE;
+ /* Force DFmode constants into memory. */
+ if (GET_CODE (operands[0]) == REG
+ && CONSTANT_P (operands[1]))
+ {
+ if (TARGET_VIS
+ && GET_CODE (operands[1]) == CONST_DOUBLE
+ && fp_zero_operand (operands[1]))
+ goto movdf_is_ok;
+
+ /* emit_group_store will send such bogosity to us when it is
+ not storing directly into memory. So fix this up to avoid
+ crashes in output_constant_pool. */
+ if (operands [1] == const0_rtx)
+ operands[1] = CONST0_RTX (DFmode);
+ operands[1] = validize_mem (force_const_mem (GET_MODE (operands[0]),
+ operands[1]));
+ }
+
+ /* Handle MEM cases first. */
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (register_operand (operands[1], DFmode))
+ goto movdf_is_ok;
+
+ if (! reload_in_progress)
+ {
+ operands[0] = validize_mem (operands[0]);
+ operands[1] = force_reg (DFmode, operands[1]);
+ }
+ }
+
+ /* Fixup PIC cases. */
+ if (flag_pic)
+ {
+ if (CONSTANT_P (operands[1])
+ && pic_address_needs_scratch (operands[1]))
+ operands[1] = legitimize_pic_address (operands[1], DFmode, 0);
+
+ if (symbolic_operand (operands[1], DFmode))
+ {
+ operands[1] = legitimize_pic_address (operands[1],
+ DFmode,
+ (reload_in_progress ?
+ operands[0] :
+ NULL_RTX));
+ }
+ }
+
+ movdf_is_ok:
+ ;
}")
-(define_insn "*movdf_insn"
- [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand" "=T,U,e,r,Q,Q,e,r")
- (match_operand:DF 1 "reg_or_nonsymb_mem_operand" "U,T,e,r,e,r,Q,Q"))]
+;; Be careful, fmovd does not exist when !v9.
+(define_insn "*movdf_insn_sp32"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,T,U,T,e,r,r,o,e,o")
+ (match_operand:DF 1 "input_operand" "T,e,T,U,e,r,o,r,o,e"))]
"TARGET_FPU
+ && ! TARGET_V9
&& (register_operand (operands[0], DFmode)
|| register_operand (operands[1], DFmode))"
- "*
-{
- if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
- return output_fp_move_double (operands);
- return output_move_double (operands);
-}"
- [(set_attr "type" "fpstore,fpload,fp,move,fpstore,store,fpload,load")
- (set_attr "length" "1,1,2,2,3,3,3,3")])
-
-;; Exactly the same as above, except that all `e' cases are deleted.
-;; This is necessary to prevent reload from ever trying to use a `e' reg
-;; when -mno-fpu.
-
-(define_insn "*movdf_no_e_insn"
- [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand" "=T,U,r,Q,&r")
- (match_operand:DF 1 "reg_or_nonsymb_mem_operand" "U,T,r,r,Q"))]
+ "@
+ ldd\\t%1, %0
+ std\\t%1, %0
+ ldd\\t%1, %0
+ std\\t%1, %0
+ #
+ #
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "fpload,fpstore,load,store,*,*,*,*,*,*")
+ (set_attr "length" "1,1,1,1,2,2,2,2,2,2")])
+
+(define_insn "*movdf_no_e_insn_sp32"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=U,T,r,r,o")
+ (match_operand:DF 1 "input_operand" "T,U,r,o,r"))]
"! TARGET_FPU
+ && ! TARGET_ARCH64
+ && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode))"
+ "@
+ ldd\\t%1, %0
+ std\\t%1, %0
+ #
+ #
+ #"
+ [(set_attr "type" "load,store,*,*,*")
+ (set_attr "length" "1,1,2,2,2")])
+
+;; We have available v9 double floats but not 64-bit
+;; integer registers.
+(define_insn "*movdf_insn_v9only"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,m,U,T,r,r,o")
+ (match_operand:DF 1 "input_operand" "e,m,e,T,U,r,o,r"))]
+ "TARGET_FPU
+ && TARGET_V9
+ && ! TARGET_ARCH64
+ && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode))"
+ "@
+ fmovd\\t%1, %0
+ ldd\\t%1, %0
+ std\\t%1, %0
+ ldd\\t%1, %0
+ std\\t%1, %0
+ #
+ #
+ #"
+ [(set_attr "type" "fpmove,load,store,load,store,*,*,*")
+ (set_attr "length" "1,1,1,1,1,2,2,2")])
+
+;; We have available both v9 double floats and 64-bit
+;; integer registers.
+(define_insn "*movdf_insn_sp64"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,m,r,r,m")
+ (match_operand:DF 1 "input_operand" "e,m,e,r,m,r"))]
+ "TARGET_FPU
+ && TARGET_V9
+ && TARGET_ARCH64
&& (register_operand (operands[0], DFmode)
|| register_operand (operands[1], DFmode))"
- "* return output_move_double (operands);"
- [(set_attr "type" "store,load,move,store,load")
- (set_attr "length" "1,1,2,3,3")])
+ "@
+ fmovd\\t%1, %0
+ ldd\\t%1, %0
+ std\\t%1, %0
+ mov\\t%1, %0
+ ldx\\t%1, %0
+ stx\\t%1, %0"
+ [(set_attr "type" "fpmove,load,store,move,load,store")
+ (set_attr "length" "1")])
-;; Must handle overlapping registers here, since parameters can be unaligned
-;; in registers.
+(define_insn "*movdf_no_e_insn_sp64"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m")
+ (match_operand:DF 1 "input_operand" "r,m,r"))]
+ "! TARGET_FPU
+ && TARGET_ARCH64
+ && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode))"
+ "@
+ mov\\t%1, %0
+ ldx\\t%1, %0
+ stx\\t%1, %0"
+ [(set_attr "type" "move,load,store")
+ (set_attr "length" "1")])
+;; Ok, now the splits to handle all the multi-insn and
+;; mis-aligned memory address cases.
+;; Note that in these splits we must be careful when V9
+;; but not ARCH64, because the DFmode-in-integer-register
+;; cases must still be handled.
(define_split
[(set (match_operand:DF 0 "register_operand" "")
- (match_operand:DF 1 "register_operand" ""))]
- "! TARGET_ARCH64 && reload_completed
- && REGNO (operands[0]) < SPARC_FIRST_V9_FP_REG
- && REGNO (operands[1]) < SPARC_FIRST_V9_FP_REG"
- [(set (match_dup 2) (match_dup 3))
- (set (match_dup 4) (match_dup 5))]
+ (match_operand:DF 1 "register_operand" ""))]
+ "(! TARGET_V9
+ || (! TARGET_ARCH64
+ && ((GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) < 32)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) < 32))))
+ && reload_completed"
+ [(clobber (const_int 0))]
"
{
- rtx first_set = operand_subword (operands[0], 0, 0, DFmode);
- rtx second_use = operand_subword (operands[1], 1, 0, DFmode);
+ rtx set_dest = operands[0];
+ rtx set_src = operands[1];
+ rtx dest1, dest2;
+ rtx src1, src2;
+
+ if (GET_CODE (set_dest) == SUBREG)
+ set_dest = alter_subreg (set_dest);
+ if (GET_CODE (set_src) == SUBREG)
+ set_src = alter_subreg (set_src);
- if (REGNO (first_set) == REGNO (second_use))
+ dest1 = gen_highpart (SFmode, set_dest);
+ dest2 = gen_lowpart (SFmode, set_dest);
+ src1 = gen_highpart (SFmode, set_src);
+ src2 = gen_lowpart (SFmode, set_src);
+
+ /* Now emit using the real source and destination we found, swapping
+ the order if we detect overlap. */
+ if (reg_overlap_mentioned_p (dest1, src2))
{
- operands[2] = operand_subword (operands[0], 1, 0, DFmode);
- operands[3] = second_use;
- operands[4] = first_set;
- operands[5] = operand_subword (operands[1], 0, 0, DFmode);
+ emit_insn (gen_movsf (dest2, src2));
+ emit_insn (gen_movsf (dest1, src1));
}
else
{
- operands[2] = first_set;
- operands[3] = operand_subword (operands[1], 0, 0, DFmode);
- operands[4] = operand_subword (operands[0], 1, 0, DFmode);
- operands[5] = second_use;
+ emit_insn (gen_movsf (dest1, src1));
+ emit_insn (gen_movsf (dest2, src2));
}
+ DONE;
}")
-(define_insn "*store_df"
- [(set (mem:DF (match_operand:SI 0 "symbolic_operand" "i,i"))
- (match_operand:DF 1 "reg_or_0_operand" "re,G"))
- (clobber (match_scratch:SI 2 "=&r,&r"))]
- "(reload_completed || reload_in_progress)
- && ! TARGET_PTR64"
- "*
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (match_operand:DF 1 "memory_operand" ""))]
+ "((! TARGET_V9
+ || (! TARGET_ARCH64
+ && ((GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) < 32)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) < 32))))
+ && (reload_completed
+ && (((REGNO (operands[0])) % 2) != 0
+ || ! mem_min_alignment (operands[1], 8))
+ && offsettable_memref_p (operands[1])))"
+ [(clobber (const_int 0))]
+ "
{
- output_asm_insn (\"sethi %%hi(%a0),%2\", operands);
- if (which_alternative == 0)
- return \"std %1,[%2+%%lo(%a0)]\";
+ rtx word0 = change_address (operands[1], SFmode, NULL_RTX);
+ rtx word1 = change_address (operands[1], SFmode,
+ plus_constant_for_output (XEXP (word0, 0), 4));
+
+ if (GET_CODE (operands[0]) == SUBREG)
+ operands[0] = alter_subreg (operands[0]);
+
+ if (reg_overlap_mentioned_p (gen_highpart (SFmode, operands[0]), word1))
+ {
+ emit_insn (gen_movsf (gen_lowpart (SFmode, operands[0]),
+ word1));
+ emit_insn (gen_movsf (gen_highpart (SFmode, operands[0]),
+ word0));
+ }
else
- return \"st %%g0,[%2+%%lo(%a0)]\;st %%g0,[%2+%%lo(%a0+4)]\";
-}"
- [(set_attr "type" "store")
- (set_attr "length" "3")])
-
-;; This pattern forces (set (reg:TF ...) (const_double ...))
-;; to be reloaded by putting the constant into memory.
-;; It must come before the more general movtf pattern.
-(define_insn "*movtf_const_insn"
- [(set (match_operand:TF 0 "general_operand" "=?r,e,o")
- (match_operand:TF 1 "" "?F,m,G"))]
- "TARGET_FPU && GET_CODE (operands[1]) == CONST_DOUBLE"
- "*
-{
- switch (which_alternative)
{
- case 0:
- return output_move_quad (operands);
- case 1:
- return output_fp_move_quad (operands);
- case 2:
- if (TARGET_ARCH64)
- {
- operands[1] = adj_offsettable_operand (operands[0], 8);
- return \"stx %%g0,%0\;stx %%g0,%1\";
- }
- else
- {
- /* ??? Do we run off the end of the array here? */
- operands[1] = adj_offsettable_operand (operands[0], 4);
- operands[2] = adj_offsettable_operand (operands[0], 8);
- operands[3] = adj_offsettable_operand (operands[0], 12);
- return \"st %%g0,%0\;st %%g0,%1\;st %%g0,%2\;st %%g0,%3\";
- }
+ emit_insn (gen_movsf (gen_highpart (SFmode, operands[0]),
+ word0));
+ emit_insn (gen_movsf (gen_lowpart (SFmode, operands[0]),
+ word1));
}
-}"
- [(set_attr "type" "load,fpload,store")
- (set_attr "length" "5,5,5")])
+ DONE;
+}")
+
+(define_split
+ [(set (match_operand:DF 0 "memory_operand" "")
+ (match_operand:DF 1 "register_operand" ""))]
+ "((! TARGET_V9
+ || (! TARGET_ARCH64
+ && ((GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) < 32)
+ || (GET_CODE (operands[1]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[1])) == REG
+ && REGNO (SUBREG_REG (operands[1])) < 32))))
+ && (reload_completed
+ && (((REGNO (operands[1])) % 2) != 0
+ || ! mem_min_alignment (operands[0], 8))
+ && offsettable_memref_p (operands[0])))"
+ [(clobber (const_int 0))]
+ "
+{
+ rtx word0 = change_address (operands[0], SFmode, NULL_RTX);
+ rtx word1 = change_address (operands[0], SFmode,
+ plus_constant_for_output (XEXP (word0, 0), 4));
+
+ if (GET_CODE (operands[1]) == SUBREG)
+ operands[1] = alter_subreg (operands[1]);
+ emit_insn (gen_movsf (word0,
+ gen_highpart (SFmode, operands[1])));
+ emit_insn (gen_movsf (word1,
+ gen_lowpart (SFmode, operands[1])));
+ DONE;
+}")
(define_expand "movtf"
  [(set (match_operand:TF 0 "general_operand" "")
	(match_operand:TF 1 "general_operand" ""))]
  ""
"
{
- if (emit_move_sequence (operands, TFmode))
- DONE;
+ /* Force TFmode constants into memory. */
+ if (GET_CODE (operands[0]) == REG
+ && CONSTANT_P (operands[1]))
+ {
+ /* emit_group_store will send such bogosity to us when it is
+ not storing directly into memory. So fix this up to avoid
+ crashes in output_constant_pool. */
+ if (operands [1] == const0_rtx)
+ operands[1] = CONST0_RTX (TFmode);
+ operands[1] = validize_mem (force_const_mem (GET_MODE (operands[0]),
+ operands[1]));
+ }
+
+  /* Handle MEM cases first; note that only v9 guarantees
+     full 16-byte alignment for quads.  */
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (register_operand (operands[1], TFmode))
+ goto movtf_is_ok;
+
+ if (! reload_in_progress)
+ {
+ operands[0] = validize_mem (operands[0]);
+ operands[1] = force_reg (TFmode, operands[1]);
+ }
+ }
+
+ /* Fixup PIC cases. */
+ if (flag_pic)
+ {
+ if (CONSTANT_P (operands[1])
+ && pic_address_needs_scratch (operands[1]))
+ operands[1] = legitimize_pic_address (operands[1], TFmode, 0);
+
+ if (symbolic_operand (operands[1], TFmode))
+ {
+ operands[1] = legitimize_pic_address (operands[1],
+ TFmode,
+ (reload_in_progress ?
+ operands[0] :
+ NULL_RTX));
+ }
+ }
+
+ movtf_is_ok:
+ ;
}")
-(define_insn "*movtf_insn"
- [(set (match_operand:TF 0 "reg_or_nonsymb_mem_operand" "=e,r,Q,Q,e,&r")
- (match_operand:TF 1 "reg_or_nonsymb_mem_operand" "e,r,e,r,Q,Q"))]
+;; Be careful, fmovq and {st,ld}{x,q} do not exist when !arch64 so
+;; we must split them all. :-(
+(define_insn "*movtf_insn_sp32"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=e,o,U,o,e,r,r,o")
+ (match_operand:TF 1 "input_operand" "o,e,o,U,e,r,o,r"))]
"TARGET_FPU
+ && ! TARGET_ARCH64
&& (register_operand (operands[0], TFmode)
|| register_operand (operands[1], TFmode))"
- "*
-{
- if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
- return output_fp_move_quad (operands);
- return output_move_quad (operands);
-}"
- [(set_attr "type" "fp,move,fpstore,store,fpload,load")
- (set_attr "length" "4,4,5,5,5,5")])
+ "#"
+ [(set_attr "length" "4")])
;; Exactly the same as above, except that all `e' cases are deleted.
;; This is necessary to prevent reload from ever trying to use a `e' reg
;; when -mno-fpu.
-(define_insn "*movtf_no_e_insn"
- [(set (match_operand:TF 0 "reg_or_nonsymb_mem_operand" "=r,Q,&r")
- (match_operand:TF 1 "reg_or_nonsymb_mem_operand" "r,r,Q"))]
+(define_insn "*movtf_no_e_insn_sp32"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=U,o,r,r,o")
+ (match_operand:TF 1 "input_operand" "o,U,r,o,r"))]
"! TARGET_FPU
+ && ! TARGET_ARCH64
&& (register_operand (operands[0], TFmode)
|| register_operand (operands[1], TFmode))"
- "*
+ "#"
+ [(set_attr "length" "4")])
+
+;; Now handle the float reg cases directly when arch64,
+;; hard_quad, and proper reg number alignment are all true.
+(define_insn "*movtf_insn_hq_sp64"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=e,e,m,r,r,o")
+ (match_operand:TF 1 "input_operand" "e,m,e,r,o,r"))]
+ "TARGET_FPU
+ && TARGET_ARCH64
+ && TARGET_V9
+ && TARGET_HARD_QUAD
+ && (register_operand (operands[0], TFmode)
+ || register_operand (operands[1], TFmode))"
+ "@
+ fmovq\\t%1, %0
+ ldq\\t%1, %0
+ stq\\t%1, %0
+ #
+ #
+ #"
+ [(set_attr "type" "fpmove,fpload,fpstore,*,*,*")
+ (set_attr "length" "1,1,1,2,2,2")])
+
+;; Now we allow the integer register cases even when
+;; only arch64 is true.
+(define_insn "*movtf_insn_sp64"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=e,o,r,o,e,r")
+ (match_operand:TF 1 "input_operand" "o,e,o,r,e,r"))]
+ "TARGET_FPU
+ && TARGET_ARCH64
+ && ! TARGET_HARD_QUAD
+ && (register_operand (operands[0], TFmode)
+ || register_operand (operands[1], TFmode))"
+ "#"
+ [(set_attr "length" "2")])
+
+(define_insn "*movtf_no_e_insn_sp64"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=r,o,r")
+ (match_operand:TF 1 "input_operand" "o,r,r"))]
+ "! TARGET_FPU
+ && TARGET_ARCH64
+ && (register_operand (operands[0], TFmode)
+ || register_operand (operands[1], TFmode))"
+ "#"
+ [(set_attr "length" "2")])
+
+;; Now all the splits to handle multi-insn TF mode moves.
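+;;
+;; A sketch with an illustrative address: storing a TFmode register at
+;; [%fp-32] becomes two DFmode stores at offsets 0 and 8, which is why
+;; the memory splits below insist on an offsettable reference:
+;;
+;;	std	%f0, [%fp-32]	! first half  (offset 0)
+;;	std	%f2, [%fp-24]	! second half (offset 8)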
+(define_split
+ [(set (match_operand:TF 0 "register_operand" "")
+ (match_operand:TF 1 "register_operand" ""))]
+ "reload_completed
+ && (! TARGET_ARCH64
+ || (TARGET_FPU
+ && ! TARGET_HARD_QUAD))"
+ [(clobber (const_int 0))]
+ "
{
- if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
- return output_fp_move_quad (operands);
- return output_move_quad (operands);
-}"
- [(set_attr "type" "move,store,load")
- (set_attr "length" "4,5,5")])
-
-;; This is disabled because it does not work. Long doubles have only 8
-;; byte alignment. Adding an offset of 8 or 12 to an 8 byte aligned %lo may
-;; cause it to overflow. See also GO_IF_LEGITIMATE_ADDRESS.
-(define_insn "*store_tf"
- [(set (mem:TF (match_operand:SI 0 "symbolic_operand" "i,i"))
- (match_operand:TF 1 "reg_or_0_operand" "re,G"))
- (clobber (match_scratch:SI 2 "=&r,&r"))]
- "0 && (reload_completed || reload_in_progress)
- && ! TARGET_PTR64"
- "*
+ rtx set_dest = operands[0];
+ rtx set_src = operands[1];
+ rtx dest1, dest2;
+ rtx src1, src2;
+
+ if (GET_CODE (set_dest) == SUBREG)
+ set_dest = alter_subreg (set_dest);
+ if (GET_CODE (set_src) == SUBREG)
+ set_src = alter_subreg (set_src);
+
+  /* Ugly, but gen_highpart cannot handle this case for 32-bit
+     targets, so build the DFmode SUBREGs by hand.  */
+ dest1 = gen_rtx_SUBREG (DFmode, set_dest, WORDS_BIG_ENDIAN == 0);
+ dest2 = gen_rtx_SUBREG (DFmode, set_dest, WORDS_BIG_ENDIAN != 0);
+ src1 = gen_rtx_SUBREG (DFmode, set_src, WORDS_BIG_ENDIAN == 0);
+ src2 = gen_rtx_SUBREG (DFmode, set_src, WORDS_BIG_ENDIAN != 0);
+
+ /* Now emit using the real source and destination we found, swapping
+ the order if we detect overlap. */
+ if (reg_overlap_mentioned_p (dest1, src2))
+ {
+ emit_insn (gen_movdf (dest2, src2));
+ emit_insn (gen_movdf (dest1, src1));
+ }
+ else
+ {
+ emit_insn (gen_movdf (dest1, src1));
+ emit_insn (gen_movdf (dest2, src2));
+ }
+ DONE;
+}")
+
+(define_split
+ [(set (match_operand:TF 0 "register_operand" "")
+ (match_operand:TF 1 "memory_operand" ""))]
+ "(reload_completed
+ && offsettable_memref_p (operands[1]))"
+ [(clobber (const_int 0))]
+ "
{
- output_asm_insn (\"sethi %%hi(%a0),%2\", operands);
- if (which_alternative == 0)
- return \"std %1,[%2+%%lo(%a0)]\;std %S1,[%2+%%lo(%a0+8)]\";
+ rtx word0 = change_address (operands[1], DFmode, NULL_RTX);
+ rtx word1 = change_address (operands[1], DFmode,
+ plus_constant_for_output (XEXP (word0, 0), 8));
+ rtx dest1, dest2;
+
+  /* Ugly, but gen_highpart cannot handle this case for 32-bit
+     targets, so build the DFmode SUBREGs by hand.  */
+ dest1 = gen_rtx_SUBREG (DFmode, operands[0], WORDS_BIG_ENDIAN == 0);
+ dest2 = gen_rtx_SUBREG (DFmode, operands[0], WORDS_BIG_ENDIAN != 0);
+
+ /* Now output, ordering such that we don't clobber any registers
+ mentioned in the address. */
+      if (reg_overlap_mentioned_p (dest1, word1))
+        {
+ emit_insn (gen_movdf (dest2, word1));
+ emit_insn (gen_movdf (dest1, word0));
+ }
else
- return \"st %%g0,[%2+%%lo(%a0)]\;st %%g0,[%2+%%lo(%a0+4)]\; st %%g0,[%2+%%lo(%a0+8)]\;st %%g0,[%2+%%lo(%a0+12)]\";
-}"
- [(set_attr "type" "store")
- (set_attr "length" "5")])
+ {
+ emit_insn (gen_movdf (dest1, word0));
+ emit_insn (gen_movdf (dest2, word1));
+ }
+ DONE;
+}")
+
+(define_split
+ [(set (match_operand:TF 0 "memory_operand" "")
+ (match_operand:TF 1 "register_operand" ""))]
+ "(reload_completed
+ && offsettable_memref_p (operands[0]))"
+ [(clobber (const_int 0))]
+ "
+{
+ rtx word0 = change_address (operands[0], DFmode, NULL_RTX);
+ rtx word1 = change_address (operands[0], DFmode,
+ plus_constant_for_output (XEXP (word0, 0), 8));
+ rtx src1, src2;
+
+  /* Ugly, but gen_highpart cannot handle this case for 32-bit
+     targets, so build the DFmode SUBREGs by hand.  */
+ src1 = gen_rtx_SUBREG (DFmode, operands[1], WORDS_BIG_ENDIAN == 0);
+ src2 = gen_rtx_SUBREG (DFmode, operands[1], WORDS_BIG_ENDIAN != 0);
+ emit_insn (gen_movdf (word0, src1));
+ emit_insn (gen_movdf (word1, src2));
+ DONE;
+}")
\f
;; Sparc V9 conditional move instructions.
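+;;
+;; In the patterns below, %C1 prints the condition of operand 1, %c1
+;; prints the reversed condition, and %x2 prints the condition code
+;; register (%icc, %xcc or %fccN).  Alternative 0 ties operand 4 to
+;; the destination and moves operand 3 in when the condition holds;
+;; alternative 1 ties operand 3 and moves operand 4 on the reversed
+;; condition.  For example, a conditional move on `ge' can come out
+;; as `movge %icc, %o2, %o0'.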
&& GET_MODE (sparc_compare_op0) == DImode
&& v9_regcmp_p (code))
{
- operands[1] = gen_rtx (code, DImode,
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
sparc_compare_op0, sparc_compare_op1);
}
else
{
rtx cc_reg = gen_compare_reg (code,
sparc_compare_op0, sparc_compare_op1);
- operands[1] = gen_rtx (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
}
}")
&& GET_MODE (sparc_compare_op0) == DImode
&& v9_regcmp_p (code))
{
- operands[1] = gen_rtx (code, DImode,
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
sparc_compare_op0, sparc_compare_op1);
}
else
{
rtx cc_reg = gen_compare_reg (code,
sparc_compare_op0, sparc_compare_op1);
- operands[1] = gen_rtx (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
}
}")
"
{
enum rtx_code code = GET_CODE (operands[1]);
-
- if (GET_MODE (sparc_compare_op0) == DImode
- && ! TARGET_ARCH64)
- FAIL;
+ enum machine_mode op0_mode = GET_MODE (sparc_compare_op0);
if (sparc_compare_op1 == const0_rtx
&& GET_CODE (sparc_compare_op0) == REG
- && GET_MODE (sparc_compare_op0) == DImode
- && v9_regcmp_p (code))
+ && (TARGET_ARCH64 && op0_mode == DImode && v9_regcmp_p (code)))
{
- operands[1] = gen_rtx (code, DImode,
+ operands[1] = gen_rtx_fmt_ee (code, op0_mode,
sparc_compare_op0, sparc_compare_op1);
}
else
{
rtx cc_reg = gen_compare_reg (code,
sparc_compare_op0, sparc_compare_op1);
- operands[1] = gen_rtx (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg),
+ cc_reg, const0_rtx);
}
}")
&& GET_MODE (sparc_compare_op0) == DImode
&& v9_regcmp_p (code))
{
- operands[1] = gen_rtx (code, DImode,
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
sparc_compare_op0, sparc_compare_op1);
}
else
{
rtx cc_reg = gen_compare_reg (code,
sparc_compare_op0, sparc_compare_op1);
- operands[1] = gen_rtx (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg),
+ cc_reg, const0_rtx);
}
}")
&& GET_MODE (sparc_compare_op0) == DImode
&& v9_regcmp_p (code))
{
- operands[1] = gen_rtx (code, DImode,
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
sparc_compare_op0, sparc_compare_op1);
}
else
{
rtx cc_reg = gen_compare_reg (code,
sparc_compare_op0, sparc_compare_op1);
- operands[1] = gen_rtx (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
}
}")
&& GET_MODE (sparc_compare_op0) == DImode
&& v9_regcmp_p (code))
{
- operands[1] = gen_rtx (code, DImode,
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
sparc_compare_op0, sparc_compare_op1);
}
else
{
rtx cc_reg = gen_compare_reg (code,
sparc_compare_op0, sparc_compare_op1);
- operands[1] = gen_rtx (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
}
}")
&& GET_MODE (sparc_compare_op0) == DImode
&& v9_regcmp_p (code))
{
- operands[1] = gen_rtx (code, DImode,
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
sparc_compare_op0, sparc_compare_op1);
}
else
{
rtx cc_reg = gen_compare_reg (code,
sparc_compare_op0, sparc_compare_op1);
- operands[1] = gen_rtx (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
}
}")
(if_then_else:QI (match_operator 1 "comparison_operator"
[(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
(const_int 0)])
- (match_operand:QI 3 "arith11_operand" "rL,0")
- (match_operand:QI 4 "arith11_operand" "0,rL")))]
+ (match_operand:QI 3 "arith11_operand" "rL,0")
+ (match_operand:QI 4 "arith11_operand" "0,rL")))]
"TARGET_V9"
"@
- mov%C1 %x2,%3,%0
- mov%c1 %x2,%4,%0"
- [(set_attr "type" "cmove")])
+ mov%C1\\t%x2, %3, %0
+ mov%c1\\t%x2, %4, %0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "1")])
(define_insn "*movhi_cc_sp64"
[(set (match_operand:HI 0 "register_operand" "=r,r")
(if_then_else:HI (match_operator 1 "comparison_operator"
[(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
(const_int 0)])
- (match_operand:HI 3 "arith11_operand" "rL,0")
- (match_operand:HI 4 "arith11_operand" "0,rL")))]
+ (match_operand:HI 3 "arith11_operand" "rL,0")
+ (match_operand:HI 4 "arith11_operand" "0,rL")))]
"TARGET_V9"
"@
- mov%C1 %x2,%3,%0
- mov%c1 %x2,%4,%0"
- [(set_attr "type" "cmove")])
+ mov%C1\\t%x2, %3, %0
+ mov%c1\\t%x2, %4, %0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "1")])
(define_insn "*movsi_cc_sp64"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(if_then_else:SI (match_operator 1 "comparison_operator"
[(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
(const_int 0)])
- (match_operand:SI 3 "arith11_operand" "rL,0")
- (match_operand:SI 4 "arith11_operand" "0,rL")))]
+ (match_operand:SI 3 "arith11_operand" "rL,0")
+ (match_operand:SI 4 "arith11_operand" "0,rL")))]
"TARGET_V9"
"@
- mov%C1 %x2,%3,%0
- mov%c1 %x2,%4,%0"
- [(set_attr "type" "cmove")])
+ mov%C1\\t%x2, %3, %0
+ mov%c1\\t%x2, %4, %0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "1")])
;; ??? The constraints of operands 3,4 need work.
(define_insn "*movdi_cc_sp64"
(if_then_else:DI (match_operator 1 "comparison_operator"
[(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
(const_int 0)])
- (match_operand:DI 3 "arith11_double_operand" "rLH,0")
- (match_operand:DI 4 "arith11_double_operand" "0,rLH")))]
+ (match_operand:DI 3 "arith11_double_operand" "rLH,0")
+ (match_operand:DI 4 "arith11_double_operand" "0,rLH")))]
+ "TARGET_ARCH64"
+ "@
+ mov%C1\\t%x2, %3, %0
+ mov%c1\\t%x2, %4, %0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "1")])
+
+(define_insn "*movdi_cc_sp64_trunc"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:SI 3 "arith11_double_operand" "rLH,0")
+ (match_operand:SI 4 "arith11_double_operand" "0,rLH")))]
"TARGET_ARCH64"
"@
- mov%C1 %x2,%3,%0
- mov%c1 %x2,%4,%0"
- [(set_attr "type" "cmove")])
+ mov%C1\\t%x2, %3, %0
+ mov%c1\\t%x2, %4, %0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "1")])
(define_insn "*movsf_cc_sp64"
[(set (match_operand:SF 0 "register_operand" "=f,f")
(if_then_else:SF (match_operator 1 "comparison_operator"
[(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
(const_int 0)])
- (match_operand:SF 3 "register_operand" "f,0")
- (match_operand:SF 4 "register_operand" "0,f")))]
+ (match_operand:SF 3 "register_operand" "f,0")
+ (match_operand:SF 4 "register_operand" "0,f")))]
"TARGET_V9 && TARGET_FPU"
"@
- fmovs%C1 %x2,%3,%0
- fmovs%c1 %x2,%4,%0"
- [(set_attr "type" "cmove")])
+ fmovs%C1\\t%x2, %3, %0
+ fmovs%c1\\t%x2, %4, %0"
+ [(set_attr "type" "fpcmove")
+ (set_attr "length" "1")])
(define_insn "*movdf_cc_sp64"
[(set (match_operand:DF 0 "register_operand" "=e,e")
(if_then_else:DF (match_operator 1 "comparison_operator"
[(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
(const_int 0)])
- (match_operand:DF 3 "register_operand" "e,0")
- (match_operand:DF 4 "register_operand" "0,e")))]
+ (match_operand:DF 3 "register_operand" "e,0")
+ (match_operand:DF 4 "register_operand" "0,e")))]
"TARGET_V9 && TARGET_FPU"
"@
- fmovd%C1 %x2,%3,%0
- fmovd%c1 %x2,%4,%0"
- [(set_attr "type" "cmove")])
+ fmovd%C1\\t%x2, %3, %0
+ fmovd%c1\\t%x2, %4, %0"
+ [(set_attr "type" "fpcmove")
+ (set_attr "length" "1")])
(define_insn "*movtf_cc_sp64"
[(set (match_operand:TF 0 "register_operand" "=e,e")
(if_then_else:TF (match_operator 1 "comparison_operator"
[(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
(const_int 0)])
- (match_operand:TF 3 "register_operand" "e,0")
- (match_operand:TF 4 "register_operand" "0,e")))]
- "TARGET_V9 && TARGET_FPU"
+ (match_operand:TF 3 "register_operand" "e,0")
+ (match_operand:TF 4 "register_operand" "0,e")))]
+ "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
"@
- fmovq%C1 %x2,%3,%0
- fmovq%c1 %x2,%4,%0"
- [(set_attr "type" "cmove")])
+ fmovq%C1\\t%x2, %3, %0
+ fmovq%c1\\t%x2, %4, %0"
+ [(set_attr "type" "fpcmove")
+ (set_attr "length" "1")])
(define_insn "*movqi_cc_reg_sp64"
[(set (match_operand:QI 0 "register_operand" "=r,r")
(if_then_else:QI (match_operator 1 "v9_regcmp_op"
[(match_operand:DI 2 "register_operand" "r,r")
(const_int 0)])
- (match_operand:QI 3 "arith10_operand" "rM,0")
- (match_operand:QI 4 "arith10_operand" "0,rM")))]
+ (match_operand:QI 3 "arith10_operand" "rM,0")
+ (match_operand:QI 4 "arith10_operand" "0,rM")))]
"TARGET_ARCH64"
"@
- movr%D1 %2,%r3,%0
- movr%d1 %2,%r4,%0"
- [(set_attr "type" "cmove")])
+ movr%D1\\t%2, %r3, %0
+ movr%d1\\t%2, %r4, %0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "1")])
(define_insn "*movhi_cc_reg_sp64"
[(set (match_operand:HI 0 "register_operand" "=r,r")
(if_then_else:HI (match_operator 1 "v9_regcmp_op"
[(match_operand:DI 2 "register_operand" "r,r")
(const_int 0)])
- (match_operand:HI 3 "arith10_operand" "rM,0")
- (match_operand:HI 4 "arith10_operand" "0,rM")))]
+ (match_operand:HI 3 "arith10_operand" "rM,0")
+ (match_operand:HI 4 "arith10_operand" "0,rM")))]
"TARGET_ARCH64"
"@
- movr%D1 %2,%r3,%0
- movr%d1 %2,%r4,%0"
- [(set_attr "type" "cmove")])
+ movr%D1\\t%2, %r3, %0
+ movr%d1\\t%2, %r4, %0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "1")])
(define_insn "*movsi_cc_reg_sp64"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(if_then_else:SI (match_operator 1 "v9_regcmp_op"
[(match_operand:DI 2 "register_operand" "r,r")
(const_int 0)])
- (match_operand:SI 3 "arith10_operand" "rM,0")
- (match_operand:SI 4 "arith10_operand" "0,rM")))]
+ (match_operand:SI 3 "arith10_operand" "rM,0")
+ (match_operand:SI 4 "arith10_operand" "0,rM")))]
"TARGET_ARCH64"
"@
- movr%D1 %2,%r3,%0
- movr%d1 %2,%r4,%0"
- [(set_attr "type" "cmove")])
+ movr%D1\\t%2, %r3, %0
+ movr%d1\\t%2, %r4, %0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "1")])
;; ??? The constraints of operands 3,4 need work.
(define_insn "*movdi_cc_reg_sp64"
(if_then_else:DI (match_operator 1 "v9_regcmp_op"
[(match_operand:DI 2 "register_operand" "r,r")
(const_int 0)])
- (match_operand:DI 3 "arith10_double_operand" "rMH,0")
- (match_operand:DI 4 "arith10_double_operand" "0,rMH")))]
+ (match_operand:DI 3 "arith10_double_operand" "rMH,0")
+ (match_operand:DI 4 "arith10_double_operand" "0,rMH")))]
+ "TARGET_ARCH64"
+ "@
+ movr%D1\\t%2, %r3, %0
+ movr%d1\\t%2, %r4, %0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "1")])
+
+(define_insn "*movdi_cc_reg_sp64_trunc"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (if_then_else:SI (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:SI 3 "arith10_double_operand" "rMH,0")
+ (match_operand:SI 4 "arith10_double_operand" "0,rMH")))]
"TARGET_ARCH64"
"@
- movr%D1 %2,%r3,%0
- movr%d1 %2,%r4,%0"
- [(set_attr "type" "cmove")])
+ movr%D1\\t%2, %r3, %0
+ movr%d1\\t%2, %r4, %0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "1")])
(define_insn "*movsf_cc_reg_sp64"
[(set (match_operand:SF 0 "register_operand" "=f,f")
(if_then_else:SF (match_operator 1 "v9_regcmp_op"
[(match_operand:DI 2 "register_operand" "r,r")
(const_int 0)])
- (match_operand:SF 3 "register_operand" "f,0")
- (match_operand:SF 4 "register_operand" "0,f")))]
+ (match_operand:SF 3 "register_operand" "f,0")
+ (match_operand:SF 4 "register_operand" "0,f")))]
"TARGET_ARCH64 && TARGET_FPU"
"@
- fmovrs%D1 %2,%3,%0
- fmovrs%d1 %2,%4,%0"
- [(set_attr "type" "cmove")])
+ fmovrs%D1\\t%2, %3, %0
+ fmovrs%d1\\t%2, %4, %0"
+ [(set_attr "type" "fpcmove")
+ (set_attr "length" "1")])
(define_insn "*movdf_cc_reg_sp64"
[(set (match_operand:DF 0 "register_operand" "=e,e")
(if_then_else:DF (match_operator 1 "v9_regcmp_op"
[(match_operand:DI 2 "register_operand" "r,r")
(const_int 0)])
- (match_operand:DF 3 "register_operand" "e,0")
- (match_operand:DF 4 "register_operand" "0,e")))]
+ (match_operand:DF 3 "register_operand" "e,0")
+ (match_operand:DF 4 "register_operand" "0,e")))]
"TARGET_ARCH64 && TARGET_FPU"
"@
- fmovrd%D1 %2,%3,%0
- fmovrd%d1 %2,%4,%0"
- [(set_attr "type" "cmove")])
+ fmovrd%D1\\t%2, %3, %0
+ fmovrd%d1\\t%2, %4, %0"
+ [(set_attr "type" "fpcmove")
+ (set_attr "length" "1")])
(define_insn "*movtf_cc_reg_sp64"
[(set (match_operand:TF 0 "register_operand" "=e,e")
(if_then_else:TF (match_operator 1 "v9_regcmp_op"
[(match_operand:DI 2 "register_operand" "r,r")
(const_int 0)])
- (match_operand:TF 3 "register_operand" "e,0")
- (match_operand:TF 4 "register_operand" "0,e")))]
+ (match_operand:TF 3 "register_operand" "e,0")
+ (match_operand:TF 4 "register_operand" "0,e")))]
"TARGET_ARCH64 && TARGET_FPU"
"@
- fmovrq%D1 %2,%3,%0
- fmovrq%d1 %2,%4,%0"
- [(set_attr "type" "cmove")])
+ fmovrq%D1\\t%2, %3, %0
+ fmovrq%d1\\t%2, %4, %0"
+ [(set_attr "type" "fpcmove")
+ (set_attr "length" "1")])
\f
;;- zero extension instructions
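+
+;; Zero-extending HImode in a register uses a shift pair (sll by 16,
+;; then srl by 16) rather than an AND: the mask 0xffff does not fit
+;; the 13-bit signed immediate field, so masking would first have to
+;; build the constant in a register.  The sign-extension expanders
+;; further down use the same left shift followed by an arithmetic
+;; right shift.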
"
{
rtx temp = gen_reg_rtx (SImode);
- rtx shift_16 = gen_rtx (CONST_INT, VOIDmode, 16);
+ rtx shift_16 = GEN_INT (16);
int op1_subword = 0;
if (GET_CODE (operand1) == SUBREG)
operand1 = XEXP (operand1, 0);
}
- emit_insn (gen_ashlsi3 (temp, gen_rtx (SUBREG, SImode, operand1,
- op1_subword),
+ emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1, op1_subword),
shift_16));
emit_insn (gen_lshrsi3 (operand0, temp, shift_16));
DONE;
[(set (match_operand:SI 0 "register_operand" "=r")
(zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
""
- "lduh %1,%0"
- [(set_attr "type" "load")])
+ "lduh\\t%1, %0"
+ [(set_attr "type" "load")
+ (set_attr "length" "1")])
(define_expand "zero_extendqihi2"
[(set (match_operand:HI 0 "register_operand" "")
(define_insn "*zero_extendqihi2_insn"
[(set (match_operand:HI 0 "register_operand" "=r,r")
- (zero_extend:HI (match_operand:QI 1 "sparc_operand" "r,Q")))]
+ (zero_extend:HI (match_operand:QI 1 "input_operand" "r,m")))]
"GET_CODE (operands[1]) != CONST_INT"
"@
- and %1,0xff,%0
- ldub %1,%0"
+ and\\t%1, 0xff, %0
+ ldub\\t%1, %0"
[(set_attr "type" "unary,load")
(set_attr "length" "1")])
(define_insn "*zero_extendqisi2_insn"
[(set (match_operand:SI 0 "register_operand" "=r,r")
- (zero_extend:SI (match_operand:QI 1 "sparc_operand" "r,Q")))]
+ (zero_extend:SI (match_operand:QI 1 "input_operand" "r,m")))]
"GET_CODE (operands[1]) != CONST_INT"
"@
- and %1,0xff,%0
- ldub %1,%0"
+ and\\t%1, 0xff, %0
+ ldub\\t%1, %0"
[(set_attr "type" "unary,load")
(set_attr "length" "1")])
(define_insn "*zero_extendqidi2_insn"
[(set (match_operand:DI 0 "register_operand" "=r,r")
- (zero_extend:DI (match_operand:QI 1 "sparc_operand" "r,Q")))]
+ (zero_extend:DI (match_operand:QI 1 "input_operand" "r,m")))]
"TARGET_ARCH64 && GET_CODE (operands[1]) != CONST_INT"
"@
- and %1,0xff,%0
- ldub %1,%0"
+ and\\t%1, 0xff, %0
+ ldub\\t%1, %0"
[(set_attr "type" "unary,load")
(set_attr "length" "1")])
"
{
rtx temp = gen_reg_rtx (DImode);
- rtx shift_48 = gen_rtx (CONST_INT, VOIDmode, 48);
+ rtx shift_48 = GEN_INT (48);
int op1_subword = 0;
if (GET_CODE (operand1) == SUBREG)
operand1 = XEXP (operand1, 0);
}
- emit_insn (gen_ashldi3 (temp, gen_rtx (SUBREG, DImode, operand1,
- op1_subword),
+ emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1, op1_subword),
shift_48));
emit_insn (gen_lshrdi3 (operand0, temp, shift_48));
DONE;
[(set (match_operand:DI 0 "register_operand" "=r")
(zero_extend:DI (match_operand:HI 1 "memory_operand" "m")))]
"TARGET_ARCH64"
- "lduh %1,%0"
- [(set_attr "type" "load")])
+ "lduh\\t%1, %0"
+ [(set_attr "type" "load")
+ (set_attr "length" "1")])
+
;; ??? Write truncdisi pattern using sra?
(define_expand "zero_extendsidi2"
[(set (match_operand:DI 0 "register_operand" "")
(zero_extend:DI (match_operand:SI 1 "register_operand" "")))]
- "TARGET_ARCH64"
+ ""
"")
-(define_insn "*zero_extendsidi2_insn"
+(define_insn "*zero_extendsidi2_insn_sp64"
[(set (match_operand:DI 0 "register_operand" "=r,r")
- (zero_extend:DI (match_operand:SI 1 "sparc_operand" "r,Q")))]
+ (zero_extend:DI (match_operand:SI 1 "input_operand" "r,m")))]
"TARGET_ARCH64 && GET_CODE (operands[1]) != CONST_INT"
"@
- srl %1,0,%0
- lduw %1,%0"
- [(set_attr "type" "unary,load")
+ srl\\t%1, 0, %0
+ lduw\\t%1, %0"
+ [(set_attr "type" "shift,load")
(set_attr "length" "1")])
+(define_insn "*zero_extendsidi2_insn_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:SI 1 "register_operand" "r")))]
+ "! TARGET_ARCH64"
+ "#"
+ [(set_attr "type" "unary")
+ (set_attr "length" "2")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI (match_operand:SI 1 "register_operand" "")))]
+ "! TARGET_ARCH64 && reload_completed"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+ "
+{
+ rtx dest1, dest2;
+
+ if (GET_CODE (operands[0]) == SUBREG)
+ operands[0] = alter_subreg (operands[0]);
+
+ dest1 = gen_highpart (SImode, operands[0]);
+ dest2 = gen_lowpart (SImode, operands[0]);
+
+ /* Swap the order in case of overlap. */
+ if (REGNO (dest1) == REGNO (operands[1]))
+ {
+ operands[2] = dest2;
+ operands[3] = operands[1];
+ operands[4] = dest1;
+ operands[5] = const0_rtx;
+ }
+ else
+ {
+ operands[2] = dest1;
+ operands[3] = const0_rtx;
+ operands[4] = dest2;
+ operands[5] = operands[1];
+ }
+}")
+
;; Simplify comparisons of extended values.
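+;;
+;; For example, comparing (zero_extend:SI (reg:QI)) against zero folds
+;; the extension into the compare itself: `andcc %reg, 0xff, %g0'
+;; masks to 8 bits and sets the condition codes in a single insn.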
(define_insn "*cmp_zero_extendqisi2"
[(set (reg:CC 100)
(compare:CC (zero_extend:SI (match_operand:QI 0 "register_operand" "r"))
(const_int 0)))]
- ""
- "andcc %0,0xff,%%g0"
- [(set_attr "type" "compare")])
+ "! TARGET_LIVE_G0"
+ "andcc\\t%0, 0xff, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_zero_extendqisi2_set"
[(set (reg:CC 100)
(set (match_operand:SI 0 "register_operand" "=r")
(zero_extend:SI (match_dup 1)))]
""
- "andcc %1,0xff,%0"
- [(set_attr "type" "unary")])
+ "andcc\\t%1, 0xff, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*cmp_zero_extendqidi2"
+ [(set (reg:CCX 100)
+ (compare:CCX (zero_extend:DI (match_operand:QI 0 "register_operand" "r"))
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "andcc\\t%0, 0xff, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*cmp_zero_extendqidi2_set"
+ [(set (reg:CCX 100)
+ (compare:CCX (zero_extend:DI (match_operand:QI 1 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_dup 1)))]
+ "TARGET_ARCH64"
+ "andcc\\t%1, 0xff, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
-;; Similarly, handle SI->QI mode truncation followed by a compare.
+;; Similarly, handle {SI,DI}->QI mode truncation followed by a compare.
(define_insn "*cmp_siqi_trunc"
[(set (reg:CC 100)
(compare:CC (subreg:QI (match_operand:SI 0 "register_operand" "r") 0)
(const_int 0)))]
- ""
- "andcc %0,0xff,%%g0"
- [(set_attr "type" "compare")])
+ "! TARGET_LIVE_G0"
+ "andcc\\t%0, 0xff, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_siqi_trunc_set"
[(set (reg:CC 100)
(compare:CC (subreg:QI (match_operand:SI 1 "register_operand" "r") 0)
(const_int 0)))
(set (match_operand:QI 0 "register_operand" "=r")
- (match_dup 1))]
+ (subreg:QI (match_dup 1) 0))]
""
- "andcc %1,0xff,%0"
- [(set_attr "type" "unary")])
+ "andcc\\t%1, 0xff, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*cmp_diqi_trunc"
+ [(set (reg:CC 100)
+ (compare:CC (subreg:QI (match_operand:DI 0 "register_operand" "r") 0)
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "andcc\\t%0, 0xff, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*cmp_diqi_trunc_set"
+ [(set (reg:CC 100)
+ (compare:CC (subreg:QI (match_operand:DI 1 "register_operand" "r") 0)
+ (const_int 0)))
+ (set (match_operand:QI 0 "register_operand" "=r")
+ (subreg:QI (match_dup 1) 0))]
+ "TARGET_ARCH64"
+ "andcc\\t%1, 0xff, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
\f
;;- sign extension instructions
"
{
rtx temp = gen_reg_rtx (SImode);
- rtx shift_16 = gen_rtx (CONST_INT, VOIDmode, 16);
+ rtx shift_16 = GEN_INT (16);
int op1_subword = 0;
if (GET_CODE (operand1) == SUBREG)
operand1 = XEXP (operand1, 0);
}
- emit_insn (gen_ashlsi3 (temp, gen_rtx (SUBREG, SImode, operand1,
- op1_subword),
+ emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1, op1_subword),
shift_16));
emit_insn (gen_ashrsi3 (operand0, temp, shift_16));
DONE;
[(set (match_operand:SI 0 "register_operand" "=r")
(sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
""
- "ldsh %1,%0"
- [(set_attr "type" "load")])
+ "ldsh\\t%1, %0"
+ [(set_attr "type" "sload")
+ (set_attr "length" "1")])
(define_expand "extendqihi2"
[(set (match_operand:HI 0 "register_operand" "")
"
{
rtx temp = gen_reg_rtx (SImode);
- rtx shift_24 = gen_rtx (CONST_INT, VOIDmode, 24);
+ rtx shift_24 = GEN_INT (24);
int op1_subword = 0;
int op0_subword = 0;
op0_subword = SUBREG_WORD (operand0);
operand0 = XEXP (operand0, 0);
}
- emit_insn (gen_ashlsi3 (temp, gen_rtx (SUBREG, SImode, operand1,
- op1_subword),
+ emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1, op1_subword),
shift_24));
if (GET_MODE (operand0) != SImode)
- operand0 = gen_rtx (SUBREG, SImode, operand0, op0_subword);
+ operand0 = gen_rtx_SUBREG (SImode, operand0, op0_subword);
emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
DONE;
}")
[(set (match_operand:HI 0 "register_operand" "=r")
(sign_extend:HI (match_operand:QI 1 "memory_operand" "m")))]
""
- "ldsb %1,%0"
- [(set_attr "type" "load")])
+ "ldsb\\t%1, %0"
+ [(set_attr "type" "sload")
+ (set_attr "length" "1")])
(define_expand "extendqisi2"
[(set (match_operand:SI 0 "register_operand" "")
"
{
rtx temp = gen_reg_rtx (SImode);
- rtx shift_24 = gen_rtx (CONST_INT, VOIDmode, 24);
+ rtx shift_24 = GEN_INT (24);
int op1_subword = 0;
if (GET_CODE (operand1) == SUBREG)
operand1 = XEXP (operand1, 0);
}
- emit_insn (gen_ashlsi3 (temp, gen_rtx (SUBREG, SImode, operand1,
- op1_subword),
+ emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1, op1_subword),
shift_24));
emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
DONE;
[(set (match_operand:SI 0 "register_operand" "=r")
(sign_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
""
- "ldsb %1,%0"
- [(set_attr "type" "load")])
+ "ldsb\\t%1, %0"
+ [(set_attr "type" "sload")
+ (set_attr "length" "1")])
(define_expand "extendqidi2"
[(set (match_operand:DI 0 "register_operand" "")
"
{
rtx temp = gen_reg_rtx (DImode);
- rtx shift_56 = gen_rtx (CONST_INT, VOIDmode, 56);
+ rtx shift_56 = GEN_INT (56);
int op1_subword = 0;
if (GET_CODE (operand1) == SUBREG)
operand1 = XEXP (operand1, 0);
}
- emit_insn (gen_ashldi3 (temp, gen_rtx (SUBREG, DImode, operand1,
- op1_subword),
+ emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1, op1_subword),
shift_56));
emit_insn (gen_ashrdi3 (operand0, temp, shift_56));
DONE;
[(set (match_operand:DI 0 "register_operand" "=r")
(sign_extend:DI (match_operand:QI 1 "memory_operand" "m")))]
"TARGET_ARCH64"
- "ldsb %1,%0"
- [(set_attr "type" "load")])
+ "ldsb\\t%1, %0"
+ [(set_attr "type" "sload")
+ (set_attr "length" "1")])
(define_expand "extendhidi2"
[(set (match_operand:DI 0 "register_operand" "")
"
{
rtx temp = gen_reg_rtx (DImode);
- rtx shift_48 = gen_rtx (CONST_INT, VOIDmode, 48);
+ rtx shift_48 = GEN_INT (48);
int op1_subword = 0;
if (GET_CODE (operand1) == SUBREG)
operand1 = XEXP (operand1, 0);
}
- emit_insn (gen_ashldi3 (temp, gen_rtx (SUBREG, DImode, operand1,
- op1_subword),
+ emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1, op1_subword),
shift_48));
emit_insn (gen_ashrdi3 (operand0, temp, shift_48));
DONE;
[(set (match_operand:DI 0 "register_operand" "=r")
(sign_extend:DI (match_operand:HI 1 "memory_operand" "m")))]
"TARGET_ARCH64"
- "ldsh %1,%0"
- [(set_attr "type" "load")])
+ "ldsh\\t%1, %0"
+ [(set_attr "type" "sload")
+ (set_attr "length" "1")])
(define_expand "extendsidi2"
[(set (match_operand:DI 0 "register_operand" "")
(define_insn "*sign_extendsidi2_insn"
[(set (match_operand:DI 0 "register_operand" "=r,r")
- (sign_extend:DI (match_operand:SI 1 "sparc_operand" "r,Q")))]
+ (sign_extend:DI (match_operand:SI 1 "input_operand" "r,m")))]
"TARGET_ARCH64"
"@
- sra %1,0,%0
- ldsw %1,%0"
- [(set_attr "type" "unary,load")
+ sra\\t%1, 0, %0
+ ldsw\\t%1, %0"
+ [(set_attr "type" "shift,sload")
(set_attr "length" "1")])
\f
;; Special pattern for optimizing bit-field compares. This is needed
[(set (reg:CC 100)
(compare:CC
(zero_extract:SI (match_operand:SI 0 "register_operand" "r")
- (match_operand:SI 1 "small_int" "n")
- (match_operand:SI 2 "small_int" "n"))
+ (match_operand:SI 1 "small_int_or_double" "n")
+ (match_operand:SI 2 "small_int_or_double" "n"))
(const_int 0)))]
- "INTVAL (operands[2]) > 19"
+ "! TARGET_LIVE_G0
+ && ((GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) > 19)
+ || (GET_CODE (operands[2]) == CONST_DOUBLE
+ && CONST_DOUBLE_LOW (operands[2]) > 19))"
"*
{
- int len = INTVAL (operands[1]);
- int pos = 32 - INTVAL (operands[2]) - len;
- unsigned mask = ((1 << len) - 1) << pos;
-
- operands[1] = gen_rtx (CONST_INT, VOIDmode, mask);
- return \"andcc %0,%1,%%g0\";
-}")
+ int len = (GET_CODE (operands[1]) == CONST_INT
+ ? INTVAL (operands[1])
+ : CONST_DOUBLE_LOW (operands[1]));
+ int pos = 32 -
+ (GET_CODE (operands[2]) == CONST_INT
+ ? INTVAL (operands[2])
+ : CONST_DOUBLE_LOW (operands[2])) - len;
+ HOST_WIDE_INT mask = ((1 << len) - 1) << pos;
+
+ operands[1] = GEN_INT (mask);
+ return \"andcc\\t%0, %1, %%g0\";
+}"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
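+
+;; Worked example: (zero_extract:SI (reg) (const_int 4) (const_int 28))
+;; gives len = 4, pos = 32 - 28 - 4 = 0 and mask = 0xf, so we emit
+;; `andcc %reg, 0xf, %g0'.  The `> 19' condition guarantees
+;; pos + len <= 12, so the mask always fits the 13-bit signed
+;; immediate; the `> 51' test on the DImode pattern below plays the
+;; same role.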
(define_insn "*cmp_zero_extract_sp64"
[(set (reg:CCX 100)
(compare:CCX
(zero_extract:DI (match_operand:DI 0 "register_operand" "r")
- (match_operand:SI 1 "small_int" "n")
- (match_operand:SI 2 "small_int" "n"))
+ (match_operand:SI 1 "small_int_or_double" "n")
+ (match_operand:SI 2 "small_int_or_double" "n"))
(const_int 0)))]
- "TARGET_ARCH64 && INTVAL (operands[2]) > 51"
+ "TARGET_ARCH64
+ && ((GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) > 51)
+ || (GET_CODE (operands[2]) == CONST_DOUBLE
+ && CONST_DOUBLE_LOW (operands[2]) > 51))"
"*
{
- int len = INTVAL (operands[1]);
- int pos = 64 - INTVAL (operands[2]) - len;
- unsigned mask = ((1 << len) - 1) << pos;
-
- operands[1] = gen_rtx (CONST_INT, VOIDmode, mask);
- return \"andcc %0,%1,%%g0\";
-}")
+ int len = (GET_CODE (operands[1]) == CONST_INT
+ ? INTVAL (operands[1])
+ : CONST_DOUBLE_LOW (operands[1]));
+ int pos = 64 -
+ (GET_CODE (operands[2]) == CONST_INT
+ ? INTVAL (operands[2])
+ : CONST_DOUBLE_LOW (operands[2])) - len;
+ HOST_WIDE_INT mask = (((unsigned HOST_WIDE_INT) 1 << len) - 1) << pos;
+
+ operands[1] = GEN_INT (mask);
+ return \"andcc\\t%0, %1, %%g0\";
+}"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
\f
;; Conversions between float, double and long double.
(float_extend:DF
(match_operand:SF 1 "register_operand" "f")))]
"TARGET_FPU"
- "fstod %1,%0"
- [(set_attr "type" "fp")])
+ "fstod\\t%1, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
(define_insn "extendsftf2"
[(set (match_operand:TF 0 "register_operand" "=e")
(float_extend:TF
(match_operand:SF 1 "register_operand" "f")))]
"TARGET_FPU && TARGET_HARD_QUAD"
- "fstoq %1,%0"
- [(set_attr "type" "fp")])
+ "fstoq\\t%1, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
(define_insn "extenddftf2"
[(set (match_operand:TF 0 "register_operand" "=e")
(float_extend:TF
(match_operand:DF 1 "register_operand" "e")))]
"TARGET_FPU && TARGET_HARD_QUAD"
- "fdtoq %1,%0"
- [(set_attr "type" "fp")])
+ "fdtoq\\t%1, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
(define_insn "truncdfsf2"
[(set (match_operand:SF 0 "register_operand" "=f")
(float_truncate:SF
(match_operand:DF 1 "register_operand" "e")))]
"TARGET_FPU"
- "fdtos %1,%0"
- [(set_attr "type" "fp")])
+ "fdtos\\t%1, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
(define_insn "trunctfsf2"
[(set (match_operand:SF 0 "register_operand" "=f")
(float_truncate:SF
(match_operand:TF 1 "register_operand" "e")))]
"TARGET_FPU && TARGET_HARD_QUAD"
- "fqtos %1,%0"
- [(set_attr "type" "fp")])
+ "fqtos\\t%1, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
(define_insn "trunctfdf2"
[(set (match_operand:DF 0 "register_operand" "=e")
(float_truncate:DF
(match_operand:TF 1 "register_operand" "e")))]
"TARGET_FPU && TARGET_HARD_QUAD"
- "fqtod %1,%0"
- [(set_attr "type" "fp")])
+ "fqtod\\t%1, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
\f
;; Conversion between fixed point and floating point.
[(set (match_operand:SF 0 "register_operand" "=f")
(float:SF (match_operand:SI 1 "register_operand" "f")))]
"TARGET_FPU"
- "fitos %1,%0"
- [(set_attr "type" "fp")])
+ "fitos\\t%1, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
(define_insn "floatsidf2"
[(set (match_operand:DF 0 "register_operand" "=e")
(float:DF (match_operand:SI 1 "register_operand" "f")))]
"TARGET_FPU"
- "fitod %1,%0"
- [(set_attr "type" "fp")])
+ "fitod\\t%1, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
(define_insn "floatsitf2"
[(set (match_operand:TF 0 "register_operand" "=e")
(float:TF (match_operand:SI 1 "register_operand" "f")))]
"TARGET_FPU && TARGET_HARD_QUAD"
- "fitoq %1,%0"
- [(set_attr "type" "fp")])
-
-;; Now the same for 64 bit sources.
-;; ??? We cannot put DImode values in fp regs (see below near fix_truncdfsi2).
-
-(define_expand "floatdisf2"
- [(parallel [(set (match_operand:SF 0 "register_operand" "")
- (float:SF (match_operand:DI 1 "general_operand" "")))
- (clobber (match_dup 2))
- (clobber (match_dup 3))])]
- "TARGET_ARCH64 && TARGET_FPU"
- "
-{
- operands[2] = gen_reg_rtx (DFmode);
- operands[3] = sparc64_fpconv_stack_temp ();
-}")
-
-(define_expand "floatdidf2"
- [(parallel [(set (match_operand:DF 0 "register_operand" "")
- (float:DF (match_operand:DI 1 "general_operand" "")))
- (clobber (match_dup 2))
- (clobber (match_dup 3))])]
- "TARGET_ARCH64 && TARGET_FPU"
- "
-{
- operands[2] = gen_reg_rtx (DFmode);
- operands[3] = sparc64_fpconv_stack_temp ();
-}")
-
-(define_expand "floatditf2"
- [(parallel [(set (match_operand:TF 0 "register_operand" "")
- (float:TF (match_operand:DI 1 "general_operand" "")))
- (clobber (match_dup 2))
- (clobber (match_dup 3))])]
- "TARGET_ARCH64 && TARGET_FPU && TARGET_HARD_QUAD"
- "
-{
- operands[2] = gen_reg_rtx (DFmode);
- operands[3] = sparc64_fpconv_stack_temp ();
-}")
-
-(define_insn "*floatdisf2_insn"
- [(parallel [(set (match_operand:SF 0 "register_operand" "=f")
- (float:SF (match_operand:DI 1 "general_operand" "rm")))
- (clobber (match_operand:DF 2 "register_operand" "=&e"))
- (clobber (match_operand:DI 3 "memory_operand" "m"))])]
- "TARGET_ARCH64 && TARGET_FPU"
- "*
-{
- if (GET_CODE (operands[1]) == MEM)
- output_asm_insn (\"ldd %1,%2\", operands);
- else
- output_asm_insn (\"stx %1,%3\;ldd %3,%2\", operands);
- return \"fxtos %2,%0\";
-}"
- [(set_attr "type" "fp")
- (set_attr "length" "3")])
-
-(define_insn "*floatdidf2_insn"
- [(parallel [(set (match_operand:DF 0 "register_operand" "=e")
- (float:DF (match_operand:DI 1 "general_operand" "rm")))
- (clobber (match_operand:DF 2 "register_operand" "=&e"))
- (clobber (match_operand:DI 3 "memory_operand" "m"))])]
- "TARGET_ARCH64 && TARGET_FPU"
- "*
-{
- if (GET_CODE (operands[1]) == MEM)
- output_asm_insn (\"ldd %1,%2\", operands);
- else
- output_asm_insn (\"stx %1,%3\;ldd %3,%2\", operands);
- return \"fxtod %2,%0\";
-}"
- [(set_attr "type" "fp")
- (set_attr "length" "3")])
-
-(define_insn "*floatditf2_insn"
- [(parallel [(set (match_operand:TF 0 "register_operand" "=e")
- (float:TF (match_operand:DI 1 "general_operand" "rm")))
- (clobber (match_operand:DF 2 "register_operand" "=&e"))
- (clobber (match_operand:DI 3 "memory_operand" "m"))])]
- "TARGET_ARCH64 && TARGET_FPU && TARGET_HARD_QUAD"
- "*
-{
- if (GET_CODE (operands[1]) == MEM)
- output_asm_insn (\"ldd %1,%2\", operands);
- else
- output_asm_insn (\"stx %1,%3\;ldd %3,%2\", operands);
- return \"fxtoq %2,%0\";
-}"
+ "fitoq\\t%1, %0"
[(set_attr "type" "fp")
- (set_attr "length" "3")])
+ (set_attr "length" "1")])
-;; ??? Ideally, these are what we would like to use.
+;; Now the same for 64 bit sources.
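+;; Note the "e" constraint on the source: fxtos/fxtod/fxtoq convert a
+;; DImode value that must already sit in a floating-point register.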
-(define_insn "floatdisf2_sp64"
+(define_insn "floatdisf2"
[(set (match_operand:SF 0 "register_operand" "=f")
(float:SF (match_operand:DI 1 "register_operand" "e")))]
- "0 && TARGET_ARCH64 && TARGET_FPU"
- "fxtos %1,%0"
- [(set_attr "type" "fp")])
+ "TARGET_V9 && TARGET_FPU"
+ "fxtos\\t%1, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
-(define_insn "floatdidf2_sp64"
+(define_insn "floatdidf2"
[(set (match_operand:DF 0 "register_operand" "=e")
(float:DF (match_operand:DI 1 "register_operand" "e")))]
- "0 && TARGET_ARCH64 && TARGET_FPU"
- "fxtod %1,%0"
- [(set_attr "type" "fp")])
+ "TARGET_V9 && TARGET_FPU"
+ "fxtod\\t%1, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
-(define_insn "floatditf2_sp64"
+(define_insn "floatditf2"
[(set (match_operand:TF 0 "register_operand" "=e")
(float:TF (match_operand:DI 1 "register_operand" "e")))]
- "0 && TARGET_ARCH64 && TARGET_FPU && TARGET_HARD_QUAD"
- "fxtoq %1,%0"
- [(set_attr "type" "fp")])
+ "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
+ "fxtoq\\t%1, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
;; Convert a float to an actual integer.
;; Truncation is performed as part of the conversion.
[(set (match_operand:SI 0 "register_operand" "=f")
(fix:SI (fix:SF (match_operand:SF 1 "register_operand" "f"))))]
"TARGET_FPU"
- "fstoi %1,%0"
- [(set_attr "type" "fp")])
-
-(define_insn "fix_truncdfsi2"
- [(set (match_operand:SI 0 "register_operand" "=f")
- (fix:SI (fix:DF (match_operand:DF 1 "register_operand" "e"))))]
- "TARGET_FPU"
- "fdtoi %1,%0"
- [(set_attr "type" "fp")])
-
-(define_insn "fix_trunctfsi2"
- [(set (match_operand:SI 0 "register_operand" "=f")
- (fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
- "TARGET_FPU && TARGET_HARD_QUAD"
- "fqtoi %1,%0"
- [(set_attr "type" "fp")])
-
-;; Now the same, for 64-bit targets
-;; ??? We try to work around an interesting problem.
-;; If gcc tries to do a subreg on the result it will get the wrong answer:
-;; "(subreg:SI (reg:DI M int-reg) 0)" is the same as
-;; "(subreg:SI (reg:DI N float-reg) 1)", but gcc does not know how to change
-;; the "0" to a "1". One could enhance alter_subreg but it is not clear how to
-;; do this cleanly.
-
-(define_expand "fix_truncsfdi2"
- [(parallel [(set (match_operand:DI 0 "general_operand" "")
- (fix:DI (fix:SF (match_operand:SF 1 "register_operand" ""))))
- (clobber (match_dup 2))
- (clobber (match_dup 3))])]
- "TARGET_ARCH64 && TARGET_FPU"
- "
-{
- operands[2] = gen_reg_rtx (DFmode);
- operands[3] = sparc64_fpconv_stack_temp ();
-}")
-
-(define_expand "fix_truncdfdi2"
- [(parallel [(set (match_operand:DI 0 "general_operand" "")
- (fix:DI (fix:DF (match_operand:DF 1 "register_operand" ""))))
- (clobber (match_dup 2))
- (clobber (match_dup 3))])]
- "TARGET_ARCH64 && TARGET_FPU"
- "
-{
- operands[2] = gen_reg_rtx (DFmode);
- operands[3] = sparc64_fpconv_stack_temp ();
-}")
-
-(define_expand "fix_trunctfdi2"
- [(parallel [(set (match_operand:DI 0 "general_operand" "")
- (fix:DI (fix:TF (match_operand:TF 1 "register_operand" ""))))
- (clobber (match_dup 2))
- (clobber (match_dup 3))])]
- "TARGET_ARCH64 && TARGET_FPU && TARGET_HARD_QUAD"
- "
-{
- operands[2] = gen_reg_rtx (DFmode);
- operands[3] = sparc64_fpconv_stack_temp ();
-}")
-
-(define_insn "*fix_truncsfdi2_insn"
- [(parallel [(set (match_operand:DI 0 "general_operand" "=rm")
- (fix:DI (fix:SF (match_operand:SF 1 "register_operand" "f"))))
- (clobber (match_operand:DF 2 "register_operand" "=&e"))
- (clobber (match_operand:DI 3 "memory_operand" "m"))])]
- "TARGET_ARCH64 && TARGET_FPU"
- "*
-{
- output_asm_insn (\"fstox %1,%2\", operands);
- if (GET_CODE (operands[0]) == MEM)
- return \"std %2,%0\";
- else
- return \"std %2,%3\;ldx %3,%0\";
-}"
+ "fstoi\\t%1, %0"
[(set_attr "type" "fp")
- (set_attr "length" "3")])
-
-(define_insn "*fix_truncdfdi2_insn"
- [(parallel [(set (match_operand:DI 0 "general_operand" "=rm")
- (fix:DI (fix:DF (match_operand:DF 1 "register_operand" "e"))))
- (clobber (match_operand:DF 2 "register_operand" "=&e"))
- (clobber (match_operand:DI 3 "memory_operand" "m"))])]
- "TARGET_ARCH64 && TARGET_FPU"
- "*
-{
- output_asm_insn (\"fdtox %1,%2\", operands);
- if (GET_CODE (operands[0]) == MEM)
- return \"std %2,%0\";
- else
- return \"std %2,%3\;ldx %3,%0\";
-}"
+ (set_attr "length" "1")])
+
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (fix:DF (match_operand:DF 1 "register_operand" "e"))))]
+ "TARGET_FPU"
+ "fdtoi\\t%1, %0"
[(set_attr "type" "fp")
- (set_attr "length" "3")])
-
-(define_insn "*fix_trunctfdi2_insn"
- [(parallel [(set (match_operand:DI 0 "general_operand" "=rm")
- (fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))
- (clobber (match_operand:DF 2 "register_operand" "=&e"))
- (clobber (match_operand:DI 3 "memory_operand" "m"))])]
- "TARGET_ARCH64 && TARGET_FPU && TARGET_HARD_QUAD"
- "*
-{
- output_asm_insn (\"fqtox %1,%2\", operands);
- if (GET_CODE (operands[0]) == MEM)
- return \"std %2,%0\";
- else
- return \"std %2,%3\;ldx %3,%0\";
-}"
+ (set_attr "length" "1")])
+
+(define_insn "fix_trunctfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fqtoi\\t%1, %0"
[(set_attr "type" "fp")
- (set_attr "length" "3")])
+ (set_attr "length" "1")])
-;; ??? Ideally, these are what we would like to use.
+;; Now the same, for V9 targets
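+;; Here the "e" constraint is on the destination: fstox/fdtox/fqtox
+;; leave the DImode result in a floating-point register.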
-(define_insn "fix_truncsfdi2_sp64"
+(define_insn "fix_truncsfdi2"
[(set (match_operand:DI 0 "register_operand" "=e")
(fix:DI (fix:SF (match_operand:SF 1 "register_operand" "f"))))]
- "0 && TARGET_ARCH64 && TARGET_FPU"
- "fstox %1,%0"
- [(set_attr "type" "fp")])
+ "TARGET_V9 && TARGET_FPU"
+ "fstox\\t%1, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
-(define_insn "fix_truncdfdi2_sp64"
+(define_insn "fix_truncdfdi2"
[(set (match_operand:DI 0 "register_operand" "=e")
(fix:DI (fix:DF (match_operand:DF 1 "register_operand" "e"))))]
- "0 && TARGET_ARCH64 && TARGET_FPU"
- "fdtox %1,%0"
- [(set_attr "type" "fp")])
+ "TARGET_V9 && TARGET_FPU"
+ "fdtox\\t%1, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
-(define_insn "fix_trunctfdi2_sp64"
+(define_insn "fix_trunctfdi2"
[(set (match_operand:DI 0 "register_operand" "=e")
(fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
- "0 && TARGET_ARCH64 && TARGET_FPU && TARGET_HARD_QUAD"
- "fqtox %1,%0"
- [(set_attr "type" "fp")])
+ "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
+ "fqtox\\t%1, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
\f
;;- arithmetic instructions
(define_expand "adddi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
- (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ (match_operand:DI 2 "arith_double_add_operand" "rHI")))]
""
"
{
if (! TARGET_ARCH64)
{
- emit_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
- gen_rtx (SET, VOIDmode, operands[0],
- gen_rtx (PLUS, DImode, operands[1],
- operands[2])),
- gen_rtx (CLOBBER, VOIDmode,
- gen_rtx (REG, SImode, SPARC_ICC_REG)))));
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_PLUS (DImode, operands[1],
+ operands[2])),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (CCmode, SPARC_ICC_REG)))));
+ DONE;
+ }
+  if (arith_double_4096_operand (operands[2], DImode))
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_MINUS (DImode, operands[1],
+                                             GEN_INT (-4096))));
DONE;
}
}")
-(define_insn "*adddi3_sp32"
+(define_insn "adddi3_insn_sp32"
[(set (match_operand:DI 0 "register_operand" "=r")
(plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
(match_operand:DI 2 "arith_double_operand" "rHI")))
- (clobber (reg:SI 100))]
+ (clobber (reg:CC 100))]
"! TARGET_ARCH64"
- "*
-{
- rtx op2 = operands[2];
+ "#"
+ [(set_attr "length" "2")])
- /* If constant is positive, upper bits zeroed, otherwise unchanged.
- Give the assembler a chance to pick the move instruction. */
- if (GET_CODE (op2) == CONST_INT)
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64 && reload_completed"
+ [(parallel [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (plus:SI (match_dup 4)
+ (match_dup 5))
+ (const_int 0)))
+ (set (match_dup 3)
+ (plus:SI (match_dup 4) (match_dup 5)))])
+ (set (match_dup 6)
+ (plus:SI (plus:SI (match_dup 7)
+ (match_dup 8))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0))))]
+ "
+{
+ operands[3] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_lowpart (SImode, operands[1]);
+ operands[5] = gen_lowpart (SImode, operands[2]);
+ operands[6] = gen_highpart (SImode, operands[0]);
+ operands[7] = gen_highpart (SImode, operands[1]);
+ if (GET_CODE (operands[2]) == CONST_INT)
{
- int sign = INTVAL (op2);
- if (sign < 0)
- return \"addcc %R1,%2,%R0\;addx %1,-1,%0\";
- return \"addcc %R1,%2,%R0\;addx %1,0,%0\";
+ if (INTVAL (operands[2]) < 0)
+ operands[8] = constm1_rtx;
+ else
+ operands[8] = const0_rtx;
}
- else if (GET_CODE (op2) == CONST_DOUBLE)
+ else
+ operands[8] = gen_highpart (SImode, operands[2]);
+}")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "arith_double_operand" "r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64 && reload_completed"
+ [(parallel [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (minus:SI (match_dup 4)
+ (match_dup 5))
+ (const_int 0)))
+ (set (match_dup 3)
+ (minus:SI (match_dup 4) (match_dup 5)))])
+ (set (match_dup 6)
+ (minus:SI (minus:SI (match_dup 7)
+ (match_dup 8))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0))))]
+ "
+{
+ operands[3] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_lowpart (SImode, operands[1]);
+ operands[5] = gen_lowpart (SImode, operands[2]);
+ operands[6] = gen_highpart (SImode, operands[0]);
+ operands[7] = gen_highpart (SImode, operands[1]);
+ if (GET_CODE (operands[2]) == CONST_INT)
{
- rtx xoperands[4];
- xoperands[0] = operands[0];
- xoperands[1] = operands[1];
- xoperands[2] = GEN_INT (CONST_DOUBLE_LOW (op2));
- xoperands[3] = GEN_INT (CONST_DOUBLE_HIGH (op2));
- if (xoperands[2] == const0_rtx && xoperands[0] == xoperands[1])
- output_asm_insn (\"add %1,%3,%0\", xoperands);
+ if (INTVAL (operands[2]) < 0)
+ operands[8] = constm1_rtx;
else
- output_asm_insn (\"addcc %R1,%2,%R0\;addx %1,%3,%0\", xoperands);
- return \"\";
+ operands[8] = const0_rtx;
}
- return \"addcc %R1,%R2,%R0\;addx %1,%2,%0\";
-}"
- [(set_attr "length" "2")])
+ else
+ operands[8] = gen_highpart (SImode, operands[2]);
+}")
+
+;; LTU here means "carry set"
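+;; (that is, after an addcc/subcc the RTL (ltu (reg:CC_NOOV 100)
+;; (const_int 0)) stands for the carry bit).  A sketch of what the
+;; adddi3 split above produces, with illustrative registers:
+;;
+;;	addcc	%o1, %o3, %o1	! low words; sets the carry
+;;	addx	%o0, %o2, %o0	! high words plus the carry in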
+(define_insn "addx"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (plus:SI (match_operand:SI 1 "arith_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0))))]
+ ""
+ "addx\\t%1, %2, %0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "1")])
+
+(define_insn "*addx_extend_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (plus:SI (plus:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0)))))]
+ "! TARGET_ARCH64"
+ "#"
+ [(set_attr "type" "unary")
+ (set_attr "length" "2")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI (plus:SI (plus:SI (match_operand:SI 1 "reg_or_0_operand" "")
+ (match_operand:SI 2 "arith_operand" ""))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0)))))]
+ "! TARGET_ARCH64 && reload_completed"
+ [(set (match_dup 3) (plus:SI (plus:SI (match_dup 1) (match_dup 2))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0))))
+ (set (match_dup 4) (const_int 0))]
+ "operands[3] = gen_lowpart (SImode, operands[0]);
+   operands[4] = gen_highpart (SImode, operands[0]);")
+
+(define_insn "*addx_extend_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (plus:SI (plus:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0)))))]
+ "TARGET_ARCH64"
+ "addx\\t%r1, %2, %0"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
+
+(define_insn "subx"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0))))]
+ ""
+ "subx\\t%r1, %2, %0"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
+
+(define_insn "*subx_extend_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (minus:SI (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0)))))]
+ "TARGET_ARCH64"
+ "subx\\t%r1, %2, %0"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
+
+(define_insn "*subx_extend"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (minus:SI (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0)))))]
+ "! TARGET_ARCH64"
+ "#"
+ [(set_attr "type" "unary")
+ (set_attr "length" "2")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (minus:SI (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0)))))]
+ "! TARGET_ARCH64 && reload_completed"
+ [(set (match_dup 3) (minus:SI (minus:SI (match_dup 1) (match_dup 2))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0))))
+ (set (match_dup 4) (const_int 0))]
+ "operands[3] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[0]);")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64"
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "2")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (plus:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
+ (match_operand:DI 2 "register_operand" "")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64 && reload_completed"
+ [(parallel [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (plus:SI (match_dup 3) (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 5) (plus:SI (match_dup 3) (match_dup 1)))])
+ (set (match_dup 6)
+ (plus:SI (plus:SI (match_dup 4) (const_int 0))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0))))]
+ "operands[3] = gen_lowpart (SImode, operands[2]);
+ operands[4] = gen_highpart (SImode, operands[2]);
+ operands[5] = gen_lowpart (SImode, operands[0]);
+ operands[6] = gen_highpart (SImode, operands[0]);")
(define_insn "*adddi3_sp64"
[(set (match_operand:DI 0 "register_operand" "=r")
(plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
(match_operand:DI 2 "arith_double_operand" "rHI")))]
"TARGET_ARCH64"
- "add %1,%2,%0")
+ "add\\t%1, %2, %0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "1")])
-(define_insn "addsi3"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (plus:SI (match_operand:SI 1 "arith_operand" "%r")
- (match_operand:SI 2 "arith_operand" "rI")))]
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (plus:SI (match_operand:SI 1 "arith_operand" "%r,d")
+ (match_operand:SI 2 "arith_add_operand" "rI,d")))]
+ ""
+ "
+{
+  if (arith_4096_operand (operands[2], SImode))
+    {
+      emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+                              gen_rtx_MINUS (SImode, operands[1],
+                                             GEN_INT (-4096))));
+ DONE;
+ }
+}")
+
+(define_insn "*addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (plus:SI (match_operand:SI 1 "arith_operand" "%r,d")
+ (match_operand:SI 2 "arith_operand" "rI,d")))]
""
- "add %1,%2,%0"
- [(set_attr "type" "ialu")])
+ "@
+ add\\t%1, %2, %0
+ fpadd32s\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "1")])
(define_insn "*cmp_cc_plus"
[(set (reg:CC_NOOV 100)
(compare:CC_NOOV (plus:SI (match_operand:SI 0 "arith_operand" "%r")
(match_operand:SI 1 "arith_operand" "rI"))
(const_int 0)))]
- ""
- "addcc %0,%1,%%g0"
- [(set_attr "type" "compare")])
+ "! TARGET_LIVE_G0"
+ "addcc\\t%0, %1, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_ccx_plus"
[(set (reg:CCX_NOOV 100)
(match_operand:DI 1 "arith_double_operand" "rHI"))
(const_int 0)))]
"TARGET_ARCH64"
- "addcc %0,%1,%%g0"
- [(set_attr "type" "compare")])
+ "addcc\\t%0, %1, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_cc_plus_set"
[(set (reg:CC_NOOV 100)
(set (match_operand:SI 0 "register_operand" "=r")
(plus:SI (match_dup 1) (match_dup 2)))]
""
- "addcc %1,%2,%0")
+ "addcc\\t%1, %2, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_ccx_plus_set"
[(set (reg:CCX_NOOV 100)
(set (match_operand:DI 0 "register_operand" "=r")
(plus:DI (match_dup 1) (match_dup 2)))]
"TARGET_ARCH64"
- "addcc %1,%2,%0")
+ "addcc\\t%1, %2, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_expand "subdi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(minus:DI (match_operand:DI 1 "register_operand" "r")
- (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ (match_operand:DI 2 "arith_double_add_operand" "rHI")))]
""
"
{
if (! TARGET_ARCH64)
{
- emit_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
- gen_rtx (SET, VOIDmode, operands[0],
- gen_rtx (MINUS, DImode, operands[1],
- operands[2])),
- gen_rtx (CLOBBER, VOIDmode,
- gen_rtx (REG, SImode, SPARC_ICC_REG)))));
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_MINUS (DImode, operands[1],
+ operands[2])),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (CCmode, SPARC_ICC_REG)))));
+ DONE;
+ }
+  if (arith_double_4096_operand (operands[2], DImode))
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_PLUS (DImode, operands[1],
+                                           GEN_INT (-4096))));
DONE;
}
}")
[(set (match_operand:DI 0 "register_operand" "=r")
(minus:DI (match_operand:DI 1 "register_operand" "r")
(match_operand:DI 2 "arith_double_operand" "rHI")))
- (clobber (reg:SI 100))]
+ (clobber (reg:CC 100))]
"! TARGET_ARCH64"
- "*
+ "#"
+ [(set_attr "length" "2")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (minus:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "arith_double_operand" "")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64
+ && reload_completed
+ && (GET_CODE (operands[2]) == CONST_INT
+ || GET_CODE (operands[2]) == CONST_DOUBLE)"
+ [(clobber (const_int 0))]
+ "
{
- rtx op2 = operands[2];
+ rtx highp, lowp;
- /* If constant is positive, upper bits zeroed, otherwise unchanged.
- Give the assembler a chance to pick the move instruction. */
- if (GET_CODE (op2) == CONST_INT)
+ highp = gen_highpart (SImode, operands[2]);
+ lowp = gen_lowpart (SImode, operands[2]);
+  if (lowp == const0_rtx
+      && operands[0] == operands[1])
{
- int sign = INTVAL (op2);
- if (sign < 0)
- return \"subcc %R1,%2,%R0\;subx %1,-1,%0\";
- return \"subcc %R1,%2,%R0\;subx %1,0,%0\";
+ emit_insn (gen_rtx_SET (VOIDmode,
+ gen_highpart (SImode, operands[0]),
+ gen_rtx_MINUS (SImode,
+ gen_highpart (SImode, operands[1]),
+ highp)));
}
- else if (GET_CODE (op2) == CONST_DOUBLE)
+ else
{
- rtx xoperands[4];
- xoperands[0] = operands[0];
- xoperands[1] = operands[1];
- xoperands[2] = GEN_INT (CONST_DOUBLE_LOW (op2));
- xoperands[3] = GEN_INT (CONST_DOUBLE_HIGH (op2));
- if (xoperands[2] == const0_rtx && xoperands[0] == xoperands[1])
- output_asm_insn (\"sub %1,%3,%0\", xoperands);
- else
- output_asm_insn (\"subcc %R1,%2,%R0\;subx %1,%3,%0\", xoperands);
- return \"\";
+ emit_insn (gen_cmp_minus_cc_set (gen_lowpart (SImode, operands[0]),
+ gen_lowpart (SImode, operands[1]),
+ lowp));
+ emit_insn (gen_subx (gen_highpart (SImode, operands[0]),
+ gen_highpart (SImode, operands[1]),
+ highp));
}
- return \"subcc %R1,%R2,%R0\;subx %1,%2,%0\";
-}"
- [(set_attr "length" "2")])
+ DONE;
+}")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (minus:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64
+ && reload_completed"
+ [(clobber (const_int 0))]
+ "
+{
+ emit_insn (gen_cmp_minus_cc_set (gen_lowpart (SImode, operands[0]),
+ gen_lowpart (SImode, operands[1]),
+ gen_lowpart (SImode, operands[2])));
+ emit_insn (gen_subx (gen_highpart (SImode, operands[0]),
+ gen_highpart (SImode, operands[1]),
+ gen_highpart (SImode, operands[2])));
+ DONE;
+}")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64"
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "2")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (minus:DI (match_operand:DI 1 "register_operand" "")
+ (zero_extend:DI (match_operand:SI 2 "register_operand" ""))))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64 && reload_completed"
+ [(parallel [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (minus:SI (match_dup 3) (match_dup 2))
+ (const_int 0)))
+ (set (match_dup 5) (minus:SI (match_dup 3) (match_dup 2)))])
+ (set (match_dup 6)
+ (minus:SI (minus:SI (match_dup 4) (const_int 0))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0))))]
+ "operands[3] = gen_lowpart (SImode, operands[1]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[5] = gen_lowpart (SImode, operands[0]);
+ operands[6] = gen_highpart (SImode, operands[0]);")
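+
+;; A sketch of what the split above produces (register choice is up to
+;; the allocator): the low words are subtracted with subcc, and the
+;; borrow is then consumed by subx via the (ltu ...) term, e.g.
+;;	subcc	%i1, %o2, %i1	! low word, sets the borrow
+;;	subx	%i0, 0, %i0	! high word minus borrow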
(define_insn "*subdi3_sp64"
[(set (match_operand:DI 0 "register_operand" "=r")
(minus:DI (match_operand:DI 1 "register_operand" "r")
(match_operand:DI 2 "arith_double_operand" "rHI")))]
"TARGET_ARCH64"
- "sub %1,%2,%0")
+ "sub\\t%1, %2, %0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "1")])
-(define_insn "subsi3"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (minus:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "arith_operand" "rI")))]
+(define_expand "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (minus:SI (match_operand:SI 1 "register_operand" "r,d")
+ (match_operand:SI 2 "arith_add_operand" "rI,d")))]
+ ""
+ "
+{
+  if (arith_4096_operand (operands[2], SImode))
+ {
+      emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+                              gen_rtx_PLUS (SImode, operands[1],
+                                            GEN_INT (-4096))));
+ DONE;
+ }
+}")
+
+(define_insn "*subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (minus:SI (match_operand:SI 1 "register_operand" "r,d")
+ (match_operand:SI 2 "arith_operand" "rI,d")))]
""
- "sub %1,%2,%0"
- [(set_attr "type" "ialu")])
+ "@
+ sub\\t%1, %2, %0
+ fpsub32s\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "1")])
(define_insn "*cmp_minus_cc"
[(set (reg:CC_NOOV 100)
- (compare:CC_NOOV (minus:SI (match_operand:SI 0 "register_operand" "r")
+ (compare:CC_NOOV (minus:SI (match_operand:SI 0 "reg_or_0_operand" "rJ")
(match_operand:SI 1 "arith_operand" "rI"))
(const_int 0)))]
- ""
- "subcc %0,%1,%%g0"
- [(set_attr "type" "compare")])
+ "! TARGET_LIVE_G0"
+ "subcc\\t%r0, %1, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_minus_ccx"
[(set (reg:CCX_NOOV 100)
(match_operand:DI 1 "arith_double_operand" "rHI"))
(const_int 0)))]
"TARGET_ARCH64"
- "subcc %0,%1,%%g0"
- [(set_attr "type" "compare")])
+ "subcc\\t%0, %1, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
-(define_insn "*cmp_minus_cc_set"
+(define_insn "cmp_minus_cc_set"
[(set (reg:CC_NOOV 100)
- (compare:CC_NOOV (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (compare:CC_NOOV (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
(match_operand:SI 2 "arith_operand" "rI"))
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(minus:SI (match_dup 1) (match_dup 2)))]
""
- "subcc %1,%2,%0")
+ "subcc\\t%r1, %2, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_minus_ccx_set"
[(set (reg:CCX_NOOV 100)
(set (match_operand:DI 0 "register_operand" "=r")
(minus:DI (match_dup 1) (match_dup 2)))]
"TARGET_ARCH64"
- "subcc %1,%2,%0")
+ "subcc\\t%1, %2, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
\f
;; Integer Multiply/Divide.
-;; The 32 bit multiply/divide instructions are deprecated on v9 and shouldn't
-;; we used. We still use them in 32 bit v9 compilers.
-;; The 64 bit v9 compiler will (/should) widen the args and use muldi3.
+;; The 32 bit multiply/divide instructions are deprecated on v9, but at
+;; least on UltraSPARC I, II and IIi using them still wins cycle-wise.
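+;;
+;; For instance, a 32 bit SImode multiply can still come out as the
+;; single (deprecated) v8 instruction
+;;	smul	%o0, %o1, %o0
+;; instead of widening both operands and using mulx (a sketch; actual
+;; registers depend on allocation).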
(define_insn "mulsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(mult:SI (match_operand:SI 1 "arith_operand" "%r")
(match_operand:SI 2 "arith_operand" "rI")))]
- "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
- "smul %1,%2,%0"
- [(set_attr "type" "imul")])
+ "TARGET_HARD_MUL"
+ "smul\\t%1, %2, %0"
+ [(set_attr "type" "imul")
+ (set_attr "length" "1")])
+
+(define_expand "muldi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64 || TARGET_V8PLUS"
+ "
+{
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_muldi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
-(define_insn "muldi3"
+(define_insn "*muldi3_sp64"
[(set (match_operand:DI 0 "register_operand" "=r")
(mult:DI (match_operand:DI 1 "arith_double_operand" "%r")
(match_operand:DI 2 "arith_double_operand" "rHI")))]
"TARGET_ARCH64"
- "mulx %1,%2,%0")
+ "mulx\\t%1, %2, %0"
+ [(set_attr "type" "imul")
+ (set_attr "length" "1")])
-;; It is not known whether this will match.
+;; V8plus wide multiply.
+;; XXX
+(define_insn "muldi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=r,h")
+ (mult:DI (match_operand:DI 1 "arith_double_operand" "%r,0")
+ (match_operand:DI 2 "arith_double_operand" "rHI,rHI")))
+ (clobber (match_scratch:SI 3 "=&h,X"))
+ (clobber (match_scratch:SI 4 "=&h,X"))]
+ "TARGET_V8PLUS"
+ "*
+{
+ if (sparc_check_64 (operands[1], insn) <= 0)
+ output_asm_insn (\"srl\\t%L1, 0, %L1\", operands);
+ if (which_alternative == 1)
+ output_asm_insn (\"sllx\\t%H1, 32, %H1\", operands);
+ if (sparc_check_64 (operands[2], insn) <= 0)
+ output_asm_insn (\"srl\\t%L2, 0, %L2\", operands);
+ if (which_alternative == 1)
+ return \"or\\t%L1, %H1, %H1\\n\\tsllx\\t%H2, 32, %L1\\n\\tor\\t%L2, %L1, %L1\\n\\tmulx\\t%H1, %L1, %L0\;srlx\\t%L0, 32, %H0\";
+ else
+ return \"sllx\\t%H1, 32, %3\\n\\tsllx\\t%H2, 32, %4\\n\\tor\\t%L1, %3, %3\\n\\tor\\t%L2, %4, %4\\n\\tmulx\\t%3, %4, %3\\n\\tsrlx\\t%3, 32, %H0\\n\\tmov\\t%3, %L0\";
+}"
+ [(set_attr "length" "9,8")])
(define_insn "*cmp_mul_set"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (mult:SI (match_operand:SI 1 "arith_operand" "%r")
- (match_operand:SI 2 "arith_operand" "rI")))
- (set (reg:CC_NOOV 100)
- (compare:CC_NOOV (mult:SI (match_dup 1) (match_dup 2))
- (const_int 0)))]
+ [(set (reg:CC 100)
+ (compare:CC (mult:SI (match_operand:SI 1 "arith_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_dup 1) (match_dup 2)))]
"TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS"
- "smulcc %1,%2,%0"
- [(set_attr "type" "imul")])
+ "smulcc\\t%1, %2, %0"
+ [(set_attr "type" "imul")
+ (set_attr "length" "1")])
(define_expand "mulsidi3"
[(set (match_operand:DI 0 "register_operand" "")
(mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
(sign_extend:DI (match_operand:SI 2 "arith_operand" ""))))]
- "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
+ "TARGET_HARD_MUL"
"
{
if (CONSTANT_P (operands[2]))
{
- emit_insn (gen_const_mulsidi3 (operands[0], operands[1], operands[2]));
+ if (TARGET_V8PLUS)
+ emit_insn (gen_const_mulsidi3_v8plus (operands[0], operands[1],
+ operands[2]));
+ else
+ emit_insn (gen_const_mulsidi3_sp32 (operands[0], operands[1],
+ operands[2]));
+ DONE;
+ }
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_mulsidi3_v8plus (operands[0], operands[1], operands[2]));
DONE;
}
}")
+;; V9 puts the 64 bit product in a 64 bit register. Only out or global
+;; registers can hold 64 bit values in the V8plus environment.
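+;; So the patterns below either compute straight into an 'h' destination
+;; or go through an 'h' scratch, e.g. for the second alternative
+;; (a sketch):
+;;	smul	%o0, %o1, %o2	! 64 bit product in an out register
+;;	srlx	%o2, 32, %i0	! high word
+;;	mov	%o2, %i1	! low word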
+;; XXX
+(define_insn "mulsidi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=h,r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r,r"))))
+ (clobber (match_scratch:SI 3 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+ smul\\t%1, %2, %L0\\n\\tsrlx\\t%L0, 32, %H0
+ smul\\t%1, %2, %3\\n\\tsrlx\\t%3, 32, %H0\\n\\tmov\\t%3, %L0"
+ [(set_attr "length" "2,3")])
+
+;; XXX
+(define_insn "const_mulsidi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=h,r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (match_operand:SI 2 "small_int" "I,I")))
+ (clobber (match_scratch:SI 3 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+ smul\\t%1, %2, %L0\\n\\tsrlx\\t%L0, 32, %H0
+ smul\\t%1, %2, %3\\n\\tsrlx\\t%3, 32, %H0\\n\\tmov\\t%3, %L0"
+ [(set_attr "length" "2,3")])
+
+;; XXX
(define_insn "*mulsidi3_sp32"
[(set (match_operand:DI 0 "register_operand" "=r")
(mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
(sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
- "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
+ "TARGET_HARD_MUL32"
"*
{
- return TARGET_SPARCLET ? \"smuld %1,%2,%R0\" : \"smul %1,%2,%R0\;rd %%y,%0\";
+ return TARGET_SPARCLET ? \"smuld\\t%1, %2, %L0\" : \"smul\\t%1, %2, %L0\\n\\trd\\t%%y, %H0\";
}"
[(set (attr "length")
(if_then_else (eq_attr "isa" "sparclet")
(const_int 1) (const_int 2)))])
+(define_insn "*mulsidi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "smul\\t%1, %2, %0"
+ [(set_attr "length" "1")])
+
;; Extra pattern, because sign_extend of a constant isn't valid.
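+;; (RTL like (sign_extend:DI (const_int 5)) is invalid, since a
+;; const_int carries no mode, so the constant is matched directly here.)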
-(define_insn "const_mulsidi3"
+;; XXX
+(define_insn "const_mulsidi3_sp32"
[(set (match_operand:DI 0 "register_operand" "=r")
(mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
(match_operand:SI 2 "small_int" "I")))]
- "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
+ "TARGET_HARD_MUL32"
"*
{
- return TARGET_SPARCLET ? \"smuld %1,%2,%R0\" : \"smul %1,%2,%R0\;rd %%y,%0\";
+ return TARGET_SPARCLET ? \"smuld\\t%1, %2, %L0\" : \"smul\\t%1, %2, %L0\\n\\trd\\t%%y, %H0\";
}"
[(set (attr "length")
(if_then_else (eq_attr "isa" "sparclet")
(const_int 1) (const_int 2)))])
+(define_insn "const_mulsidi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "small_int" "I")))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "smul\\t%1, %2, %0"
+ [(set_attr "length" "1")])
+
(define_expand "smulsi3_highpart"
[(set (match_operand:SI 0 "register_operand" "")
(truncate:SI
(lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
(sign_extend:DI (match_operand:SI 2 "arith_operand" "")))
(const_int 32))))]
- "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
+ "TARGET_HARD_MUL && TARGET_ARCH32"
"
{
if (CONSTANT_P (operands[2]))
{
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_const_smulsi3_highpart_v8plus (operands[0],
+ operands[1],
+ operands[2],
+ GEN_INT (32)));
+ DONE;
+ }
emit_insn (gen_const_smulsi3_highpart (operands[0], operands[1], operands[2]));
DONE;
}
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_smulsi3_highpart_v8plus (operands[0], operands[1],
+ operands[2], GEN_INT (32)));
+ DONE;
+ }
}")
-(define_insn "*smulsidi3_highpart_sp32"
+;; XXX
+(define_insn "smulsi3_highpart_v8plus"
+ [(set (match_operand:SI 0 "register_operand" "=h,r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r,r")))
+ (match_operand:SI 3 "const_int_operand" "i,i"))))
+ (clobber (match_scratch:SI 4 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+   smul\\t%1, %2, %0\\n\\tsrlx\\t%0, %3, %0
+   smul\\t%1, %2, %4\\n\\tsrlx\\t%4, %3, %0"
+ [(set_attr "length" "2")])
+
+;; The combiner changes TRUNCATE in the previous pattern to SUBREG.
+;; XXX
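+;; i.e. (truncate:SI (lshiftrt:DI X (const_int 32))) gets rewritten as
+;; (subreg:SI (lshiftrt:DI X (const_int 32)) 1), word 1 being the low
+;; word on this big-endian target, so that form must be matched too.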
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=h,r")
+ (subreg:SI
+ (lshiftrt:DI
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r,r")))
+ (match_operand:SI 3 "const_int_operand" "i,i"))
+ 1))
+ (clobber (match_scratch:SI 4 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+ smul\\t%1, %2, %0\\n\\tsrlx\\t%0, %3, %0
+ smul\\t%1, %2, %4\\n\\tsrlx\\t%4, %3, %0"
+ [(set_attr "length" "2")])
+
+;; XXX
+(define_insn "const_smulsi3_highpart_v8plus"
+ [(set (match_operand:SI 0 "register_operand" "=h,r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (match_operand 2 "small_int" "i,i"))
+ (match_operand:SI 3 "const_int_operand" "i,i"))))
+ (clobber (match_scratch:SI 4 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+ smul\\t%1, %2, %0\\n\\tsrlx\\t%0, %3, %0
+ smul\\t%1, %2, %4\\n\\tsrlx\\t%4, %3, %0"
+ [(set_attr "length" "2")])
+
+;; XXX
+(define_insn "*smulsi3_highpart_sp32"
[(set (match_operand:SI 0 "register_operand" "=r")
(truncate:SI
(lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
(sign_extend:DI (match_operand:SI 2 "register_operand" "r")))
(const_int 32))))]
- "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
- "smul %1,%2,%%g0\;rd %%y,%0"
+ "TARGET_HARD_MUL32 && ! TARGET_LIVE_G0"
+ "smul\\t%1, %2, %%g0\\n\\trd\\t%%y, %0"
[(set_attr "length" "2")])
+;; XXX
(define_insn "const_smulsi3_highpart"
[(set (match_operand:SI 0 "register_operand" "=r")
(truncate:SI
(lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
(match_operand:SI 2 "register_operand" "r"))
(const_int 32))))]
- "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
- "smul %1,%2,%%g0\;rd %%y,%0"
+ "TARGET_HARD_MUL32 && ! TARGET_LIVE_G0"
+ "smul\\t%1, %2, %%g0\\n\\trd\\t%%y, %0"
[(set_attr "length" "2")])
(define_expand "umulsidi3"
[(set (match_operand:DI 0 "register_operand" "")
(mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
(zero_extend:DI (match_operand:SI 2 "uns_arith_operand" ""))))]
- "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
+ "TARGET_HARD_MUL"
"
{
if (CONSTANT_P (operands[2]))
{
- emit_insn (gen_const_umulsidi3 (operands[0], operands[1], operands[2]));
+ if (TARGET_V8PLUS)
+ emit_insn (gen_const_umulsidi3_v8plus (operands[0], operands[1],
+ operands[2]));
+ else
+ emit_insn (gen_const_umulsidi3_sp32 (operands[0], operands[1],
+ operands[2]));
+ DONE;
+ }
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_umulsidi3_v8plus (operands[0], operands[1], operands[2]));
DONE;
}
}")
+;; XXX
+(define_insn "umulsidi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=h,r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r,r"))))
+ (clobber (match_scratch:SI 3 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+ umul\\t%1, %2, %L0\\n\\tsrlx\\t%L0, 32, %H0
+ umul\\t%1, %2, %3\\n\\tsrlx\\t%3, 32, %H0\\n\\tmov\\t%3, %L0"
+ [(set_attr "length" "2,3")])
+
+;; XXX
(define_insn "*umulsidi3_sp32"
[(set (match_operand:DI 0 "register_operand" "=r")
(mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
(zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
- "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
+ "TARGET_HARD_MUL32"
"*
{
- return TARGET_SPARCLET ? \"umuld %1,%2,%R0\" : \"umul %1,%2,%R0\;rd %%y,%0\";
+ return TARGET_SPARCLET ? \"umuld\\t%1, %2, %L0\" : \"umul\\t%1, %2, %L0\\n\\trd\\t%%y, %H0\";
}"
[(set (attr "length")
(if_then_else (eq_attr "isa" "sparclet")
(const_int 1) (const_int 2)))])
+(define_insn "*umulsidi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "umul\\t%1, %2, %0"
+ [(set_attr "length" "1")])
+
;; Extra pattern, because sign_extend of a constant isn't valid.
-(define_insn "const_umulsidi3"
+;; XXX
+(define_insn "const_umulsidi3_sp32"
[(set (match_operand:DI 0 "register_operand" "=r")
(mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
(match_operand:SI 2 "uns_small_int" "")))]
- "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
+ "TARGET_HARD_MUL32"
"*
{
- return TARGET_SPARCLET ? \"umuld %1,%2,%R0\" : \"umul %1,%2,%R0\;rd %%y,%0\";
+ return TARGET_SPARCLET ? \"umuld\\t%1, %2, %L0\" : \"umul\\t%1, %2, %L0\\n\\trd\\t%%y, %H0\";
}"
[(set (attr "length")
(if_then_else (eq_attr "isa" "sparclet")
(const_int 1) (const_int 2)))])
+(define_insn "const_umulsidi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "uns_small_int" "")))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "umul\\t%1, %2, %0"
+ [(set_attr "length" "1")])
+
+;; XXX
+(define_insn "const_umulsidi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=h,r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (match_operand:SI 2 "uns_small_int" "")))
+ (clobber (match_scratch:SI 3 "=X,h"))]
+ "TARGET_V8PLUS"
+ "@
+ umul\\t%1, %2, %L0\\n\\tsrlx\\t%L0, 32, %H0
+ umul\\t%1, %2, %3\\n\\tsrlx\\t%3, 32, %H0\\n\\tmov\\t%3, %L0"
+ [(set_attr "length" "2,3")])
+
(define_expand "umulsi3_highpart"
[(set (match_operand:SI 0 "register_operand" "")
(truncate:SI
(lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
(zero_extend:DI (match_operand:SI 2 "uns_arith_operand" "")))
(const_int 32))))]
- "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
+ "TARGET_HARD_MUL && TARGET_ARCH32"
"
{
if (CONSTANT_P (operands[2]))
{
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_const_umulsi3_highpart_v8plus (operands[0],
+ operands[1],
+ operands[2],
+ GEN_INT (32)));
+ DONE;
+ }
emit_insn (gen_const_umulsi3_highpart (operands[0], operands[1], operands[2]));
DONE;
}
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_umulsi3_highpart_v8plus (operands[0], operands[1],
+ operands[2], GEN_INT (32)));
+ DONE;
+ }
}")
-(define_insn "*umulsidi3_highpart_sp32"
+;; XXX
+(define_insn "umulsi3_highpart_v8plus"
+ [(set (match_operand:SI 0 "register_operand" "=h,r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r,r")))
+ (match_operand:SI 3 "const_int_operand" "i,i"))))
+ (clobber (match_scratch:SI 4 "=X,h"))]
+ "TARGET_V8PLUS"
+ "@
+ umul\\t%1, %2, %0\\n\\tsrlx\\t%0, %3, %0
+ umul\\t%1, %2, %4\\n\\tsrlx\\t%4, %3, %0"
+ [(set_attr "length" "2")])
+
+;; XXX
+(define_insn "const_umulsi3_highpart_v8plus"
+ [(set (match_operand:SI 0 "register_operand" "=h,r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (match_operand:SI 2 "uns_small_int" ""))
+ (match_operand:SI 3 "const_int_operand" "i,i"))))
+ (clobber (match_scratch:SI 4 "=X,h"))]
+ "TARGET_V8PLUS"
+ "@
+ umul\\t%1, %2, %0\\n\\tsrlx\\t%0, %3, %0
+ umul\\t%1, %2, %4\\n\\tsrlx\\t%4, %3, %0"
+ [(set_attr "length" "2")])
+
+;; XXX
+(define_insn "*umulsi3_highpart_sp32"
[(set (match_operand:SI 0 "register_operand" "=r")
(truncate:SI
(lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
(zero_extend:DI (match_operand:SI 2 "register_operand" "r")))
(const_int 32))))]
- "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
- "umul %1,%2,%%g0\;rd %%y,%0"
+ "TARGET_HARD_MUL32 && ! TARGET_LIVE_G0"
+ "umul\\t%1, %2, %%g0\\n\\trd\\t%%y, %0"
[(set_attr "length" "2")])
+;; XXX
(define_insn "const_umulsi3_highpart"
[(set (match_operand:SI 0 "register_operand" "=r")
(truncate:SI
(lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
(match_operand:SI 2 "uns_small_int" ""))
(const_int 32))))]
- "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
- "umul %1,%2,%%g0\;rd %%y,%0"
+ "TARGET_HARD_MUL32 && ! TARGET_LIVE_G0"
+ "umul\\t%1, %2, %%g0\\n\\trd\\t%%y, %0"
[(set_attr "length" "2")])
;; The v8 architecture specifies that there must be 3 instructions between
;; a y register write and a use of it for correct results.
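+;; For example, a signed 32 bit division on a v8-era chip comes out as
+;; (a sketch; registers depend on allocation):
+;;	sra	%o0, 31, %g1	! compute the sign of the dividend
+;;	wr	%g0, %g1, %y	! ...and put it in %y
+;;	nop; nop; nop		! the three mandated instructions
+;;	sdiv	%o0, %o1, %o0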
-(define_insn "divsi3"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (div:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "arith_operand" "rI")))
- (clobber (match_scratch:SI 3 "=&r"))]
+(define_expand "divsi3"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (div:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "input_operand" "rI,m")))
+ (clobber (match_scratch:SI 3 "=&r,&r"))])]
"TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
+ "
+{
+ if (TARGET_ARCH64)
+ {
+      operands[3] = gen_reg_rtx (SImode);
+ emit_insn (gen_ashrsi3 (operands[3], operands[1], GEN_INT (31)));
+ emit_insn (gen_divsi3_sp64 (operands[0], operands[1], operands[2],
+ operands[3]));
+ DONE;
+ }
+}")
+
+(define_insn "divsi3_sp32"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (div:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "input_operand" "rI,m")))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "(TARGET_V8 || TARGET_DEPRECATED_V8_INSNS)
+ && TARGET_ARCH32"
"*
{
- if (TARGET_V9)
- return \"sra %1,31,%3\;wr %%g0,%3,%%y\;sdiv %1,%2,%0\";
+ if (which_alternative == 0)
+ if (TARGET_V9)
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tsdiv\\t%1, %2, %0\";
+ else
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tnop\\n\\tnop\\n\\tnop\\n\\tsdiv\\t%1, %2, %0\";
else
- return \"sra %1,31,%3\;wr %%g0,%3,%%y\;nop\;nop\;nop\;sdiv %1,%2,%0\";
+ if (TARGET_V9)
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tld\\t%2, %3\\n\\tsdiv\\t%1, %3, %0\";
+ else
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tld\\t%2, %3\\n\\tnop\\n\\tnop\\n\\tsdiv\\t%1, %3, %0\";
}"
[(set (attr "length")
(if_then_else (eq_attr "isa" "v9")
- (const_int 3) (const_int 6)))])
+ (const_int 4) (const_int 7)))])
+
+(define_insn "divsi3_sp64"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (div:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "input_operand" "rI")))
+ (use (match_operand:SI 3 "register_operand" "r"))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "wr\\t%%g0, %3, %%y\\n\\tsdiv\\t%1, %2, %0"
+ [(set_attr "length" "2")])
(define_insn "divdi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(div:DI (match_operand:DI 1 "register_operand" "r")
(match_operand:DI 2 "arith_double_operand" "rHI")))]
"TARGET_ARCH64"
- "sdivx %1,%2,%0")
-
-;; It is not known whether this will match.
+ "sdivx\\t%1, %2, %0")
(define_insn "*cmp_sdiv_cc_set"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (div:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "arith_operand" "rI")))
- (set (reg:CC 100)
- (compare:CC (div:SI (match_dup 1) (match_dup 2))
+ [(set (reg:CC 100)
+ (compare:CC (div:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI"))
(const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (div:SI (match_dup 1) (match_dup 2)))
(clobber (match_scratch:SI 3 "=&r"))]
"TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
"*
{
if (TARGET_V9)
- return \"sra %1,31,%3\;wr %%g0,%3,%%y\;sdivcc %1,%2,%0\";
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tsdivcc\\t%1, %2, %0\";
else
- return \"sra %1,31,%3\;wr %%g0,%3,%%y\;nop\;nop\;nop\;sdivcc %1,%2,%0\";
+ return \"sra\\t%1, 31, %3\\n\\twr\\t%3, 0, %%y\\n\\tnop\\n\\tnop\\n\\tnop\\n\\tsdivcc\\t%1, %2, %0\";
}"
[(set (attr "length")
(if_then_else (eq_attr "isa" "v9")
(const_int 3) (const_int 6)))])
-(define_insn "udivsi3"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (udiv:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "arith_operand" "rI")))]
- "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
+;; XXX
+(define_expand "udivsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (udiv:SI (match_operand:SI 1 "reg_or_nonsymb_mem_operand" "")
+ (match_operand:SI 2 "input_operand" "")))]
+ "(TARGET_V8 || TARGET_DEPRECATED_V8_INSNS) && ! TARGET_LIVE_G0"
+ "")
+
+(define_insn "udivsi3_sp32"
+ [(set (match_operand:SI 0 "register_operand" "=r,&r,&r")
+ (udiv:SI (match_operand:SI 1 "reg_or_nonsymb_mem_operand" "r,r,m")
+ (match_operand:SI 2 "input_operand" "rI,m,r")))]
+ "(TARGET_V8
+ || TARGET_DEPRECATED_V8_INSNS)
+ && TARGET_ARCH32 && ! TARGET_LIVE_G0"
"*
{
- if (TARGET_V9)
- return \"wr %%g0,%%g0,%%y\;udiv %1,%2,%0\";
- else
- return \"wr %%g0,%%g0,%%y\;nop\;nop\;nop\;udiv %1,%2,%0\";
+ output_asm_insn (\"wr\\t%%g0, %%g0, %%y\", operands);
+ switch (which_alternative)
+ {
+ default:
+ return \"nop\\n\\tnop\\n\\tnop\\n\\tudiv\\t%1, %2, %0\";
+ case 1:
+ return \"ld\\t%2, %0\\n\\tnop\\n\\tnop\\n\\tudiv\\t%1, %0, %0\";
+ case 2:
+ return \"ld\\t%1, %0\\n\\tnop\\n\\tnop\\n\\tudiv\\t%0, %2, %0\";
+ }
}"
- [(set (attr "length")
- (if_then_else (eq_attr "isa" "v9")
- (const_int 2) (const_int 5)))])
+ [(set_attr "length" "5")])
+
+(define_insn "udivsi3_sp64"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "reg_or_nonsymb_mem_operand" "r")
+ (match_operand:SI 2 "input_operand" "rI")))]
+ "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
+ "wr\\t%%g0, 0, %%y\\n\\tudiv\\t%1, %2, %0"
+ [(set_attr "length" "2")])
(define_insn "udivdi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(udiv:DI (match_operand:DI 1 "register_operand" "r")
(match_operand:DI 2 "arith_double_operand" "rHI")))]
"TARGET_ARCH64"
- "udivx %1,%2,%0")
-
-;; It is not known whether this will match.
+ "udivx\\t%1, %2, %0")
(define_insn "*cmp_udiv_cc_set"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (udiv:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "arith_operand" "rI")))
- (set (reg:CC 100)
- (compare:CC (udiv:SI (match_dup 1) (match_dup 2))
- (const_int 0)))]
- "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
+ [(set (reg:CC 100)
+ (compare:CC (udiv:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (udiv:SI (match_dup 1) (match_dup 2)))]
+ "(TARGET_V8
+ || TARGET_DEPRECATED_V8_INSNS)
+ && ! TARGET_LIVE_G0"
"*
{
if (TARGET_V9)
- return \"wr %%g0,%%g0,%%y\;udivcc %1,%2,%0\";
+ return \"wr\\t%%g0, %%g0, %%y\\n\\tudivcc\\t%1, %2, %0\";
else
- return \"wr %%g0,%%g0,%%y\;nop\;nop\;nop\;udivcc %1,%2,%0\";
+ return \"wr\\t%%g0, %%g0, %%y\\n\\tnop\\n\\tnop\\n\\tnop\\n\\tudivcc\\t%1, %2, %0\";
}"
[(set (attr "length")
(if_then_else (eq_attr "isa" "v9")
(match_operand:SI 2 "arith_operand" "rI"))
(match_operand:SI 3 "register_operand" "0")))]
"TARGET_SPARCLET"
- "smac %1,%2,%0"
- [(set_attr "type" "imul")])
+ "smac\\t%1, %2, %0"
+ [(set_attr "type" "imul")
+ (set_attr "length" "1")])
(define_insn "*smacdi"
[(set (match_operand:DI 0 "register_operand" "=r")
(match_operand:SI 2 "register_operand" "r")))
(match_operand:DI 3 "register_operand" "0")))]
"TARGET_SPARCLET"
- "smacd %1,%2,%R0"
- [(set_attr "type" "imul")])
+ "smacd\\t%1, %2, %L0"
+ [(set_attr "type" "imul")
+ (set_attr "length" "1")])
(define_insn "*umacdi"
[(set (match_operand:DI 0 "register_operand" "=r")
(match_operand:SI 2 "register_operand" "r")))
(match_operand:DI 3 "register_operand" "0")))]
"TARGET_SPARCLET"
- "umacd %1,%2,%R0"
- [(set_attr "type" "imul")])
+ "umacd\\t%1, %2, %L0"
+ [(set_attr "type" "imul")
+ (set_attr "length" "1")])
\f
;;- Boolean instructions
;; We define DImode `and' so with DImode `not' we can get
"")
(define_insn "*anddi3_sp32"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (and:DI (match_operand:DI 1 "arith_double_operand" "%r")
- (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (and:DI (match_operand:DI 1 "arith_double_operand" "%r,b")
+ (match_operand:DI 2 "arith_double_operand" "rHI,b")))]
"! TARGET_ARCH64"
- "*
-{
- rtx op2 = operands[2];
-
- /* If constant is positive, upper bits zeroed, otherwise unchanged.
- Give the assembler a chance to pick the move instruction. */
- if (GET_CODE (op2) == CONST_INT)
- {
- int sign = INTVAL (op2);
- if (sign < 0)
- return \"mov %1,%0\;and %R1,%2,%R0\";
- return \"mov 0,%0\;and %R1,%2,%R0\";
- }
- else if (GET_CODE (op2) == CONST_DOUBLE)
- {
- rtx xoperands[4];
- xoperands[0] = operands[0];
- xoperands[1] = operands[1];
- xoperands[2] = GEN_INT (CONST_DOUBLE_LOW (op2));
- xoperands[3] = GEN_INT (CONST_DOUBLE_HIGH (op2));
- /* We could optimize then operands[1] == operands[0]
- and either half of the constant is -1. */
- output_asm_insn (\"and %R1,%2,%R0\;and %1,%3,%0\", xoperands);
- return \"\";
- }
- return \"and %1,%2,%0\;and %R1,%R2,%R0\";
-}"
- [(set_attr "length" "2")])
+ "@
+ #
+ fand\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "2,1")])
(define_insn "*anddi3_sp64"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (and:DI (match_operand:DI 1 "arith_double_operand" "%r")
- (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (and:DI (match_operand:DI 1 "arith_double_operand" "%r,b")
+ (match_operand:DI 2 "arith_double_operand" "rHI,b")))]
"TARGET_ARCH64"
- "and %1,%2,%0")
+ "@
+ and\\t%1, %2, %0
+ fand\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "1,1")])
(define_insn "andsi3"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (and:SI (match_operand:SI 1 "arith_operand" "%r")
- (match_operand:SI 2 "arith_operand" "rI")))]
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (and:SI (match_operand:SI 1 "arith_operand" "%r,d")
+ (match_operand:SI 2 "arith_operand" "rI,d")))]
""
- "and %1,%2,%0"
- [(set_attr "type" "ialu")])
+ "@
+ and\\t%1, %2, %0
+ fands\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "1,1")])
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(match_operand:SI 2 "" "")))
(clobber (match_operand:SI 3 "register_operand" ""))]
"GET_CODE (operands[2]) == CONST_INT
- && !SMALL_INT (operands[2])
+ && !SMALL_INT32 (operands[2])
&& (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
[(set (match_dup 3) (match_dup 4))
(set (match_dup 0) (and:SI (not:SI (match_dup 3)) (match_dup 1)))]
"
{
- operands[4] = gen_rtx (CONST_INT, VOIDmode, ~INTVAL (operands[2]));
+ operands[4] = GEN_INT (~INTVAL (operands[2]) & 0xffffffff);
+}")
+
+;; Split DImode logical operations requiring two instructions.
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_operator:DI 1 "cc_arithop" ; AND, IOR, XOR
+ [(match_operand:DI 2 "register_operand" "")
+ (match_operand:DI 3 "arith_double_operand" "")]))]
+ "! TARGET_ARCH64
+ && reload_completed
+ && ((GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) < 32)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) < 32))"
+ [(set (match_dup 4) (match_op_dup:SI 1 [(match_dup 6) (match_dup 8)]))
+ (set (match_dup 5) (match_op_dup:SI 1 [(match_dup 7) (match_dup 9)]))]
+ "
+{
+ if (GET_CODE (operands[0]) == SUBREG)
+ operands[0] = alter_subreg (operands[0]);
+ operands[4] = gen_highpart (SImode, operands[0]);
+ operands[5] = gen_lowpart (SImode, operands[0]);
+ operands[6] = gen_highpart (SImode, operands[2]);
+ operands[7] = gen_lowpart (SImode, operands[2]);
+ if (GET_CODE (operands[3]) == CONST_INT)
+ {
+ if (INTVAL (operands[3]) < 0)
+ operands[8] = constm1_rtx;
+ else
+ operands[8] = const0_rtx;
+ }
+ else
+ operands[8] = gen_highpart (SImode, operands[3]);
+ operands[9] = gen_lowpart (SImode, operands[3]);
}")
(define_insn "*and_not_di_sp32"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (and:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
- (match_operand:DI 2 "register_operand" "r")))]
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (and:DI (not:DI (match_operand:DI 1 "register_operand" "r,b"))
+ (match_operand:DI 2 "register_operand" "r,b")))]
"! TARGET_ARCH64"
- "andn %2,%1,%0\;andn %R2,%R1,%R0"
- [(set_attr "length" "2")])
+ "@
+ #
+ fandnot1\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "2,1")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (and:DI (not:DI (match_operand:DI 1 "register_operand" ""))
+ (match_operand:DI 2 "register_operand" "")))]
+ "! TARGET_ARCH64
+ && reload_completed
+ && ((GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) < 32)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) < 32))"
+ [(set (match_dup 3) (and:SI (not:SI (match_dup 4)) (match_dup 5)))
+ (set (match_dup 6) (and:SI (not:SI (match_dup 7)) (match_dup 8)))]
+ "if (GET_CODE (operands[0]) == SUBREG)
+ operands[0] = alter_subreg (operands[0]);
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[5] = gen_highpart (SImode, operands[2]);
+ operands[6] = gen_lowpart (SImode, operands[0]);
+ operands[7] = gen_lowpart (SImode, operands[1]);
+ operands[8] = gen_lowpart (SImode, operands[2]);")
(define_insn "*and_not_di_sp64"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (and:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
- (match_operand:DI 2 "register_operand" "r")))]
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (and:DI (not:DI (match_operand:DI 1 "register_operand" "r,b"))
+ (match_operand:DI 2 "register_operand" "r,b")))]
"TARGET_ARCH64"
- "andn %2,%1,%0")
+ "@
+ andn\\t%2, %1, %0
+ fandnot1\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "1,1")])
(define_insn "*and_not_si"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (and:SI (not:SI (match_operand:SI 1 "register_operand" "r"))
- (match_operand:SI 2 "register_operand" "r")))]
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (and:SI (not:SI (match_operand:SI 1 "register_operand" "r,d"))
+ (match_operand:SI 2 "register_operand" "r,d")))]
""
- "andn %2,%1,%0"
- [(set_attr "type" "ialu")])
+ "@
+ andn\\t%2, %1, %0
+ fandnot1s\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "1,1")])
(define_expand "iordi3"
[(set (match_operand:DI 0 "register_operand" "")
"")
(define_insn "*iordi3_sp32"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (ior:DI (match_operand:DI 1 "arith_double_operand" "%r")
- (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (ior:DI (match_operand:DI 1 "arith_double_operand" "%r,b")
+ (match_operand:DI 2 "arith_double_operand" "rHI,b")))]
"! TARGET_ARCH64"
- "*
-{
- rtx op2 = operands[2];
-
- /* If constant is positive, upper bits zeroed, otherwise unchanged.
- Give the assembler a chance to pick the move instruction. */
- if (GET_CODE (op2) == CONST_INT)
- {
- int sign = INTVAL (op2);
- if (sign < 0)
- {
- if (TARGET_LIVE_G0)
- return \"and %0,0,%0\;add %0,-1,%0\;or %R1,%2,%R0\";
- else
- return \"mov -1,%0\;or %R1,%2,%R0\";
- }
- return \"mov %1,%0\;or %R1,%2,%R0\";
- }
- else if (GET_CODE (op2) == CONST_DOUBLE)
- {
- rtx xoperands[4];
- xoperands[0] = operands[0];
- xoperands[1] = operands[1];
- xoperands[2] = GEN_INT (CONST_DOUBLE_LOW (op2));
- xoperands[3] = GEN_INT (CONST_DOUBLE_HIGH (op2));
- /* We could optimize then operands[1] == operands[0]
- and either half of the constant is 0. */
- output_asm_insn (\"or %R1,%2,%R0\;or %1,%3,%0\", xoperands);
- return \"\";
- }
- return \"or %1,%2,%0\;or %R1,%R2,%R0\";
-}"
- [(set_attr "length" "2")])
+ "@
+ #
+ for\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "2,1")])
(define_insn "*iordi3_sp64"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (ior:DI (match_operand:DI 1 "arith_double_operand" "%r")
- (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (ior:DI (match_operand:DI 1 "arith_double_operand" "%r,b")
+ (match_operand:DI 2 "arith_double_operand" "rHI,b")))]
"TARGET_ARCH64"
- "or %1,%2,%0")
+ "@
+ or\\t%1, %2, %0
+ for\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "1,1")])
(define_insn "iorsi3"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (ior:SI (match_operand:SI 1 "arith_operand" "%r")
- (match_operand:SI 2 "arith_operand" "rI")))]
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (ior:SI (match_operand:SI 1 "arith_operand" "%r,d")
+ (match_operand:SI 2 "arith_operand" "rI,d")))]
""
- "or %1,%2,%0"
- [(set_attr "type" "ialu")])
+ "@
+ or\\t%1, %2, %0
+ fors\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "1,1")])
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(match_operand:SI 2 "" "")))
(clobber (match_operand:SI 3 "register_operand" ""))]
"GET_CODE (operands[2]) == CONST_INT
- && !SMALL_INT (operands[2])
+ && !SMALL_INT32 (operands[2])
&& (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
[(set (match_dup 3) (match_dup 4))
(set (match_dup 0) (ior:SI (not:SI (match_dup 3)) (match_dup 1)))]
"
{
- operands[4] = gen_rtx (CONST_INT, VOIDmode, ~INTVAL (operands[2]));
+ operands[4] = GEN_INT (~INTVAL (operands[2]) & 0xffffffff);
}")
(define_insn "*or_not_di_sp32"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (ior:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
- (match_operand:DI 2 "register_operand" "r")))]
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (ior:DI (not:DI (match_operand:DI 1 "register_operand" "r,b"))
+ (match_operand:DI 2 "register_operand" "r,b")))]
"! TARGET_ARCH64"
- "orn %2,%1,%0\;orn %R2,%R1,%R0"
- [(set_attr "length" "2")])
+ "@
+ #
+ fornot1\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "2,1")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ior:DI (not:DI (match_operand:DI 1 "register_operand" ""))
+ (match_operand:DI 2 "register_operand" "")))]
+ "! TARGET_ARCH64
+ && reload_completed
+ && ((GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) < 32)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) < 32))"
+ [(set (match_dup 3) (ior:SI (not:SI (match_dup 4)) (match_dup 5)))
+ (set (match_dup 6) (ior:SI (not:SI (match_dup 7)) (match_dup 8)))]
+ "if (GET_CODE (operands[0]) == SUBREG)
+ operands[0] = alter_subreg (operands[0]);
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[5] = gen_highpart (SImode, operands[2]);
+ operands[6] = gen_lowpart (SImode, operands[0]);
+ operands[7] = gen_lowpart (SImode, operands[1]);
+ operands[8] = gen_lowpart (SImode, operands[2]);")
(define_insn "*or_not_di_sp64"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (ior:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
- (match_operand:DI 2 "register_operand" "r")))]
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (ior:DI (not:DI (match_operand:DI 1 "register_operand" "r,b"))
+ (match_operand:DI 2 "register_operand" "r,b")))]
"TARGET_ARCH64"
- "orn %2,%1,%0")
+ "@
+ orn\\t%2, %1, %0
+ fornot1\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "1,1")])
(define_insn "*or_not_si"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (ior:SI (not:SI (match_operand:SI 1 "register_operand" "r"))
- (match_operand:SI 2 "register_operand" "r")))]
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (ior:SI (not:SI (match_operand:SI 1 "register_operand" "r,d"))
+ (match_operand:SI 2 "register_operand" "r,d")))]
""
- "orn %2,%1,%0"
- [(set_attr "type" "ialu")])
+ "@
+ orn\\t%2, %1, %0
+ fornot1s\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "1,1")])
(define_expand "xordi3"
[(set (match_operand:DI 0 "register_operand" "")
""
"")
-(define_insn "*xorsi3_sp32"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (xor:DI (match_operand:DI 1 "arith_double_operand" "%r")
- (match_operand:DI 2 "arith_double_operand" "rHI")))]
+(define_insn "*xordi3_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (xor:DI (match_operand:DI 1 "arith_double_operand" "%r,b")
+ (match_operand:DI 2 "arith_double_operand" "rHI,b")))]
"! TARGET_ARCH64"
- "*
-{
- rtx op2 = operands[2];
-
- /* If constant is positive, upper bits zeroed, otherwise unchanged.
- Give the assembler a chance to pick the move instruction. */
- if (GET_CODE (op2) == CONST_INT)
- {
- int sign = INTVAL (op2);
- if (sign < 0)
- return \"xor %1,-1,%0\;xor %R1,%2,%R0\";
- return \"mov %1,%0\;xor %R1,%2,%R0\";
- }
- else if (GET_CODE (op2) == CONST_DOUBLE)
- {
- rtx xoperands[4];
- xoperands[0] = operands[0];
- xoperands[1] = operands[1];
- xoperands[2] = GEN_INT (CONST_DOUBLE_LOW (op2));
- xoperands[3] = GEN_INT (CONST_DOUBLE_HIGH (op2));
- /* We could optimize then operands[1] == operands[0]
- and either half of the constant is 0. */
- output_asm_insn (\"xor %R1,%2,%R0\;xor %1,%3,%0\", xoperands);
- return \"\";
- }
- return \"xor %1,%2,%0\;xor %R1,%R2,%R0\";
-}"
- [(set_attr "length" "2")])
+ "@
+ #
+ fxor\\t%1, %2, %0"
+ [(set_attr "length" "2,1")
+ (set_attr "type" "ialu,fp")])
(define_insn "*xordi3_sp64"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (xor:DI (match_operand:DI 1 "arith_double_operand" "%rJ")
- (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (xor:DI (match_operand:DI 1 "arith_double_operand" "%rJ,b")
+ (match_operand:DI 2 "arith_double_operand" "rHI,b")))]
"TARGET_ARCH64"
- "xor %r1,%2,%0")
+ "@
+ xor\\t%r1, %2, %0
+ fxor\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "1,1")])
+
+(define_insn "*xordi3_sp64_dbl"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (xor:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "const64_operand" "")))]
+ "(TARGET_ARCH64
+ && HOST_BITS_PER_WIDE_INT != 64)"
+ "xor\\t%1, %2, %0"
+ [(set_attr "type" "ialu")
+ (set_attr "length" "1")])
(define_insn "xorsi3"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (xor:SI (match_operand:SI 1 "arith_operand" "%rJ")
- (match_operand:SI 2 "arith_operand" "rI")))]
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (xor:SI (match_operand:SI 1 "arith_operand" "%rJ,d")
+ (match_operand:SI 2 "arith_operand" "rI,d")))]
""
- "xor %r1,%2,%0"
- [(set_attr "type" "ialu")])
+ "@
+ xor\\t%r1, %2, %0
+ fxors\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "1,1")])
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(match_operand:SI 2 "" "")))
(clobber (match_operand:SI 3 "register_operand" ""))]
"GET_CODE (operands[2]) == CONST_INT
- && !SMALL_INT (operands[2])
+ && !SMALL_INT32 (operands[2])
&& (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
[(set (match_dup 3) (match_dup 4))
(set (match_dup 0) (not:SI (xor:SI (match_dup 3) (match_dup 1))))]
"
{
- operands[4] = gen_rtx (CONST_INT, VOIDmode, ~INTVAL (operands[2]));
+ operands[4] = GEN_INT (~INTVAL (operands[2]) & 0xffffffff);
}")
(define_split
(match_operand:SI 2 "" ""))))
(clobber (match_operand:SI 3 "register_operand" ""))]
"GET_CODE (operands[2]) == CONST_INT
- && !SMALL_INT (operands[2])
+ && !SMALL_INT32 (operands[2])
&& (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
[(set (match_dup 3) (match_dup 4))
(set (match_dup 0) (xor:SI (match_dup 3) (match_dup 1)))]
"
{
- operands[4] = gen_rtx (CONST_INT, VOIDmode, ~INTVAL (operands[2]));
+ operands[4] = GEN_INT (~INTVAL (operands[2]) & 0xffffffff);
}")
;; xnor patterns. Note that (a ^ ~b) == (~a ^ b) == ~(a ^ b).
;; Combine now canonicalizes to the rightmost expression.
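+;; (Quick one-bit check: with a = 1, b = 0 we get 1 ^ ~0 = 0, ~1 ^ 0 = 0
+;; and ~(1 ^ 0) = 0, so all three forms agree.)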
(define_insn "*xor_not_di_sp32"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (not:DI (xor:DI (match_operand:DI 1 "register_operand" "r")
- (match_operand:DI 2 "register_operand" "r"))))]
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (not:DI (xor:DI (match_operand:DI 1 "register_operand" "r,b")
+ (match_operand:DI 2 "register_operand" "r,b"))))]
"! TARGET_ARCH64"
- "xnor %1,%2,%0\;xnor %R1,%R2,%R0"
- [(set_attr "length" "2")])
+ "@
+ #
+ fxnor\\t%1, %2, %0"
+ [(set_attr "length" "2,1")
+ (set_attr "type" "ialu,fp")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (not:DI (xor:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" ""))))]
+ "! TARGET_ARCH64
+ && reload_completed
+ && ((GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) < 32)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) < 32))"
+ [(set (match_dup 3) (not:SI (xor:SI (match_dup 4) (match_dup 5))))
+ (set (match_dup 6) (not:SI (xor:SI (match_dup 7) (match_dup 8))))]
+ "if (GET_CODE (operands[0]) == SUBREG)
+ operands[0] = alter_subreg (operands[0]);
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[5] = gen_highpart (SImode, operands[2]);
+ operands[6] = gen_lowpart (SImode, operands[0]);
+ operands[7] = gen_lowpart (SImode, operands[1]);
+ operands[8] = gen_lowpart (SImode, operands[2]);")
(define_insn "*xor_not_di_sp64"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (not:DI (xor:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
- (match_operand:DI 2 "arith_double_operand" "rHI"))))]
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (not:DI (xor:DI (match_operand:DI 1 "reg_or_0_operand" "rJ,b")
+ (match_operand:DI 2 "arith_double_operand" "rHI,b"))))]
"TARGET_ARCH64"
- "xnor %r1,%2,%0")
+ "@
+ xnor\\t%r1, %2, %0
+ fxnor\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "1,1")])
(define_insn "*xor_not_si"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (not:SI (xor:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
- (match_operand:SI 2 "arith_operand" "rI"))))]
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (not:SI (xor:SI (match_operand:SI 1 "reg_or_0_operand" "rJ,d")
+ (match_operand:SI 2 "arith_operand" "rI,d"))))]
""
- "xnor %r1,%2,%0"
- [(set_attr "type" "ialu")])
+ "@
+ xnor\\t%r1, %2, %0
+ fxnors\\t%1, %2, %0"
+ [(set_attr "type" "ialu,fp")
+ (set_attr "length" "1,1")])
;; These correspond to the above in the case where we also (or only)
;; want to set the condition code.
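+;; E.g. "x & y" used only as a test can become a single
+;;	andcc	%o0, %o1, %g0
+;; with no separate compare instruction (a sketch).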
[(match_operand:SI 0 "arith_operand" "%r")
(match_operand:SI 1 "arith_operand" "rI")])
(const_int 0)))]
- ""
- "%A2cc %0,%1,%%g0"
- [(set_attr "type" "compare")])
+ "! TARGET_LIVE_G0"
+ "%A2cc\\t%0, %1, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_ccx_arith_op"
[(set (reg:CCX 100)
(match_operand:DI 1 "arith_double_operand" "rHI")])
(const_int 0)))]
"TARGET_ARCH64"
- "%A2cc %0,%1,%%g0"
- [(set_attr "type" "compare")])
+ "%A2cc\\t%0, %1, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_cc_arith_op_set"
[(set (reg:CC 100)
(set (match_operand:SI 0 "register_operand" "=r")
(match_dup 3))]
""
- "%A3cc %1,%2,%0")
+ "%A3cc\\t%1, %2, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_ccx_arith_op_set"
[(set (reg:CCX 100)
(set (match_operand:DI 0 "register_operand" "=r")
(match_dup 3))]
"TARGET_ARCH64"
- "%A3cc %1,%2,%0")
+ "%A3cc\\t%1, %2, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_cc_xor_not"
[(set (reg:CC 100)
(not:SI (xor:SI (match_operand:SI 0 "reg_or_0_operand" "%rJ")
(match_operand:SI 1 "arith_operand" "rI")))
(const_int 0)))]
- ""
- "xnorcc %r0,%1,%%g0"
- [(set_attr "type" "compare")])
+ "! TARGET_LIVE_G0"
+ "xnorcc\\t%r0, %1, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_ccx_xor_not"
[(set (reg:CCX 100)
(match_operand:DI 1 "arith_double_operand" "rHI")))
(const_int 0)))]
"TARGET_ARCH64"
- "xnorcc %r0,%1,%%g0"
- [(set_attr "type" "compare")])
+ "xnorcc\\t%r0, %1, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_cc_xor_not_set"
[(set (reg:CC 100)
(set (match_operand:SI 0 "register_operand" "=r")
(not:SI (xor:SI (match_dup 1) (match_dup 2))))]
""
- "xnorcc %r1,%2,%0")
+ "xnorcc\\t%r1, %2, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_ccx_xor_not_set"
[(set (reg:CCX 100)
(set (match_operand:DI 0 "register_operand" "=r")
(not:DI (xor:DI (match_dup 1) (match_dup 2))))]
"TARGET_ARCH64"
- "xnorcc %r1,%2,%0")
+ "xnorcc\\t%r1, %2, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_cc_arith_op_not"
[(set (reg:CC 100)
[(not:SI (match_operand:SI 0 "arith_operand" "rI"))
(match_operand:SI 1 "reg_or_0_operand" "rJ")])
(const_int 0)))]
- ""
- "%B2cc %r1,%0,%%g0"
- [(set_attr "type" "compare")])
+ "! TARGET_LIVE_G0"
+ "%B2cc\\t%r1, %0, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_ccx_arith_op_not"
[(set (reg:CCX 100)
(match_operand:DI 1 "reg_or_0_operand" "rJ")])
(const_int 0)))]
"TARGET_ARCH64"
- "%B2cc %r1,%0,%%g0"
- [(set_attr "type" "compare")])
+ "%B2cc\\t%r1, %0, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_cc_arith_op_not_set"
[(set (reg:CC 100)
(set (match_operand:SI 0 "register_operand" "=r")
(match_dup 3))]
""
- "%B3cc %r2,%1,%0")
+ "%B3cc\\t%r2, %1, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_ccx_arith_op_not_set"
[(set (reg:CCX 100)
(set (match_operand:DI 0 "register_operand" "=r")
(match_dup 3))]
"TARGET_ARCH64"
- "%B3cc %r2,%1,%0")
+ "%B3cc\\t%r2, %1, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
;; We cannot use the "neg" pseudo insn because the Sun assembler
;; does not know how to make it work for constants.
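+;; E.g. "neg 5, %o0" is rejected, while the underlying
+;;	sub	%g0, 5, %o0
+;; which these patterns emit always assembles.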
{
if (! TARGET_ARCH64)
{
- emit_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
- gen_rtx (SET, VOIDmode, operand0,
- gen_rtx (NEG, DImode, operand1)),
- gen_rtx (CLOBBER, VOIDmode,
- gen_rtx (REG, SImode, SPARC_ICC_REG)))));
+ emit_insn (gen_rtx_PARALLEL
+ (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, operand0,
+ gen_rtx_NEG (DImode, operand1)),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (CCmode,
+ SPARC_ICC_REG)))));
DONE;
}
}")
(define_insn "*negdi2_sp32"
[(set (match_operand:DI 0 "register_operand" "=r")
(neg:DI (match_operand:DI 1 "register_operand" "r")))
- (clobber (reg:SI 100))]
- "! TARGET_ARCH64"
- "*
-{
- if (TARGET_LIVE_G0)
- output_asm_insn (\"and %%g0,0,%%g0\", operands);
- return \"subcc %%g0,%R1,%R0\;subx %%g0,%1,%0\";
-}"
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64
+ && ! TARGET_LIVE_G0"
+ "#"
[(set_attr "type" "unary")
- ;; ??? This is wrong for TARGET_LIVE_G0 but it's not critical.
(set_attr "length" "2")])
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (neg:DI (match_operand:DI 1 "register_operand" "")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64
+ && ! TARGET_LIVE_G0
+ && reload_completed"
+ [(parallel [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (minus:SI (const_int 0) (match_dup 5))
+ (const_int 0)))
+ (set (match_dup 4) (minus:SI (const_int 0) (match_dup 5)))])
+ (set (match_dup 2) (minus:SI (minus:SI (const_int 0) (match_dup 3))
+ (ltu:SI (reg:CC 100) (const_int 0))))]
+ "operands[2] = gen_highpart (SImode, operands[0]);
+ operands[3] = gen_highpart (SImode, operands[1]);
+ operands[4] = gen_lowpart (SImode, operands[0]);
+ operands[5] = gen_lowpart (SImode, operands[1]);")
+
(define_insn "*negdi2_sp64"
[(set (match_operand:DI 0 "register_operand" "=r")
(neg:DI (match_operand:DI 1 "register_operand" "r")))]
"TARGET_ARCH64"
- "sub %%g0,%1,%0"
+ "sub\\t%%g0, %1, %0"
[(set_attr "type" "unary")
(set_attr "length" "1")])
-(define_insn "negsi2"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (neg:SI (match_operand:SI 1 "arith_operand" "rI")))]
+(define_expand "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (neg:SI (match_operand:SI 1 "arith_operand" "")))]
""
- "*
+ "
{
if (TARGET_LIVE_G0)
- return \"and %%g0,0,%%g0\;sub %%g0,%1,%0\";
- return \"sub %%g0,%1,%0\";
-}"
+ {
+ rtx zero_reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_rtx_SET (VOIDmode, zero_reg, const0_rtx));
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_MINUS (SImode, zero_reg,
+ operands[1])));
+ DONE;
+ }
+}")
+
+(define_insn "*negsi2_not_liveg0"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "arith_operand" "rI")))]
+ "! TARGET_LIVE_G0"
+ "sub\\t%%g0, %1, %0"
[(set_attr "type" "unary")
- (set (attr "length")
- (if_then_else (eq_attr "live_g0" "yes") (const_int 2) (const_int 1)))])
+ (set_attr "length" "1")])
(define_insn "*cmp_cc_neg"
[(set (reg:CC_NOOV 100)
(compare:CC_NOOV (neg:SI (match_operand:SI 0 "arith_operand" "rI"))
(const_int 0)))]
"! TARGET_LIVE_G0"
- "subcc %%g0,%0,%%g0"
- [(set_attr "type" "compare")])
+ "subcc\\t%%g0, %0, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_ccx_neg"
[(set (reg:CCX_NOOV 100)
(compare:CCX_NOOV (neg:DI (match_operand:DI 0 "arith_double_operand" "rHI"))
(const_int 0)))]
"TARGET_ARCH64"
- "subcc %%g0,%0,%%g0"
- [(set_attr "type" "compare")])
+ "subcc\\t%%g0, %0, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_cc_set_neg"
[(set (reg:CC_NOOV 100)
(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (match_dup 1)))]
"! TARGET_LIVE_G0"
- "subcc %%g0,%1,%0"
- [(set_attr "type" "unary")])
+ "subcc\\t%%g0, %1, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_ccx_set_neg"
[(set (reg:CCX_NOOV 100)
(set (match_operand:DI 0 "register_operand" "=r")
(neg:DI (match_dup 1)))]
"TARGET_ARCH64"
- "subcc %%g0,%1,%0"
- [(set_attr "type" "unary")])
+ "subcc\\t%%g0, %1, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
;; We cannot use the "not" pseudo insn because the Sun assembler
;; does not know how to make it work for constants.
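+;; (Same story as "neg" above: e.g. "not 5, %o0" will not assemble, so
+;; the underlying form, e.g. "xnor %g0, 5, %o0", is emitted instead.)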
"")
(define_insn "*one_cmpldi2_sp32"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (not:DI (match_operand:DI 1 "register_operand" "r")))]
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (not:DI (match_operand:DI 1 "register_operand" "r,b")))]
"! TARGET_ARCH64"
- "xnor %1,0,%0\;xnor %R1,0,%R0"
- [(set_attr "type" "unary")
- (set_attr "length" "2")])
+ "@
+ #
+ fnot1\\t%1, %0"
+ [(set_attr "type" "unary,fp")
+ (set_attr "length" "2,1")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (not:DI (match_operand:DI 1 "register_operand" "")))]
+ "! TARGET_ARCH64
+ && reload_completed
+ && ((GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) < 32)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) < 32))"
+ [(set (match_dup 2) (not:SI (xor:SI (match_dup 3) (const_int 0))))
+ (set (match_dup 4) (not:SI (xor:SI (match_dup 5) (const_int 0))))]
+ "if (GET_CODE (operands[0]) == SUBREG)
+ operands[0] = alter_subreg (operands[0]);
+ operands[2] = gen_highpart (SImode, operands[0]);
+ operands[3] = gen_highpart (SImode, operands[1]);
+ operands[4] = gen_lowpart (SImode, operands[0]);
+ operands[5] = gen_lowpart (SImode, operands[1]);")
(define_insn "*one_cmpldi2_sp64"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (not:DI (match_operand:DI 1 "arith_double_operand" "rHI")))]
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (not:DI (match_operand:DI 1 "arith_double_operand" "rHI,b")))]
"TARGET_ARCH64"
- "xnor %1,0,%0"
- [(set_attr "type" "unary")])
+ "@
+ xnor\\t%%g0, %1, %0
+ fnot1\\t%1, %0"
+ [(set_attr "type" "unary,fp")
+ (set_attr "length" "1")])
-(define_insn "one_cmplsi2"
- [(set (match_operand:SI 0 "register_operand" "=r,r")
- (not:SI (match_operand:SI 1 "arith_operand" "r,I")))]
+(define_expand "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (not:SI (match_operand:SI 1 "arith_operand" "")))]
""
- "*
+ "
{
- if (which_alternative == 0)
- return \"xnor %1,0,%0\";
- if (TARGET_LIVE_G0)
- output_asm_insn (\"and %%g0,0,%%g0\", operands);
- return \"xnor %%g0,%1,%0\";
-}"
- [(set_attr "type" "unary")
- (set_attr_alternative "length"
- [(const_int 1)
- (if_then_else (eq_attr "live_g0" "yes") (const_int 2) (const_int 1))])])
+ if (TARGET_LIVE_G0
+ && GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx zero_reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_rtx_SET (VOIDmode, zero_reg, const0_rtx));
+ emit_insn (gen_rtx_SET (VOIDmode,
+ operands[0],
+ gen_rtx_NOT (SImode,
+ gen_rtx_XOR (SImode,
+ zero_reg,
+ operands[1]))));
+ DONE;
+ }
+}")
+
+(define_insn "*one_cmplsi2_not_liveg0"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (not:SI (match_operand:SI 1 "arith_operand" "rI,d")))]
+ "! TARGET_LIVE_G0"
+ "@
+ xnor\\t%%g0, %1, %0
+ fnot1s\\t%1, %0"
+ [(set_attr "type" "unary,fp")
+ (set_attr "length" "1,1")])
+
+(define_insn "*one_cmplsi2_liveg0"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (not:SI (match_operand:SI 1 "arith_operand" "r,d")))]
+ "TARGET_LIVE_G0"
+ "@
+ xnor\\t%1, 0, %0
+ fnot1s\\t%1, %0"
+ [(set_attr "type" "unary,fp")
+ (set_attr "length" "1,1")])
(define_insn "*cmp_cc_not"
[(set (reg:CC 100)
(compare:CC (not:SI (match_operand:SI 0 "arith_operand" "rI"))
(const_int 0)))]
"! TARGET_LIVE_G0"
- "xnorcc %%g0,%0,%%g0"
- [(set_attr "type" "compare")])
+ "xnorcc\\t%%g0, %0, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_ccx_not"
[(set (reg:CCX 100)
(compare:CCX (not:DI (match_operand:DI 0 "arith_double_operand" "rHI"))
(const_int 0)))]
"TARGET_ARCH64"
- "xnorcc %%g0,%0,%%g0"
- [(set_attr "type" "compare")])
+ "xnorcc\\t%%g0, %0, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_cc_set_not"
[(set (reg:CC 100)
(set (match_operand:SI 0 "register_operand" "=r")
(not:SI (match_dup 1)))]
"! TARGET_LIVE_G0"
- "xnorcc %%g0,%1,%0"
- [(set_attr "type" "unary")])
+ "xnorcc\\t%%g0, %1, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_ccx_set_not"
[(set (reg:CCX 100)
(set (match_operand:DI 0 "register_operand" "=r")
(not:DI (match_dup 1)))]
"TARGET_ARCH64"
- "xnorcc %%g0,%1,%0"
- [(set_attr "type" "unary")])
+ "xnorcc\\t%%g0, %1, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
\f
;; Floating point arithmetic instructions.
(plus:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
"TARGET_FPU && TARGET_HARD_QUAD"
- "faddq %1,%2,%0"
- [(set_attr "type" "fp")])
+ "faddq\\t%1, %2, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
(define_insn "adddf3"
[(set (match_operand:DF 0 "register_operand" "=e")
(plus:DF (match_operand:DF 1 "register_operand" "e")
(match_operand:DF 2 "register_operand" "e")))]
"TARGET_FPU"
- "faddd %1,%2,%0"
- [(set_attr "type" "fp")])
+ "faddd\\t%1, %2, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
(define_insn "addsf3"
[(set (match_operand:SF 0 "register_operand" "=f")
(plus:SF (match_operand:SF 1 "register_operand" "f")
(match_operand:SF 2 "register_operand" "f")))]
"TARGET_FPU"
- "fadds %1,%2,%0"
- [(set_attr "type" "fp")])
+ "fadds\\t%1, %2, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
(define_insn "subtf3"
[(set (match_operand:TF 0 "register_operand" "=e")
(minus:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
"TARGET_FPU && TARGET_HARD_QUAD"
- "fsubq %1,%2,%0"
- [(set_attr "type" "fp")])
+ "fsubq\\t%1, %2, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
(define_insn "subdf3"
[(set (match_operand:DF 0 "register_operand" "=e")
(minus:DF (match_operand:DF 1 "register_operand" "e")
(match_operand:DF 2 "register_operand" "e")))]
"TARGET_FPU"
- "fsubd %1,%2,%0"
- [(set_attr "type" "fp")])
+ "fsubd\\t%1, %2, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
(define_insn "subsf3"
[(set (match_operand:SF 0 "register_operand" "=f")
(minus:SF (match_operand:SF 1 "register_operand" "f")
(match_operand:SF 2 "register_operand" "f")))]
"TARGET_FPU"
- "fsubs %1,%2,%0"
- [(set_attr "type" "fp")])
+ "fsubs\\t%1, %2, %0"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
(define_insn "multf3"
[(set (match_operand:TF 0 "register_operand" "=e")
(mult:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
"TARGET_FPU && TARGET_HARD_QUAD"
- "fmulq %1,%2,%0"
- [(set_attr "type" "fpmul")])
+ "fmulq\\t%1, %2, %0"
+ [(set_attr "type" "fpmul")
+ (set_attr "length" "1")])
(define_insn "muldf3"
[(set (match_operand:DF 0 "register_operand" "=e")
(mult:DF (match_operand:DF 1 "register_operand" "e")
(match_operand:DF 2 "register_operand" "e")))]
"TARGET_FPU"
- "fmuld %1,%2,%0"
- [(set_attr "type" "fpmul")])
+ "fmuld\\t%1, %2, %0"
+ [(set_attr "type" "fpmul")
+ (set_attr "length" "1")])
(define_insn "mulsf3"
[(set (match_operand:SF 0 "register_operand" "=f")
(mult:SF (match_operand:SF 1 "register_operand" "f")
(match_operand:SF 2 "register_operand" "f")))]
"TARGET_FPU"
- "fmuls %1,%2,%0"
- [(set_attr "type" "fpmul")])
+ "fmuls\\t%1, %2, %0"
+ [(set_attr "type" "fpmul")
+ (set_attr "length" "1")])
(define_insn "*muldf3_extend"
[(set (match_operand:DF 0 "register_operand" "=e")
(mult:DF (float_extend:DF (match_operand:SF 1 "register_operand" "f"))
(float_extend:DF (match_operand:SF 2 "register_operand" "f"))))]
"(TARGET_V8 || TARGET_V9) && TARGET_FPU"
- "fsmuld %1,%2,%0"
- [(set_attr "type" "fpmul")])
+ "fsmuld\\t%1, %2, %0"
+ [(set_attr "type" "fpmul")
+ (set_attr "length" "1")])
(define_insn "*multf3_extend"
[(set (match_operand:TF 0 "register_operand" "=e")
(mult:TF (float_extend:TF (match_operand:DF 1 "register_operand" "e"))
(float_extend:TF (match_operand:DF 2 "register_operand" "e"))))]
"(TARGET_V8 || TARGET_V9) && TARGET_FPU && TARGET_HARD_QUAD"
- "fdmulq %1,%2,%0"
- [(set_attr "type" "fpmul")])
+ "fdmulq\\t%1, %2, %0"
+ [(set_attr "type" "fpmul")
+ (set_attr "length" "1")])
;; don't have timing for quad-prec. divide.
(define_insn "divtf3"
(div:TF (match_operand:TF 1 "register_operand" "e")
(match_operand:TF 2 "register_operand" "e")))]
"TARGET_FPU && TARGET_HARD_QUAD"
- "fdivq %1,%2,%0"
- [(set_attr "type" "fpdivd")])
+ "fdivq\\t%1, %2, %0"
+ [(set_attr "type" "fpdivd")
+ (set_attr "length" "1")])
(define_insn "divdf3"
[(set (match_operand:DF 0 "register_operand" "=e")
(div:DF (match_operand:DF 1 "register_operand" "e")
(match_operand:DF 2 "register_operand" "e")))]
"TARGET_FPU"
- "fdivd %1,%2,%0"
- [(set_attr "type" "fpdivd")])
+ "fdivd\\t%1, %2, %0"
+ [(set_attr "type" "fpdivd")
+ (set_attr "length" "1")])
(define_insn "divsf3"
[(set (match_operand:SF 0 "register_operand" "=f")
(div:SF (match_operand:SF 1 "register_operand" "f")
(match_operand:SF 2 "register_operand" "f")))]
"TARGET_FPU"
- "fdivs %1,%2,%0"
- [(set_attr "type" "fpdivs")])
+ "fdivs\\t%1, %2, %0"
+ [(set_attr "type" "fpdivs")
+ (set_attr "length" "1")])
+
+(define_expand "negtf2"
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (neg:TF (match_operand:TF 1 "register_operand" "0,e")))]
+ "TARGET_FPU"
+ "")
+
+(define_insn "*negtf2_notv9"
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (neg:TF (match_operand:TF 1 "register_operand" "0,e")))]
+ ; We don't use quad float insns here so we don't need TARGET_HARD_QUAD.
+ "TARGET_FPU
+ && ! TARGET_V9"
+ "@
+ fnegs\\t%0, %0
+ #"
+ [(set_attr "type" "fpmove")
+ (set_attr "length" "1,2")])
-(define_insn "negtf2"
+(define_split
+ [(set (match_operand:TF 0 "register_operand" "")
+ (neg:TF (match_operand:TF 1 "register_operand" "")))]
+ "TARGET_FPU
+ && ! TARGET_V9
+ && reload_completed
+ && sparc_absnegfloat_split_legitimate (operands[0], operands[1])"
+ [(set (match_dup 2) (neg:SF (match_dup 3)))
+ (set (match_dup 4) (match_dup 5))
+ (set (match_dup 6) (match_dup 7))]
+ "if (GET_CODE (operands[0]) == SUBREG)
+ operands[0] = alter_subreg (operands[0]);
+ if (GET_CODE (operands[1]) == SUBREG)
+ operands[1] = alter_subreg (operands[1]);
+ operands[2] = gen_rtx_raw_REG (SFmode, REGNO (operands[0]));
+ operands[3] = gen_rtx_raw_REG (SFmode, REGNO (operands[1]));
+ operands[4] = gen_rtx_raw_REG (SFmode, REGNO (operands[0]) + 1);
+ operands[5] = gen_rtx_raw_REG (SFmode, REGNO (operands[1]) + 1);
+ operands[6] = gen_rtx_raw_REG (DFmode, REGNO (operands[0]) + 2);
+ operands[7] = gen_rtx_raw_REG (DFmode, REGNO (operands[1]) + 2);")
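+;; For illustration: negating %f4..%f7 into %f0..%f3 under the split above
+;; becomes "fnegs %f4, %f0" (the sign bit lives in the most significant
+;; word) plus ordinary moves of the remaining three words.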
+
+(define_insn "*negtf2_v9"
[(set (match_operand:TF 0 "register_operand" "=e,e")
(neg:TF (match_operand:TF 1 "register_operand" "0,e")))]
; We don't use quad float insns here so we don't need TARGET_HARD_QUAD.
+ "TARGET_FPU && TARGET_V9"
+ "@
+ fnegd\\t%0, %0
+ #"
+ [(set_attr "type" "fpmove")
+ (set_attr "length" "1,2")])
+
+(define_split
+ [(set (match_operand:TF 0 "register_operand" "")
+ (neg:TF (match_operand:TF 1 "register_operand" "")))]
+ "TARGET_FPU
+ && TARGET_V9
+ && reload_completed
+ && sparc_absnegfloat_split_legitimate (operands[0], operands[1])"
+ [(set (match_dup 2) (neg:DF (match_dup 3)))
+ (set (match_dup 4) (match_dup 5))]
+ "if (GET_CODE (operands[0]) == SUBREG)
+ operands[0] = alter_subreg (operands[0]);
+ if (GET_CODE (operands[1]) == SUBREG)
+ operands[1] = alter_subreg (operands[1]);
+ operands[2] = gen_rtx_raw_REG (DFmode, REGNO (operands[0]));
+ operands[3] = gen_rtx_raw_REG (DFmode, REGNO (operands[1]));
+ operands[4] = gen_rtx_raw_REG (DFmode, REGNO (operands[0]) + 2);
+ operands[5] = gen_rtx_raw_REG (DFmode, REGNO (operands[1]) + 2);")
+
+(define_expand "negdf2"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (neg:DF (match_operand:DF 1 "register_operand" "")))]
"TARGET_FPU"
- "*
-{
- /* v9: can't use fnegs, won't work with upper regs. */
- if (which_alternative == 0)
- return TARGET_V9 ? \"fnegd %0,%0\" : \"fnegs %0,%0\";
- else
- return TARGET_V9 ? \"fnegd %1,%0\;fmovd %S1,%S0\"
- : \"fnegs %1,%0\;fmovs %R1,%R0\;fmovs %S1,%S0\;fmovs %T1,%T0\";
-}"
- [(set_attr "type" "fp")
- (set_attr_alternative "length"
- [(const_int 1)
- (if_then_else (eq_attr "isa" "v9") (const_int 2) (const_int 4))])])
+ "")
-(define_insn "negdf2"
+(define_insn "*negdf2_notv9"
[(set (match_operand:DF 0 "register_operand" "=e,e")
(neg:DF (match_operand:DF 1 "register_operand" "0,e")))]
- "TARGET_FPU"
- "*
-{
- if (TARGET_V9)
- return \"fnegd %1,%0\";
- else if (which_alternative == 0)
- return \"fnegs %0,%0\";
- else
- return \"fnegs %1,%0\;fmovs %R1,%R0\";
-}"
- [(set_attr "type" "fp")
- (set_attr_alternative "length"
- [(const_int 1)
- (if_then_else (eq_attr "isa" "v9") (const_int 1) (const_int 2))])])
+ "TARGET_FPU && ! TARGET_V9"
+ "@
+ fnegs\\t%0, %0
+ #"
+ [(set_attr "type" "fpmove")
+ (set_attr "length" "1,2")])
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (neg:DF (match_operand:DF 1 "register_operand" "")))]
+ "TARGET_FPU
+ && ! TARGET_V9
+ && reload_completed
+ && sparc_absnegfloat_split_legitimate (operands[0], operands[1])"
+ [(set (match_dup 2) (neg:SF (match_dup 3)))
+ (set (match_dup 4) (match_dup 5))]
+ "if (GET_CODE (operands[0]) == SUBREG)
+ operands[0] = alter_subreg (operands[0]);
+ if (GET_CODE (operands[1]) == SUBREG)
+ operands[1] = alter_subreg (operands[1]);
+ operands[2] = gen_rtx_raw_REG (SFmode, REGNO (operands[0]));
+ operands[3] = gen_rtx_raw_REG (SFmode, REGNO (operands[1]));
+ operands[4] = gen_rtx_raw_REG (SFmode, REGNO (operands[0]) + 1);
+ operands[5] = gen_rtx_raw_REG (SFmode, REGNO (operands[1]) + 1);")
+
+(define_insn "*negdf2_v9"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (neg:DF (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_V9"
+ "fnegd\\t%1, %0"
+ [(set_attr "type" "fpmove")
+ (set_attr "length" "1")])
(define_insn "negsf2"
[(set (match_operand:SF 0 "register_operand" "=f")
(neg:SF (match_operand:SF 1 "register_operand" "f")))]
"TARGET_FPU"
- "fnegs %1,%0"
- [(set_attr "type" "fp")])
+ "fnegs\\t%1, %0"
+ [(set_attr "type" "fpmove")
+ (set_attr "length" "1")])
+
+(define_expand "abstf2"
+ [(set (match_operand:TF 0 "register_operand" "")
+ (abs:TF (match_operand:TF 1 "register_operand" "")))]
+ "TARGET_FPU"
+ "")
-(define_insn "abstf2"
+(define_insn "*abstf2_notv9"
[(set (match_operand:TF 0 "register_operand" "=e,e")
(abs:TF (match_operand:TF 1 "register_operand" "0,e")))]
; We don't use quad float insns here so we don't need TARGET_HARD_QUAD.
+ "TARGET_FPU && ! TARGET_V9"
+ "@
+ fabss\\t%0, %0
+ #"
+ [(set_attr "type" "fpmove")
+ (set_attr "length" "1,2")])
+
+(define_split
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (abs:TF (match_operand:TF 1 "register_operand" "0,e")))]
+ "TARGET_FPU
+ && ! TARGET_V9
+ && reload_completed
+ && sparc_absnegfloat_split_legitimate (operands[0], operands[1])"
+ [(set (match_dup 2) (abs:SF (match_dup 3)))
+ (set (match_dup 4) (match_dup 5))
+ (set (match_dup 6) (match_dup 7))]
+ "if (GET_CODE (operands[0]) == SUBREG)
+ operands[0] = alter_subreg (operands[0]);
+ if (GET_CODE (operands[1]) == SUBREG)
+ operands[1] = alter_subreg (operands[1]);
+ operands[2] = gen_rtx_raw_REG (SFmode, REGNO (operands[0]));
+ operands[3] = gen_rtx_raw_REG (SFmode, REGNO (operands[1]));
+ operands[4] = gen_rtx_raw_REG (SFmode, REGNO (operands[0]) + 1);
+ operands[5] = gen_rtx_raw_REG (SFmode, REGNO (operands[1]) + 1);
+ operands[6] = gen_rtx_raw_REG (DFmode, REGNO (operands[0]) + 2);
+ operands[7] = gen_rtx_raw_REG (DFmode, REGNO (operands[1]) + 2);")
+
+(define_insn "*abstf2_hq_v9"
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (abs:TF (match_operand:TF 1 "register_operand" "0,e")))]
+ "TARGET_FPU && TARGET_V9 && TARGET_HARD_QUAD"
+ "@
+ fabsd\\t%0, %0
+ fabsq\\t%1, %0"
+ [(set_attr "type" "fpmove")
+ (set_attr "length" "1")])
+
+(define_insn "*abstf2_v9"
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (abs:TF (match_operand:TF 1 "register_operand" "0,e")))]
+ "TARGET_FPU && TARGET_V9 && !TARGET_HARD_QUAD"
+ "@
+ fabsd\\t%0, %0
+ #"
+ [(set_attr "type" "fpmove")
+ (set_attr "length" "1,2")])
+
+(define_split
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (abs:TF (match_operand:TF 1 "register_operand" "0,e")))]
+ "TARGET_FPU
+ && TARGET_V9
+ && reload_completed
+ && sparc_absnegfloat_split_legitimate (operands[0], operands[1])"
+ [(set (match_dup 2) (abs:DF (match_dup 3)))
+ (set (match_dup 4) (match_dup 5))]
+ "if (GET_CODE (operands[0]) == SUBREG)
+ operands[0] = alter_subreg (operands[0]);
+ if (GET_CODE (operands[1]) == SUBREG)
+ operands[1] = alter_subreg (operands[1]);
+ operands[2] = gen_rtx_raw_REG (DFmode, REGNO (operands[0]));
+ operands[3] = gen_rtx_raw_REG (DFmode, REGNO (operands[1]));
+ operands[4] = gen_rtx_raw_REG (DFmode, REGNO (operands[0]) + 2);
+ operands[5] = gen_rtx_raw_REG (DFmode, REGNO (operands[1]) + 2);")
+
+(define_expand "absdf2"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (abs:DF (match_operand:DF 1 "register_operand" "")))]
"TARGET_FPU"
- "*
-{
- /* v9: can't use fabss, won't work with upper regs. */
- if (which_alternative == 0)
- return TARGET_V9 ? \"fabsd %0,%0\" : \"fabss %0,%0\";
- else
- return TARGET_V9 ? \"fabsd %1,%0\;fmovd %S1,%S0\"
- : \"fabss %1,%0\;fmovs %R1,%R0\;fmovs %S1,%S0\;fmovs %T1,%T0\";
-}"
- [(set_attr "type" "fp")
- (set_attr_alternative "length"
- [(const_int 1)
- (if_then_else (eq_attr "isa" "v9") (const_int 2) (const_int 4))])])
+ "")
-(define_insn "absdf2"
+(define_insn "*absdf2_notv9"
[(set (match_operand:DF 0 "register_operand" "=e,e")
(abs:DF (match_operand:DF 1 "register_operand" "0,e")))]
- "TARGET_FPU"
- "*
-{
- if (TARGET_V9)
- return \"fabsd %1,%0\";
- else if (which_alternative == 0)
- return \"fabss %0,%0\";
- else
- return \"fabss %1,%0\;fmovs %R1,%R0\";
-}"
- [(set_attr "type" "fp")
- (set_attr_alternative "length"
- [(const_int 1)
- (if_then_else (eq_attr "isa" "v9") (const_int 1) (const_int 2))])])
+ "TARGET_FPU && ! TARGET_V9"
+ "@
+ fabss\\t%0, %0
+ #"
+ [(set_attr "type" "fpmove")
+ (set_attr "length" "1,2")])
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "=e,e")
+ (abs:DF (match_operand:DF 1 "register_operand" "0,e")))]
+ "TARGET_FPU
+ && ! TARGET_V9
+ && reload_completed
+ && sparc_absnegfloat_split_legitimate (operands[0], operands[1])"
+ [(set (match_dup 2) (abs:SF (match_dup 3)))
+ (set (match_dup 4) (match_dup 5))]
+ "if (GET_CODE (operands[0]) == SUBREG)
+ operands[0] = alter_subreg (operands[0]);
+ if (GET_CODE (operands[1]) == SUBREG)
+ operands[1] = alter_subreg (operands[1]);
+ operands[2] = gen_rtx_raw_REG (SFmode, REGNO (operands[0]));
+ operands[3] = gen_rtx_raw_REG (SFmode, REGNO (operands[1]));
+ operands[4] = gen_rtx_raw_REG (SFmode, REGNO (operands[0]) + 1);
+ operands[5] = gen_rtx_raw_REG (SFmode, REGNO (operands[1]) + 1);")
+
+(define_insn "*absdf2_v9"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (abs:DF (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_V9"
+ "fabsd\\t%1, %0"
+ [(set_attr "type" "fpmove")
+ (set_attr "length" "1")])
(define_insn "abssf2"
[(set (match_operand:SF 0 "register_operand" "=f")
(abs:SF (match_operand:SF 1 "register_operand" "f")))]
"TARGET_FPU"
- "fabss %1,%0"
- [(set_attr "type" "fp")])
+ "fabss\\t%1, %0"
+ [(set_attr "type" "fpmove")
+ (set_attr "length" "1")])
(define_insn "sqrttf2"
[(set (match_operand:TF 0 "register_operand" "=e")
(sqrt:TF (match_operand:TF 1 "register_operand" "e")))]
"TARGET_FPU && TARGET_HARD_QUAD"
- "fsqrtq %1,%0"
- [(set_attr "type" "fpsqrt")])
+ "fsqrtq\\t%1, %0"
+ [(set_attr "type" "fpsqrtd")
+ (set_attr "length" "1")])
(define_insn "sqrtdf2"
[(set (match_operand:DF 0 "register_operand" "=e")
(sqrt:DF (match_operand:DF 1 "register_operand" "e")))]
"TARGET_FPU"
- "fsqrtd %1,%0"
- [(set_attr "type" "fpsqrt")])
+ "fsqrtd\\t%1, %0"
+ [(set_attr "type" "fpsqrtd")
+ (set_attr "length" "1")])
(define_insn "sqrtsf2"
[(set (match_operand:SF 0 "register_operand" "=f")
(sqrt:SF (match_operand:SF 1 "register_operand" "f")))]
"TARGET_FPU"
- "fsqrts %1,%0"
- [(set_attr "type" "fpsqrt")])
+ "fsqrts\\t%1, %0"
+ [(set_attr "type" "fpsqrts")
+ (set_attr "length" "1")])
\f
;;- arithmetic shift instructions
"*
{
if (GET_CODE (operands[2]) == CONST_INT
- && (unsigned) INTVAL (operands[2]) > 31)
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31)
operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
- return \"sll %1,%2,%0\";
+ return \"sll\\t%1, %2, %0\";
}"
- [(set_attr "type" "shift")])
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+;; We special case multiplication by two, as add can be done
+;; in both ALUs, while shift only in IEU0 on UltraSPARC.
+(define_insn "*ashlsi3_const1"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashift:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 1)))]
+ ""
+ "add\\t%1, %1, %0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "1")])
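+;; Illustrative example (register choice hypothetical): "x << 1" comes out
+;; as "add %o0, %o0, %o1" rather than "sll %o0, 1, %o1", leaving the
+;; UltraSPARC scheduler free to use either integer ALU.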
+
+(define_expand "ashldi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64 || TARGET_V8PLUS"
+ "
+{
+ if (! TARGET_ARCH64)
+ {
+ if (GET_CODE (operands[2]) == CONST_INT)
+ FAIL;
+ emit_insn (gen_ashldi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+;; We special case multiplication by two, as add can be done
+;; in both ALUs, while shift only in IEU0 on UltraSPARC.
+(define_insn "*ashldi3_const1"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 1)))]
+ "TARGET_ARCH64"
+ "add\\t%1, %1, %0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "1")])
-(define_insn "ashldi3"
+(define_insn "*ashldi3_sp64"
[(set (match_operand:DI 0 "register_operand" "=r")
(ashift:DI (match_operand:DI 1 "register_operand" "r")
(match_operand:SI 2 "arith_operand" "rI")))]
"*
{
if (GET_CODE (operands[2]) == CONST_INT
- && (unsigned) INTVAL (operands[2]) > 63)
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 63)
operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
- return \"sllx %1,%2,%0\";
-}")
+ return \"sllx\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+;; XXX UGH!
+(define_insn "ashldi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=&h,&h,r")
+ (ashift:DI (match_operand:DI 1 "arith_operand" "rI,0,rI")
+ (match_operand:SI 2 "arith_operand" "rI,rI,rI")))
+ (clobber (match_scratch:SI 3 "=X,X,&h"))]
+ "TARGET_V8PLUS"
+ "*return sparc_v8plus_shift (operands, insn, \"sllx\");"
+ [(set_attr "length" "5,5,6")])
+
+;; Optimize (1LL<<x)-1
+;; XXX this also needs to be fixed to handle equal subregs
+;; XXX before we can re-enable it.
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=h")
+ (plus:DI (ashift:DI (const_int 1)
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (const_int -1)))]
+ "0 && TARGET_V8PLUS"
+ "*
+{
+ if (GET_CODE (operands[2]) == REG && REGNO (operands[2]) == REGNO (operands[0]))
+ return \"mov 1,%L0\;sllx %L0,%2,%L0\;sub %L0,1,%L0\;srlx %L0,32,%H0\";
+ return \"mov 1,%H0\;sllx %H0,%2,%L0\;sub %L0,1,%L0\;srlx %L0,32,%H0\";
+}"
+ [(set_attr "length" "4")])
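+;; For reference, (1LL << x) - 1 is the usual "mask of the low x bits"
+;; idiom; e.g. x == 5 yields 0x1f.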
(define_insn "*cmp_cc_ashift_1"
[(set (reg:CC_NOOV 100)
(compare:CC_NOOV (ashift:SI (match_operand:SI 0 "register_operand" "r")
(const_int 1))
(const_int 0)))]
- ""
- "addcc %0,%0,%%g0"
- [(set_attr "type" "compare")])
+ "! TARGET_LIVE_G0"
+ "addcc\\t%0, %0, %%g0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "*cmp_cc_set_ashift_1"
[(set (reg:CC_NOOV 100)
(set (match_operand:SI 0 "register_operand" "=r")
(ashift:SI (match_dup 1) (const_int 1)))]
""
- "addcc %1,%1,%0")
+ "addcc\\t%1, %1, %0"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
(define_insn "ashrsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
"*
{
if (GET_CODE (operands[2]) == CONST_INT
- && (unsigned) INTVAL (operands[2]) > 31)
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31)
operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
- return \"sra %1,%2,%0\";
+ return \"sra\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+(define_insn "*ashrsi3_extend"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (ashiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "r"))))]
+ "TARGET_ARCH64"
+ "sra\\t%1, %2, %0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+;; This handles the same case as above, but with a constant shift instead
+;; of a register; the combiner "simplifies" the RTL a little for us first.
+(define_insn "*ashrsi3_extend2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashiftrt:DI (ashift:DI (subreg:DI (match_operand:SI 1 "register_operand" "r") 0)
+ (const_int 32))
+ (match_operand:SI 2 "small_int_or_double" "n")))]
+ "TARGET_ARCH64
+ && ((GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) >= 32 && INTVAL (operands[2]) < 64)
+ || (GET_CODE (operands[2]) == CONST_DOUBLE
+ && !CONST_DOUBLE_HIGH (operands[2])
+ && CONST_DOUBLE_LOW (operands[2]) >= 32
+ && CONST_DOUBLE_LOW (operands[2]) < 64))"
+ "*
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) - 32);
+
+ return \"sra\\t%1, %2, %0\";
}"
- [(set_attr "type" "shift")])
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
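+;; For illustration: a source-level "(long) (x >> 5)" arrives here as
+;; (ashiftrt:DI (ashift:DI (subreg:DI x 0) (const_int 32)) (const_int 37));
+;; subtracting 32 back off lets us emit a single "sra" by 5.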
+
+(define_expand "ashrdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64 || TARGET_V8PLUS"
+ "
+{
+ if (! TARGET_ARCH64)
+ {
+ if (GET_CODE (operands[2]) == CONST_INT)
+ FAIL; /* prefer generic code in this case */
+ emit_insn (gen_ashrdi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
-(define_insn "ashrdi3"
+(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
(ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
(match_operand:SI 2 "arith_operand" "rI")))]
"*
{
if (GET_CODE (operands[2]) == CONST_INT
- && (unsigned) INTVAL (operands[2]) > 63)
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 63)
operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
- return \"srax %1,%2,%0\";
-}")
+ return \"srax\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+;; XXX
+(define_insn "ashrdi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=&h,&h,r")
+ (ashiftrt:DI (match_operand:DI 1 "arith_operand" "rI,0,rI")
+ (match_operand:SI 2 "arith_operand" "rI,rI,rI")))
+ (clobber (match_scratch:SI 3 "=X,X,&h"))]
+ "TARGET_V8PLUS"
+ "*return sparc_v8plus_shift (operands, insn, \"srax\");"
+ [(set_attr "length" "5,5,6")])
(define_insn "lshrsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
"*
{
if (GET_CODE (operands[2]) == CONST_INT
- && (unsigned) INTVAL (operands[2]) > 31)
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31)
operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
- return \"srl %1,%2,%0\";
+ return \"srl\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+;; This handles the case where
+;; (zero_extend:DI (lshiftrt:SI (match_operand:SI) (match_operand:SI))),
+;; but the combiner "simplifies" it for us first.
+(define_insn "*lshrsi3_extend"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (subreg:DI (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "r")) 0)
+ (match_operand 3 "" "")))]
+ "TARGET_ARCH64
+ && ((GET_CODE (operands[3]) == CONST_DOUBLE
+ && CONST_DOUBLE_HIGH (operands[3]) == 0
+ && CONST_DOUBLE_LOW (operands[3]) == 0xffffffff)
+#if HOST_BITS_PER_WIDE_INT >= 64
+ || (GET_CODE (operands[3]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[3]) == 0xffffffff)
+#endif
+ )"
+ "srl\\t%1, %2, %0"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
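+;; For illustration: (zero_extend:DI (lshiftrt:SI x y)) is rewritten by the
+;; combiner into the AND form above with mask 0xffffffff; on arch64 a plain
+;; "srl" already leaves the upper 32 bits zero, so one insn suffices.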
+
+;; This handles the case where
+;; (lshiftrt:DI (zero_extend:DI (match_operand:SI)) (const_int n)), with
+;; 0 <= n < 32, but the combiner "simplifies" it for us first.
+(define_insn "*lshrsi3_extend2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extract:DI (subreg:DI (match_operand:SI 1 "register_operand" "r") 0)
+ (match_operand 2 "small_int_or_double" "n")
+ (const_int 32)))]
+ "TARGET_ARCH64
+ && ((GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) < 32)
+ || (GET_CODE (operands[2]) == CONST_DOUBLE
+ && CONST_DOUBLE_HIGH (operands[2]) == 0
+ && (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (operands[2]) < 32))"
+ "*
+{
+ operands[2] = GEN_INT (32 - INTVAL (operands[2]));
+
+ return \"srl\\t%1, %2, %0\";
}"
- [(set_attr "type" "shift")])
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
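+;; For illustration: a 10-bit zero_extract at bit position 32 selects the
+;; top 10 bits of the SImode value, i.e. "x >> 22"; hence the 32 - width
+;; adjustment above.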
+
+(define_expand "lshrdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64 || TARGET_V8PLUS"
+ "
+{
+ if (! TARGET_ARCH64)
+ {
+ if (GET_CODE (operands[2]) == CONST_INT)
+ FAIL;
+ emit_insn (gen_lshrdi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
-(define_insn "lshrdi3"
+(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
(lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
(match_operand:SI 2 "arith_operand" "rI")))]
"*
{
if (GET_CODE (operands[2]) == CONST_INT
- && (unsigned) INTVAL (operands[2]) > 63)
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 63)
operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
- return \"srlx %1,%2,%0\";
-}")
+ return \"srlx\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+;; XXX
+(define_insn "lshrdi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=&h,&h,r")
+ (lshiftrt:DI (match_operand:DI 1 "arith_operand" "rI,0,rI")
+ (match_operand:SI 2 "arith_operand" "rI,rI,rI")))
+ (clobber (match_scratch:SI 3 "=X,X,&h"))]
+ "TARGET_V8PLUS"
+ "*return sparc_v8plus_shift (operands, insn, \"srlx\");"
+ [(set_attr "length" "5,5,6")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI (subreg:SI (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 32)) 0)
+ (match_operand:SI 2 "small_int_or_double" "n")))]
+ "TARGET_ARCH64
+ && ((GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) < 32)
+ || (GET_CODE (operands[2]) == CONST_DOUBLE
+ && !CONST_DOUBLE_HIGH (operands[2])
+ && (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (operands[2]) < 32))"
+ "*
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + 32);
+
+ return \"srax\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI (subreg:SI (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 32)) 0)
+ (match_operand:SI 2 "small_int_or_double" "n")))]
+ "TARGET_ARCH64
+ && ((GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) < 32)
+ || (GET_CODE (operands[2]) == CONST_DOUBLE
+ && !CONST_DOUBLE_HIGH (operands[2])
+ && (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (operands[2]) < 32))"
+ "*
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + 32);
+
+ return \"srlx\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI (subreg:SI (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "small_int_or_double" "n")) 0)
+ (match_operand:SI 3 "small_int_or_double" "n")))]
+ "TARGET_ARCH64
+ && GET_CODE (operands[2]) == CONST_INT && GET_CODE (operands[3]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) >= 32
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[3]) < 32
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[2]) + INTVAL (operands[3])) < 64"
+ "*
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + INTVAL (operands[3]));
+
+ return \"srax\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI (subreg:SI (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "small_int_or_double" "n")) 0)
+ (match_operand:SI 3 "small_int_or_double" "n")))]
+ "TARGET_ARCH64
+ && GET_CODE (operands[2]) == CONST_INT && GET_CODE (operands[3]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) >= 32
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[3]) < 32
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[2]) + INTVAL (operands[3])) < 64"
+ "*
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + INTVAL (operands[3]));
+
+ return \"srlx\\t%1, %2, %0\";
+}"
+ [(set_attr "type" "shift")
+ (set_attr "length" "1")])
\f
;; Unconditional and other jump instructions
;; On the Sparc, by setting the annul bit on an unconditional branch, the
(define_insn "jump"
[(set (pc) (label_ref (match_operand 0 "" "")))]
""
- "b%* %l0%("
+ "*
+{
+  /* TurboSparc is reported to have problems with
+ foo: b,a foo
+ i.e. an empty loop with the annul bit set. The workaround is to use
+ foo: b foo; nop
+ instead. */
+
+ if (! TARGET_V9 && flag_delayed_branch
+ && (insn_addresses[INSN_UID (operands[0])]
+ == insn_addresses[INSN_UID (insn)]))
+ return \"b\\t%l0%#\";
+ else
+ return TARGET_V9 ? \"ba,pt%*\\t%%xcc, %l0%(\" : \"b%*\\t%l0%(\";
+}"
[(set_attr "type" "uncond_branch")])
(define_expand "tablejump"
""
"
{
- if (GET_MODE (operands[0]) != Pmode)
+ if (GET_MODE (operands[0]) != CASE_VECTOR_MODE)
abort ();
- /* We need to use the PC value in %o7 that was set up when the address
- of the label was loaded into a register, so we need different RTL. */
+ /* In pic mode, our address differences are against the base of the
+ table. Add that base value back in; CSE ought to be able to combine
+ the two address loads. */
if (flag_pic)
- {
- if (!TARGET_PTR64)
- emit_jump_insn (gen_pic_tablejump_32 (operands[0], operands[1]));
- else
- emit_jump_insn (gen_pic_tablejump_64 (operands[0], operands[1]));
- DONE;
+ {
+ rtx tmp, tmp2;
+ tmp = gen_rtx_LABEL_REF (Pmode, operands[1]);
+ tmp2 = operands[0];
+ if (CASE_VECTOR_MODE != Pmode)
+ tmp2 = gen_rtx_SIGN_EXTEND (Pmode, tmp2);
+ tmp = gen_rtx_PLUS (Pmode, tmp2, tmp);
+ operands[0] = memory_address (Pmode, tmp);
}
}")
-(define_insn "pic_tablejump_32"
- [(set (pc) (match_operand:SI 0 "register_operand" "r"))
- (use (label_ref (match_operand 1 "" "")))
- (use (reg:SI 15))]
- "! TARGET_PTR64"
- "jmp %%o7+%0%#"
- [(set_attr "type" "uncond_branch")])
-
-(define_insn "pic_tablejump_64"
- [(set (pc) (match_operand:DI 0 "register_operand" "r"))
- (use (label_ref (match_operand 1 "" "")))
- (use (reg:DI 15))]
- "TARGET_PTR64"
- "jmp %%o7+%0%#"
- [(set_attr "type" "uncond_branch")])
-
(define_insn "*tablejump_sp32"
[(set (pc) (match_operand:SI 0 "address_operand" "p"))
(use (label_ref (match_operand 1 "" "")))]
"! TARGET_PTR64"
- "jmp %a0%#"
+ "jmp\\t%a0%#"
[(set_attr "type" "uncond_branch")])
(define_insn "*tablejump_sp64"
[(set (pc) (match_operand:DI 0 "address_operand" "p"))
(use (label_ref (match_operand 1 "" "")))]
"TARGET_PTR64"
- "jmp %a0%#"
+ "jmp\\t%a0%#"
[(set_attr "type" "uncond_branch")])
;; This pattern recognizes the "instruction" that appears in
;(define_insn "*unimp_insn"
; [(match_operand:SI 0 "immediate_operand" "")]
; "GET_CODE (operands[0]) == CONST_INT && INTVAL (operands[0]) > 0"
-; "unimp %0"
+; "unimp\\t%0"
; [(set_attr "type" "marker")])
;;- jump to subroutine
if (GET_CODE (XEXP (operands[0], 0)) == LABEL_REF)
{
/* This is really a PIC sequence. We want to represent
- it as a funny jump so it's delay slots can be filled.
+ it as a funny jump so its delay slots can be filled.
     ??? But if this really *is* a CALL, won't it clobber the
call-clobbered registers? We lose this if it is a JUMP_INSN.
if (! TARGET_ARCH64 && INTVAL (operands[3]) != 0)
emit_jump_insn
- (gen_rtx (PARALLEL, VOIDmode,
- gen_rtvec (3,
- gen_rtx (SET, VOIDmode, pc_rtx,
- XEXP (operands[0], 0)),
- GEN_INT (INTVAL (operands[3]) & 0xfff),
- gen_rtx (CLOBBER, VOIDmode,
- gen_rtx (REG, Pmode, 15)))));
+ (gen_rtx_PARALLEL
+ (VOIDmode,
+ gen_rtvec (3,
+ gen_rtx_SET (VOIDmode, pc_rtx, XEXP (operands[0], 0)),
+ operands[3],
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 15)))));
else
emit_jump_insn
- (gen_rtx (PARALLEL, VOIDmode,
- gen_rtvec (2,
- gen_rtx (SET, VOIDmode, pc_rtx,
- XEXP (operands[0], 0)),
- gen_rtx (CLOBBER, VOIDmode,
- gen_rtx (REG, Pmode, 15)))));
+ (gen_rtx_PARALLEL
+ (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, pc_rtx, XEXP (operands[0], 0)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 15)))));
goto finish_call;
}
means 6 on the sparc. */
#if 0
if (operands[2])
- nregs_rtx = gen_rtx (CONST_INT, VOIDmode, REGNO (operands[2]) - 8);
+ nregs_rtx = GEN_INT (REGNO (operands[2]) - 8);
else
- nregs_rtx = gen_rtx (CONST_INT, VOIDmode, 6);
+ nregs_rtx = GEN_INT (6);
#else
nregs_rtx = const0_rtx;
#endif
if (! TARGET_ARCH64 && INTVAL (operands[3]) != 0)
emit_call_insn
- (gen_rtx (PARALLEL, VOIDmode,
- gen_rtvec (3, gen_rtx (CALL, VOIDmode, fn_rtx, nregs_rtx),
- GEN_INT (INTVAL (operands[3]) & 0xfff),
- gen_rtx (CLOBBER, VOIDmode,
- gen_rtx (REG, Pmode, 15)))));
+ (gen_rtx_PARALLEL
+ (VOIDmode,
+ gen_rtvec (3, gen_rtx_CALL (VOIDmode, fn_rtx, nregs_rtx),
+ operands[3],
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 15)))));
else
emit_call_insn
- (gen_rtx (PARALLEL, VOIDmode,
- gen_rtvec (2, gen_rtx (CALL, VOIDmode, fn_rtx, nregs_rtx),
- gen_rtx (CLOBBER, VOIDmode,
- gen_rtx (REG, Pmode, 15)))));
+ (gen_rtx_PARALLEL
+ (VOIDmode,
+ gen_rtvec (2, gen_rtx_CALL (VOIDmode, fn_rtx, nregs_rtx),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 15)))));
finish_call:
#if 0
(clobber (reg:SI 15))]
;;- Do not use operand 1 for most machines.
"! TARGET_PTR64"
- "call %a0,%1%#"
+ "call\\t%a0, %1%#"
[(set_attr "type" "call")])
(define_insn "*call_symbolic_sp32"
(clobber (reg:SI 15))]
;;- Do not use operand 1 for most machines.
"! TARGET_PTR64"
- "call %a0,%1%#"
+ "call\\t%a0, %1%#"
[(set_attr "type" "call")])
(define_insn "*call_address_sp64"
(clobber (reg:DI 15))]
;;- Do not use operand 1 for most machines.
"TARGET_PTR64"
- "call %a0,%1%#"
+ "call\\t%a0, %1%#"
[(set_attr "type" "call")])
(define_insn "*call_symbolic_sp64"
(clobber (reg:DI 15))]
;;- Do not use operand 1 for most machines.
"TARGET_PTR64"
- "call %a0,%1%#"
+ "call\\t%a0, %1%#"
[(set_attr "type" "call")])
;; This is a call that wants a structure value.
(clobber (reg:SI 15))]
;;- Do not use operand 1 for most machines.
"! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0"
- "call %a0,%1\;nop\;unimp %2"
+ "call\\t%a0, %1\\n\\tnop\\n\\tunimp\\t%2"
[(set_attr "type" "call_no_delay_slot")])
;; This is a call that wants a structure value.
(clobber (reg:SI 15))]
;;- Do not use operand 1 for most machines.
"! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0"
- "call %a0,%1\;nop\;unimp %2"
+ "call\\t%a0, %1\\n\\tnop\\n\\tunimp\\t%2"
[(set_attr "type" "call_no_delay_slot")])
;; This is a call that may want a structure value. This is used for
(clobber (reg:SI 15))]
;;- Do not use operand 1 for most machines.
"! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0"
- "call %a0,%1\;nop\;nop"
+ "call\\t%a0, %1\\n\\tnop\\n\\tnop"
[(set_attr "type" "call_no_delay_slot")])
;; This is a call that wants a structure value.
(clobber (reg:SI 15))]
;;- Do not use operand 1 for most machines.
"! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0"
- "call %a0,%1\;nop\;nop"
+ "call\\t%a0, %1\\n\\tnop\\n\\tnop"
[(set_attr "type" "call_no_delay_slot")])
(define_expand "call_value"
#if 0
if (operands[3])
- nregs_rtx = gen_rtx (CONST_INT, VOIDmode, REGNO (operands[3]) - 8);
+ nregs_rtx = GEN_INT (REGNO (operands[3]) - 8);
else
- nregs_rtx = gen_rtx (CONST_INT, VOIDmode, 6);
+ nregs_rtx = GEN_INT (6);
#else
nregs_rtx = const0_rtx;
#endif
vec = gen_rtvec (2,
- gen_rtx (SET, VOIDmode, operands[0],
- gen_rtx (CALL, VOIDmode, fn_rtx, nregs_rtx)),
- gen_rtx (CLOBBER, VOIDmode, gen_rtx (REG, Pmode, 15)));
+ gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_CALL (VOIDmode, fn_rtx, nregs_rtx)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 15)));
- emit_call_insn (gen_rtx (PARALLEL, VOIDmode, vec));
+ emit_call_insn (gen_rtx_PARALLEL (VOIDmode, vec));
DONE;
}")
(clobber (reg:SI 15))]
;;- Do not use operand 2 for most machines.
"! TARGET_PTR64"
- "call %a1,%2%#"
+ "call\\t%a1, %2%#"
[(set_attr "type" "call")])
(define_insn "*call_value_symbolic_sp32"
(clobber (reg:SI 15))]
;;- Do not use operand 2 for most machines.
"! TARGET_PTR64"
- "call %a1,%2%#"
+ "call\\t%a1, %2%#"
[(set_attr "type" "call")])
(define_insn "*call_value_address_sp64"
- [(set (match_operand 0 "" "=rf")
+ [(set (match_operand 0 "" "")
(call (mem:SI (match_operand:DI 1 "address_operand" "p"))
(match_operand 2 "" "")))
(clobber (reg:DI 15))]
;;- Do not use operand 2 for most machines.
"TARGET_PTR64"
- "call %a1,%2%#"
+ "call\\t%a1, %2%#"
[(set_attr "type" "call")])
(define_insn "*call_value_symbolic_sp64"
- [(set (match_operand 0 "" "=rf")
+ [(set (match_operand 0 "" "")
(call (mem:SI (match_operand:DI 1 "symbolic_operand" "s"))
(match_operand 2 "" "")))
(clobber (reg:DI 15))]
;;- Do not use operand 2 for most machines.
"TARGET_PTR64"
- "call %a1,%2%#"
+ "call\\t%a1, %2%#"
[(set_attr "type" "call")])
(define_expand "untyped_call"
(define_insn "blockage"
[(unspec_volatile [(const_int 0)] 0)]
""
- "")
+ ""
+ [(set_attr "length" "0")])
;; Prepare to return any type including a structure value.
""
"
{
- rtx valreg1 = gen_rtx (REG, DImode, 24);
- rtx valreg2 = gen_rtx (REG, TARGET_ARCH64 ? TFmode : DFmode, 32);
+ rtx valreg1 = gen_rtx_REG (DImode, 24);
+ rtx valreg2 = gen_rtx_REG (TARGET_ARCH64 ? TFmode : DFmode, 32);
rtx result = operands[0];
if (! TARGET_ARCH64)
{
- rtx rtnreg = gen_rtx (REG, SImode, (leaf_function ? 15 : 31));
+ rtx rtnreg = gen_rtx_REG (SImode, (current_function_uses_only_leaf_regs
+ ? 15 : 31));
rtx value = gen_reg_rtx (SImode);
/* Fetch the instruction where we will return to and see if it's an unimp
instruction (the most significant 10 bits will be zero). If so,
update the return address to skip the unimp instruction. */
emit_move_insn (value,
- gen_rtx (MEM, SImode, plus_constant (rtnreg, 8)));
+ gen_rtx_MEM (SImode, plus_constant (rtnreg, 8)));
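+      /* Shifting right by 22 leaves just the top 10 bits, which are zero
+	 only for an unimp instruction.  */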
emit_insn (gen_lshrsi3 (value, value, GEN_INT (22)));
emit_insn (gen_update_return (rtnreg, value));
}
plus_constant (XEXP (result, 0), 8)));
/* Put USE insns before the return. */
- emit_insn (gen_rtx (USE, VOIDmode, valreg1));
- emit_insn (gen_rtx (USE, VOIDmode, valreg2));
+ emit_insn (gen_rtx_USE (VOIDmode, valreg1));
+ emit_insn (gen_rtx_USE (VOIDmode, valreg2));
/* Construct the return. */
expand_null_return ();
(define_insn "update_return"
[(unspec:SI [(match_operand:SI 0 "register_operand" "r")
- (match_operand:SI 1 "register_operand" "r")] 0)]
+ (match_operand:SI 1 "register_operand" "r")] 1)]
"! TARGET_ARCH64"
"cmp %1,0\;be,a .+8\;add %0,4,%0"
[(set_attr "type" "multi")])
(use (reg:SI 31))]
"! TARGET_EPILOGUE"
"* return output_return (operands);"
- [(set_attr "type" "multi")])
+ [(set_attr "type" "return")])
+
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "arith_operand" "rI"))
+ (parallel [(return)
+ (use (reg:SI 31))])]
+ "sparc_return_peephole_ok (operands[0], operands[1])"
+ "return\\t%%i7+8\\n\\tmov\\t%Y1, %Y0")
(define_insn "nop"
[(const_int 0)]
""
- "nop")
+ "nop"
+ [(set_attr "type" "ialu")
+ (set_attr "length" "1")])
(define_expand "indirect_jump"
[(set (pc) (match_operand 0 "address_operand" "p"))]
(define_insn "*branch_sp32"
[(set (pc) (match_operand:SI 0 "address_operand" "p"))]
"! TARGET_PTR64"
- "jmp %a0%#"
+ "jmp\\t%a0%#"
[(set_attr "type" "uncond_branch")])
(define_insn "*branch_sp64"
[(set (pc) (match_operand:DI 0 "address_operand" "p"))]
"TARGET_PTR64"
- "jmp %a0%#"
+ "jmp\\t%a0%#"
[(set_attr "type" "uncond_branch")])
;; ??? Doesn't work with -mflat.
""
"
{
+#if 0
+ rtx chain = operands[0];
+#endif
+ rtx fp = operands[1];
+ rtx stack = operands[2];
+ rtx lab = operands[3];
+ rtx labreg;
+
/* Trap instruction to flush all the register windows. */
emit_insn (gen_flush_register_windows ());
- /* Load the fp value for the containing fn into %fp.
- This is needed because operands[2] refers to %fp.
- Virtual register instantiation fails if the virtual %fp isn't set from a
- register. Thus we must copy operands[0] into a register if it isn't
- already one. */
- if (GET_CODE (operands[0]) != REG)
- operands[0] = force_reg (Pmode, operands[0]);
- emit_move_insn (virtual_stack_vars_rtx, operands[0]);
+
+ /* Load the fp value for the containing fn into %fp. This is needed
+ because STACK refers to %fp. Note that virtual register instantiation
+ fails if the virtual %fp isn't set from a register. */
+ if (GET_CODE (fp) != REG)
+ fp = force_reg (Pmode, fp);
+ emit_move_insn (virtual_stack_vars_rtx, fp);
+
/* Find the containing function's current nonlocal goto handler,
which will do any cleanups and then jump to the label. */
- emit_move_insn (gen_rtx (REG, Pmode, 8), operands[1]);
+ labreg = gen_rtx_REG (Pmode, 8);
+ emit_move_insn (labreg, lab);
+
/* Restore %fp from stack pointer value for containing function.
The restore insn that follows will move this to %sp,
and reload the appropriate value into %fp. */
- emit_move_insn (frame_pointer_rtx, operands[2]);
- /* Put in the static chain register the nonlocal label address. */
- emit_move_insn (static_chain_rtx, operands[3]);
+ emit_move_insn (frame_pointer_rtx, stack);
+
/* USE of frame_pointer_rtx added for consistency; not clear if
really needed. */
- emit_insn (gen_rtx (USE, VOIDmode, frame_pointer_rtx));
- emit_insn (gen_rtx (USE, VOIDmode, stack_pointer_rtx));
- emit_insn (gen_rtx (USE, VOIDmode, static_chain_rtx));
+ /*emit_insn (gen_rtx_USE (VOIDmode, frame_pointer_rtx));*/
+ emit_insn (gen_rtx_USE (VOIDmode, stack_pointer_rtx));
+
+#if 0
/* Return, restoring reg window and jumping to goto handler. */
- emit_insn (gen_goto_handler_and_restore ());
+ if (TARGET_V9 && GET_CODE (chain) == CONST_INT
+ && ! (INTVAL (chain) & ~(HOST_WIDE_INT)0xffffffff))
+ {
+ emit_insn (gen_goto_handler_and_restore_v9 (labreg, static_chain_rtx,
+ chain));
+ emit_barrier ();
+ DONE;
+ }
+ /* Put in the static chain register the nonlocal label address. */
+ emit_move_insn (static_chain_rtx, chain);
+#endif
+
+ emit_insn (gen_rtx_USE (VOIDmode, static_chain_rtx));
+ emit_insn (gen_goto_handler_and_restore (labreg));
emit_barrier ();
DONE;
}")
(define_insn "flush_register_windows"
[(unspec_volatile [(const_int 0)] 1)]
""
- ;; ??? Use TARGET_V9 instead?
- "* return TARGET_ARCH64 ? \"flushw\" : \"ta 3\";"
- [(set_attr "type" "misc")])
+ "* return TARGET_V9 ? \"flushw\" : \"ta\\t3\";"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
(define_insn "goto_handler_and_restore"
- [(unspec_volatile [(const_int 0)] 2)
- (use (reg:SI 8))]
- ""
- "jmp %%o0+0\;restore"
+ [(unspec_volatile [(match_operand 0 "register_operand" "=r")] 2)]
+ "GET_MODE (operands[0]) == Pmode"
+ "jmp\\t%0+0\\n\\trestore"
[(set_attr "type" "misc")
(set_attr "length" "2")])
+;;(define_insn "goto_handler_and_restore_v9"
+;; [(unspec_volatile [(match_operand:SI 0 "register_operand" "=r,r")
+;; (match_operand:SI 1 "register_operand" "=r,r")
+;; (match_operand:SI 2 "const_int_operand" "I,n")] 3)]
+;; "TARGET_V9 && ! TARGET_ARCH64"
+;; "@
+;; return\\t%0+0\\n\\tmov\\t%2, %Y1
+;; sethi\\t%%hi(%2), %1\\n\\treturn\\t%0+0\\n\\tor\\t%Y1, %%lo(%2), %Y1"
+;; [(set_attr "type" "misc")
+;; (set_attr "length" "2,3")])
+;;
+;;(define_insn "*goto_handler_and_restore_v9_sp64"
+;; [(unspec_volatile [(match_operand:DI 0 "register_operand" "=r,r")
+;; (match_operand:DI 1 "register_operand" "=r,r")
+;; (match_operand:SI 2 "const_int_operand" "I,n")] 3)]
+;; "TARGET_V9 && TARGET_ARCH64"
+;; "@
+;; return\\t%0+0\\n\\tmov\\t%2, %Y1
+;; sethi\\t%%hi(%2), %1\\n\\treturn\\t%0+0\\n\\tor\\t%Y1, %%lo(%2), %Y1"
+;; [(set_attr "type" "misc")
+;; (set_attr "length" "2,3")])
+
;; Pattern for use after a setjmp to store FP and the return register
;; into the stack area.
emit_insn (gen_setjmp_64 ());
else
emit_insn (gen_setjmp_32 ());
-
DONE;
}")
;; Special pattern for the FLUSH instruction.
+; We do SImode and DImode versions of this to quiet down genrecog's
+; complaints about the define_insn otherwise missing a mode.  We make "flush", aka
+; gen_flush, the default one since sparc_initialize_trampoline uses
+; it on SImode mem values.
+
(define_insn "flush"
- [(unspec_volatile [(match_operand 0 "memory_operand" "m")] 3)]
+ [(unspec_volatile [(match_operand:SI 0 "memory_operand" "m")] 3)]
+ ""
+ "* return TARGET_V9 ? \"flush %f0\" : \"iflush %f0\";"
+ [(set_attr "type" "misc")])
+
+(define_insn "flushdi"
+ [(unspec_volatile [(match_operand:DI 0 "memory_operand" "m")] 3)]
""
"* return TARGET_V9 ? \"flush %f0\" : \"iflush %f0\";"
[(set_attr "type" "misc")])
+
\f
;; find first set.
;; zero also differ. It takes at least 7 instructions to get the proper
;; result. Here is an obvious 8 instruction sequence.
+;; XXX
(define_insn "ffssi2"
[(set (match_operand:SI 0 "register_operand" "=&r")
(ffs:SI (match_operand:SI 1 "register_operand" "r")))
;; ??? This should be a define_expand, so that the extra instructions have
;; a chance of being optimized away.
-(define_insn "ffsdi2"
- [(set (match_operand:DI 0 "register_operand" "=&r")
- (ffs:DI (match_operand:DI 1 "register_operand" "r")))
- (clobber (match_scratch:DI 2 "=&r"))]
- "TARGET_ARCH64"
- "neg %1,%2\;not %2,%2\;xor %1,%2,%2\;popc %2,%0\;movrz %1,0,%0"
- [(set_attr "type" "multi")
- (set_attr "length" "5")])
-\f
-;; Split up troublesome insns for better scheduling. */
-
-;; The following patterns are straightforward. They can be applied
-;; either before or after register allocation.
-
-(define_split
- [(set (match_operand 0 "splittable_symbolic_memory_operand" "")
- (match_operand 1 "reg_or_0_operand" ""))
- (clobber (match_operand:SI 2 "register_operand" ""))]
- "! flag_pic"
- [(set (match_dup 2) (high:SI (match_dup 3)))
- (set (match_dup 4) (match_dup 1))]
- "
-{
- operands[3] = XEXP (operands[0], 0);
- operands[4] = gen_rtx (MEM, GET_MODE (operands[0]),
- gen_rtx (LO_SUM, SImode, operands[2], operands[3]));
- MEM_IN_STRUCT_P (operands[4]) = MEM_IN_STRUCT_P (operands[0]);
- MEM_VOLATILE_P (operands[4]) = MEM_VOLATILE_P (operands[0]);
- RTX_UNCHANGING_P (operands[4]) = RTX_UNCHANGING_P (operands[0]);
-}")
-
-(define_split
- [(set (match_operand 0 "splittable_immediate_memory_operand" "")
- (match_operand 1 "general_operand" ""))
- (clobber (match_operand:SI 2 "register_operand" ""))]
- "flag_pic"
- [(set (match_dup 3) (match_dup 1))]
- "
-{
- rtx addr = legitimize_pic_address (XEXP (operands[0], 0),
- GET_MODE (operands[0]),
- operands[2]);
- operands[3] = gen_rtx (MEM, GET_MODE (operands[0]), addr);
- MEM_IN_STRUCT_P (operands[3]) = MEM_IN_STRUCT_P (operands[0]);
- MEM_VOLATILE_P (operands[3]) = MEM_VOLATILE_P (operands[0]);
- RTX_UNCHANGING_P (operands[3]) = RTX_UNCHANGING_P (operands[0]);
-}")
-
-(define_split
- [(set (match_operand 0 "register_operand" "")
- (match_operand 1 "splittable_immediate_memory_operand" ""))]
- "flag_pic"
- [(set (match_dup 0) (match_dup 2))]
- "
-{
- rtx addr = legitimize_pic_address (XEXP (operands[1], 0),
- GET_MODE (operands[1]),
- operands[0]);
- operands[2] = gen_rtx (MEM, GET_MODE (operands[1]), addr);
- MEM_IN_STRUCT_P (operands[2]) = MEM_IN_STRUCT_P (operands[1]);
- MEM_VOLATILE_P (operands[2]) = MEM_VOLATILE_P (operands[1]);
- RTX_UNCHANGING_P (operands[2]) = RTX_UNCHANGING_P (operands[1]);
-}")
-
-;; Sign- and Zero-extend operations can have symbolic memory operands.
-
-(define_split
- [(set (match_operand 0 "register_operand" "")
- (match_operator 1 "extend_op" [(match_operand 2 "splittable_immediate_memory_operand" "")]))]
- "flag_pic"
- [(set (match_dup 0) (match_op_dup 1 [(match_dup 3)]))]
- "
-{
- rtx addr = legitimize_pic_address (XEXP (operands[2], 0),
- GET_MODE (operands[2]),
- operands[0]);
- operands[3] = gen_rtx (MEM, GET_MODE (operands[2]), addr);
- MEM_IN_STRUCT_P (operands[3]) = MEM_IN_STRUCT_P (operands[2]);
- MEM_VOLATILE_P (operands[3]) = MEM_VOLATILE_P (operands[2]);
- RTX_UNCHANGING_P (operands[3]) = RTX_UNCHANGING_P (operands[2]);
-}")
-
-(define_split
- [(set (match_operand:SI 0 "register_operand" "")
- (match_operand:SI 1 "immediate_operand" ""))]
- "! flag_pic && (GET_CODE (operands[1]) == SYMBOL_REF
- || GET_CODE (operands[1]) == CONST
- || GET_CODE (operands[1]) == LABEL_REF)"
- [(set (match_dup 0) (high:SI (match_dup 1)))
- (set (match_dup 0)
- (lo_sum:SI (match_dup 0) (match_dup 1)))]
- "")
-
-;; LABEL_REFs are not modified by `legitimize_pic_address'
-;; so do not recurse infinitely in the PIC case.
-(define_split
- [(set (match_operand:SI 0 "register_operand" "")
- (match_operand:SI 1 "immediate_operand" ""))]
- "flag_pic && (GET_CODE (operands[1]) == SYMBOL_REF
- || GET_CODE (operands[1]) == CONST)"
- [(set (match_dup 0) (match_dup 1))]
- "
-{
- operands[1] = legitimize_pic_address (operands[1], Pmode, operands[0]);
-}")
-\f
-;; These split sne/seq insns. The forms of the resulting insns are
-;; somewhat bogus, but they avoid extra patterns and show data dependency.
-;; Nothing will look at these in detail after splitting has occurred.
-
-;; ??? v9 DImode versions are missing because addc and subc use %icc.
-
-(define_split
- [(set (match_operand:SI 0 "register_operand" "")
- (ne:SI (match_operand:SI 1 "register_operand" "")
- (const_int 0)))
- (clobber (reg:CC 100))]
- ""
- [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
- (const_int 0)))
- (set (match_dup 0) (ltu:SI (reg:CC 100) (const_int 0)))]
- "")
-
-(define_split
- [(set (match_operand:SI 0 "register_operand" "")
- (neg:SI (ne:SI (match_operand:SI 1 "register_operand" "")
- (const_int 0))))
- (clobber (reg:CC 100))]
- ""
- [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
- (const_int 0)))
- (set (match_dup 0) (neg:SI (ltu:SI (reg:CC 100) (const_int 0))))]
- "")
-
-(define_split
- [(set (match_operand:SI 0 "register_operand" "")
- (eq:SI (match_operand:SI 1 "register_operand" "")
- (const_int 0)))
- (clobber (reg:CC 100))]
- ""
- [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
- (const_int 0)))
- (set (match_dup 0) (geu:SI (reg:CC 100) (const_int 0)))]
- "")
-
-(define_split
- [(set (match_operand:SI 0 "register_operand" "")
- (neg:SI (eq:SI (match_operand:SI 1 "register_operand" "")
- (const_int 0))))
- (clobber (reg:CC 100))]
- ""
- [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
- (const_int 0)))
- (set (match_dup 0) (neg:SI (geu:SI (reg:CC 100) (const_int 0))))]
- "")
-
-(define_split
- [(set (match_operand:SI 0 "register_operand" "")
- (plus:SI (ne:SI (match_operand:SI 1 "register_operand" "")
- (const_int 0))
- (match_operand:SI 2 "register_operand" "")))
- (clobber (reg:CC 100))]
- ""
- [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
- (const_int 0)))
- (set (match_dup 0) (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
- (match_dup 2)))]
- "")
-
-(define_split
- [(set (match_operand:SI 0 "register_operand" "")
- (minus:SI (match_operand:SI 2 "register_operand" "")
- (ne:SI (match_operand:SI 1 "register_operand" "")
- (const_int 0))))
- (clobber (reg:CC 100))]
- ""
- [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
- (const_int 0)))
- (set (match_dup 0) (minus:SI (match_dup 2)
- (ltu:SI (reg:CC 100) (const_int 0))))]
- "")
+;; Disabled because none of the UltraSPARCs implement popc.  The HAL R1
+;; does, but no one uses that and we don't have a switch for it.
+;
+;(define_insn "ffsdi2"
+; [(set (match_operand:DI 0 "register_operand" "=&r")
+; (ffs:DI (match_operand:DI 1 "register_operand" "r")))
+; (clobber (match_scratch:DI 2 "=&r"))]
+; "TARGET_ARCH64"
+;  "neg %1,%2\;xnor %1,%2,%2\;popc %2,%0\;movrz %1,0,%0"
+; [(set_attr "type" "multi")
+; (set_attr "length" "4")])
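+;;
+;; For reference, the disabled sequence computes ffs as popc (~(x ^ -x)):
+;; e.g. x == 8 gives ~(x ^ -x) == 0xf, popc == 4 == ffs (8); the final
+;; movrz fixes up the x == 0 case to return 0.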
-(define_split
- [(set (match_operand:SI 0 "register_operand" "")
- (plus:SI (eq:SI (match_operand:SI 1 "register_operand" "")
- (const_int 0))
- (match_operand:SI 2 "register_operand" "")))
- (clobber (reg:CC 100))]
- ""
- [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
- (const_int 0)))
- (set (match_dup 0) (plus:SI (geu:SI (reg:CC 100) (const_int 0))
- (match_dup 2)))]
- "")
-(define_split
- [(set (match_operand:SI 0 "register_operand" "")
- (minus:SI (match_operand:SI 2 "register_operand" "")
- (eq:SI (match_operand:SI 1 "register_operand" "")
- (const_int 0))))
- (clobber (reg:CC 100))]
- ""
- [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
- (const_int 0)))
- (set (match_dup 0) (minus:SI (match_dup 2)
- (geu:SI (reg:CC 100) (const_int 0))))]
- "")
\f
;; Peepholes go at the end.
;; explained in the code for {registers,memory}_ok_for_ldd functions.
(define_peephole
+ [(set (match_operand:SI 0 "memory_operand" "")
+ (const_int 0))
+ (set (match_operand:SI 1 "memory_operand" "")
+ (const_int 0))]
+ "TARGET_V9
+ && ! MEM_VOLATILE_P (operands[0])
+ && ! MEM_VOLATILE_P (operands[1])
+ && addrs_ok_for_ldd_peep (XEXP (operands[0], 0), XEXP (operands[1], 0))"
+ "stx\\t%%g0, %0")
+
+(define_peephole
+ [(set (match_operand:SI 0 "memory_operand" "")
+ (const_int 0))
+ (set (match_operand:SI 1 "memory_operand" "")
+ (const_int 0))]
+ "TARGET_V9
+ && ! MEM_VOLATILE_P (operands[0])
+ && ! MEM_VOLATILE_P (operands[1])
+ && addrs_ok_for_ldd_peep (XEXP (operands[1], 0), XEXP (operands[0], 0))"
+ "stx\\t%%g0, %1")
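+;; For illustration (hypothetical addresses): "st %g0, [%fp-8]" followed by
+;; "st %g0, [%fp-4]" collapses into one "stx %g0, [%fp-8]" when the two
+;; SImode words form an aligned doubleword.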
+
+(define_peephole
[(set (match_operand:SI 0 "register_operand" "=rf")
(match_operand:SI 1 "memory_operand" ""))
(set (match_operand:SI 2 "register_operand" "=rf")
(match_operand:SI 3 "memory_operand" ""))]
- "! TARGET_ARCH64
- && registers_ok_for_ldd_peep (operands[0], operands[2])
- && ! MEM_VOLATILE_P (operands[1]) && ! MEM_VOLATILE_P (operands[3])
+ "registers_ok_for_ldd_peep (operands[0], operands[2])
+ && ! MEM_VOLATILE_P (operands[1])
+ && ! MEM_VOLATILE_P (operands[3])
&& addrs_ok_for_ldd_peep (XEXP (operands[1], 0), XEXP (operands[3], 0))"
- "ldd %1,%0")
+ "ldd\\t%1, %0")
(define_peephole
[(set (match_operand:SI 0 "memory_operand" "")
(match_operand:SI 1 "register_operand" "rf"))
(set (match_operand:SI 2 "memory_operand" "")
(match_operand:SI 3 "register_operand" "rf"))]
- "! TARGET_ARCH64
- && registers_ok_for_ldd_peep (operands[1], operands[3])
- && ! MEM_VOLATILE_P (operands[0]) && ! MEM_VOLATILE_P (operands[2])
+ "registers_ok_for_ldd_peep (operands[1], operands[3])
+ && ! MEM_VOLATILE_P (operands[0])
+ && ! MEM_VOLATILE_P (operands[2])
&& addrs_ok_for_ldd_peep (XEXP (operands[0], 0), XEXP (operands[2], 0))"
- "std %1,%0")
+ "std\\t%1, %0")
(define_peephole
[(set (match_operand:SF 0 "register_operand" "=fr")
(match_operand:SF 1 "memory_operand" ""))
(set (match_operand:SF 2 "register_operand" "=fr")
(match_operand:SF 3 "memory_operand" ""))]
- "! TARGET_ARCH64
- && registers_ok_for_ldd_peep (operands[0], operands[2])
- && ! MEM_VOLATILE_P (operands[1]) && ! MEM_VOLATILE_P (operands[3])
+ "registers_ok_for_ldd_peep (operands[0], operands[2])
+ && ! MEM_VOLATILE_P (operands[1])
+ && ! MEM_VOLATILE_P (operands[3])
&& addrs_ok_for_ldd_peep (XEXP (operands[1], 0), XEXP (operands[3], 0))"
- "ldd %1,%0")
+ "ldd\\t%1, %0")
(define_peephole
[(set (match_operand:SF 0 "memory_operand" "")
(match_operand:SF 1 "register_operand" "fr"))
(set (match_operand:SF 2 "memory_operand" "")
(match_operand:SF 3 "register_operand" "fr"))]
- "! TARGET_ARCH64
- && registers_ok_for_ldd_peep (operands[1], operands[3])
- && ! MEM_VOLATILE_P (operands[0]) && ! MEM_VOLATILE_P (operands[2])
- && addrs_ok_for_ldd_peep (XEXP (operands[0], 0), XEXP (operands[2], 0))"
- "std %1,%0")
+ "registers_ok_for_ldd_peep (operands[1], operands[3])
+ && ! MEM_VOLATILE_P (operands[0])
+ && ! MEM_VOLATILE_P (operands[2])
+ && addrs_ok_for_ldd_peep (XEXP (operands[0], 0), XEXP (operands[2], 0))"
+ "std\\t%1, %0")
(define_peephole
[(set (match_operand:SI 0 "register_operand" "=rf")
(match_operand:SI 1 "memory_operand" ""))
(set (match_operand:SI 2 "register_operand" "=rf")
(match_operand:SI 3 "memory_operand" ""))]
- "! TARGET_ARCH64
- && registers_ok_for_ldd_peep (operands[2], operands[0])
- && ! MEM_VOLATILE_P (operands[3]) && ! MEM_VOLATILE_P (operands[1])
- && addrs_ok_for_ldd_peep (XEXP (operands[3], 0), XEXP (operands[1], 0))"
- "ldd %3,%2")
+ "registers_ok_for_ldd_peep (operands[2], operands[0])
+ && ! MEM_VOLATILE_P (operands[3])
+ && ! MEM_VOLATILE_P (operands[1])
+ && addrs_ok_for_ldd_peep (XEXP (operands[3], 0), XEXP (operands[1], 0))"
+ "ldd\\t%3, %2")
(define_peephole
[(set (match_operand:SI 0 "memory_operand" "")
(match_operand:SI 1 "register_operand" "rf"))
(set (match_operand:SI 2 "memory_operand" "")
(match_operand:SI 3 "register_operand" "rf"))]
- "! TARGET_ARCH64
- && registers_ok_for_ldd_peep (operands[3], operands[1])
- && ! MEM_VOLATILE_P (operands[2]) && ! MEM_VOLATILE_P (operands[0])
- && addrs_ok_for_ldd_peep (XEXP (operands[2], 0), XEXP (operands[0], 0))"
- "std %3,%2")
+ "registers_ok_for_ldd_peep (operands[3], operands[1])
+ && ! MEM_VOLATILE_P (operands[2])
+ && ! MEM_VOLATILE_P (operands[0])
+ && addrs_ok_for_ldd_peep (XEXP (operands[2], 0), XEXP (operands[0], 0))"
+ "std\\t%3, %2")
(define_peephole
[(set (match_operand:SF 0 "register_operand" "=fr")
(match_operand:SF 1 "memory_operand" ""))
(set (match_operand:SF 2 "register_operand" "=fr")
(match_operand:SF 3 "memory_operand" ""))]
- "! TARGET_ARCH64
- && registers_ok_for_ldd_peep (operands[2], operands[0])
- && ! MEM_VOLATILE_P (operands[3]) && ! MEM_VOLATILE_P (operands[1])
- && addrs_ok_for_ldd_peep (XEXP (operands[3], 0), XEXP (operands[1], 0))"
- "ldd %3,%2")
+ "registers_ok_for_ldd_peep (operands[2], operands[0])
+ && ! MEM_VOLATILE_P (operands[3])
+ && ! MEM_VOLATILE_P (operands[1])
+ && addrs_ok_for_ldd_peep (XEXP (operands[3], 0), XEXP (operands[1], 0))"
+ "ldd\\t%3, %2")
(define_peephole
[(set (match_operand:SF 0 "memory_operand" "")
(match_operand:SF 1 "register_operand" "fr"))
(set (match_operand:SF 2 "memory_operand" "")
(match_operand:SF 3 "register_operand" "fr"))]
- "! TARGET_ARCH64
- && registers_ok_for_ldd_peep (operands[3], operands[1])
- && ! MEM_VOLATILE_P (operands[2]) && ! MEM_VOLATILE_P (operands[0])
- && addrs_ok_for_ldd_peep (XEXP (operands[2], 0), XEXP (operands[0], 0))"
- "std %3,%2")
+ "registers_ok_for_ldd_peep (operands[3], operands[1])
+ && ! MEM_VOLATILE_P (operands[2])
+ && ! MEM_VOLATILE_P (operands[0])
+ && addrs_ok_for_ldd_peep (XEXP (operands[2], 0), XEXP (operands[0], 0))"
+ "std\\t%3, %2")
;; Optimize the case of following a reg-reg move with a test
;; of the register just moved.  Don't allow floating point regs
;; for operand 0 or 1.
(const_int 0)))]
"(rtx_equal_p (operands[2], operands[0])
|| rtx_equal_p (operands[2], operands[1]))
- && ! FP_REG_P (operands[0]) && ! FP_REG_P (operands[1])"
- "orcc %1,0,%0")
+ && ! FP_REG_P (operands[0])
+ && ! FP_REG_P (operands[1])"
+ "orcc\\t%1, 0, %0")
(define_peephole
[(set (match_operand:DI 0 "register_operand" "=r")
"TARGET_ARCH64
&& (rtx_equal_p (operands[2], operands[0])
|| rtx_equal_p (operands[2], operands[1]))
- && ! FP_REG_P (operands[0]) && ! FP_REG_P (operands[1])"
- "orcc %1,0,%0")
-
-;; Do {sign,zero}-extended compares somewhat more efficiently.
-;; ??? Is this now the Right Way to do this? Or will SCRATCH
-;; eventually have some impact here?
-
-(define_peephole
- [(set (match_operand:HI 0 "register_operand" "")
- (match_operand:HI 1 "memory_operand" ""))
- (set (match_operand:SI 2 "register_operand" "")
- (sign_extend:SI (match_dup 0)))
- (set (reg:CC 100)
- (compare:CC (match_dup 2)
- (const_int 0)))]
- ""
- "ldsh %1,%0\;orcc %0,0,%2")
-
-(define_peephole
- [(set (match_operand:HI 0 "register_operand" "")
- (match_operand:HI 1 "memory_operand" ""))
- (set (match_operand:DI 2 "register_operand" "")
- (sign_extend:DI (match_dup 0)))
- (set (reg:CCX 100)
- (compare:CCX (match_dup 2)
- (const_int 0)))]
- "TARGET_ARCH64"
- "ldsh %1,%0\;orcc %0,0,%2")
-
-(define_peephole
- [(set (match_operand:QI 0 "register_operand" "")
- (match_operand:QI 1 "memory_operand" ""))
- (set (match_operand:SI 2 "register_operand" "")
- (sign_extend:SI (match_dup 0)))
- (set (reg:CC 100)
- (compare:CC (match_dup 2)
- (const_int 0)))]
- ""
- "ldsb %1,%0\;orcc %0,0,%2")
-
-(define_peephole
- [(set (match_operand:QI 0 "register_operand" "")
- (match_operand:QI 1 "memory_operand" ""))
- (set (match_operand:DI 2 "register_operand" "")
- (sign_extend:DI (match_dup 0)))
- (set (reg:CCX 100)
- (compare:CCX (match_dup 2)
- (const_int 0)))]
- "TARGET_ARCH64"
- "ldsb %1,%0\;orcc %0,0,%2")
-
-;; Floating-point move peepholes
-;; ??? v9: Do we want similar ones?
-
-(define_peephole
- [(set (match_operand:SI 0 "register_operand" "=r")
- (lo_sum:SI (match_dup 0)
- (match_operand:SI 1 "immediate_operand" "i")))
- (set (match_operand:DF 2 "register_operand" "=er")
- (mem:DF (match_dup 0)))]
- "RTX_UNCHANGING_P (operands[1]) && reg_unused_after (operands[0], insn)"
- "*
-{
- /* Go by way of output_move_double in case the register in operand 2
- is not properly aligned for ldd. */
- operands[1] = gen_rtx (MEM, DFmode,
- gen_rtx (LO_SUM, SImode, operands[0], operands[1]));
- operands[0] = operands[2];
- return output_move_double (operands);
-}")
-
-(define_peephole
- [(set (match_operand:SI 0 "register_operand" "=r")
- (lo_sum:SI (match_dup 0)
- (match_operand:SI 1 "immediate_operand" "i")))
- (set (match_operand:SF 2 "register_operand" "=fr")
- (mem:SF (match_dup 0)))]
- "RTX_UNCHANGING_P (operands[1]) && reg_unused_after (operands[0], insn)"
- "ld [%0+%%lo(%a1)],%2")
+ && ! FP_REG_P (operands[0])
+ && ! FP_REG_P (operands[1])"
+ "orcc\\t%1, 0, %0")
;; Return peepholes. First the "normal" ones.
;; These are necessary to catch insns ending up in the epilogue delay list.
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
- return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
+ return \"jmp\\t%%i7+12\\n\\trestore %%g0, %1, %Y0\";
+ else if (TARGET_V9 && (GET_CODE (operands[1]) == CONST_INT
+ || IN_OR_GLOBAL_P (operands[1])))
+ return \"return\\t%%i7+8\\n\\tmov\\t%Y1, %Y0\";
else
- return \"ret\;restore %%g0,%1,%Y0\";
+ return \"ret\\n\\trestore %%g0, %1, %Y0\";
}"
[(set_attr "type" "multi")])
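+
+;; A sketch of the new v9 case: RETURN restores the register window
+;; itself, and (as we read the v9 spec) its delay slot executes in the
+;; caller's window, so the source must be a constant, an IN register or
+;; a global (IN_OR_GLOBAL_P), with %Y renaming IN registers to the
+;; matching OUT registers.  Returning a value held in %i1 would come
+;; out roughly as
+;;	return	%i7+8
+;;	 mov	%o1, %o0	! %i1/%i0 as named after the restore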
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
- return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
+ return \"jmp\\t%%i7+12\\n\\trestore %%g0, %1, %Y0\";
+ else if (TARGET_V9 && (GET_CODE (operands[1]) == CONST_INT
+ || IN_OR_GLOBAL_P (operands[1])))
+ return \"return\\t%%i7+8\\n\\tmov\\t%Y1, %Y0\";
else
- return \"ret\;restore %%g0,%1,%Y0\";
+ return \"ret\;restore %%g0, %1, %Y0\";
}"
[(set_attr "type" "multi")])
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
- return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
+ return \"jmp\\t%%i7+12\\n\\trestore %%g0, %1, %Y0\";
+ else if (TARGET_V9 && (GET_CODE (operands[1]) == CONST_INT
+ || IN_OR_GLOBAL_P (operands[1])))
+ return \"return\\t%%i7+8\\n\\tmov\\t%Y1, %Y0\";
else
- return \"ret\;restore %%g0,%1,%Y0\";
+ return \"ret\;restore %%g0, %1, %Y0\";
}"
[(set_attr "type" "multi")])
;; The following pattern is only generated by delayed-branch scheduling,
-;; when the insn winds up in the epilogue. This can only happen when
-;; ! TARGET_FPU because otherwise fp return values are in %f0.
+;; when the insn winds up in the epilogue.  This can happen even when
+;; TARGET_FPU is set, because complex types are moved around by parts
+;; using SFmode SUBREGs, which may end up in integer registers.
(define_insn "*return_sf_no_fpu"
- [(set (match_operand:SF 0 "restore_operand" "r")
+ [(set (match_operand:SF 0 "restore_operand" "=r")
(match_operand:SF 1 "register_operand" "r"))
(return)]
- "! TARGET_FPU && ! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
- return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
+ return \"jmp\\t%%i7+12\\n\\trestore %%g0, %1, %Y0\";
+ else if (TARGET_V9 && IN_OR_GLOBAL_P (operands[1]))
+ return \"return\\t%%i7+8\\n\\tmov\\t%Y1, %Y0\";
else
- return \"ret\;restore %%g0,%1,%Y0\";
+ return \"ret\;restore %%g0, %1, %Y0\";
}"
[(set_attr "type" "multi")])
(define_insn "*return_addsi"
[(set (match_operand:SI 0 "restore_operand" "")
- (plus:SI (match_operand:SI 1 "arith_operand" "%r")
+ (plus:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "arith_operand" "rI")))
(return)]
"! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
- return \"jmp %%i7+12\;restore %r1,%2,%Y0\";
+ return \"jmp\\t%%i7+12\\n\\trestore %r1, %2, %Y0\";
+ /* If the operands are IN or GLOBAL registers (or constants), we can
+ use the v9 return instruction.  */
+ else if (TARGET_V9 && IN_OR_GLOBAL_P (operands[1])
+ && (GET_CODE (operands[2]) == CONST_INT
+ || IN_OR_GLOBAL_P (operands[2])))
+ return \"return\\t%%i7+8\\n\\tadd\\t%Y1, %Y2, %Y0\";
else
- return \"ret\;restore %r1,%2,%Y0\";
+ return \"ret\;restore %r1, %2, %Y0\";
}"
[(set_attr "type" "multi")])
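+
+;; E.g. (a sketch) returning x + 1 with x live in %i1:
+;;	ret
+;;	 restore %i1, 1, %o0
+;; the restore performs the add, writing the sum into the caller's %o0.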
(match_operand:DI 1 "arith_double_operand" "rHI"))
(return)]
"TARGET_ARCH64 && ! TARGET_EPILOGUE"
- "ret\;restore %%g0,%1,%Y0"
+ "ret\;restore %%g0, %1, %Y0"
[(set_attr "type" "multi")])
(define_insn "*return_adddi"
(match_operand:DI 2 "arith_double_operand" "rHI")))
(return)]
"TARGET_ARCH64 && ! TARGET_EPILOGUE"
- "ret\;restore %r1,%2,%Y0"
+ "ret\;restore %r1, %2, %Y0"
[(set_attr "type" "multi")])
-;; Turned off because it should never match (subtracting a constant
-;; is turned into addition) and because it would do the wrong thing
-;; when operand 2 is -4096 (--4096 == 4096 is not a valid immediate).
-;;(define_insn "*minus_const"
-;; [(set (match_operand:SI 0 "restore_operand" "")
-;; (minus:SI (match_operand:SI 1 "register_operand" "r")
-;; (match_operand:SI 2 "small_int" "I")))
-;; (return)]
-;; "! TARGET_EPILOGUE"
-;; "ret\;restore %1,-(%2),%Y0"
-;; [(set_attr "type" "multi")])
-
;; The following pattern is only generated by delayed-branch scheduling,
;; when the insn winds up in the epilogue.
(define_insn "*return_sf"
(match_operand:SF 0 "register_operand" "f"))
(return)]
"! TARGET_EPILOGUE"
- "ret\;fmovs %0,%%f0"
+ "ret\;fmovs\\t%0, %%f0"
[(set_attr "type" "multi")])
;; Now peepholes to do a call followed by a jump.
(match_operand 2 "" "")))
(clobber (reg:SI 15))])
(set (pc) (label_ref (match_operand 3 "" "")))]
- "short_branch (INSN_UID (insn), INSN_UID (operands[3]))"
- "call %a1,%2\;add %%o7,(%l3-.-4),%%o7")
+ "short_branch (INSN_UID (insn), INSN_UID (operands[3]))
+ && in_same_eh_region (insn, operands[3])
+ && in_same_eh_region (insn, ins1)"
+ "call\\t%a1, %2\\n\\tadd\\t%%o7, (%l3-.-4), %%o7")
(define_peephole
[(parallel [(call (mem:SI (match_operand:SI 0 "call_operand_address" "ps"))
(match_operand 1 "" ""))
(clobber (reg:SI 15))])
(set (pc) (label_ref (match_operand 2 "" "")))]
- "short_branch (INSN_UID (insn), INSN_UID (operands[2]))"
- "*
-{
- return \"call %a0,%1\;add %%o7,(%l2-.-4),%%o7\";
-}")
+ "short_branch (INSN_UID (insn), INSN_UID (operands[2]))
+ && in_same_eh_region (insn, operands[2])
+ && in_same_eh_region (insn, ins1)"
+ "call\\t%a0, %1\\n\\tadd\\t%%o7, (%l2-.-4), %%o7")
(define_peephole
[(parallel [(set (match_operand 0 "" "")
(match_operand 2 "" "")))
(clobber (reg:DI 15))])
(set (pc) (label_ref (match_operand 3 "" "")))]
- "TARGET_ARCH64 && short_branch (INSN_UID (insn), INSN_UID (operands[3]))"
- "call %a1,%2\;add %%o7,(%l3-.-4),%%o7")
+ "TARGET_ARCH64
+ && short_branch (INSN_UID (insn), INSN_UID (operands[3]))
+ && in_same_eh_region (insn, operands[3])
+ && in_same_eh_region (insn, ins1)"
+ "call\\t%a1, %2\\n\\tadd\\t%%o7, (%l3-.-4), %%o7")
(define_peephole
[(parallel [(call (mem:SI (match_operand:DI 0 "call_operand_address" "ps"))
(match_operand 1 "" ""))
(clobber (reg:DI 15))])
(set (pc) (label_ref (match_operand 2 "" "")))]
- "TARGET_ARCH64 && short_branch (INSN_UID (insn), INSN_UID (operands[2]))"
- "call %a0,%1\;add %%o7,(%l2-.-4),%%o7")
+ "TARGET_ARCH64
+ && short_branch (INSN_UID (insn), INSN_UID (operands[2]))
+ && in_same_eh_region (insn, operands[2])
+ && in_same_eh_region (insn, ins1)"
+ "call\\t%a0, %1\\n\\tadd\\t%%o7, (%l2-.-4), %%o7")
+
+;; After a nonlocal goto, we need to restore the PIC register, but only
+;; if we actually use it.  So emit nothing here; finalize_pic looks for
+;; this insn and supplies the restore where required.
+
+;; Make sure this unspec_volatile number agrees with finalize_pic.
+(define_insn "nonlocal_goto_receiver"
+ [(unspec_volatile [(const_int 0)] 5)]
+ "flag_pic"
+ ""
+ [(set_attr "length" "0")])
+\f
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 5))]
+ ""
+ "ta\\t5"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
-;; Other miscellaneous peepholes.
+(define_expand "conditional_trap"
+ [(trap_if (match_operator 0 "noov_compare_op"
+ [(match_dup 2) (match_dup 3)])
+ (match_operand:SI 1 "arith_operand" ""))]
+ ""
+ "operands[2] = gen_compare_reg (GET_CODE (operands[0]),
+ sparc_compare_op0, sparc_compare_op1);
+ operands[3] = const0_rtx;")
-;; (reg:SI 100) is created by the {add,neg,sub}di patterns.
-(define_peephole
- [(parallel [(set (match_operand:SI 0 "register_operand" "=r")
- (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
- (reg:SI 100)))
- (clobber (reg:CC 100))])
- (set (reg:CC 100) (compare (match_dup 0) (const_int 0)))]
+(define_insn ""
+ [(trap_if (match_operator 0 "noov_compare_op" [(reg:CC 100) (const_int 0)])
+ (match_operand:SI 1 "arith_operand" "rM"))]
""
- "subxcc %r1,0,%0")
+ "t%C0\\t%1"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])
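+
+;; E.g. (a sketch) a conditional trap on %o0 == 0 expands via
+;; conditional_trap into a compare followed by this pattern:
+;;	cmp	%o0, 0
+;;	te	5
+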
+
+(define_insn ""
+ [(trap_if (match_operator 0 "noov_compare_op" [(reg:CCX 100) (const_int 0)])
+ (match_operand:SI 1 "arith_operand" "rM"))]
+ "TARGET_V9"
+ "t%C0\\t%%xcc, %1"
+ [(set_attr "type" "misc")
+ (set_attr "length" "1")])