X-Git-Url: http://git.sourceforge.jp/view?p=pf3gnuchains%2Fgcc-fork.git;a=blobdiff_plain;f=gcc%2Fconfig%2Fsparc%2Fsparc.md;h=f45612e5d09e99e70ffc8ffd281bcc7785432f73;hp=008a774e4904d094f1e63d88a0c6c16664254b76;hb=b191fa18199f27a62c1756c67e51a067bd1d6e32;hpb=b9a45ac3a6e36369785222a46ab607eb639f972b diff --git a/gcc/config/sparc/sparc.md b/gcc/config/sparc/sparc.md index 008a774e490..f45612e5d09 100644 --- a/gcc/config/sparc/sparc.md +++ b/gcc/config/sparc/sparc.md @@ -1,5 +1,5 @@ ;;- Machine description for SPARC chip for GNU C compiler -;; Copyright (C) 1987, 88, 89, 92-95, 1996 Free Software Foundation, Inc. +;; Copyright (C) 1987, 88, 89, 92-96, 1997 Free Software Foundation, Inc. ;; Contributed by Michael Tiemann (tiemann@cygnus.com) ;; 64 bit SPARC V9 support by Michael Tiemann, Jim Wilson, and Doug Evans, ;; at Cygnus Support. @@ -28,19 +28,23 @@ ;; is a bit of a misnomer as it covers all 64 fp regs. The corresponding ;; constraint letter is 'e'. To avoid any confusion, 'e' is used instead of ;; 'f' for all DF/TFmode values, including those that are specific to the v8. +;; +;; -mlive-g0 is *not* supported for TARGET_ARCH64, so we don't bother to +;; test TARGET_LIVE_G0 if we have TARGET_ARCH64. ;; Attribute for cpu type. -;; These must match those in sparc.h. -(define_attr "cpu" "common,cypress,v8,supersparc,sparclite,sparclet,v9,ultrasparc" - (const (symbol_ref "sparc_cpu"))) +;; These must match the values for enum processor_type in sparc.h. +(define_attr "cpu" "v7,cypress,v8,supersparc,sparclite,f930,f934,sparclet,tsc701,v8plus,v9,ultrasparc" + (const (symbol_ref "sparc_cpu_attr"))) ;; Attribute for the instruction set. ;; At present we only need to distinguish v9/!v9, but for clarity we ;; test TARGET_V8 too. -(define_attr "isa" "v6,v8,v9" +(define_attr "isa" "v6,v8,v9,sparclet" (const (cond [(symbol_ref "TARGET_V9") (const_string "v9") - (symbol_ref "TARGET_V8") (const_string "v8")] + (symbol_ref "TARGET_V8") (const_string "v8") + (symbol_ref "TARGET_SPARCLET") (const_string "sparclet")] (const_string "v6")))) ;; Architecture size. @@ -49,6 +53,12 @@ (cond [(symbol_ref "TARGET_ARCH64") (const_string "arch64bit")] (const_string "arch32bit")))) +;; Whether -mlive-g0 is in effect. +(define_attr "live_g0" "no,yes" + (const + (cond [(symbol_ref "TARGET_LIVE_G0") (const_string "yes")] + (const_string "no")))) + ;; Insn type. Used to default other attribute values. ;; type "unary" insns have one input operand (1) and one output operand (0) @@ -245,6 +255,60 @@ (and (eq_attr "type" "fpsqrt") (eq_attr "cpu" "supersparc")) 36 30) (define_function_unit "fp_mds" 1 0 (and (eq_attr "type" "imul") (eq_attr "cpu" "supersparc")) 12 12) + +;; ----- sparclet tsc701 scheduling +;; The tsc701 issues 1 insn per cycle. +;; Results may be written back out of order. + +;; Loads take 2 extra cycles to complete and 4 can be buffered at a time. +(define_function_unit "tsc701_load" 4 1 + (and (eq_attr "type" "load") (eq_attr "cpu" "tsc701")) 3 1) +;; Stores take 2(?) extra cycles to complete. +;; It is desirable to not have any memory operation in the following 2 cycles. +;; (??? or 2 memory ops in the case of std). +(define_function_unit "tsc701_store" 1 0 + (and (eq_attr "type" "store") (eq_attr "cpu" "tsc701")) 3 3 + [(eq_attr "type" "load,store")]) +;; The multiply unit has a latency of 5. 
+(define_function_unit "tsc701_mul" 1 0 + (and (eq_attr "type" "imul") (eq_attr "cpu" "tsc701")) 5 5) + +;; ----- The UltraSPARC-1 scheduling +;; The Ultrasparc can issue 1 - 4 insns per cycle; here we assume +;; four insns/cycle, and hence multiply all costs by four. + +;; Memory delivers its result in three cycles to IU, three cycles to FP +(define_function_unit "memory" 1 0 + (and (eq_attr "type" "load,fpload") (eq_attr "cpu" "ultrasparc")) 12 4) +(define_function_unit "memory" 1 0 + (and (eq_attr "type" "store,fpstore") (eq_attr "cpu" "ultrasparc")) 4 4) +(define_function_unit "ieu" 1 0 + (and (eq_attr "type" "ialu") (eq_attr "cpu" "ultrasparc")) 1 2) +(define_function_unit "ieu" 1 0 + (and (eq_attr "type" "shift") (eq_attr "cpu" "ultrasparc")) 1 4) +(define_function_unit "ieu" 1 0 + (and (eq_attr "type" "cmove") (eq_attr "cpu" "ultrasparc")) 8 4) + +;; Timings; throughput/latency +;; ?? FADD 1/3 add/sub, format conv, compar, abs, neg +;; ?? FMUL 1/3 +;; ?? FDIVs 1/12 +;; ?? FDIVd 1/22 +;; ?? FSQRTs 1/12 +;; ?? FSQRTd 1/22 + +(define_function_unit "fp" 1 0 + (and (eq_attr "type" "fp") (eq_attr "cpu" "ultrasparc")) 12 2) +(define_function_unit "fp" 1 0 + (and (eq_attr "type" "fpcmp") (eq_attr "cpu" "ultrasparc")) 8 2) +(define_function_unit "fp" 1 0 + (and (eq_attr "type" "fpmul") (eq_attr "cpu" "ultrasparc")) 12 2) +(define_function_unit "fp" 1 0 + (and (eq_attr "type" "fpdivs") (eq_attr "cpu" "ultrasparc")) 48 2) +(define_function_unit "fp" 1 0 + (and (eq_attr "type" "fpdivd") (eq_attr "cpu" "ultrasparc")) 88 2) +(define_function_unit "fp" 1 0 + (and (eq_attr "type" "fpsqrt") (eq_attr "cpu" "ultrasparc")) 48 2) ;; Compare instructions. ;; This controls RTL generation and register allocation. @@ -265,7 +329,7 @@ ;; Put cmpsi first among compare insns so it matches two CONST_INT operands. (define_expand "cmpsi" - [(set (reg:CC 0) + [(set (reg:CC 100) (compare:CC (match_operand:SI 0 "register_operand" "") (match_operand:SI 1 "arith_operand" "")))] "" @@ -277,7 +341,7 @@ }") (define_expand "cmpdi" - [(set (reg:CCX 0) + [(set (reg:CCX 100) (compare:CCX (match_operand:DI 0 "register_operand" "") (match_operand:DI 1 "arith_double_operand" "")))] "TARGET_ARCH64" @@ -289,7 +353,8 @@ }") (define_expand "cmpsf" - [(set (reg:CCFP 0) + ;; The 96 here isn't ever used by anyone. + [(set (reg:CCFP 96) (compare:CCFP (match_operand:SF 0 "register_operand" "") (match_operand:SF 1 "register_operand" "")))] "TARGET_FPU" @@ -301,7 +366,8 @@ }") (define_expand "cmpdf" - [(set (reg:CCFP 0) + ;; The 96 here isn't ever used by anyone. + [(set (reg:CCFP 96) (compare:CCFP (match_operand:DF 0 "register_operand" "") (match_operand:DF 1 "register_operand" "")))] "TARGET_FPU" @@ -313,7 +379,8 @@ }") (define_expand "cmptf" - [(set (reg:CCFP 0) + ;; The 96 here isn't ever used by anyone. + [(set (reg:CCFP 96) (compare:CCFP (match_operand:TF 0 "register_operand" "") (match_operand:TF 1 "register_operand" "")))] "TARGET_FPU" @@ -324,6 +391,102 @@ DONE; }") +;; Now the compare DEFINE_INSNs. 
+ +(define_insn "*cmpsi_insn" + [(set (reg:CC 100) + (compare:CC (match_operand:SI 0 "register_operand" "r") + (match_operand:SI 1 "arith_operand" "rI")))] + "" + "cmp %0,%1" + [(set_attr "type" "compare")]) + +(define_insn "*cmpdi_sp64" + [(set (reg:CCX 100) + (compare:CCX (match_operand:DI 0 "register_operand" "r") + (match_operand:DI 1 "arith_double_operand" "rHI")))] + "TARGET_ARCH64" + "cmp %0,%1" + [(set_attr "type" "compare")]) + +(define_insn "*cmpsf_fpe" + [(set (match_operand:CCFPE 0 "fcc_reg_operand" "=c") + (compare:CCFPE (match_operand:SF 1 "register_operand" "f") + (match_operand:SF 2 "register_operand" "f")))] + "TARGET_FPU" + "* +{ + if (TARGET_V9) + return \"fcmpes %0,%1,%2\"; + return \"fcmpes %1,%2\"; +}" + [(set_attr "type" "fpcmp")]) + +(define_insn "*cmpdf_fpe" + [(set (match_operand:CCFPE 0 "fcc_reg_operand" "=c") + (compare:CCFPE (match_operand:DF 1 "register_operand" "e") + (match_operand:DF 2 "register_operand" "e")))] + "TARGET_FPU" + "* +{ + if (TARGET_V9) + return \"fcmped %0,%1,%2\"; + return \"fcmped %1,%2\"; +}" + [(set_attr "type" "fpcmp")]) + +(define_insn "*cmptf_fpe" + [(set (match_operand:CCFPE 0 "fcc_reg_operand" "=c") + (compare:CCFPE (match_operand:TF 1 "register_operand" "e") + (match_operand:TF 2 "register_operand" "e")))] + "TARGET_FPU && TARGET_HARD_QUAD" + "* +{ + if (TARGET_V9) + return \"fcmpeq %0,%1,%2\"; + return \"fcmpeq %1,%2\"; +}" + [(set_attr "type" "fpcmp")]) + +(define_insn "*cmpsf_fp" + [(set (match_operand:CCFP 0 "fcc_reg_operand" "=c") + (compare:CCFP (match_operand:SF 1 "register_operand" "f") + (match_operand:SF 2 "register_operand" "f")))] + "TARGET_FPU" + "* +{ + if (TARGET_V9) + return \"fcmps %0,%1,%2\"; + return \"fcmps %1,%2\"; +}" + [(set_attr "type" "fpcmp")]) + +(define_insn "*cmpdf_fp" + [(set (match_operand:CCFP 0 "fcc_reg_operand" "=c") + (compare:CCFP (match_operand:DF 1 "register_operand" "e") + (match_operand:DF 2 "register_operand" "e")))] + "TARGET_FPU" + "* +{ + if (TARGET_V9) + return \"fcmpd %0,%1,%2\"; + return \"fcmpd %1,%2\"; +}" + [(set_attr "type" "fpcmp")]) + +(define_insn "*cmptf_fp" + [(set (match_operand:CCFP 0 "fcc_reg_operand" "=c") + (compare:CCFP (match_operand:TF 1 "register_operand" "e") + (match_operand:TF 2 "register_operand" "e")))] + "TARGET_FPU && TARGET_HARD_QUAD" + "* +{ + if (TARGET_V9) + return \"fcmpq %0,%1,%2\"; + return \"fcmpq %1,%2\"; +}" + [(set_attr "type" "fpcmp")]) + ;; Next come the scc insns. For seq, sne, sgeu, and sltu, we can do this ;; without jumps using the addx/subx instructions. For seq/sne on v9 we use ;; the same code as v8 (the addx/subx method has more applications). The @@ -340,17 +503,16 @@ (match_operand:SI 2 "register_operand" ""))) (parallel [(set (match_operand:SI 0 "register_operand" "") (eq:SI (match_dup 3) (const_int 0))) - (clobber (reg:CC 0))])] - "" + (clobber (reg:CC 100))])] + "! 
TARGET_LIVE_G0" "{ operands[3] = gen_reg_rtx (SImode); }") (define_expand "seqdi_special" [(set (match_dup 3) (xor:DI (match_operand:DI 1 "register_operand" "") (match_operand:DI 2 "register_operand" ""))) - (parallel [(set (match_operand:DI 0 "register_operand" "") - (eq:DI (match_dup 3) (const_int 0))) - (clobber (reg:CCX 0))])] + (set (match_operand:DI 0 "register_operand" "") + (eq:DI (match_dup 3) (const_int 0)))] "TARGET_ARCH64" "{ operands[3] = gen_reg_rtx (DImode); }") @@ -360,17 +522,16 @@ (match_operand:SI 2 "register_operand" ""))) (parallel [(set (match_operand:SI 0 "register_operand" "") (ne:SI (match_dup 3) (const_int 0))) - (clobber (reg:CC 0))])] - "" + (clobber (reg:CC 100))])] + "! TARGET_LIVE_G0" "{ operands[3] = gen_reg_rtx (SImode); }") (define_expand "snedi_special" [(set (match_dup 3) (xor:DI (match_operand:DI 1 "register_operand" "") (match_operand:DI 2 "register_operand" ""))) - (parallel [(set (match_operand:DI 0 "register_operand" "") - (ne:DI (match_dup 3) (const_int 0))) - (clobber (reg:CCX 0))])] + (set (match_operand:DI 0 "register_operand" "") + (ne:DI (match_dup 3) (const_int 0)))] "TARGET_ARCH64" "{ operands[3] = gen_reg_rtx (DImode); }") @@ -378,9 +539,8 @@ [(set (match_dup 3) (xor:DI (match_operand:DI 1 "register_operand" "") (match_operand:DI 2 "register_operand" ""))) - (parallel [(set (match_operand:SI 0 "register_operand" "") - (eq:DI (match_dup 3) (const_int 0))) - (clobber (reg:CCX 0))])] + (set (match_operand:SI 0 "register_operand" "") + (eq:DI (match_dup 3) (const_int 0)))] "TARGET_ARCH64" "{ operands[3] = gen_reg_rtx (DImode); }") @@ -388,9 +548,8 @@ [(set (match_dup 3) (xor:DI (match_operand:DI 1 "register_operand" "") (match_operand:DI 2 "register_operand" ""))) - (parallel [(set (match_operand:SI 0 "register_operand" "") - (ne:DI (match_dup 3) (const_int 0))) - (clobber (reg:CCX 0))])] + (set (match_operand:SI 0 "register_operand" "") + (ne:DI (match_dup 3) (const_int 0)))] "TARGET_ARCH64" "{ operands[3] = gen_reg_rtx (DImode); }") @@ -400,7 +559,7 @@ (match_operand:SI 2 "register_operand" ""))) (parallel [(set (match_operand:DI 0 "register_operand" "") (eq:SI (match_dup 3) (const_int 0))) - (clobber (reg:CC 0))])] + (clobber (reg:CC 100))])] "TARGET_ARCH64" "{ operands[3] = gen_reg_rtx (SImode); }") @@ -410,7 +569,7 @@ (match_operand:SI 2 "register_operand" ""))) (parallel [(set (match_operand:DI 0 "register_operand" "") (ne:SI (match_dup 3) (const_int 0))) - (clobber (reg:CC 0))])] + (clobber (reg:CC 100))])] "TARGET_ARCH64" "{ operands[3] = gen_reg_rtx (SImode); }") @@ -419,7 +578,7 @@ (define_expand "seq" [(set (match_operand:SI 0 "intreg_operand" "") (eq:SI (match_dup 1) (const_int 0)))] - "" + "! TARGET_LIVE_G0" " { if (GET_MODE (sparc_compare_op0) == SImode) @@ -472,7 +631,7 @@ (define_expand "sne" [(set (match_operand:SI 0 "intreg_operand" "") (ne:SI (match_dup 1) (const_int 0)))] - "" + "! TARGET_LIVE_G0" " { if (GET_MODE (sparc_compare_op0) == SImode) @@ -523,7 +682,7 @@ (define_expand "sgt" [(set (match_operand:SI 0 "intreg_operand" "") (gt:SI (match_dup 1) (const_int 0)))] - "" + "! TARGET_LIVE_G0" " { if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD) @@ -544,7 +703,7 @@ (define_expand "slt" [(set (match_operand:SI 0 "intreg_operand" "") (lt:SI (match_dup 1) (const_int 0)))] - "" + "! TARGET_LIVE_G0" " { if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD) @@ -565,7 +724,7 @@ (define_expand "sge" [(set (match_operand:SI 0 "intreg_operand" "") (ge:SI (match_dup 1) (const_int 0)))] - "" + "! 
TARGET_LIVE_G0" " { if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD) @@ -586,7 +745,7 @@ (define_expand "sle" [(set (match_operand:SI 0 "intreg_operand" "") (le:SI (match_dup 1) (const_int 0)))] - "" + "! TARGET_LIVE_G0" " { if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD) @@ -607,7 +766,7 @@ (define_expand "sgtu" [(set (match_operand:SI 0 "intreg_operand" "") (gtu:SI (match_dup 1) (const_int 0)))] - "" + "! TARGET_LIVE_G0" " { if (! TARGET_V9) @@ -639,7 +798,7 @@ (define_expand "sltu" [(set (match_operand:SI 0 "intreg_operand" "") (ltu:SI (match_dup 1) (const_int 0)))] - "" + "! TARGET_LIVE_G0" " { if (TARGET_V9) @@ -653,7 +812,7 @@ (define_expand "sgeu" [(set (match_operand:SI 0 "intreg_operand" "") (geu:SI (match_dup 1) (const_int 0)))] - "" + "! TARGET_LIVE_G0" " { if (TARGET_V9) @@ -667,7 +826,7 @@ (define_expand "sleu" [(set (match_operand:SI 0 "intreg_operand" "") (leu:SI (match_dup 1) (const_int 0)))] - "" + "! TARGET_LIVE_G0" " { if (! TARGET_V9) @@ -696,119 +855,7 @@ operands[1] = gen_compare_reg (LEU, sparc_compare_op0, sparc_compare_op1); }") -;; Now the DEFINE_INSNs for the compare and scc cases. First the compares. - -(define_insn "*cmpsi_insn" - [(set (reg:CC 0) - (compare:CC (match_operand:SI 0 "register_operand" "r") - (match_operand:SI 1 "arith_operand" "rI")))] - "" - "cmp %r0,%1" - [(set_attr "type" "compare")]) - -(define_insn "*cmpsf_fpe_sp32" - [(set (reg:CCFPE 0) - (compare:CCFPE (match_operand:SF 0 "register_operand" "f") - (match_operand:SF 1 "register_operand" "f")))] - "! TARGET_V9 && TARGET_FPU" - "fcmpes %0,%1" - [(set_attr "type" "fpcmp")]) - -(define_insn "*cmpdf_fpe_sp32" - [(set (reg:CCFPE 0) - (compare:CCFPE (match_operand:DF 0 "register_operand" "e") - (match_operand:DF 1 "register_operand" "e")))] - "! TARGET_V9 && TARGET_FPU" - "fcmped %0,%1" - [(set_attr "type" "fpcmp")]) - -(define_insn "*cmptf_fpe_sp32" - [(set (reg:CCFPE 0) - (compare:CCFPE (match_operand:TF 0 "register_operand" "e") - (match_operand:TF 1 "register_operand" "e")))] - "! TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD" - "fcmpeq %0,%1" - [(set_attr "type" "fpcmp")]) - -(define_insn "*cmpsf_fp_sp32" - [(set (reg:CCFP 0) - (compare:CCFP (match_operand:SF 0 "register_operand" "f") - (match_operand:SF 1 "register_operand" "f")))] - "! TARGET_V9 && TARGET_FPU" - "fcmps %0,%1" - [(set_attr "type" "fpcmp")]) - -(define_insn "*cmpdf_fp_sp32" - [(set (reg:CCFP 0) - (compare:CCFP (match_operand:DF 0 "register_operand" "e") - (match_operand:DF 1 "register_operand" "e")))] - "! TARGET_V9 && TARGET_FPU" - "fcmpd %0,%1" - [(set_attr "type" "fpcmp")]) - -(define_insn "*cmptf_fp_sp32" - [(set (reg:CCFP 0) - (compare:CCFP (match_operand:TF 0 "register_operand" "e") - (match_operand:TF 1 "register_operand" "e")))] - "! 
TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD" - "fcmpq %0,%1" - [(set_attr "type" "fpcmp")]) - -(define_insn "*cmpdi_sp64" - [(set (reg:CCX 0) - (compare:CCX (match_operand:DI 0 "register_operand" "r") - (match_operand:DI 1 "arith_double_operand" "rHI")))] - "TARGET_ARCH64" - "cmp %r0,%1" - [(set_attr "type" "compare")]) - -(define_insn "*cmpsf_fpe_sp64" - [(set (match_operand:CCFPE 0 "ccfp_reg_operand" "=c") - (compare:CCFPE (match_operand:SF 1 "register_operand" "f") - (match_operand:SF 2 "register_operand" "f")))] - "TARGET_V9 && TARGET_FPU" - "fcmpes %0,%1,%2" - [(set_attr "type" "fpcmp")]) - -(define_insn "*cmpdf_fpe_sp64" - [(set (match_operand:CCFPE 0 "ccfp_reg_operand" "=c") - (compare:CCFPE (match_operand:DF 1 "register_operand" "e") - (match_operand:DF 2 "register_operand" "e")))] - "TARGET_V9 && TARGET_FPU" - "fcmped %0,%1,%2" - [(set_attr "type" "fpcmp")]) - -(define_insn "*cmptf_fpe_sp64" - [(set (match_operand:CCFPE 0 "ccfp_reg_operand" "=c") - (compare:CCFPE (match_operand:TF 1 "register_operand" "e") - (match_operand:TF 2 "register_operand" "e")))] - "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD" - "fcmpeq %0,%1,%2" - [(set_attr "type" "fpcmp")]) - -(define_insn "*cmpsf_fp_sp64" - [(set (match_operand:CCFP 0 "ccfp_reg_operand" "=c") - (compare:CCFP (match_operand:SF 1 "register_operand" "f") - (match_operand:SF 2 "register_operand" "f")))] - "TARGET_V9 && TARGET_FPU" - "fcmps %0,%1,%2" - [(set_attr "type" "fpcmp")]) - -(define_insn "*cmpdf_fp_sp64" - [(set (match_operand:CCFP 0 "ccfp_reg_operand" "=c") - (compare:CCFP (match_operand:DF 1 "register_operand" "e") - (match_operand:DF 2 "register_operand" "e")))] - "TARGET_V9 && TARGET_FPU" - "fcmpd %0,%1,%2" - [(set_attr "type" "fpcmp")]) - -(define_insn "*cmptf_fp_sp64" - [(set (match_operand:CCFP 0 "ccfp_reg_operand" "=c") - (compare:CCFP (match_operand:TF 1 "register_operand" "e") - (match_operand:TF 2 "register_operand" "e")))] - "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD" - "fcmpq %0,%1,%2" - [(set_attr "type" "fpcmp")]) +;; Now the DEFINE_INSNs for the scc cases. ;; The SEQ and SNE patterns are special because they can be done ;; without any branching and do not involve a COMPARE. @@ -817,8 +864,8 @@ [(set (match_operand:SI 0 "register_operand" "=r") (ne:SI (match_operand:SI 1 "register_operand" "r") (const_int 0))) - (clobber (reg:CC 0))] - "" + (clobber (reg:CC 100))] + "! TARGET_LIVE_G0" "subcc %%g0,%1,%%g0\;addx %%g0,0,%0" [(set_attr "type" "unary") (set_attr "length" "2")]) @@ -827,8 +874,8 @@ [(set (match_operand:SI 0 "register_operand" "=r") (neg:SI (ne:SI (match_operand:SI 1 "register_operand" "r") (const_int 0)))) - (clobber (reg:CC 0))] - "" + (clobber (reg:CC 100))] + "! 
TARGET_LIVE_G0" "subcc %%g0,%1,%%g0\;subx %%g0,0,%0" [(set_attr "type" "unary") (set_attr "length" "2")]) @@ -837,47 +884,34 @@ [(set (match_operand:DI 0 "register_operand" "=r") (ne:SI (match_operand:SI 1 "register_operand" "r") (const_int 0))) - (clobber (reg:CC 0))] + (clobber (reg:CC 100))] "TARGET_ARCH64" "subcc %%g0,%1,%%g0\;addx %%g0,0,%0" [(set_attr "type" "unary") (set_attr "length" "2")]) (define_insn "*snedi_zero" - [(set (match_operand:DI 0 "register_operand" "=r") + [(set (match_operand:DI 0 "register_operand" "=&r") (ne:DI (match_operand:DI 1 "register_operand" "r") - (const_int 0))) - (clobber (reg:CCX 0))] + (const_int 0)))] "TARGET_ARCH64" "mov 0,%0\;movrnz %1,1,%0" [(set_attr "type" "unary") (set_attr "length" "2")]) (define_insn "*neg_snedi_zero" - [(set (match_operand:DI 0 "register_operand" "=r") + [(set (match_operand:DI 0 "register_operand" "=&r") (neg:DI (ne:DI (match_operand:DI 1 "register_operand" "r") - (const_int 0)))) - (clobber (reg:CCX 0))] + (const_int 0))))] "TARGET_ARCH64" "mov 0,%0\;movrnz %1,-1,%0" [(set_attr "type" "unary") (set_attr "length" "2")]) -(define_insn "*snedi_zero_trunc_sp32" - [(set (match_operand:SI 0 "register_operand" "=r") - (ne:DI (match_operand:DI 1 "register_operand" "r") - (const_int 0))) - (clobber (reg:CCX 0))] - "! TARGET_ARCH64" - "xor %1,%R1,%0\;subcc %%g0,%0,%%g0\;addx %%g0,0,%0" - [(set_attr "type" "unary") - (set_attr "length" "3")]) - -(define_insn "*snedi_zero_trunc_sp64" - [(set (match_operand:SI 0 "register_operand" "=r") +(define_insn "*snedi_zero_trunc" + [(set (match_operand:SI 0 "register_operand" "=&r") (ne:DI (match_operand:DI 1 "register_operand" "r") - (const_int 0))) - (clobber (reg:CCX 0))] + (const_int 0)))] "TARGET_ARCH64" "mov 0,%0\;movrnz %1,1,%0" [(set_attr "type" "unary") @@ -887,8 +921,8 @@ [(set (match_operand:SI 0 "register_operand" "=r") (eq:SI (match_operand:SI 1 "register_operand" "r") (const_int 0))) - (clobber (reg:CC 0))] - "" + (clobber (reg:CC 100))] + "! TARGET_LIVE_G0" "subcc %%g0,%1,%%g0\;subx %%g0,-1,%0" [(set_attr "type" "unary") (set_attr "length" "2")]) @@ -897,8 +931,8 @@ [(set (match_operand:SI 0 "register_operand" "=r") (neg:SI (eq:SI (match_operand:SI 1 "register_operand" "r") (const_int 0)))) - (clobber (reg:CC 0))] - "" + (clobber (reg:CC 100))] + "! 
TARGET_LIVE_G0" "subcc %%g0,%1,%%g0\;addx %%g0,-1,%0" [(set_attr "type" "unary") (set_attr "length" "2")]) @@ -907,47 +941,34 @@ [(set (match_operand:DI 0 "register_operand" "=r") (eq:SI (match_operand:SI 1 "register_operand" "r") (const_int 0))) - (clobber (reg:CC 0))] + (clobber (reg:CC 100))] "TARGET_ARCH64" "subcc %%g0,%1,%%g0\;subx %%g0,-1,%0" [(set_attr "type" "unary") (set_attr "length" "2")]) (define_insn "*seqdi_zero" - [(set (match_operand:DI 0 "register_operand" "=r") + [(set (match_operand:DI 0 "register_operand" "=&r") (eq:DI (match_operand:DI 1 "register_operand" "r") - (const_int 0))) - (clobber (reg:CCX 0))] + (const_int 0)))] "TARGET_ARCH64" "mov 0,%0\;movrz %1,1,%0" [(set_attr "type" "unary") (set_attr "length" "2")]) (define_insn "*neg_seqdi_zero" - [(set (match_operand:DI 0 "register_operand" "=r") + [(set (match_operand:DI 0 "register_operand" "=&r") (neg:DI (eq:DI (match_operand:DI 1 "register_operand" "r") - (const_int 0)))) - (clobber (reg:CCX 0))] + (const_int 0))))] "TARGET_ARCH64" "mov 0,%0\;movrz %1,-1,%0" [(set_attr "type" "unary") (set_attr "length" "2")]) -(define_insn "*seqdi_zero_trunc_sp32" - [(set (match_operand:SI 0 "register_operand" "=r") - (eq:DI (match_operand:DI 1 "register_operand" "r") - (const_int 0))) - (clobber (reg:CCX 0))] - "! TARGET_ARCH64" - "xor %1,%R1,%0\;subcc %%g0,%0,%%g0\;subx %%g0,-1,%0" - [(set_attr "type" "unary") - (set_attr "length" "3")]) - -(define_insn "*seqdi_zero_trunc_sp64" - [(set (match_operand:SI 0 "register_operand" "=r") +(define_insn "*seqdi_zero_trunc" + [(set (match_operand:SI 0 "register_operand" "=&r") (eq:DI (match_operand:DI 1 "register_operand" "r") - (const_int 0))) - (clobber (reg:CCX 0))] + (const_int 0)))] "TARGET_ARCH64" "mov 0,%0\;movrz %1,1,%0" [(set_attr "type" "unary") @@ -962,8 +983,8 @@ (plus:SI (ne:SI (match_operand:SI 1 "register_operand" "r") (const_int 0)) (match_operand:SI 2 "register_operand" "r"))) - (clobber (reg:CC 0))] - "" + (clobber (reg:CC 100))] + "! TARGET_LIVE_G0" "subcc %%g0,%1,%%g0\;addx %2,0,%0" [(set_attr "length" "2")]) @@ -972,8 +993,8 @@ (minus:SI (match_operand:SI 2 "register_operand" "r") (ne:SI (match_operand:SI 1 "register_operand" "r") (const_int 0)))) - (clobber (reg:CC 0))] - "" + (clobber (reg:CC 100))] + "! TARGET_LIVE_G0" "subcc %%g0,%1,%%g0\;subx %2,0,%0" [(set_attr "length" "2")]) @@ -982,8 +1003,8 @@ (plus:SI (eq:SI (match_operand:SI 1 "register_operand" "r") (const_int 0)) (match_operand:SI 2 "register_operand" "r"))) - (clobber (reg:CC 0))] - "" + (clobber (reg:CC 100))] + "! TARGET_LIVE_G0" "subcc %%g0,%1,%%g0\;subx %2,-1,%0" [(set_attr "length" "2")]) @@ -992,8 +1013,8 @@ (minus:SI (match_operand:SI 2 "register_operand" "r") (eq:SI (match_operand:SI 1 "register_operand" "r") (const_int 0)))) - (clobber (reg:CC 0))] - "" + (clobber (reg:CC 100))] + "! TARGET_LIVE_G0" "subcc %%g0,%1,%%g0\;addx %2,-1,%0" [(set_attr "length" "2")]) @@ -1003,46 +1024,46 @@ (define_insn "*sltu_insn" [(set (match_operand:SI 0 "register_operand" "=r") - (ltu:SI (reg:CC 0) (const_int 0)))] - "" + (ltu:SI (reg:CC 100) (const_int 0)))] + "! TARGET_LIVE_G0" "addx %%g0,0,%0" [(set_attr "type" "misc")]) (define_insn "*neg_sltu_insn" [(set (match_operand:SI 0 "register_operand" "=r") - (neg:SI (ltu:SI (reg:CC 0) (const_int 0))))] - "" + (neg:SI (ltu:SI (reg:CC 100) (const_int 0))))] + "! TARGET_LIVE_G0" "subx %%g0,0,%0" [(set_attr "type" "misc")]) ;; ??? Combine should canonicalize these next two to the same pattern. 
(define_insn "*neg_sltu_minus_x" [(set (match_operand:SI 0 "register_operand" "=r") - (minus:SI (neg:SI (ltu:SI (reg:CC 0) (const_int 0))) + (minus:SI (neg:SI (ltu:SI (reg:CC 100) (const_int 0))) (match_operand:SI 1 "arith_operand" "rI")))] - "" + "! TARGET_LIVE_G0" "subx %%g0,%1,%0" [(set_attr "type" "unary")]) (define_insn "*neg_sltu_plus_x" [(set (match_operand:SI 0 "register_operand" "=r") - (neg:SI (plus:SI (ltu:SI (reg:CC 0) (const_int 0)) + (neg:SI (plus:SI (ltu:SI (reg:CC 100) (const_int 0)) (match_operand:SI 1 "arith_operand" "rI"))))] - "" + "! TARGET_LIVE_G0" "subx %%g0,%1,%0" [(set_attr "type" "unary")]) (define_insn "*sgeu_insn" [(set (match_operand:SI 0 "register_operand" "=r") - (geu:SI (reg:CC 0) (const_int 0)))] - "" + (geu:SI (reg:CC 100) (const_int 0)))] + "! TARGET_LIVE_G0" "subx %%g0,-1,%0" [(set_attr "type" "misc")]) (define_insn "*neg_sgeu_insn" [(set (match_operand:SI 0 "register_operand" "=r") - (neg:SI (geu:SI (reg:CC 0) (const_int 0))))] - "" + (neg:SI (geu:SI (reg:CC 100) (const_int 0))))] + "! TARGET_LIVE_G0" "addx %%g0,-1,%0" [(set_attr "type" "misc")]) @@ -1052,15 +1073,15 @@ (define_insn "*sltu_plus_x" [(set (match_operand:SI 0 "register_operand" "=r") - (plus:SI (ltu:SI (reg:CC 0) (const_int 0)) + (plus:SI (ltu:SI (reg:CC 100) (const_int 0)) (match_operand:SI 1 "arith_operand" "rI")))] - "" + "! TARGET_LIVE_G0" "addx %%g0,%1,%0" [(set_attr "type" "unary")]) (define_insn "*sltu_plus_x_plus_y" [(set (match_operand:SI 0 "register_operand" "=r") - (plus:SI (ltu:SI (reg:CC 0) (const_int 0)) + (plus:SI (ltu:SI (reg:CC 100) (const_int 0)) (plus:SI (match_operand:SI 1 "arith_operand" "%r") (match_operand:SI 2 "arith_operand" "rI"))))] "" @@ -1069,7 +1090,7 @@ (define_insn "*x_minus_sltu" [(set (match_operand:SI 0 "register_operand" "=r") (minus:SI (match_operand:SI 1 "register_operand" "r") - (ltu:SI (reg:CC 0) (const_int 0))))] + (ltu:SI (reg:CC 100) (const_int 0))))] "" "subx %1,0,%0" [(set_attr "type" "unary")]) @@ -1079,21 +1100,21 @@ [(set (match_operand:SI 0 "register_operand" "=r") (minus:SI (minus:SI (match_operand:SI 1 "register_operand" "r") (match_operand:SI 2 "arith_operand" "rI")) - (ltu:SI (reg:CC 0) (const_int 0))))] + (ltu:SI (reg:CC 100) (const_int 0))))] "" "subx %1,%2,%0") (define_insn "*x_minus_sltu_plus_y" [(set (match_operand:SI 0 "register_operand" "=r") (minus:SI (match_operand:SI 1 "register_operand" "r") - (plus:SI (ltu:SI (reg:CC 0) (const_int 0)) + (plus:SI (ltu:SI (reg:CC 100) (const_int 0)) (match_operand:SI 2 "arith_operand" "rI"))))] "" "subx %1,%2,%0") (define_insn "*sgeu_plus_x" [(set (match_operand:SI 0 "register_operand" "=r") - (plus:SI (geu:SI (reg:CC 0) (const_int 0)) + (plus:SI (geu:SI (reg:CC 100) (const_int 0)) (match_operand:SI 1 "register_operand" "r")))] "" "subx %1,-1,%0" @@ -1102,7 +1123,7 @@ (define_insn "*x_minus_sgeu" [(set (match_operand:SI 0 "register_operand" "=r") (minus:SI (match_operand:SI 1 "register_operand" "r") - (geu:SI (reg:CC 0) (const_int 0))))] + (geu:SI (reg:CC 100) (const_int 0))))] "" "addx %1,-1,%0" [(set_attr "type" "unary")]) @@ -1115,7 +1136,9 @@ (define_insn "*scc_si" [(set (match_operand:SI 0 "register_operand" "=r") - (match_operator:SI 1 "noov_compare_op" [(reg 0) (const_int 0)]))] + (match_operator:SI 2 "noov_compare_op" + [(match_operand 1 "icc_or_fcc_reg_operand" "") + (const_int 0)]))] "" "* return output_scc_insn (operands, insn); " [(set_attr "type" "multi") @@ -1123,7 +1146,9 @@ (define_insn "*scc_di" [(set (match_operand:DI 0 "register_operand" "=r") - (match_operator:DI 1 
"noov_compare_op" [(reg 0) (const_int 0)]))] + (match_operator:DI 2 "noov_compare_op" + [(match_operand 1 "icc_or_fcc_reg_operand" "") + (const_int 0)]))] "TARGET_ARCH64" "* return output_scc_insn (operands, insn); " [(set_attr "type" "multi") @@ -1324,13 +1349,13 @@ (define_insn "*normal_branch" [(set (pc) (if_then_else (match_operator 0 "noov_compare_op" - [(reg 0) (const_int 0)]) + [(reg 100) (const_int 0)]) (label_ref (match_operand 1 "" "")) (pc)))] "" "* { - return output_cbranch (operands[0], 0, 1, 0, + return output_cbranch (operands[0], 1, 0, final_sequence && INSN_ANNULLED_BRANCH_P (insn), ! final_sequence); }" @@ -1339,77 +1364,77 @@ (define_insn "*inverted_branch" [(set (pc) (if_then_else (match_operator 0 "noov_compare_op" - [(reg 0) (const_int 0)]) + [(reg 100) (const_int 0)]) (pc) (label_ref (match_operand 1 "" ""))))] "" "* { - return output_cbranch (operands[0], 0, 1, 1, + return output_cbranch (operands[0], 1, 1, final_sequence && INSN_ANNULLED_BRANCH_P (insn), ! final_sequence); }" [(set_attr "type" "branch")]) -(define_insn "*normal_fp_branch_sp64" +(define_insn "*normal_fp_branch" [(set (pc) - (if_then_else (match_operator 0 "comparison_operator" - [(match_operand:CCFP 1 "ccfp_reg_operand" "c") + (if_then_else (match_operator 1 "comparison_operator" + [(match_operand:CCFP 0 "fcc_reg_operand" "c") (const_int 0)]) (label_ref (match_operand 2 "" "")) (pc)))] - "TARGET_V9" + "" "* { - return output_cbranch (operands[0], operands[1], 2, 0, + return output_cbranch (operands[1], 2, 0, final_sequence && INSN_ANNULLED_BRANCH_P (insn), ! final_sequence); }" [(set_attr "type" "branch")]) -(define_insn "*inverted_fp_branch_sp64" +(define_insn "*inverted_fp_branch" [(set (pc) - (if_then_else (match_operator 0 "comparison_operator" - [(match_operand:CCFP 1 "ccfp_reg_operand" "c") + (if_then_else (match_operator 1 "comparison_operator" + [(match_operand:CCFP 0 "fcc_reg_operand" "c") (const_int 0)]) (pc) (label_ref (match_operand 2 "" ""))))] - "TARGET_V9" + "" "* { - return output_cbranch (operands[0], operands[1], 2, 1, + return output_cbranch (operands[1], 2, 1, final_sequence && INSN_ANNULLED_BRANCH_P (insn), ! final_sequence); }" [(set_attr "type" "branch")]) -(define_insn "*normal_fpe_branch_sp64" +(define_insn "*normal_fpe_branch" [(set (pc) - (if_then_else (match_operator 0 "comparison_operator" - [(match_operand:CCFPE 1 "ccfp_reg_operand" "c") + (if_then_else (match_operator 1 "comparison_operator" + [(match_operand:CCFPE 0 "fcc_reg_operand" "c") (const_int 0)]) (label_ref (match_operand 2 "" "")) (pc)))] - "TARGET_V9" + "" "* { - return output_cbranch (operands[0], operands[1], 2, 0, + return output_cbranch (operands[1], 2, 0, final_sequence && INSN_ANNULLED_BRANCH_P (insn), ! final_sequence); }" [(set_attr "type" "branch")]) -(define_insn "*inverted_fpe_branch_sp64" +(define_insn "*inverted_fpe_branch" [(set (pc) - (if_then_else (match_operator 0 "comparison_operator" - [(match_operand:CCFPE 1 "ccfp_reg_operand" "c") + (if_then_else (match_operator 1 "comparison_operator" + [(match_operand:CCFPE 0 "fcc_reg_operand" "c") (const_int 0)]) (pc) (label_ref (match_operand 2 "" ""))))] - "TARGET_V9" + "" "* { - return output_cbranch (operands[0], operands[1], 2, 1, + return output_cbranch (operands[1], 2, 1, final_sequence && INSN_ANNULLED_BRANCH_P (insn), ! 
final_sequence); }" @@ -1492,30 +1517,30 @@ [(set_attr "type" "move") (set_attr "length" "1")]) -(define_insn "*sethi_si" - [(set (match_operand:SI 0 "register_operand" "=r") - (high:SI (match_operand 1 "" "")))] - "check_pic (1)" - "sethi %%hi(%a1),%0" - [(set_attr "type" "move") - (set_attr "length" "1")]) +(define_insn "pic_lo_sum_di" + [(set (match_operand:DI 0 "register_operand" "=r") + (lo_sum:SI (match_operand:DI 1 "register_operand" "r") + (unspec:SI [(match_operand:DI 2 "immediate_operand" "in")] 0)))] + "TARGET_ARCH64 && flag_pic" + "add %1,%%lo(%a2),%0" + [(set_attr "length" "1")]) -(define_insn "*sethi_hi" - [(set (match_operand:HI 0 "register_operand" "=r") - (high:HI (match_operand 1 "" "")))] - "check_pic (1)" +(define_insn "pic_sethi_di" + [(set (match_operand:DI 0 "register_operand" "=r") + (high:SI (unspec:SI [(match_operand 1 "" "")] 0)))] + "TARGET_ARCH64 && flag_pic && check_pic (1)" "sethi %%hi(%a1),%0" [(set_attr "type" "move") (set_attr "length" "1")]) -(define_insn "get_pc_sp32" +(define_insn "get_pc_via_call" [(set (pc) (label_ref (match_operand 0 "" ""))) - (set (reg:SI 15) (label_ref (match_dup 0)))] - "! TARGET_PTR64" + (set (reg:SI 15) (label_ref (match_operand 1 "" "")))] + "" "call %l0%#" [(set_attr "type" "uncond_branch")]) -(define_insn "get_pc_sp64" +(define_insn "get_pc_via_rdpc" [(set (match_operand:DI 0 "register_operand" "=r") (pc))] "TARGET_PTR64" "rd %%pc,%0" @@ -1527,7 +1552,9 @@ (define_insn "move_pic_label_si" [(set (match_operand:SI 0 "register_operand" "=r") - (label_ref:SI (match_operand 1 "" ""))) + ; This was previously (label_ref:SI (match_operand 1 "" "")) but that + ; loses the volatil and other flags of the original label_ref. + (match_operand:SI 1 "label_ref_operand" "")) (set (reg:SI 15) (pc))] "flag_pic" "* @@ -1538,9 +1565,9 @@ return \"\\n1:\;call 2f\;sethi %%hi(%l1-1b),%0\\n2:\\tor %0,%%lo(%l1-1b),%0\;add %0,%%o7,%0\"; }" [(set_attr "type" "multi") - ; 1024 = 4096 bytes / 4 bytes/insn + ; 960 = 4096 bytes / 4 bytes/insn - 64 (for not always perfect length calcs) (set (attr "length") (if_then_else (ltu (minus (match_dup 1) (pc)) - (const_int 1024)) + (const_int 960)) (const_int 2) (const_int 4)))]) @@ -1555,7 +1582,9 @@ (define_insn "move_label_di" [(set (match_operand:DI 0 "register_operand" "=r") - (label_ref:DI (match_operand 1 "" ""))) + ; This was previously (label_ref:DI (match_operand 1 "" "")) but that + ; loses the volatil and other flags of the original label_ref. + (match_operand:DI 1 "label_ref_operand" "")) (set (reg:DI 15) (pc))] "TARGET_ARCH64" "* @@ -1566,12 +1595,30 @@ return \"\\n1:\;rd %%pc,%%o7\;sethi %%hi(%l1-1b),%0\;add %0,%%lo(%l1-1b),%0\;sra %0,0,%0\;add %0,%%o7,%0\"; }" [(set_attr "type" "multi") - ; 1024 = 4096 bytes / 4 bytes/insn + ; 960 = 4096 bytes / 4 bytes/insn - 64 (for not always perfect length calcs) (set (attr "length") (if_then_else (ltu (minus (match_dup 1) (pc)) - (const_int 1024)) + (const_int 960)) (const_int 2) (const_int 5)))]) +(define_insn "*sethi_hi" + [(set (match_operand:HI 0 "register_operand" "=r") + (high:HI (match_operand 1 "" "")))] + "check_pic (1)" + "sethi %%hi(%a1),%0" + [(set_attr "type" "move") + (set_attr "length" "1")]) + +;; This must appear after the PIC sethi so that the PIC unspec will not +;; be matched as part of the operand. 
+(define_insn "*sethi_si" + [(set (match_operand:SI 0 "register_operand" "=r") + (high:SI (match_operand 1 "" "")))] + "check_pic (1)" + "sethi %%hi(%a1),%0" + [(set_attr "type" "move") + (set_attr "length" "1")]) + (define_insn "*lo_sum_di_sp32" [(set (match_operand:DI 0 "register_operand" "=r") (lo_sum:DI (match_operand:DI 1 "register_operand" "0") @@ -1582,8 +1629,13 @@ /* Don't output a 64 bit constant, since we can't trust the assembler to handle it correctly. */ if (GET_CODE (operands[2]) == CONST_DOUBLE) - operands[2] = gen_rtx (CONST_INT, VOIDmode, CONST_DOUBLE_LOW (operands[2])); - return \"or %R1,%%lo(%a2),%R0\"; + operands[2] = GEN_INT (CONST_DOUBLE_LOW (operands[2])); + else if (GET_CODE (operands[2]) == CONST_INT + && HOST_BITS_PER_WIDE_INT > 32 + && INTVAL (operands[2]) > 0xffffffff) + operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffffffff); + + return \"or %L1,%%lo(%a2),%L0\"; }" ;; Need to set length for this arith insn because operand2 ;; is not an "arith_operand". @@ -1601,7 +1653,12 @@ /* Don't output a 64 bit constant, since we can't trust the assembler to handle it correctly. */ if (GET_CODE (operands[2]) == CONST_DOUBLE) - operands[2] = gen_rtx (CONST_INT, VOIDmode, CONST_DOUBLE_LOW (operands[2])); + operands[2] = GEN_INT (CONST_DOUBLE_LOW (operands[2])); + else if (GET_CODE (operands[2]) == CONST_INT + && HOST_BITS_PER_WIDE_INT > 32 + && INTVAL (operands[2]) > 0xffffffff) + operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffffffff); + /* Note that we use add here. This is important because Medium/Anywhere code model support depends on it. */ return \"add %1,%%lo(%a2),%0\"; @@ -1633,11 +1690,11 @@ else if (GET_CODE (op1) == CONST_DOUBLE) { operands[0] = operand_subword (op0, 1, 0, DImode); - operands[1] = gen_rtx (CONST_INT, VOIDmode, CONST_DOUBLE_LOW (op1)); + operands[1] = GEN_INT (CONST_DOUBLE_LOW (op1)); output_asm_insn (\"sethi %%hi(%a1),%0\", operands); operands[0] = operand_subword (op0, 0, 0, DImode); - operands[1] = gen_rtx (CONST_INT, VOIDmode, CONST_DOUBLE_HIGH (op1)); + operands[1] = GEN_INT (CONST_DOUBLE_HIGH (op1)); return singlemove_string (operands); } else @@ -1671,6 +1728,7 @@ "TARGET_ARCH64 && check_pic (1)" "* { +#if HOST_BITS_PER_WIDE_INT == 32 rtx high, low; split_double (operands[1], &high, &low); @@ -1690,6 +1748,26 @@ if (low != const0_rtx) output_asm_insn (\"sethi %%hi(%a1),%%g1; or %0,%%g1,%0\", operands); } +#else + rtx op = operands[1]; + + if (! SPARC_SETHI_P (INTVAL(op))) + { + operands[1] = GEN_INT (INTVAL (op) >> 32); + output_asm_insn (singlemove_string (operands), operands); + + output_asm_insn (\"sllx %0,32,%0\", operands); + if (INTVAL (op) & 0xffffffff) + { + operands[1] = GEN_INT (INTVAL (op) & 0xffffffff); + output_asm_insn (\"sethi %%hi(%a1),%%g1; or %0,%%g1,%0\", operands); + } + } + else + { + output_asm_insn (\"sethi %%hi(%a1),%0\", operands); + } +#endif return \"\"; }" @@ -1773,16 +1851,33 @@ (define_insn "*movqi_insn" [(set (match_operand:QI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,Q") (match_operand:QI 1 "move_operand" "rI,K,Q,rJ"))] - "register_operand (operands[0], QImode) - || register_operand (operands[1], QImode) - || operands[1] == const0_rtx" + "! 
TARGET_LIVE_G0 + && (register_operand (operands[0], QImode) + || register_operand (operands[1], QImode) + || operands[1] == const0_rtx)" "@ mov %1,%0 sethi %%hi(%a1),%0 ldub %1,%0 stb %r1,%0" [(set_attr "type" "move,move,load,store") - (set_attr "length" "*,1,*,1")]) + (set_attr "length" "1")]) + +(define_insn "*movqi_insn_liveg0" + [(set (match_operand:QI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,r,r,Q") + (match_operand:QI 1 "move_operand" "r,J,I,K,Q,r"))] + "TARGET_LIVE_G0 + && (register_operand (operands[0], QImode) + || register_operand (operands[1], QImode))" + "@ + mov %1,%0 + and %0,0,%0 + and %0,0,%0\;or %0,%1,%0 + sethi %%hi(%a1),%0 + ldub %1,%0 + stb %1,%0" + [(set_attr "type" "move,move,move,move,load,store") + (set_attr "length" "1,1,2,1,1,1")]) (define_insn "*lo_sum_qi" [(set (match_operand:QI 0 "register_operand" "=r") @@ -1796,7 +1891,8 @@ [(set (mem:QI (match_operand:SI 0 "symbolic_operand" "")) (match_operand:QI 1 "reg_or_0_operand" "rJ")) (clobber (match_scratch:SI 2 "=&r"))] - "(reload_completed || reload_in_progress) && ! TARGET_PTR64" + "(reload_completed || reload_in_progress) + && ! TARGET_PTR64" "sethi %%hi(%a0),%2\;stb %r1,[%2+%%lo(%a0)]" [(set_attr "type" "store") (set_attr "length" "2")]) @@ -1814,16 +1910,33 @@ (define_insn "*movhi_insn" [(set (match_operand:HI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,Q") (match_operand:HI 1 "move_operand" "rI,K,Q,rJ"))] - "register_operand (operands[0], HImode) - || register_operand (operands[1], HImode) - || operands[1] == const0_rtx" + "! TARGET_LIVE_G0 + && (register_operand (operands[0], HImode) + || register_operand (operands[1], HImode) + || operands[1] == const0_rtx)" "@ mov %1,%0 sethi %%hi(%a1),%0 lduh %1,%0 sth %r1,%0" [(set_attr "type" "move,move,load,store") - (set_attr "length" "*,1,*,1")]) + (set_attr "length" "1")]) + +(define_insn "*movhi_insn_liveg0" + [(set (match_operand:HI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,r,r,Q") + (match_operand:HI 1 "move_operand" "r,J,I,K,Q,r"))] + "TARGET_LIVE_G0 + && (register_operand (operands[0], HImode) + || register_operand (operands[1], HImode))" + "@ + mov %1,%0 + and %0,0,%0 + and %0,0,%0\;or %0,%1,%0 + sethi %%hi(%a1),%0 + lduh %1,%0 + sth %1,%0" + [(set_attr "type" "move,move,move,move,load,store") + (set_attr "length" "1,1,2,1,1,1")]) (define_insn "*lo_sum_hi" [(set (match_operand:HI 0 "register_operand" "=r") @@ -1837,7 +1950,8 @@ [(set (mem:HI (match_operand:SI 0 "symbolic_operand" "")) (match_operand:HI 1 "reg_or_0_operand" "rJ")) (clobber (match_scratch:SI 2 "=&r"))] - "(reload_completed || reload_in_progress) && ! TARGET_PTR64" + "(reload_completed || reload_in_progress) + && ! TARGET_PTR64" "sethi %%hi(%a0),%2\;sth %r1,[%2+%%lo(%a0)]" [(set_attr "type" "store") (set_attr "length" "2")]) @@ -1863,9 +1977,10 @@ (define_insn "*movsi_insn" [(set (match_operand:SI 0 "reg_or_nonsymb_mem_operand" "=r,f,r,r,f,Q,Q") (match_operand:SI 1 "move_operand" "rI,!f,K,Q,!Q,rJ,!f"))] - "register_operand (operands[0], SImode) - || register_operand (operands[1], SImode) - || operands[1] == const0_rtx" + "! 
TARGET_LIVE_G0 + && (register_operand (operands[0], SImode) + || register_operand (operands[1], SImode) + || operands[1] == const0_rtx)" "@ mov %1,%0 fmovs %1,%0 @@ -1873,15 +1988,35 @@ ld %1,%0 ld %1,%0 st %r1,%0 - st %r1,%0" - [(set_attr "type" "move,fp,move,load,load,store,store") - (set_attr "length" "*,*,1,*,*,*,*")]) + st %1,%0" + [(set_attr "type" "move,fp,move,load,fpload,store,fpstore") + (set_attr "length" "1")]) + +(define_insn "*movsi_insn_liveg0" + [(set (match_operand:SI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,f,r,r,f,Q,Q") + (match_operand:SI 1 "move_operand" "r,J,I,!f,K,Q,!Q,r,!f"))] + "TARGET_LIVE_G0 + && (register_operand (operands[0], SImode) + || register_operand (operands[1], SImode))" + "@ + mov %1,%0 + and %0,0,%0 + and %0,0,%0\;or %0,%1,%0 + fmovs %1,%0 + sethi %%hi(%a1),%0 + ld %1,%0 + ld %1,%0 + st %1,%0 + st %1,%0" + [(set_attr "type" "move,move,move,fp,move,load,fpload,store,fpstore") + (set_attr "length" "1,1,2,1,1,1,1,1,1")]) (define_insn "*store_si" [(set (mem:SI (match_operand:SI 0 "symbolic_operand" "")) (match_operand:SI 1 "reg_or_0_operand" "rJ")) (clobber (match_scratch:SI 2 "=&r"))] - "(reload_completed || reload_in_progress) && ! TARGET_PTR64" + "(reload_completed || reload_in_progress) + && ! TARGET_PTR64" "sethi %%hi(%a0),%2\;st %r1,[%2+%%lo(%a0)]" [(set_attr "type" "store") (set_attr "length" "2")]) @@ -1944,8 +2079,7 @@ return \"sethi %%hi(%a1),%0\"; else { - operands[1] = gen_rtx (CONST_INT, VOIDmode, - ~ INTVAL (operands[1])); + operands[1] = GEN_INT (~INTVAL (operands[1])); output_asm_insn (\"sethi %%hi(%a1),%0\", operands); /* The low 10 bits are already zero, but invert the rest. Assemblers don't accept 0x1c00, so use -0x400 instead. */ @@ -1985,7 +2119,7 @@ ; (clobber (match_dup 0)) ; (clobber (match_dup 1)) ; (clobber (match_scratch:SI 4 "")) -; (clobber (reg:SI 0)) +; (clobber (reg:SI 100)) ; (clobber (reg:SI 1))])] ; "" ; " @@ -2011,7 +2145,7 @@ ; (clobber (match_dup 0)) ; (clobber (match_dup 1)) ; (clobber (match_scratch:SI 4 "=&r")) -; (clobber (reg:SI 0)) +; (clobber (reg:SI 100)) ; (clobber (reg:SI 1))] ; "" ; "* return output_block_move (operands);" @@ -2063,8 +2197,8 @@ mov %1,%0 ld %1,%0 ld %1,%0 - st %r1,%0 - st %r1,%0" + st %1,%0 + st %1,%0" [(set_attr "type" "fp,move,fpload,load,fpstore,store")]) ;; Exactly the same as above, except that all `f' cases are deleted. @@ -2080,14 +2214,15 @@ "@ mov %1,%0 ld %1,%0 - st %r1,%0" + st %1,%0" [(set_attr "type" "move,load,store")]) (define_insn "*store_sf" [(set (mem:SF (match_operand:SI 0 "symbolic_operand" "i")) (match_operand:SF 1 "reg_or_0_operand" "rfG")) (clobber (match_scratch:SI 2 "=&r"))] - "(reload_completed || reload_in_progress) && ! TARGET_PTR64" + "(reload_completed || reload_in_progress) + && ! TARGET_PTR64" "sethi %%hi(%a0),%2\;st %r1,[%2+%%lo(%a0)]" [(set_attr "type" "store") (set_attr "length" "2")]) @@ -2164,11 +2299,13 @@ ;; Must handle overlapping registers here, since parameters can be unaligned ;; in registers. -;; ??? Do we need a v9 version of this? + (define_split [(set (match_operand:DF 0 "register_operand" "") (match_operand:DF 1 "register_operand" ""))] - "! TARGET_ARCH64 && reload_completed" + "! 
TARGET_ARCH64 && reload_completed + && REGNO (operands[0]) < SPARC_FIRST_V9_FP_REG + && REGNO (operands[1]) < SPARC_FIRST_V9_FP_REG" [(set (match_dup 2) (match_dup 3)) (set (match_dup 4) (match_dup 5))] " @@ -2196,7 +2333,8 @@ [(set (mem:DF (match_operand:SI 0 "symbolic_operand" "i,i")) (match_operand:DF 1 "reg_or_0_operand" "re,G")) (clobber (match_scratch:SI 2 "=&r,&r"))] - "(reload_completed || reload_in_progress) && ! TARGET_PTR64" + "(reload_completed || reload_in_progress) + && ! TARGET_PTR64" "* { output_asm_insn (\"sethi %%hi(%a0),%2\", operands); @@ -2293,7 +2431,8 @@ [(set (mem:TF (match_operand:SI 0 "symbolic_operand" "i,i")) (match_operand:TF 1 "reg_or_0_operand" "re,G")) (clobber (match_scratch:SI 2 "=&r,&r"))] - "0 && (reload_completed || reload_in_progress) && ! TARGET_PTR64" + "0 && (reload_completed || reload_in_progress) + && ! TARGET_PTR64" "* { output_asm_insn (\"sethi %%hi(%a0),%2\", operands); @@ -2307,19 +2446,86 @@ ;; Sparc V9 conditional move instructions. -;; We can handle larger constants here for some flavors, but for now we play -;; it safe and only allow those constants supported by all flavours. +;; We can handle larger constants here for some flavors, but for now we keep +;; it simple and only allow those constants supported by all flavours. +;; Note that emit_conditional_move canonicalizes operands 2,3 so that operand +;; 3 contains the constant if one is present, but we handle either for +;; generality (sparc.c puts a constant in operand 2). + +(define_expand "movqicc" + [(set (match_operand:QI 0 "register_operand" "") + (if_then_else:QI (match_operand 1 "comparison_operator" "") + (match_operand:QI 2 "arith10_operand" "") + (match_operand:QI 3 "arith10_operand" "")))] + "TARGET_V9" + " +{ + enum rtx_code code = GET_CODE (operands[1]); + + if (GET_MODE (sparc_compare_op0) == DImode + && ! TARGET_ARCH64) + FAIL; + + if (sparc_compare_op1 == const0_rtx + && GET_CODE (sparc_compare_op0) == REG + && GET_MODE (sparc_compare_op0) == DImode + && v9_regcmp_p (code)) + { + operands[1] = gen_rtx (code, DImode, + sparc_compare_op0, sparc_compare_op1); + } + else + { + rtx cc_reg = gen_compare_reg (code, + sparc_compare_op0, sparc_compare_op1); + operands[1] = gen_rtx (code, GET_MODE (cc_reg), cc_reg, const0_rtx); + } +}") + +(define_expand "movhicc" + [(set (match_operand:HI 0 "register_operand" "") + (if_then_else:HI (match_operand 1 "comparison_operator" "") + (match_operand:HI 2 "arith10_operand" "") + (match_operand:HI 3 "arith10_operand" "")))] + "TARGET_V9" + " +{ + enum rtx_code code = GET_CODE (operands[1]); + + if (GET_MODE (sparc_compare_op0) == DImode + && ! 
TARGET_ARCH64) + FAIL; + + if (sparc_compare_op1 == const0_rtx + && GET_CODE (sparc_compare_op0) == REG + && GET_MODE (sparc_compare_op0) == DImode + && v9_regcmp_p (code)) + { + operands[1] = gen_rtx (code, DImode, + sparc_compare_op0, sparc_compare_op1); + } + else + { + rtx cc_reg = gen_compare_reg (code, + sparc_compare_op0, sparc_compare_op1); + operands[1] = gen_rtx (code, GET_MODE (cc_reg), cc_reg, const0_rtx); + } +}") (define_expand "movsicc" [(set (match_operand:SI 0 "register_operand" "") - (if_then_else (match_operand 1 "comparison_operator" "") - (match_operand:SI 2 "arith10_operand" "") - (match_operand:SI 3 "register_operand" "")))] - "TARGET_ARCH64" + (if_then_else:SI (match_operand 1 "comparison_operator" "") + (match_operand:SI 2 "arith10_operand" "") + (match_operand:SI 3 "arith10_operand" "")))] + "TARGET_V9" " { enum rtx_code code = GET_CODE (operands[1]); + if (GET_MODE (sparc_compare_op0) == DImode + && ! TARGET_ARCH64) + FAIL; + if (sparc_compare_op1 == const0_rtx && GET_CODE (sparc_compare_op0) == REG && GET_MODE (sparc_compare_op0) == DImode @@ -2338,9 +2544,9 @@ (define_expand "movdicc" [(set (match_operand:DI 0 "register_operand" "") - (if_then_else (match_operand 1 "comparison_operator" "") - (match_operand:DI 2 "arith10_operand" "") - (match_operand:DI 3 "register_operand" "")))] + (if_then_else:DI (match_operand 1 "comparison_operator" "") + (match_operand:DI 2 "arith10_double_operand" "") + (match_operand:DI 3 "arith10_double_operand" "")))] "TARGET_ARCH64" " { @@ -2364,14 +2570,18 @@ (define_expand "movsfcc" [(set (match_operand:SF 0 "register_operand" "") - (if_then_else (match_operand 1 "comparison_operator" "") - (match_operand:SF 2 "register_operand" "") - (match_operand:SF 3 "register_operand" "")))] - "TARGET_ARCH64" + (if_then_else:SF (match_operand 1 "comparison_operator" "") + (match_operand:SF 2 "register_operand" "") + (match_operand:SF 3 "register_operand" "")))] + "TARGET_V9 && TARGET_FPU" " { enum rtx_code code = GET_CODE (operands[1]); + if (GET_MODE (sparc_compare_op0) == DImode + && ! TARGET_ARCH64) + FAIL; + if (sparc_compare_op1 == const0_rtx && GET_CODE (sparc_compare_op0) == REG && GET_MODE (sparc_compare_op0) == DImode @@ -2390,14 +2600,18 @@ (define_expand "movdfcc" [(set (match_operand:DF 0 "register_operand" "") - (if_then_else (match_operand 1 "comparison_operator" "") - (match_operand:DF 2 "register_operand" "") - (match_operand:DF 3 "register_operand" "")))] - "TARGET_ARCH64" + (if_then_else:DF (match_operand 1 "comparison_operator" "") + (match_operand:DF 2 "register_operand" "") + (match_operand:DF 3 "register_operand" "")))] + "TARGET_V9 && TARGET_FPU" " { enum rtx_code code = GET_CODE (operands[1]); + if (GET_MODE (sparc_compare_op0) == DImode + && ! TARGET_ARCH64) + FAIL; + if (sparc_compare_op1 == const0_rtx && GET_CODE (sparc_compare_op0) == REG && GET_MODE (sparc_compare_op0) == DImode @@ -2416,14 +2630,18 @@ (define_expand "movtfcc" [(set (match_operand:TF 0 "register_operand" "") - (if_then_else (match_operand 1 "comparison_operator" "") - (match_operand:TF 2 "register_operand" "") - (match_operand:TF 3 "register_operand" "")))] - "TARGET_ARCH64" + (if_then_else:TF (match_operand 1 "comparison_operator" "") + (match_operand:TF 2 "register_operand" "") + (match_operand:TF 3 "register_operand" "")))] + "TARGET_V9 && TARGET_FPU" " { enum rtx_code code = GET_CODE (operands[1]); + if (GET_MODE (sparc_compare_op0) == DImode + && ! 
TARGET_ARCH64) + FAIL; + if (sparc_compare_op1 == const0_rtx && GET_CODE (sparc_compare_op0) == REG && GET_MODE (sparc_compare_op0) == DImode @@ -2440,271 +2658,190 @@ } }") -/* Conditional move define_insns. */ +;; Conditional move define_insns. + +(define_insn "*movqi_cc_sp64" + [(set (match_operand:QI 0 "register_operand" "=r,r") + (if_then_else:QI (match_operator 1 "comparison_operator" + [(match_operand 2 "icc_or_fcc_reg_operand" "X,X") + (const_int 0)]) + (match_operand:QI 3 "arith11_operand" "rL,0") + (match_operand:QI 4 "arith11_operand" "0,rL")))] + "TARGET_V9" + "@ + mov%C1 %x2,%3,%0 + mov%c1 %x2,%4,%0" + [(set_attr "type" "cmove")]) + +(define_insn "*movhi_cc_sp64" + [(set (match_operand:HI 0 "register_operand" "=r,r") + (if_then_else:HI (match_operator 1 "comparison_operator" + [(match_operand 2 "icc_or_fcc_reg_operand" "X,X") + (const_int 0)]) + (match_operand:HI 3 "arith11_operand" "rL,0") + (match_operand:HI 4 "arith11_operand" "0,rL")))] + "TARGET_V9" + "@ + mov%C1 %x2,%3,%0 + mov%c1 %x2,%4,%0" + [(set_attr "type" "cmove")]) (define_insn "*movsi_cc_sp64" - [(set (match_operand:SI 0 "register_operand" "=r") - (if_then_else (match_operator 1 "comparison_operator" - [(reg:CC 0) (const_int 0)]) - (match_operand:SI 2 "arith11_operand" "ri") - (match_operand:SI 3 "register_operand" "0")))] + [(set (match_operand:SI 0 "register_operand" "=r,r") + (if_then_else:SI (match_operator 1 "comparison_operator" + [(match_operand 2 "icc_or_fcc_reg_operand" "X,X") + (const_int 0)]) + (match_operand:SI 3 "arith11_operand" "rL,0") + (match_operand:SI 4 "arith11_operand" "0,rL")))] "TARGET_V9" - "mov%C1 %%icc,%2,%0" + "@ + mov%C1 %x2,%3,%0 + mov%c1 %x2,%4,%0" [(set_attr "type" "cmove")]) +;; ??? The constraints of operands 3,4 need work. (define_insn "*movdi_cc_sp64" - [(set (match_operand:DI 0 "register_operand" "=r") - (if_then_else (match_operator 1 "comparison_operator" - [(reg:CC 0) (const_int 0)]) - (match_operand:DI 2 "arith11_double_operand" "rHI") - (match_operand:DI 3 "register_operand" "0")))] - "TARGET_ARCH64" - "mov%C1 %%icc,%2,%0" - [(set_attr "type" "cmove")]) - -(define_insn "*movsi_ccx_sp64" - [(set (match_operand:SI 0 "register_operand" "=r") - (if_then_else (match_operator 1 "comparison_operator" - [(reg:CCX 0) (const_int 0)]) - (match_operand:SI 2 "arith11_operand" "ri") - (match_operand:SI 3 "register_operand" "0")))] + [(set (match_operand:DI 0 "register_operand" "=r,r") + (if_then_else:DI (match_operator 1 "comparison_operator" + [(match_operand 2 "icc_or_fcc_reg_operand" "X,X") + (const_int 0)]) + (match_operand:DI 3 "arith11_double_operand" "rLH,0") + (match_operand:DI 4 "arith11_double_operand" "0,rLH")))] "TARGET_ARCH64" - "mov%C1 %%xcc,%2,%0" + "@ + mov%C1 %x2,%3,%0 + mov%c1 %x2,%4,%0" [(set_attr "type" "cmove")]) -(define_insn "*movdi_ccx_sp64" - [(set (match_operand:DI 0 "register_operand" "=r") - (if_then_else (match_operator 1 "comparison_operator" - [(reg:CCX 0) (const_int 0)]) - (match_operand:DI 2 "arith11_double_operand" "rHI") - (match_operand:DI 3 "register_operand" "0")))] - "TARGET_ARCH64" - "mov%C1 %%xcc,%2,%0" +(define_insn "*movsf_cc_sp64" + [(set (match_operand:SF 0 "register_operand" "=f,f") + (if_then_else:SF (match_operator 1 "comparison_operator" + [(match_operand 2 "icc_or_fcc_reg_operand" "X,X") + (const_int 0)]) + (match_operand:SF 3 "register_operand" "f,0") + (match_operand:SF 4 "register_operand" "0,f")))] + "TARGET_V9 && TARGET_FPU" + "@ + fmovs%C1 %x2,%3,%0 + fmovs%c1 %x2,%4,%0" [(set_attr "type" "cmove")]) -(define_insn "*movsi_ccfp_sp64" 
- [(set (match_operand:SI 0 "register_operand" "=r") - (if_then_else (match_operator 1 "comparison_operator" - [(match_operand:CCFP 2 "ccfp_reg_operand" "c") +(define_insn "*movdf_cc_sp64" + [(set (match_operand:DF 0 "register_operand" "=e,e") + (if_then_else:DF (match_operator 1 "comparison_operator" + [(match_operand 2 "icc_or_fcc_reg_operand" "X,X") (const_int 0)]) - (match_operand:SI 3 "arith11_operand" "ri") - (match_operand:SI 4 "register_operand" "0")))] - "TARGET_V9" - "mov%C1 %2,%3,%0" + (match_operand:DF 3 "register_operand" "e,0") + (match_operand:DF 4 "register_operand" "0,e")))] + "TARGET_V9 && TARGET_FPU" + "@ + fmovd%C1 %x2,%3,%0 + fmovd%c1 %x2,%4,%0" [(set_attr "type" "cmove")]) -(define_insn "*movsi_ccfpe_sp64" - [(set (match_operand:SI 0 "register_operand" "=r") - (if_then_else (match_operator 1 "comparison_operator" - [(match_operand:CCFPE 2 "ccfp_reg_operand" "c") +(define_insn "*movtf_cc_sp64" + [(set (match_operand:TF 0 "register_operand" "=e,e") + (if_then_else:TF (match_operator 1 "comparison_operator" + [(match_operand 2 "icc_or_fcc_reg_operand" "X,X") (const_int 0)]) - (match_operand:SI 3 "arith11_operand" "ri") - (match_operand:SI 4 "register_operand" "0")))] - "TARGET_V9" - "mov%C1 %2,%3,%0" + (match_operand:TF 3 "register_operand" "e,0") + (match_operand:TF 4 "register_operand" "0,e")))] + "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD" + "@ + fmovq%C1 %x2,%3,%0 + fmovq%c1 %x2,%4,%0" [(set_attr "type" "cmove")]) -(define_insn "*movdi_ccfp_sp64" - [(set (match_operand:DI 0 "register_operand" "=r") - (if_then_else (match_operator 1 "comparison_operator" - [(match_operand:CCFP 2 "ccfp_reg_operand" "c") +(define_insn "*movqi_cc_reg_sp64" + [(set (match_operand:QI 0 "register_operand" "=r,r") + (if_then_else:QI (match_operator 1 "v9_regcmp_op" + [(match_operand:DI 2 "register_operand" "r,r") (const_int 0)]) - (match_operand:DI 3 "arith11_double_operand" "rHI") - (match_operand:DI 4 "register_operand" "0")))] + (match_operand:QI 3 "arith10_operand" "rM,0") + (match_operand:QI 4 "arith10_operand" "0,rM")))] "TARGET_ARCH64" - "mov%C1 %2,%3,%0" + "@ + movr%D1 %2,%r3,%0 + movr%d1 %2,%r4,%0" [(set_attr "type" "cmove")]) -(define_insn "*movdi_ccfpe_sp64" - [(set (match_operand:DI 0 "register_operand" "=r") - (if_then_else (match_operator 1 "comparison_operator" - [(match_operand:CCFPE 2 "ccfp_reg_operand" "c") +(define_insn "*movhi_cc_reg_sp64" + [(set (match_operand:HI 0 "register_operand" "=r,r") + (if_then_else:HI (match_operator 1 "v9_regcmp_op" + [(match_operand:DI 2 "register_operand" "r,r") (const_int 0)]) - (match_operand:DI 3 "arith11_double_operand" "rHI") - (match_operand:DI 4 "register_operand" "0")))] + (match_operand:HI 3 "arith10_operand" "rM,0") + (match_operand:HI 4 "arith10_operand" "0,rM")))] "TARGET_ARCH64" - "mov%C1 %2,%3,%0" + "@ + movr%D1 %2,%r3,%0 + movr%d1 %2,%r4,%0" [(set_attr "type" "cmove")]) (define_insn "*movsi_cc_reg_sp64" - [(set (match_operand:SI 0 "register_operand" "=r") - (if_then_else (match_operator 1 "v9_regcmp_op" - [(match_operand:DI 2 "register_operand" "r") + [(set (match_operand:SI 0 "register_operand" "=r,r") + (if_then_else:SI (match_operator 1 "v9_regcmp_op" + [(match_operand:DI 2 "register_operand" "r,r") (const_int 0)]) - (match_operand:SI 3 "arith10_operand" "ri") - (match_operand:SI 4 "register_operand" "0")))] + (match_operand:SI 3 "arith10_operand" "rM,0") + (match_operand:SI 4 "arith10_operand" "0,rM")))] "TARGET_ARCH64" - "movr%D1 %2,%r3,%0" + "@ + movr%D1 %2,%r3,%0 + movr%d1 %2,%r4,%0" [(set_attr "type" "cmove")]) +;; 
??? The constraints of operands 3,4 need work. (define_insn "*movdi_cc_reg_sp64" - [(set (match_operand:DI 0 "register_operand" "=r") - (if_then_else (match_operator 1 "v9_regcmp_op" - [(match_operand:DI 2 "register_operand" "r") + [(set (match_operand:DI 0 "register_operand" "=r,r") + (if_then_else:DI (match_operator 1 "v9_regcmp_op" + [(match_operand:DI 2 "register_operand" "r,r") (const_int 0)]) - (match_operand:DI 3 "arith10_double_operand" "ri") - (match_operand:DI 4 "register_operand" "0")))] + (match_operand:DI 3 "arith10_double_operand" "rMH,0") + (match_operand:DI 4 "arith10_double_operand" "0,rMH")))] "TARGET_ARCH64" - "movr%D1 %2,%r3,%0" + "@ + movr%D1 %2,%r3,%0 + movr%d1 %2,%r4,%0" [(set_attr "type" "cmove")]) (define_insn "*movsf_cc_reg_sp64" - [(set (match_operand:SF 0 "register_operand" "=f") - (if_then_else (match_operator 1 "v9_regcmp_op" - [(match_operand:DI 2 "register_operand" "r") + [(set (match_operand:SF 0 "register_operand" "=f,f") + (if_then_else:SF (match_operator 1 "v9_regcmp_op" + [(match_operand:DI 2 "register_operand" "r,r") (const_int 0)]) - (match_operand:SF 3 "register_operand" "f") - (match_operand:SF 4 "register_operand" "0")))] + (match_operand:SF 3 "register_operand" "f,0") + (match_operand:SF 4 "register_operand" "0,f")))] "TARGET_ARCH64 && TARGET_FPU" - "fmovrs%D1 %2,%r3,%0" + "@ + fmovrs%D1 %2,%3,%0 + fmovrs%d1 %2,%4,%0" [(set_attr "type" "cmove")]) (define_insn "*movdf_cc_reg_sp64" - [(set (match_operand:DF 0 "register_operand" "=e") - (if_then_else (match_operator 1 "v9_regcmp_op" - [(match_operand:DI 2 "register_operand" "r") + [(set (match_operand:DF 0 "register_operand" "=e,e") + (if_then_else:DF (match_operator 1 "v9_regcmp_op" + [(match_operand:DI 2 "register_operand" "r,r") (const_int 0)]) - (match_operand:DF 3 "register_operand" "e") - (match_operand:DF 4 "register_operand" "0")))] + (match_operand:DF 3 "register_operand" "e,0") + (match_operand:DF 4 "register_operand" "0,e")))] "TARGET_ARCH64 && TARGET_FPU" - "fmovrd%D1 %2,%r3,%0" + "@ + fmovrd%D1 %2,%3,%0 + fmovrd%d1 %2,%4,%0" [(set_attr "type" "cmove")]) (define_insn "*movtf_cc_reg_sp64" - [(set (match_operand:TF 0 "register_operand" "=e") - (if_then_else (match_operator 1 "v9_regcmp_op" - [(match_operand:DI 2 "register_operand" "r") - (const_int 0)]) - (match_operand:TF 3 "register_operand" "e") - (match_operand:TF 4 "register_operand" "0")))] - "TARGET_ARCH64 && TARGET_FPU" - "fmovrq%D1 %2,%r3,%0" - [(set_attr "type" "cmove")]) - -(define_insn "*movsf_ccfp_sp64" - [(set (match_operand:SF 0 "register_operand" "=f") - (if_then_else (match_operator 1 "comparison_operator" - [(match_operand:CCFP 2 "ccfp_reg_operand" "c") - (const_int 0)]) - (match_operand:SF 3 "register_operand" "f") - (match_operand:SF 4 "register_operand" "0")))] - "TARGET_V9 && TARGET_FPU" - "fmovs%C1 %2,%3,%0" - [(set_attr "type" "cmove")]) - -(define_insn "*movsf_ccfpe_sp64" - [(set (match_operand:SF 0 "register_operand" "=f") - (if_then_else (match_operator 1 "comparison_operator" - [(match_operand:CCFPE 2 "ccfp_reg_operand" "c") - (const_int 0)]) - (match_operand:SF 3 "register_operand" "f") - (match_operand:SF 4 "register_operand" "0")))] - "TARGET_V9 && TARGET_FPU" - "fmovs%C1 %2,%3,%0" - [(set_attr "type" "cmove")]) - -(define_insn "*movdf_ccfp_sp64" - [(set (match_operand:DF 0 "register_operand" "=e") - (if_then_else (match_operator 1 "comparison_operator" - [(match_operand:CCFP 2 "ccfp_reg_operand" "c") - (const_int 0)]) - (match_operand:DF 3 "register_operand" "e") - (match_operand:DF 4 "register_operand" 
"0")))] - "TARGET_V9 && TARGET_FPU" - "fmovd%C1 %2,%3,%0" - [(set_attr "type" "cmove")]) - -(define_insn "*movdf_ccfpe_sp64" - [(set (match_operand:DF 0 "register_operand" "=e") - (if_then_else (match_operator 1 "comparison_operator" - [(match_operand:CCFPE 2 "ccfp_reg_operand" "c") - (const_int 0)]) - (match_operand:DF 3 "register_operand" "e") - (match_operand:DF 4 "register_operand" "0")))] - "TARGET_V9 && TARGET_FPU" - "fmovd%C1 %2,%3,%0" - [(set_attr "type" "cmove")]) - -(define_insn "*movtf_ccfp_sp64" - [(set (match_operand:TF 0 "register_operand" "=e") - (if_then_else (match_operator 1 "comparison_operator" - [(match_operand:CCFP 2 "ccfp_reg_operand" "c") - (const_int 0)]) - (match_operand:TF 3 "register_operand" "e") - (match_operand:TF 4 "register_operand" "0")))] - "TARGET_V9 && TARGET_FPU" - "fmovq%C1 %2,%3,%0" - [(set_attr "type" "cmove")]) - -(define_insn "*movtf_ccfpe_sp64" - [(set (match_operand:TF 0 "register_operand" "=e") - (if_then_else (match_operator 1 "comparison_operator" - [(match_operand:CCFPE 2 "ccfp_reg_operand" "c") + [(set (match_operand:TF 0 "register_operand" "=e,e") + (if_then_else:TF (match_operator 1 "v9_regcmp_op" + [(match_operand:DI 2 "register_operand" "r,r") (const_int 0)]) - (match_operand:TF 3 "register_operand" "e") - (match_operand:TF 4 "register_operand" "0")))] - "TARGET_V9 && TARGET_FPU" - "fmovq%C1 %2,%3,%0" - [(set_attr "type" "cmove")]) - -(define_insn "*movsf_cc_sp64" - [(set (match_operand:SF 0 "register_operand" "=f") - (if_then_else (match_operator 1 "comparison_operator" - [(reg:CC 0) (const_int 0)]) - (match_operand:SF 2 "register_operand" "f") - (match_operand:SF 3 "register_operand" "0")))] - "TARGET_V9 && TARGET_FPU" - "fmovs%C1 %%icc,%2,%0" - [(set_attr "type" "cmove")]) - -(define_insn "*movdf_cc_sp64" - [(set (match_operand:DF 0 "register_operand" "=e") - (if_then_else (match_operator 1 "comparison_operator" - [(reg:CC 0) (const_int 0)]) - (match_operand:DF 2 "register_operand" "e") - (match_operand:DF 3 "register_operand" "0")))] - "TARGET_V9 && TARGET_FPU" - "fmovd%C1 %%icc,%2,%0" - [(set_attr "type" "cmove")]) - -(define_insn "*movtf_cc_sp64" - [(set (match_operand:TF 0 "register_operand" "=e") - (if_then_else (match_operator 1 "comparison_operator" - [(reg:CC 0) (const_int 0)]) - (match_operand:TF 2 "register_operand" "e") - (match_operand:TF 3 "register_operand" "0")))] - "TARGET_V9 && TARGET_FPU" - "fmovq%C1 %%icc,%2,%0" - [(set_attr "type" "cmove")]) - -(define_insn "*movsf_ccx_sp64" - [(set (match_operand:SF 0 "register_operand" "=f") - (if_then_else (match_operator 1 "comparison_operator" - [(reg:CCX 0) (const_int 0)]) - (match_operand:SF 2 "register_operand" "f") - (match_operand:SF 3 "register_operand" "0")))] - "TARGET_ARCH64 && TARGET_FPU" - "fmovs%C1 %%xcc,%2,%0" - [(set_attr "type" "cmove")]) - -(define_insn "*movdf_ccx_sp64" - [(set (match_operand:DF 0 "register_operand" "=e") - (if_then_else (match_operator 1 "comparison_operator" - [(reg:CCX 0) (const_int 0)]) - (match_operand:DF 2 "register_operand" "e") - (match_operand:DF 3 "register_operand" "0")))] - "TARGET_ARCH64 && TARGET_FPU" - "fmovd%C1 %%xcc,%2,%0" - [(set_attr "type" "cmove")]) - -(define_insn "*movtf_ccx_sp64" - [(set (match_operand:TF 0 "register_operand" "=e") - (if_then_else (match_operator 1 "comparison_operator" - [(reg:CCX 0) (const_int 0)]) - (match_operand:TF 2 "register_operand" "e") - (match_operand:TF 3 "register_operand" "0")))] + (match_operand:TF 3 "register_operand" "e,0") + (match_operand:TF 4 "register_operand" "0,e")))] 
"TARGET_ARCH64 && TARGET_FPU" - "fmovq%C1 %%xcc,%2,%0" + "@ + fmovrq%D1 %2,%3,%0 + fmovrq%d1 %2,%4,%0" [(set_attr "type" "cmove")]) ;;- zero extension instructions @@ -2720,7 +2857,7 @@ " { rtx temp = gen_reg_rtx (SImode); - rtx shift_16 = gen_rtx (CONST_INT, VOIDmode, 16); + rtx shift_16 = GEN_INT (16); int op1_subword = 0; if (GET_CODE (operand1) == SUBREG) @@ -2798,7 +2935,7 @@ " { rtx temp = gen_reg_rtx (DImode); - rtx shift_48 = gen_rtx (CONST_INT, VOIDmode, 48); + rtx shift_48 = GEN_INT (48); int op1_subword = 0; if (GET_CODE (operand1) == SUBREG) @@ -2842,7 +2979,7 @@ ;; Simplify comparisons of extended values. (define_insn "*cmp_zero_extendqisi2" - [(set (reg:CC 0) + [(set (reg:CC 100) (compare:CC (zero_extend:SI (match_operand:QI 0 "register_operand" "r")) (const_int 0)))] "" @@ -2850,7 +2987,7 @@ [(set_attr "type" "compare")]) (define_insn "*cmp_zero_extendqisi2_set" - [(set (reg:CC 0) + [(set (reg:CC 100) (compare:CC (zero_extend:SI (match_operand:QI 1 "register_operand" "r")) (const_int 0))) (set (match_operand:SI 0 "register_operand" "=r") @@ -2862,7 +2999,7 @@ ;; Similarly, handle SI->QI mode truncation followed by a compare. (define_insn "*cmp_siqi_trunc" - [(set (reg:CC 0) + [(set (reg:CC 100) (compare:CC (subreg:QI (match_operand:SI 0 "register_operand" "r") 0) (const_int 0)))] "" @@ -2870,7 +3007,7 @@ [(set_attr "type" "compare")]) (define_insn "*cmp_siqi_trunc_set" - [(set (reg:CC 0) + [(set (reg:CC 100) (compare:CC (subreg:QI (match_operand:SI 1 "register_operand" "r") 0) (const_int 0))) (set (match_operand:QI 0 "register_operand" "=r") @@ -2892,7 +3029,7 @@ " { rtx temp = gen_reg_rtx (SImode); - rtx shift_16 = gen_rtx (CONST_INT, VOIDmode, 16); + rtx shift_16 = GEN_INT (16); int op1_subword = 0; if (GET_CODE (operand1) == SUBREG) @@ -2922,7 +3059,7 @@ " { rtx temp = gen_reg_rtx (SImode); - rtx shift_24 = gen_rtx (CONST_INT, VOIDmode, 24); + rtx shift_24 = GEN_INT (24); int op1_subword = 0; int op0_subword = 0; @@ -2959,7 +3096,7 @@ " { rtx temp = gen_reg_rtx (SImode); - rtx shift_24 = gen_rtx (CONST_INT, VOIDmode, 24); + rtx shift_24 = GEN_INT (24); int op1_subword = 0; if (GET_CODE (operand1) == SUBREG) @@ -2989,7 +3126,7 @@ " { rtx temp = gen_reg_rtx (DImode); - rtx shift_56 = gen_rtx (CONST_INT, VOIDmode, 56); + rtx shift_56 = GEN_INT (56); int op1_subword = 0; if (GET_CODE (operand1) == SUBREG) @@ -3019,7 +3156,7 @@ " { rtx temp = gen_reg_rtx (DImode); - rtx shift_48 = gen_rtx (CONST_INT, VOIDmode, 48); + rtx shift_48 = GEN_INT (48); int op1_subword = 0; if (GET_CODE (operand1) == SUBREG) @@ -3062,7 +3199,7 @@ ;; because combine uses this as a canonical form. 
(define_insn "*cmp_zero_extract" - [(set (reg:CC 0) + [(set (reg:CC 100) (compare:CC (zero_extract:SI (match_operand:SI 0 "register_operand" "r") (match_operand:SI 1 "small_int" "n") @@ -3075,12 +3212,12 @@ int pos = 32 - INTVAL (operands[2]) - len; unsigned mask = ((1 << len) - 1) << pos; - operands[1] = gen_rtx (CONST_INT, VOIDmode, mask); + operands[1] = GEN_INT (mask); return \"andcc %0,%1,%%g0\"; }") (define_insn "*cmp_zero_extract_sp64" - [(set (reg:CCX 0) + [(set (reg:CCX 100) (compare:CCX (zero_extract:DI (match_operand:DI 0 "register_operand" "r") (match_operand:SI 1 "small_int" "n") @@ -3091,9 +3228,9 @@ { int len = INTVAL (operands[1]); int pos = 64 - INTVAL (operands[2]) - len; - unsigned mask = ((1 << len) - 1) << pos; + unsigned HOST_WIDE_INT mask = (((unsigned HOST_WIDE_INT) 1 << len) - 1) << pos; - operands[1] = gen_rtx (CONST_INT, VOIDmode, mask); + operands[1] = GEN_INT (mask); return \"andcc %0,%1,%%g0\"; }") @@ -3440,7 +3577,8 @@ gen_rtx (SET, VOIDmode, operands[0], gen_rtx (PLUS, DImode, operands[1], operands[2])), - gen_rtx (CLOBBER, VOIDmode, gen_rtx (REG, SImode, 0))))); + gen_rtx (CLOBBER, VOIDmode, + gen_rtx (REG, SImode, SPARC_ICC_REG))))); DONE; } }") @@ -3449,35 +3587,29 @@ [(set (match_operand:DI 0 "register_operand" "=r") (plus:DI (match_operand:DI 1 "arith_double_operand" "%r") (match_operand:DI 2 "arith_double_operand" "rHI"))) - (clobber (reg:SI 0))] + (clobber (reg:SI 100))] "! TARGET_ARCH64" "* { rtx op2 = operands[2]; - /* If constant is positive, upper bits zeroed, otherwise unchanged. - Give the assembler a chance to pick the move instruction. */ - if (GET_CODE (op2) == CONST_INT) - { - int sign = INTVAL (op2); - if (sign < 0) - return \"addcc %R1,%2,%R0\;addx %1,-1,%0\"; - return \"addcc %R1,%2,%R0\;addx %1,0,%0\"; - } - else if (GET_CODE (op2) == CONST_DOUBLE) + if (GET_CODE (op2) == CONST_INT + || GET_CODE (op2) == CONST_DOUBLE) { rtx xoperands[4]; xoperands[0] = operands[0]; xoperands[1] = operands[1]; - xoperands[2] = GEN_INT (CONST_DOUBLE_LOW (op2)); - xoperands[3] = GEN_INT (CONST_DOUBLE_HIGH (op2)); - if (xoperands[2] == const0_rtx && xoperands[0] == xoperands[1]) - output_asm_insn (\"add %1,%3,%0\", xoperands); + if (WORDS_BIG_ENDIAN) + split_double (op2, &xoperands[2], &xoperands[3]); else - output_asm_insn (\"addcc %R1,%2,%R0\;addx %1,%3,%0\", xoperands); + split_double (op2, &xoperands[3], &xoperands[2]); + if (xoperands[3] == const0_rtx && xoperands[0] == xoperands[1]) + output_asm_insn (\"add %H1,%2,%H0\", xoperands); + else + output_asm_insn (\"addcc %L1,%3,%L0\;addx %H1,%2,%H0\", xoperands); return \"\"; } - return \"addcc %R1,%R2,%R0\;addx %1,%2,%0\"; + return \"addcc %L1,%L2,%L0\;addx %H1,%H2,%H0\"; }" [(set_attr "length" "2")]) @@ -3497,7 +3629,7 @@ [(set_attr "type" "ialu")]) (define_insn "*cmp_cc_plus" - [(set (reg:CC_NOOV 0) + [(set (reg:CC_NOOV 100) (compare:CC_NOOV (plus:SI (match_operand:SI 0 "arith_operand" "%r") (match_operand:SI 1 "arith_operand" "rI")) (const_int 0)))] @@ -3506,7 +3638,7 @@ [(set_attr "type" "compare")]) (define_insn "*cmp_ccx_plus" - [(set (reg:CCX_NOOV 0) + [(set (reg:CCX_NOOV 100) (compare:CCX_NOOV (plus:DI (match_operand:DI 0 "arith_double_operand" "%r") (match_operand:DI 1 "arith_double_operand" "rHI")) (const_int 0)))] @@ -3515,7 +3647,7 @@ [(set_attr "type" "compare")]) (define_insn "*cmp_cc_plus_set" - [(set (reg:CC_NOOV 0) + [(set (reg:CC_NOOV 100) (compare:CC_NOOV (plus:SI (match_operand:SI 1 "arith_operand" "%r") (match_operand:SI 2 "arith_operand" "rI")) (const_int 0))) @@ -3525,7 +3657,7 @@ 
"addcc %1,%2,%0") (define_insn "*cmp_ccx_plus_set" - [(set (reg:CCX_NOOV 0) + [(set (reg:CCX_NOOV 100) (compare:CCX_NOOV (plus:DI (match_operand:DI 1 "arith_double_operand" "%r") (match_operand:DI 2 "arith_double_operand" "rHI")) (const_int 0))) @@ -3547,7 +3679,8 @@ gen_rtx (SET, VOIDmode, operands[0], gen_rtx (MINUS, DImode, operands[1], operands[2])), - gen_rtx (CLOBBER, VOIDmode, gen_rtx (REG, SImode, 0))))); + gen_rtx (CLOBBER, VOIDmode, + gen_rtx (REG, SImode, SPARC_ICC_REG))))); DONE; } }") @@ -3556,35 +3689,29 @@ [(set (match_operand:DI 0 "register_operand" "=r") (minus:DI (match_operand:DI 1 "register_operand" "r") (match_operand:DI 2 "arith_double_operand" "rHI"))) - (clobber (reg:SI 0))] + (clobber (reg:SI 100))] "! TARGET_ARCH64" "* { rtx op2 = operands[2]; - /* If constant is positive, upper bits zeroed, otherwise unchanged. - Give the assembler a chance to pick the move instruction. */ - if (GET_CODE (op2) == CONST_INT) - { - int sign = INTVAL (op2); - if (sign < 0) - return \"subcc %R1,%2,%R0\;subx %1,-1,%0\"; - return \"subcc %R1,%2,%R0\;subx %1,0,%0\"; - } - else if (GET_CODE (op2) == CONST_DOUBLE) + if (GET_CODE (op2) == CONST_INT + || GET_CODE (op2) == CONST_DOUBLE) { rtx xoperands[4]; xoperands[0] = operands[0]; xoperands[1] = operands[1]; - xoperands[2] = GEN_INT (CONST_DOUBLE_LOW (op2)); - xoperands[3] = GEN_INT (CONST_DOUBLE_HIGH (op2)); - if (xoperands[2] == const0_rtx && xoperands[0] == xoperands[1]) - output_asm_insn (\"sub %1,%3,%0\", xoperands); + if (WORDS_BIG_ENDIAN) + split_double (op2, &xoperands[2], &xoperands[3]); + else + split_double (op2, &xoperands[3], &xoperands[2]); + if (xoperands[3] == const0_rtx && xoperands[0] == xoperands[1]) + output_asm_insn (\"sub %H1,%2,%H0\", xoperands); else - output_asm_insn (\"subcc %R1,%2,%R0\;subx %1,%3,%0\", xoperands); + output_asm_insn (\"subcc %L1,%3,%L0\;subx %H1,%2,%H0\", xoperands); return \"\"; } - return \"subcc %R1,%R2,%R0\;subx %1,%2,%0\"; + return \"subcc %L1,%L2,%L0\;subx %H1,%H2,%H0\"; }" [(set_attr "length" "2")]) @@ -3604,7 +3731,7 @@ [(set_attr "type" "ialu")]) (define_insn "*cmp_minus_cc" - [(set (reg:CC_NOOV 0) + [(set (reg:CC_NOOV 100) (compare:CC_NOOV (minus:SI (match_operand:SI 0 "register_operand" "r") (match_operand:SI 1 "arith_operand" "rI")) (const_int 0)))] @@ -3613,7 +3740,7 @@ [(set_attr "type" "compare")]) (define_insn "*cmp_minus_ccx" - [(set (reg:CCX_NOOV 0) + [(set (reg:CCX_NOOV 100) (compare:CCX_NOOV (minus:DI (match_operand:DI 0 "register_operand" "r") (match_operand:DI 1 "arith_double_operand" "rHI")) (const_int 0)))] @@ -3622,7 +3749,7 @@ [(set_attr "type" "compare")]) (define_insn "*cmp_minus_cc_set" - [(set (reg:CC_NOOV 0) + [(set (reg:CC_NOOV 100) (compare:CC_NOOV (minus:SI (match_operand:SI 1 "register_operand" "r") (match_operand:SI 2 "arith_operand" "rI")) (const_int 0))) @@ -3632,7 +3759,7 @@ "subcc %1,%2,%0") (define_insn "*cmp_minus_ccx_set" - [(set (reg:CCX_NOOV 0) + [(set (reg:CCX_NOOV 100) (compare:CCX_NOOV (minus:DI (match_operand:DI 1 "register_operand" "r") (match_operand:DI 2 "arith_double_operand" "rHI")) (const_int 0))) @@ -3651,7 +3778,7 @@ [(set (match_operand:SI 0 "register_operand" "=r") (mult:SI (match_operand:SI 1 "arith_operand" "%r") (match_operand:SI 2 "arith_operand" "rI")))] - "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS" + "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS" "smul %1,%2,%0" [(set_attr "type" "imul")]) @@ -3668,7 +3795,7 @@ [(set (match_operand:SI 0 "register_operand" "=r") (mult:SI 
(match_operand:SI 1 "arith_operand" "%r") (match_operand:SI 2 "arith_operand" "rI"))) - (set (reg:CC_NOOV 0) + (set (reg:CC_NOOV 100) (compare:CC_NOOV (mult:SI (match_dup 1) (match_dup 2)) (const_int 0)))] "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS" @@ -3679,7 +3806,7 @@ [(set (match_operand:DI 0 "register_operand" "") (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "")) (sign_extend:DI (match_operand:SI 2 "arith_operand" ""))))] - "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS" + "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS" " { if (CONSTANT_P (operands[2])) @@ -3693,9 +3820,14 @@ [(set (match_operand:DI 0 "register_operand" "=r") (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")) (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))] - "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS" - "smul %1,%2,%R0\;rd %%y,%0" - [(set_attr "length" "2")]) + "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS" + "* +{ + return TARGET_SPARCLET ? \"smuld %1,%2,%L0\" : \"smul %1,%2,%L0\;rd %%y,%H0\"; +}" + [(set (attr "length") + (if_then_else (eq_attr "isa" "sparclet") + (const_int 1) (const_int 2)))]) ;; Extra pattern, because sign_extend of a constant isn't valid. @@ -3703,9 +3835,14 @@ [(set (match_operand:DI 0 "register_operand" "=r") (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")) (match_operand:SI 2 "small_int" "I")))] - "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS" - "smul %1,%2,%R0\;rd %%y,%0" - [(set_attr "length" "2")]) + "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS" + "* +{ + return TARGET_SPARCLET ? \"smuld %1,%2,%L0\" : \"smul %1,%2,%L0\;rd %%y,%H0\"; +}" + [(set (attr "length") + (if_then_else (eq_attr "isa" "sparclet") + (const_int 1) (const_int 2)))]) (define_expand "smulsi3_highpart" [(set (match_operand:SI 0 "register_operand" "") @@ -3713,7 +3850,7 @@ (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "")) (sign_extend:DI (match_operand:SI 2 "arith_operand" ""))) (const_int 32))))] - "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS" + "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS" " { if (CONSTANT_P (operands[2])) @@ -3729,7 +3866,7 @@ (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")) (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))) (const_int 32))))] - "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS" + "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS" "smul %1,%2,%%g0\;rd %%y,%0" [(set_attr "length" "2")]) @@ -3739,7 +3876,7 @@ (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")) (match_operand:SI 2 "register_operand" "r")) (const_int 32))))] - "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS" + "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS" "smul %1,%2,%%g0\;rd %%y,%0" [(set_attr "length" "2")]) @@ -3747,7 +3884,7 @@ [(set (match_operand:DI 0 "register_operand" "") (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "")) (zero_extend:DI (match_operand:SI 2 "uns_arith_operand" ""))))] - "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS" + "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS" " { if (CONSTANT_P (operands[2])) @@ -3761,9 +3898,14 @@ [(set (match_operand:DI 0 
"register_operand" "=r") (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r")) (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))] - "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS" - "umul %1,%2,%R0\;rd %%y,%0" - [(set_attr "length" "2")]) + "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS" + "* +{ + return TARGET_SPARCLET ? \"umuld %1,%2,%L0\" : \"umul %1,%2,%L0\;rd %%y,%H0\"; +}" + [(set (attr "length") + (if_then_else (eq_attr "isa" "sparclet") + (const_int 1) (const_int 2)))]) ;; Extra pattern, because sign_extend of a constant isn't valid. @@ -3771,9 +3913,14 @@ [(set (match_operand:DI 0 "register_operand" "=r") (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r")) (match_operand:SI 2 "uns_small_int" "")))] - "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS" - "umul %1,%2,%R0\;rd %%y,%0" - [(set_attr "length" "2")]) + "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS" + "* +{ + return TARGET_SPARCLET ? \"umuld %1,%2,%L0\" : \"umul %1,%2,%L0\;rd %%y,%H0\"; +}" + [(set (attr "length") + (if_then_else (eq_attr "isa" "sparclet") + (const_int 1) (const_int 2)))]) (define_expand "umulsi3_highpart" [(set (match_operand:SI 0 "register_operand" "") @@ -3781,7 +3928,7 @@ (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "")) (zero_extend:DI (match_operand:SI 2 "uns_arith_operand" ""))) (const_int 32))))] - "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS" + "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS" " { if (CONSTANT_P (operands[2])) @@ -3797,7 +3944,7 @@ (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r")) (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))) (const_int 32))))] - "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS" + "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS" "umul %1,%2,%%g0\;rd %%y,%0" [(set_attr "length" "2")]) @@ -3807,7 +3954,7 @@ (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r")) (match_operand:SI 2 "uns_small_int" "")) (const_int 32))))] - "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS" + "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS" "umul %1,%2,%%g0\;rd %%y,%0" [(set_attr "length" "2")]) @@ -3844,7 +3991,7 @@ [(set (match_operand:SI 0 "register_operand" "=r") (div:SI (match_operand:SI 1 "register_operand" "r") (match_operand:SI 2 "arith_operand" "rI"))) - (set (reg:CC 0) + (set (reg:CC 100) (compare:CC (div:SI (match_dup 1) (match_dup 2)) (const_int 0))) (clobber (match_scratch:SI 3 "=&r"))] @@ -3889,7 +4036,7 @@ [(set (match_operand:SI 0 "register_operand" "=r") (udiv:SI (match_operand:SI 1 "register_operand" "r") (match_operand:SI 2 "arith_operand" "rI"))) - (set (reg:CC 0) + (set (reg:CC 100) (compare:CC (udiv:SI (match_dup 1) (match_dup 2)) (const_int 0)))] "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS" @@ -3903,10 +4050,43 @@ [(set (attr "length") (if_then_else (eq_attr "isa" "v9") (const_int 2) (const_int 5)))]) + +; sparclet multiply/accumulate insns + +(define_insn "*smacsi" + [(set (match_operand:SI 0 "register_operand" "=r") + (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "%r") + (match_operand:SI 2 "arith_operand" "rI")) + (match_operand:SI 3 "register_operand" "0")))] + "TARGET_SPARCLET" + "smac %1,%2,%0" + [(set_attr "type" "imul")]) + +(define_insn "*smacdi" + [(set (match_operand:DI 
0 "register_operand" "=r") + (plus:DI (mult:DI (sign_extend:DI + (match_operand:SI 1 "register_operand" "%r")) + (sign_extend:DI + (match_operand:SI 2 "register_operand" "r"))) + (match_operand:DI 3 "register_operand" "0")))] + "TARGET_SPARCLET" + "smacd %1,%2,%L0" + [(set_attr "type" "imul")]) + +(define_insn "*umacdi" + [(set (match_operand:DI 0 "register_operand" "=r") + (plus:DI (mult:DI (zero_extend:DI + (match_operand:SI 1 "register_operand" "%r")) + (zero_extend:DI + (match_operand:SI 2 "register_operand" "r"))) + (match_operand:DI 3 "register_operand" "0")))] + "TARGET_SPARCLET" + "umacd %1,%2,%L0" + [(set_attr "type" "imul")]) ;;- Boolean instructions -;; We define DImode `and` so with DImode `not` we can get -;; DImode `andn`. Other combinations are possible. +;; We define DImode `and' so with DImode `not' we can get +;; DImode `andn'. Other combinations are possible. (define_expand "anddi3" [(set (match_operand:DI 0 "register_operand" "") @@ -3924,25 +4104,17 @@ { rtx op2 = operands[2]; - /* If constant is positive, upper bits zeroed, otherwise unchanged. - Give the assembler a chance to pick the move instruction. */ - if (GET_CODE (op2) == CONST_INT) - { - int sign = INTVAL (op2); - if (sign < 0) - return \"mov %1,%0\;and %R1,%2,%R0\"; - return \"mov 0,%0\;and %R1,%2,%R0\"; - } - else if (GET_CODE (op2) == CONST_DOUBLE) + if (GET_CODE (op2) == CONST_INT + || GET_CODE (op2) == CONST_DOUBLE) { rtx xoperands[4]; xoperands[0] = operands[0]; xoperands[1] = operands[1]; - xoperands[2] = GEN_INT (CONST_DOUBLE_LOW (op2)); - xoperands[3] = GEN_INT (CONST_DOUBLE_HIGH (op2)); - /* We could optimize then operands[1] == operands[0] - and either half of the constant is -1. */ - output_asm_insn (\"and %R1,%2,%R0\;and %1,%3,%0\", xoperands); + if (WORDS_BIG_ENDIAN) + split_double (op2, &xoperands[2], &xoperands[3]); + else + split_double (op2, &xoperands[3], &xoperands[2]); + output_asm_insn (\"and %L1,%3,%L0\;and %H1,%2,%H0\", xoperands); return \"\"; } return \"and %1,%2,%0\;and %R1,%R2,%R0\"; @@ -3976,7 +4148,7 @@ (set (match_dup 0) (and:SI (not:SI (match_dup 3)) (match_dup 1)))] " { - operands[4] = gen_rtx (CONST_INT, VOIDmode, ~INTVAL (operands[2])); + operands[4] = GEN_INT (~INTVAL (operands[2])); }") (define_insn "*and_not_di_sp32" @@ -4018,25 +4190,17 @@ { rtx op2 = operands[2]; - /* If constant is positive, upper bits zeroed, otherwise unchanged. - Give the assembler a chance to pick the move instruction. */ - if (GET_CODE (op2) == CONST_INT) - { - int sign = INTVAL (op2); - if (sign < 0) - return \"mov -1,%0\;or %R1,%2,%R0\"; - return \"mov %1,%0\;or %R1,%2,%R0\"; - } - else if (GET_CODE (op2) == CONST_DOUBLE) + if (GET_CODE (op2) == CONST_INT + || GET_CODE (op2) == CONST_DOUBLE) { rtx xoperands[4]; xoperands[0] = operands[0]; xoperands[1] = operands[1]; - xoperands[2] = GEN_INT (CONST_DOUBLE_LOW (op2)); - xoperands[3] = GEN_INT (CONST_DOUBLE_HIGH (op2)); - /* We could optimize then operands[1] == operands[0] - and either half of the constant is 0. 
*/ - output_asm_insn (\"or %R1,%2,%R0\;or %1,%3,%0\", xoperands); + if (WORDS_BIG_ENDIAN) + split_double (op2, &xoperands[2], &xoperands[3]); + else + split_double (op2, &xoperands[3], &xoperands[2]); + output_asm_insn (\"or %L1,%3,%L0\;or %H1,%2,%H0\", xoperands); return \"\"; } return \"or %1,%2,%0\;or %R1,%R2,%R0\"; @@ -4070,7 +4234,7 @@ (set (match_dup 0) (ior:SI (not:SI (match_dup 3)) (match_dup 1)))] " { - operands[4] = gen_rtx (CONST_INT, VOIDmode, ~INTVAL (operands[2])); + operands[4] = GEN_INT (~INTVAL (operands[2])); }") (define_insn "*or_not_di_sp32" @@ -4112,25 +4276,17 @@ { rtx op2 = operands[2]; - /* If constant is positive, upper bits zeroed, otherwise unchanged. - Give the assembler a chance to pick the move instruction. */ - if (GET_CODE (op2) == CONST_INT) - { - int sign = INTVAL (op2); - if (sign < 0) - return \"xor %1,-1,%0\;xor %R1,%2,%R0\"; - return \"mov %1,%0\;xor %R1,%2,%R0\"; - } - else if (GET_CODE (op2) == CONST_DOUBLE) + if (GET_CODE (op2) == CONST_INT + || GET_CODE (op2) == CONST_DOUBLE) { rtx xoperands[4]; xoperands[0] = operands[0]; xoperands[1] = operands[1]; - xoperands[2] = GEN_INT (CONST_DOUBLE_LOW (op2)); - xoperands[3] = GEN_INT (CONST_DOUBLE_HIGH (op2)); - /* We could optimize then operands[1] == operands[0] - and either half of the constant is 0. */ - output_asm_insn (\"xor %R1,%2,%R0\;xor %1,%3,%0\", xoperands); + if (WORDS_BIG_ENDIAN) + split_double (op2, &xoperands[2], &xoperands[3]); + else + split_double (op2, &xoperands[3], &xoperands[2]); + output_asm_insn (\"xor %L1,%3,%L0\;xor %H1,%2,%H0\", xoperands); return \"\"; } return \"xor %1,%2,%0\;xor %R1,%R2,%R0\"; @@ -4164,7 +4320,7 @@ (set (match_dup 0) (not:SI (xor:SI (match_dup 3) (match_dup 1))))] " { - operands[4] = gen_rtx (CONST_INT, VOIDmode, ~INTVAL (operands[2])); + operands[4] = GEN_INT (~INTVAL (operands[2])); }") (define_split @@ -4179,7 +4335,7 @@ (set (match_dup 0) (xor:SI (match_dup 3) (match_dup 1)))] " { - operands[4] = gen_rtx (CONST_INT, VOIDmode, ~INTVAL (operands[2])); + operands[4] = GEN_INT (~INTVAL (operands[2])); }") ;; xnor patterns. Note that (a ^ ~b) == (~a ^ b) == ~(a ^ b). @@ -4211,7 +4367,7 @@ ;; want to set the condition code. 
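[Editorial note, not part of the patch] The xnor comment a few lines up states the identity (a ^ ~b) == (~a ^ b) == ~(a ^ b), which is what lets the xnorcc compare patterns in this area cover the complemented-operand forms with a single template. A quick standalone check of the identity:

/* Sketch only: verify (a ^ ~b) == (~a ^ b) == ~(a ^ b).  */
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint32_t a = 0x12345678u, b = 0x0f0f0f0fu;
  assert ((a ^ ~b) == (~a ^ b));
  assert ((a ^ ~b) == ~(a ^ b));
  assert ((~a ^ b) == ~(a ^ b));
  return 0;
}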
(define_insn "*cmp_cc_arith_op" - [(set (reg:CC 0) + [(set (reg:CC 100) (compare:CC (match_operator:SI 2 "cc_arithop" [(match_operand:SI 0 "arith_operand" "%r") @@ -4222,7 +4378,7 @@ [(set_attr "type" "compare")]) (define_insn "*cmp_ccx_arith_op" - [(set (reg:CCX 0) + [(set (reg:CCX 100) (compare:CCX (match_operator:DI 2 "cc_arithop" [(match_operand:DI 0 "arith_double_operand" "%r") @@ -4233,7 +4389,7 @@ [(set_attr "type" "compare")]) (define_insn "*cmp_cc_arith_op_set" - [(set (reg:CC 0) + [(set (reg:CC 100) (compare:CC (match_operator:SI 3 "cc_arithop" [(match_operand:SI 1 "arith_operand" "%r") @@ -4245,7 +4401,7 @@ "%A3cc %1,%2,%0") (define_insn "*cmp_ccx_arith_op_set" - [(set (reg:CCX 0) + [(set (reg:CCX 100) (compare:CCX (match_operator:DI 3 "cc_arithop" [(match_operand:DI 1 "arith_double_operand" "%r") @@ -4257,7 +4413,7 @@ "%A3cc %1,%2,%0") (define_insn "*cmp_cc_xor_not" - [(set (reg:CC 0) + [(set (reg:CC 100) (compare:CC (not:SI (xor:SI (match_operand:SI 0 "reg_or_0_operand" "%rJ") (match_operand:SI 1 "arith_operand" "rI"))) @@ -4267,7 +4423,7 @@ [(set_attr "type" "compare")]) (define_insn "*cmp_ccx_xor_not" - [(set (reg:CCX 0) + [(set (reg:CCX 100) (compare:CCX (not:DI (xor:DI (match_operand:DI 0 "reg_or_0_operand" "%rJ") (match_operand:DI 1 "arith_double_operand" "rHI"))) @@ -4277,7 +4433,7 @@ [(set_attr "type" "compare")]) (define_insn "*cmp_cc_xor_not_set" - [(set (reg:CC 0) + [(set (reg:CC 100) (compare:CC (not:SI (xor:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ") (match_operand:SI 2 "arith_operand" "rI"))) @@ -4288,7 +4444,7 @@ "xnorcc %r1,%2,%0") (define_insn "*cmp_ccx_xor_not_set" - [(set (reg:CCX 0) + [(set (reg:CCX 100) (compare:CCX (not:DI (xor:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ") (match_operand:DI 2 "arith_double_operand" "rHI"))) @@ -4299,7 +4455,7 @@ "xnorcc %r1,%2,%0") (define_insn "*cmp_cc_arith_op_not" - [(set (reg:CC 0) + [(set (reg:CC 100) (compare:CC (match_operator:SI 2 "cc_arithopn" [(not:SI (match_operand:SI 0 "arith_operand" "rI")) @@ -4310,7 +4466,7 @@ [(set_attr "type" "compare")]) (define_insn "*cmp_ccx_arith_op_not" - [(set (reg:CCX 0) + [(set (reg:CCX 100) (compare:CCX (match_operator:DI 2 "cc_arithopn" [(not:DI (match_operand:DI 0 "arith_double_operand" "rHI")) @@ -4321,7 +4477,7 @@ [(set_attr "type" "compare")]) (define_insn "*cmp_cc_arith_op_not_set" - [(set (reg:CC 0) + [(set (reg:CC 100) (compare:CC (match_operator:SI 3 "cc_arithopn" [(not:SI (match_operand:SI 1 "arith_operand" "rI")) @@ -4333,7 +4489,7 @@ "%B3cc %r2,%1,%0") (define_insn "*cmp_ccx_arith_op_not_set" - [(set (reg:CCX 0) + [(set (reg:CCX 100) (compare:CCX (match_operator:DI 3 "cc_arithopn" [(not:DI (match_operand:DI 1 "arith_double_operand" "rHI")) @@ -4358,7 +4514,8 @@ emit_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2, gen_rtx (SET, VOIDmode, operand0, gen_rtx (NEG, DImode, operand1)), - gen_rtx (CLOBBER, VOIDmode, gen_rtx (REG, SImode, 0))))); + gen_rtx (CLOBBER, VOIDmode, + gen_rtx (REG, SImode, SPARC_ICC_REG))))); DONE; } }") @@ -4366,10 +4523,16 @@ (define_insn "*negdi2_sp32" [(set (match_operand:DI 0 "register_operand" "=r") (neg:DI (match_operand:DI 1 "register_operand" "r"))) - (clobber (reg:SI 0))] + (clobber (reg:SI 100))] "! TARGET_ARCH64" - "subcc %%g0,%R1,%R0\;subx %%g0,%1,%0" + "* +{ + if (TARGET_LIVE_G0) + output_asm_insn (\"and %%g0,0,%%g0\", operands); + return \"subcc %%g0,%L1,%L0\;subx %%g0,%H1,%H0\"; +}" [(set_attr "type" "unary") + ;; ??? This is wrong for TARGET_LIVE_G0 but it's not critical. 
(set_attr "length" "2")]) (define_insn "*negdi2_sp64" @@ -4384,19 +4547,26 @@ [(set (match_operand:SI 0 "register_operand" "=r") (neg:SI (match_operand:SI 1 "arith_operand" "rI")))] "" - "sub %%g0,%1,%0" - [(set_attr "type" "unary")]) + "* +{ + if (TARGET_LIVE_G0) + return \"and %%g0,0,%%g0\;sub %%g0,%1,%0\"; + return \"sub %%g0,%1,%0\"; +}" + [(set_attr "type" "unary") + (set (attr "length") + (if_then_else (eq_attr "live_g0" "yes") (const_int 2) (const_int 1)))]) (define_insn "*cmp_cc_neg" - [(set (reg:CC_NOOV 0) + [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_operand:SI 0 "arith_operand" "rI")) (const_int 0)))] - "" + "! TARGET_LIVE_G0" "subcc %%g0,%0,%%g0" [(set_attr "type" "compare")]) (define_insn "*cmp_ccx_neg" - [(set (reg:CCX_NOOV 0) + [(set (reg:CCX_NOOV 100) (compare:CCX_NOOV (neg:DI (match_operand:DI 0 "arith_double_operand" "rHI")) (const_int 0)))] "TARGET_ARCH64" @@ -4404,17 +4574,17 @@ [(set_attr "type" "compare")]) (define_insn "*cmp_cc_set_neg" - [(set (reg:CC_NOOV 0) + [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_operand:SI 1 "arith_operand" "rI")) (const_int 0))) (set (match_operand:SI 0 "register_operand" "=r") (neg:SI (match_dup 1)))] - "" + "! TARGET_LIVE_G0" "subcc %%g0,%1,%0" [(set_attr "type" "unary")]) (define_insn "*cmp_ccx_set_neg" - [(set (reg:CCX_NOOV 0) + [(set (reg:CCX_NOOV 100) (compare:CCX_NOOV (neg:DI (match_operand:DI 1 "arith_double_operand" "rHI")) (const_int 0))) (set (match_operand:DI 0 "register_operand" "=r") @@ -4435,7 +4605,7 @@ [(set (match_operand:DI 0 "register_operand" "=r") (not:DI (match_operand:DI 1 "register_operand" "r")))] "! TARGET_ARCH64" - "xnor %%g0,%1,%0\;xnor %%g0,%R1,%R0" + "xnor %1,0,%0\;xnor %R1,0,%R0" [(set_attr "type" "unary") (set_attr "length" "2")]) @@ -4443,26 +4613,36 @@ [(set (match_operand:DI 0 "register_operand" "=r") (not:DI (match_operand:DI 1 "arith_double_operand" "rHI")))] "TARGET_ARCH64" - "xnor %%g0,%1,%0" + "xnor %1,0,%0" [(set_attr "type" "unary")]) (define_insn "one_cmplsi2" - [(set (match_operand:SI 0 "register_operand" "=r") - (not:SI (match_operand:SI 1 "arith_operand" "rI")))] + [(set (match_operand:SI 0 "register_operand" "=r,r") + (not:SI (match_operand:SI 1 "arith_operand" "r,I")))] "" - "xnor %%g0,%1,%0" - [(set_attr "type" "unary")]) + "* +{ + if (which_alternative == 0) + return \"xnor %1,0,%0\"; + if (TARGET_LIVE_G0) + output_asm_insn (\"and %%g0,0,%%g0\", operands); + return \"xnor %%g0,%1,%0\"; +}" + [(set_attr "type" "unary") + (set_attr_alternative "length" + [(const_int 1) + (if_then_else (eq_attr "live_g0" "yes") (const_int 2) (const_int 1))])]) (define_insn "*cmp_cc_not" - [(set (reg:CC 0) + [(set (reg:CC 100) (compare:CC (not:SI (match_operand:SI 0 "arith_operand" "rI")) (const_int 0)))] - "" + "! TARGET_LIVE_G0" "xnorcc %%g0,%0,%%g0" [(set_attr "type" "compare")]) (define_insn "*cmp_ccx_not" - [(set (reg:CCX 0) + [(set (reg:CCX 100) (compare:CCX (not:DI (match_operand:DI 0 "arith_double_operand" "rHI")) (const_int 0)))] "TARGET_ARCH64" @@ -4470,17 +4650,17 @@ [(set_attr "type" "compare")]) (define_insn "*cmp_cc_set_not" - [(set (reg:CC 0) + [(set (reg:CC 100) (compare:CC (not:SI (match_operand:SI 1 "arith_operand" "rI")) (const_int 0))) (set (match_operand:SI 0 "register_operand" "=r") (not:SI (match_dup 1)))] - "" + "! 
TARGET_LIVE_G0" "xnorcc %%g0,%1,%0" [(set_attr "type" "unary")]) (define_insn "*cmp_ccx_set_not" - [(set (reg:CCX 0) + [(set (reg:CCX 100) (compare:CCX (not:DI (match_operand:DI 1 "arith_double_operand" "rHI")) (const_int 0))) (set (match_operand:DI 0 "register_operand" "=r") @@ -4607,20 +4787,21 @@ (define_insn "negtf2" [(set (match_operand:TF 0 "register_operand" "=e,e") (neg:TF (match_operand:TF 1 "register_operand" "0,e")))] + ; We don't use quad float insns here so we don't need TARGET_HARD_QUAD. "TARGET_FPU" "* { - if (TARGET_V9) - return \"fnegd %1,%0\"; /* Can't use fnegs, won't work with upper regs. */ - else if (which_alternative == 0) - return \"fnegs %0,%0\"; + /* v9: can't use fnegs, won't work with upper regs. */ + if (which_alternative == 0) + return TARGET_V9 ? \"fnegd %0,%0\" : \"fnegs %0,%0\"; else - return \"fnegs %1,%0\;fmovs %R1,%R0\;fmovs %S1,%S0\;fmovs %T1,%T0\"; + return TARGET_V9 ? \"fnegd %1,%0\;fmovd %S1,%S0\" + : \"fnegs %1,%0\;fmovs %R1,%R0\;fmovs %S1,%S0\;fmovs %T1,%T0\"; }" [(set_attr "type" "fp") (set_attr_alternative "length" [(const_int 1) - (if_then_else (eq_attr "isa" "v9") (const_int 1) (const_int 4))])]) + (if_then_else (eq_attr "isa" "v9") (const_int 2) (const_int 4))])]) (define_insn "negdf2" [(set (match_operand:DF 0 "register_operand" "=e,e") @@ -4650,20 +4831,21 @@ (define_insn "abstf2" [(set (match_operand:TF 0 "register_operand" "=e,e") (abs:TF (match_operand:TF 1 "register_operand" "0,e")))] + ; We don't use quad float insns here so we don't need TARGET_HARD_QUAD. "TARGET_FPU" "* { - if (TARGET_V9) - return \"fabsd %1,%0\"; /* Can't use fabss, won't work with upper regs. */ - else if (which_alternative == 0) - return \"fabss %0,%0\"; + /* v9: can't use fabss, won't work with upper regs. */ + if (which_alternative == 0) + return TARGET_V9 ? \"fabsd %0,%0\" : \"fabss %0,%0\"; else - return \"fabss %1,%0\;fmovs %R1,%R0\;fmovs %S1,%S0\;fmovs %T1,%T0\"; + return TARGET_V9 ? 
\"fabsd %1,%0\;fmovd %S1,%S0\" + : \"fabss %1,%0\;fmovs %R1,%R0\;fmovs %S1,%S0\;fmovs %T1,%T0\"; }" [(set_attr "type" "fp") (set_attr_alternative "length" [(const_int 1) - (if_then_else (eq_attr "isa" "v9") (const_int 1) (const_int 4))])]) + (if_then_else (eq_attr "isa" "v9") (const_int 2) (const_int 4))])]) (define_insn "absdf2" [(set (match_operand:DF 0 "register_operand" "=e,e") @@ -4721,7 +4903,7 @@ "* { if (GET_CODE (operands[2]) == CONST_INT - && (unsigned) INTVAL (operands[2]) > 31) + && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31) operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f); return \"sll %1,%2,%0\"; @@ -4736,14 +4918,14 @@ "* { if (GET_CODE (operands[2]) == CONST_INT - && (unsigned) INTVAL (operands[2]) > 63) + && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31) operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f); return \"sllx %1,%2,%0\"; }") (define_insn "*cmp_cc_ashift_1" - [(set (reg:CC_NOOV 0) + [(set (reg:CC_NOOV 100) (compare:CC_NOOV (ashift:SI (match_operand:SI 0 "register_operand" "r") (const_int 1)) (const_int 0)))] @@ -4752,7 +4934,7 @@ [(set_attr "type" "compare")]) (define_insn "*cmp_cc_set_ashift_1" - [(set (reg:CC_NOOV 0) + [(set (reg:CC_NOOV 100) (compare:CC_NOOV (ashift:SI (match_operand:SI 1 "register_operand" "r") (const_int 1)) (const_int 0))) @@ -4769,7 +4951,7 @@ "* { if (GET_CODE (operands[2]) == CONST_INT - && (unsigned) INTVAL (operands[2]) > 31) + && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31) operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f); return \"sra %1,%2,%0\"; @@ -4784,7 +4966,7 @@ "* { if (GET_CODE (operands[2]) == CONST_INT - && (unsigned) INTVAL (operands[2]) > 63) + && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 63) operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f); return \"srax %1,%2,%0\"; @@ -4798,7 +4980,7 @@ "* { if (GET_CODE (operands[2]) == CONST_INT - && (unsigned) INTVAL (operands[2]) > 31) + && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31) operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f); return \"srl %1,%2,%0\"; @@ -4813,7 +4995,7 @@ "* { if (GET_CODE (operands[2]) == CONST_INT - && (unsigned) INTVAL (operands[2]) > 63) + && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 63) operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f); return \"srlx %1,%2,%0\"; @@ -4905,7 +5087,7 @@ if (GET_MODE (operands[0]) != FUNCTION_MODE) abort (); - if (GET_CODE (XEXP (operands[0], 0)) == LABEL_REF) + if (GET_CODE (XEXP (operands[0], 0)) == LABEL_REF) { /* This is really a PIC sequence. We want to represent it as a funny jump so it's delay slots can be filled. @@ -4915,18 +5097,22 @@ Why cannot we have delay slots filled if it were a CALL? */ if (! 
TARGET_ARCH64 && INTVAL (operands[3]) != 0) - emit_jump_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (3, - gen_rtx (SET, VOIDmode, pc_rtx, - XEXP (operands[0], 0)), - operands[3], - gen_rtx (CLOBBER, VOIDmode, - gen_rtx (REG, Pmode, 15))))); + emit_jump_insn + (gen_rtx (PARALLEL, VOIDmode, + gen_rtvec (3, + gen_rtx (SET, VOIDmode, pc_rtx, + XEXP (operands[0], 0)), + GEN_INT (INTVAL (operands[3]) & 0xfff), + gen_rtx (CLOBBER, VOIDmode, + gen_rtx (REG, Pmode, 15))))); else - emit_jump_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2, - gen_rtx (SET, VOIDmode, pc_rtx, - XEXP (operands[0], 0)), - gen_rtx (CLOBBER, VOIDmode, - gen_rtx (REG, Pmode, 15))))); + emit_jump_insn + (gen_rtx (PARALLEL, VOIDmode, + gen_rtvec (2, + gen_rtx (SET, VOIDmode, pc_rtx, + XEXP (operands[0], 0)), + gen_rtx (CLOBBER, VOIDmode, + gen_rtx (REG, Pmode, 15))))); goto finish_call; } @@ -4937,24 +5123,26 @@ means 6 on the sparc. */ #if 0 if (operands[2]) - nregs_rtx = gen_rtx (CONST_INT, VOIDmode, REGNO (operands[2]) - 8); + nregs_rtx = GEN_INT (REGNO (operands[2]) - 8); else - nregs_rtx = gen_rtx (CONST_INT, VOIDmode, 6); + nregs_rtx = GEN_INT (6); #else nregs_rtx = const0_rtx; #endif if (! TARGET_ARCH64 && INTVAL (operands[3]) != 0) - emit_call_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (3, - gen_rtx (CALL, VOIDmode, fn_rtx, nregs_rtx), - operands[3], - gen_rtx (CLOBBER, VOIDmode, - gen_rtx (REG, Pmode, 15))))); + emit_call_insn + (gen_rtx (PARALLEL, VOIDmode, + gen_rtvec (3, gen_rtx (CALL, VOIDmode, fn_rtx, nregs_rtx), + GEN_INT (INTVAL (operands[3]) & 0xfff), + gen_rtx (CLOBBER, VOIDmode, + gen_rtx (REG, Pmode, 15))))); else - emit_call_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2, - gen_rtx (CALL, VOIDmode, fn_rtx, nregs_rtx), - gen_rtx (CLOBBER, VOIDmode, - gen_rtx (REG, Pmode, 15))))); + emit_call_insn + (gen_rtx (PARALLEL, VOIDmode, + gen_rtvec (2, gen_rtx (CALL, VOIDmode, fn_rtx, nregs_rtx), + gen_rtx (CLOBBER, VOIDmode, + gen_rtx (REG, Pmode, 15))))); finish_call: #if 0 @@ -5017,7 +5205,7 @@ (match_operand 2 "immediate_operand" "") (clobber (reg:SI 15))] ;;- Do not use operand 1 for most machines. - "! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) > 0" + "! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0" "call %a0,%1\;nop\;unimp %2" [(set_attr "type" "call_no_delay_slot")]) @@ -5029,7 +5217,7 @@ (match_operand 2 "immediate_operand" "") (clobber (reg:SI 15))] ;;- Do not use operand 1 for most machines. - "! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) > 0" + "! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0" "call %a0,%1\;nop\;unimp %2" [(set_attr "type" "call_no_delay_slot")]) @@ -5224,7 +5412,8 @@ [(set_attr "type" "multi")]) (define_insn "return" - [(return)] + [(return) + (use (reg:SI 31))] "! TARGET_EPILOGUE" "* return output_return (operands);" [(set_attr "type" "multi")]) @@ -5286,6 +5475,7 @@ emit_insn (gen_rtx (USE, VOIDmode, static_chain_rtx)); /* Return, restoring reg window and jumping to goto handler. */ emit_insn (gen_goto_handler_and_restore ()); + emit_barrier (); DONE; }") @@ -5305,6 +5495,36 @@ [(set_attr "type" "misc") (set_attr "length" "2")]) +;; Pattern for use after a setjmp to store FP and the return register +;; into the stack area. 
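[Editorial note, not part of the patch] The comment just above introduces the new setjmp patterns; the 56/60 and 112/120 byte offsets they use in the next hunk correspond, assuming the standard SPARC frame layout, to slots 14 and 15 of the 16-slot register window save area at %sp, i.e. the usual %i6 (frame pointer) and %i7 (return address) save locations, with 4-byte slots for -m32 and 8-byte slots for -m64. A standalone sketch of that arithmetic:

/* Sketch only: reproduce the setjmp_32/setjmp_64 stack offsets.  */
#include <stdio.h>

int
main (void)
{
  const int fp_slot = 14, ra_slot = 15;   /* %i6 and %i7 */

  printf ("32-bit: fp at %%sp+%d, ra at %%sp+%d\n", fp_slot * 4, ra_slot * 4);
  printf ("64-bit: fp at %%sp+%d, ra at %%sp+%d\n", fp_slot * 8, ra_slot * 8);
  return 0;
}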
+ +(define_expand "setjmp" + [(const_int 0)] + "" + " +{ + if (TARGET_ARCH64) + emit_insn (gen_setjmp_64 ()); + else + emit_insn (gen_setjmp_32 ()); + + DONE; +}") + +(define_expand "setjmp_32" + [(set (mem:SI (plus:SI (reg:SI 14) (const_int 56))) (match_dup 0)) + (set (mem:SI (plus:SI (reg:SI 14) (const_int 60))) (reg:SI 31))] + "" + " +{ operands[0] = frame_pointer_rtx; }") + +(define_expand "setjmp_64" + [(set (mem:DI (plus:DI (reg:DI 14) (const_int 112))) (match_dup 0)) + (set (mem:DI (plus:DI (reg:DI 14) (const_int 120))) (reg:DI 31))] + "" + " +{ operands[0] = frame_pointer_rtx; }") + ;; Special pattern for the FLUSH instruction. (define_insn "flush" @@ -5324,8 +5544,13 @@ [(set (match_operand:SI 0 "register_operand" "=&r") (ffs:SI (match_operand:SI 1 "register_operand" "r"))) (clobber (match_scratch:SI 2 "=&r"))] - "TARGET_SPARCLITE" - "sub %%g0,%1,%0\;and %0,%1,%0\;scan %0,0,%0\;mov 32,%2\;sub %2,%0,%0\;sra %0,31,%2\;and %2,31,%2\;add %2,%0,%0" + "TARGET_SPARCLITE || TARGET_SPARCLET" + "* +{ + if (TARGET_LIVE_G0) + output_asm_insn (\"and %%g0,0,%%g0\", operands); + return \"sub %%g0,%1,%0\;and %0,%1,%0\;scan %0,0,%0\;mov 32,%2\;sub %2,%0,%0\;sra %0,31,%2\;and %2,31,%2\;add %2,%0,%0\"; +}" [(set_attr "type" "multi") (set_attr "length" "8")]) @@ -5337,7 +5562,7 @@ (ffs:DI (match_operand:DI 1 "register_operand" "r"))) (clobber (match_scratch:DI 2 "=&r"))] "TARGET_ARCH64" - "neg %1,%2\;not %2,%2\;xor %1,%2,%2\;popc %2,%0\;movrz %1,%%g0,%0" + "neg %1,%2\;not %2,%2\;xor %1,%2,%2\;popc %2,%0\;movrz %1,0,%0" [(set_attr "type" "multi") (set_attr "length" "5")]) @@ -5425,7 +5650,7 @@ (lo_sum:SI (match_dup 0) (match_dup 1)))] "") -;; LABEL_REFs are not modified by `legitimize_pic_address` +;; LABEL_REFs are not modified by `legitimize_pic_address' ;; so do not recurse infinitely in the PIC case. 
(define_split [(set (match_operand:SI 0 "register_operand" "") @@ -5448,44 +5673,44 @@ [(set (match_operand:SI 0 "register_operand" "") (ne:SI (match_operand:SI 1 "register_operand" "") (const_int 0))) - (clobber (reg:CC 0))] + (clobber (reg:CC 100))] "" - [(set (reg:CC_NOOV 0) (compare:CC_NOOV (neg:SI (match_dup 1)) - (const_int 0))) - (set (match_dup 0) (ltu:SI (reg:CC 0) (const_int 0)))] + [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1)) + (const_int 0))) + (set (match_dup 0) (ltu:SI (reg:CC 100) (const_int 0)))] "") (define_split [(set (match_operand:SI 0 "register_operand" "") (neg:SI (ne:SI (match_operand:SI 1 "register_operand" "") (const_int 0)))) - (clobber (reg:CC 0))] + (clobber (reg:CC 100))] "" - [(set (reg:CC_NOOV 0) (compare:CC_NOOV (neg:SI (match_dup 1)) - (const_int 0))) - (set (match_dup 0) (neg:SI (ltu:SI (reg:CC 0) (const_int 0))))] + [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1)) + (const_int 0))) + (set (match_dup 0) (neg:SI (ltu:SI (reg:CC 100) (const_int 0))))] "") (define_split [(set (match_operand:SI 0 "register_operand" "") (eq:SI (match_operand:SI 1 "register_operand" "") (const_int 0))) - (clobber (reg:CC 0))] + (clobber (reg:CC 100))] "" - [(set (reg:CC_NOOV 0) (compare:CC_NOOV (neg:SI (match_dup 1)) - (const_int 0))) - (set (match_dup 0) (geu:SI (reg:CC 0) (const_int 0)))] + [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1)) + (const_int 0))) + (set (match_dup 0) (geu:SI (reg:CC 100) (const_int 0)))] "") (define_split [(set (match_operand:SI 0 "register_operand" "") (neg:SI (eq:SI (match_operand:SI 1 "register_operand" "") (const_int 0)))) - (clobber (reg:CC 0))] + (clobber (reg:CC 100))] "" - [(set (reg:CC_NOOV 0) (compare:CC_NOOV (neg:SI (match_dup 1)) - (const_int 0))) - (set (match_dup 0) (neg:SI (geu:SI (reg:CC 0) (const_int 0))))] + [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1)) + (const_int 0))) + (set (match_dup 0) (neg:SI (geu:SI (reg:CC 100) (const_int 0))))] "") (define_split @@ -5493,11 +5718,11 @@ (plus:SI (ne:SI (match_operand:SI 1 "register_operand" "") (const_int 0)) (match_operand:SI 2 "register_operand" ""))) - (clobber (reg:CC 0))] + (clobber (reg:CC 100))] "" - [(set (reg:CC_NOOV 0) (compare:CC_NOOV (neg:SI (match_dup 1)) - (const_int 0))) - (set (match_dup 0) (plus:SI (ltu:SI (reg:CC 0) (const_int 0)) + [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1)) + (const_int 0))) + (set (match_dup 0) (plus:SI (ltu:SI (reg:CC 100) (const_int 0)) (match_dup 2)))] "") @@ -5506,12 +5731,12 @@ (minus:SI (match_operand:SI 2 "register_operand" "") (ne:SI (match_operand:SI 1 "register_operand" "") (const_int 0)))) - (clobber (reg:CC 0))] + (clobber (reg:CC 100))] "" - [(set (reg:CC_NOOV 0) (compare:CC_NOOV (neg:SI (match_dup 1)) - (const_int 0))) + [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1)) + (const_int 0))) (set (match_dup 0) (minus:SI (match_dup 2) - (ltu:SI (reg:CC 0) (const_int 0))))] + (ltu:SI (reg:CC 100) (const_int 0))))] "") (define_split @@ -5519,11 +5744,11 @@ (plus:SI (eq:SI (match_operand:SI 1 "register_operand" "") (const_int 0)) (match_operand:SI 2 "register_operand" ""))) - (clobber (reg:CC 0))] + (clobber (reg:CC 100))] "" - [(set (reg:CC_NOOV 0) (compare:CC_NOOV (neg:SI (match_dup 1)) - (const_int 0))) - (set (match_dup 0) (plus:SI (geu:SI (reg:CC 0) (const_int 0)) + [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1)) + (const_int 0))) + (set (match_dup 0) (plus:SI (geu:SI (reg:CC 100) (const_int 0)) (match_dup 2)))] "") @@ -5532,12 
+5757,12 @@ (minus:SI (match_operand:SI 2 "register_operand" "") (eq:SI (match_operand:SI 1 "register_operand" "") (const_int 0)))) - (clobber (reg:CC 0))] + (clobber (reg:CC 100))] "" - [(set (reg:CC_NOOV 0) (compare:CC_NOOV (neg:SI (match_dup 1)) - (const_int 0))) + [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1)) + (const_int 0))) (set (match_dup 0) (minus:SI (match_dup 2) - (geu:SI (reg:CC 0) (const_int 0))))] + (geu:SI (reg:CC 100) (const_int 0))))] "") ;; Peepholes go at the end. @@ -5641,25 +5866,25 @@ (define_peephole [(set (match_operand:SI 0 "register_operand" "=r") (match_operand:SI 1 "register_operand" "r")) - (set (reg:CC 0) + (set (reg:CC 100) (compare:CC (match_operand:SI 2 "register_operand" "r") (const_int 0)))] "(rtx_equal_p (operands[2], operands[0]) || rtx_equal_p (operands[2], operands[1])) && ! FP_REG_P (operands[0]) && ! FP_REG_P (operands[1])" - "orcc %1,%%g0,%0") + "orcc %1,0,%0") (define_peephole [(set (match_operand:DI 0 "register_operand" "=r") (match_operand:DI 1 "register_operand" "r")) - (set (reg:CCX 0) + (set (reg:CCX 100) (compare:CCX (match_operand:DI 2 "register_operand" "r") (const_int 0)))] "TARGET_ARCH64 && (rtx_equal_p (operands[2], operands[0]) || rtx_equal_p (operands[2], operands[1])) && ! FP_REG_P (operands[0]) && ! FP_REG_P (operands[1])" - "orcc %1,%%g0,%0") + "orcc %1,0,%0") ;; Do {sign,zero}-extended compares somewhat more efficiently. ;; ??? Is this now the Right Way to do this? Or will SCRATCH @@ -5670,44 +5895,44 @@ (match_operand:HI 1 "memory_operand" "")) (set (match_operand:SI 2 "register_operand" "") (sign_extend:SI (match_dup 0))) - (set (reg:CC 0) + (set (reg:CC 100) (compare:CC (match_dup 2) (const_int 0)))] "" - "ldsh %1,%0\;orcc %0,%%g0,%2") + "ldsh %1,%0\;orcc %0,0,%2") (define_peephole [(set (match_operand:HI 0 "register_operand" "") (match_operand:HI 1 "memory_operand" "")) (set (match_operand:DI 2 "register_operand" "") (sign_extend:DI (match_dup 0))) - (set (reg:CCX 0) + (set (reg:CCX 100) (compare:CCX (match_dup 2) (const_int 0)))] "TARGET_ARCH64" - "ldsh %1,%0\;orcc %0,%%g0,%2") + "ldsh %1,%0\;orcc %0,0,%2") (define_peephole [(set (match_operand:QI 0 "register_operand" "") (match_operand:QI 1 "memory_operand" "")) (set (match_operand:SI 2 "register_operand" "") (sign_extend:SI (match_dup 0))) - (set (reg:CC 0) + (set (reg:CC 100) (compare:CC (match_dup 2) (const_int 0)))] "" - "ldsb %1,%0\;orcc %0,%%g0,%2") + "ldsb %1,%0\;orcc %0,0,%2") (define_peephole [(set (match_operand:QI 0 "register_operand" "") (match_operand:QI 1 "memory_operand" "")) (set (match_operand:DI 2 "register_operand" "") (sign_extend:DI (match_dup 0))) - (set (reg:CCX 0) + (set (reg:CCX 100) (compare:CCX (match_dup 2) (const_int 0)))] "TARGET_ARCH64" - "ldsb %1,%0\;orcc %0,%%g0,%2") + "ldsb %1,%0\;orcc %0,0,%2") ;; Floating-point move peepholes ;; ??? v9: Do we want similar ones? @@ -5738,16 +5963,14 @@ "RTX_UNCHANGING_P (operands[1]) && reg_unused_after (operands[0], insn)" "ld [%0+%%lo(%a1)],%2") -;; Return peepholes. First the "normal" ones - -;; ??? There are QImode, HImode, and SImode versions of this pattern. -;; It might be possible to write one more general pattern instead of three. +;; Return peepholes. First the "normal" ones. +;; These are necessary to catch insns ending up in the epilogue delay list. (define_insn "*return_qi" [(set (match_operand:QI 0 "restore_operand" "") (match_operand:QI 1 "arith_operand" "rI")) (return)] - "! TARGET_EPILOGUE" + "! TARGET_EPILOGUE && ! TARGET_LIVE_G0" "* { if (! 
TARGET_ARCH64 && current_function_returns_struct) @@ -5761,7 +5984,7 @@ [(set (match_operand:HI 0 "restore_operand" "") (match_operand:HI 1 "arith_operand" "rI")) (return)] - "! TARGET_EPILOGUE" + "! TARGET_EPILOGUE && ! TARGET_LIVE_G0" "* { if (! TARGET_ARCH64 && current_function_returns_struct) @@ -5775,7 +5998,7 @@ [(set (match_operand:SI 0 "restore_operand" "") (match_operand:SI 1 "arith_operand" "rI")) (return)] - "! TARGET_EPILOGUE" + "! TARGET_EPILOGUE && ! TARGET_LIVE_G0" "* { if (! TARGET_ARCH64 && current_function_returns_struct) @@ -5792,7 +6015,7 @@ [(set (match_operand:SF 0 "restore_operand" "r") (match_operand:SF 1 "register_operand" "r")) (return)] - "! TARGET_FPU && ! TARGET_EPILOGUE" + "! TARGET_FPU && ! TARGET_EPILOGUE && ! TARGET_LIVE_G0" "* { if (! TARGET_ARCH64 && current_function_returns_struct) @@ -5807,7 +6030,9 @@ (plus:SI (match_operand:SI 1 "arith_operand" "%r") (match_operand:SI 2 "arith_operand" "rI"))) (return)] - "! TARGET_EPILOGUE" + "! TARGET_EPILOGUE && ! TARGET_LIVE_G0 + && (register_operand (operands[1], SImode) + || register_operand (operands[2], SImode))" "* { if (! TARGET_ARCH64 && current_function_returns_struct) @@ -5827,24 +6052,23 @@ (define_insn "*return_adddi" [(set (match_operand:DI 0 "restore_operand" "") - (plus:DI (match_operand:DI 1 "arith_operand" "%r") + (plus:DI (match_operand:DI 1 "arith_double_operand" "%r") (match_operand:DI 2 "arith_double_operand" "rHI"))) (return)] - "TARGET_ARCH64 && ! TARGET_EPILOGUE" + "TARGET_ARCH64 && ! TARGET_EPILOGUE + && (register_operand (operands[1], DImode) + || register_operand (operands[2], DImode))" "ret\;restore %r1,%2,%Y0" [(set_attr "type" "multi")]) -;; Turned off because it should never match (subtracting a constant -;; is turned into addition) and because it would do the wrong thing -;; when operand 2 is -4096 (--4096 == 4096 is not a valid immediate). -;;(define_insn "*minus_const" -;; [(set (match_operand:SI 0 "restore_operand" "") -;; (minus:SI (match_operand:SI 1 "register_operand" "r") -;; (match_operand:SI 2 "small_int" "I"))) -;; (return)] -;; "! TARGET_EPILOGUE" -;; "ret\;restore %1,-(%2),%Y0" -;; [(set_attr "type" "multi")]) +(define_insn "*return_subsi" + [(set (match_operand:SI 0 "restore_operand" "") + (minus:SI (match_operand:SI 1 "register_operand" "r") + (match_operand:SI 2 "small_int" "I"))) + (return)] + "! TARGET_EPILOGUE && INTVAL (operands[2]) != -4096" + "ret\;restore %1,%n2,%Y0" + [(set_attr "type" "multi")]) ;; The following pattern is only generated by delayed-branch scheduling, ;; when the insn winds up in the epilogue. @@ -5897,11 +6121,12 @@ ;; Other miscellaneous peepholes. +;; (reg:SI 100) is created by the {add,neg,sub}di patterns. (define_peephole [(parallel [(set (match_operand:SI 0 "register_operand" "=r") (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ") - (reg:SI 0))) - (clobber (reg:CC 0))]) - (set (reg:CC 0) (compare (match_dup 0) (const_int 0)))] + (reg:SI 100))) + (clobber (reg:CC 100))]) + (set (reg:CC 100) (compare (match_dup 0) (const_int 0)))] "" "subxcc %r1,0,%0")
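
[Editorial note, not part of the patch] A closing remark on the new *return_subsi pattern above: it rejects operand 2 == -4096 because the template emits the negated constant (%n2), and SPARC arithmetic immediates are 13-bit signed values in [-4096, 4095], so -4096 is the one small_int whose negation (4096) no longer fits. This is the same point the deleted *minus_const comment was making. A standalone sketch:

/* Sketch only: the simm13 range check behind the INTVAL != -4096 test.  */
#include <assert.h>

static int
fits_simm13 (long v)
{
  return v >= -4096 && v <= 4095;
}

int
main (void)
{
  assert (fits_simm13 (-4096));       /* valid as written ...              */
  assert (!fits_simm13 (-(-4096)));   /* ... but not once negated by %n2.  */
  assert (fits_simm13 (-(-4095)));    /* every other small_int stays legal. */
  return 0;
}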