;;- Machine description for HP PA-RISC architecture for GCC compiler
;; Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
-;; 2002, 2003, 2004 Free Software Foundation, Inc.
+;; 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010
+;; Free Software Foundation, Inc.
;; Contributed by the Center for Software Science at the University
;; of Utah.
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
-;; the Free Software Foundation; either version 2, or (at your option)
+;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;; GCC is distributed in the hope that it will be useful,
;; GNU General Public License for more details.
;; You should have received a copy of the GNU General Public License
-;; along with GCC; see the file COPYING. If not, write to
-;; the Free Software Foundation, 59 Temple Place - Suite 330,
-;; Boston, MA 02111-1307, USA.
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
;; This gcc Version 2 machine description is inspired by sparc.md and
;; mips.md.
;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+;; Uses of UNSPEC in this file:
+
+(define_constants
+ [(UNSPEC_CFFC 0) ; canonicalize_funcptr_for_compare
+ (UNSPEC_GOTO 1) ; indirect_goto
+ (UNSPEC_DLTIND14R 2) ;
+ (UNSPEC_TP 3)
+ (UNSPEC_TLSGD 4)
+ (UNSPEC_TLSLDM 5)
+ (UNSPEC_TLSLDO 6)
+ (UNSPEC_TLSLDBASE 7)
+ (UNSPEC_TLSIE 8)
+ (UNSPEC_TLSLE 9)
+ (UNSPEC_TLSGD_PIC 10)
+ (UNSPEC_TLSLDM_PIC 11)
+ (UNSPEC_TLSIE_PIC 12)
+ ])
+
+;; UNSPEC_VOLATILE:
+
+(define_constants
+ [(UNSPECV_BLOCKAGE 0) ; blockage
+ (UNSPECV_DCACHE 1) ; dcacheflush
+ (UNSPECV_ICACHE 2) ; icacheflush
+ (UNSPECV_OPC 3) ; outline_prologue_call
+ (UNSPECV_OEC 4) ; outline_epilogue_call
+ (UNSPECV_LONGJMP 5) ; builtin_longjmp
+ ])
+
+;; Maximum pc-relative branch offsets.
+
+;; These numbers are a bit smaller than the maximum allowable offsets
+;; so that a few instructions may be inserted before the actual branch.
+
+(define_constants
+ [(MAX_12BIT_OFFSET 8184) ; 12-bit branch
+ (MAX_17BIT_OFFSET 262100) ; 17-bit branch
+ ])
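+
+;; (As a rough sanity check on the numbers above: the 12-bit conditional
+;; branch displacement is a signed word offset, reaching roughly +/-8192
+;; bytes, so 8184 leaves a couple of instruction words of slack; the
+;; 17-bit displacement reaches roughly +/-262144 bytes, so 262100 leaves
+;; about 44 bytes of headroom for inserted instructions.)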
+
+;; Mode and code iterators
+
+;; This mode iterator allows :P to be used for patterns that operate on
+;; pointer-sized quantities. Exactly one of the two alternatives will match.
+(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
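+
+;; For example, a pattern written with :P operands is instantiated as an
+;; SImode pattern for the 32-bit runtime and as a DImode pattern when
+;; TARGET_64BIT makes Pmode DImode, so only one instantiation can match.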
+
+;; This attribute defines the condition prefix for word and double word
+;; add, compare, subtract and logical instructions.
+(define_mode_attr dwc [(SI "") (DI "*")])
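+
+;; In PA 2.0 assembly a doubleword condition is written with a leading "*"
+;; completer (for instance "add,*=" versus the word form "add,="); <dwc>
+;; supplies that "*" so a single template can spell the condition for
+;; either width.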
+
;; Insn type. Used to default other attribute values.
;; type "unary" insns have one input operand (1) and one output operand (0)
;; type "binary" insns have two input operands (1,2) and one output (0)
(define_attr "type"
- "move,unary,binary,shift,nullshift,compare,load,store,uncond_branch,btable_branch,branch,cbranch,fbranch,call,dyncall,fpload,fpstore,fpalu,fpcc,fpmulsgl,fpmuldbl,fpdivsgl,fpdivdbl,fpsqrtsgl,fpsqrtdbl,multi,milli,parallel_branch"
+ "move,unary,binary,shift,nullshift,compare,load,store,uncond_branch,btable_branch,branch,cbranch,fbranch,call,dyncall,fpload,fpstore,fpalu,fpcc,fpmulsgl,fpmuldbl,fpdivsgl,fpdivdbl,fpsqrtsgl,fpsqrtdbl,multi,milli,parallel_branch,fpstore_load,store_fpload"
(const_string "binary"))
(define_attr "pa_combine_type"
(define_delay (eq_attr "type" "btable_branch,branch,parallel_branch")
[(eq_attr "in_branch_delay" "true") (nil) (nil)])
-;; Floating point conditional branch delay slot description and
+;; Floating point conditional branch delay slot description.
(define_delay (eq_attr "type" "fbranch")
[(eq_attr "in_branch_delay" "true")
(eq_attr "in_nullified_branch_delay" "true")
(eq_attr "cpu" "700"))
"mem_700*3")
-(define_insn_reservation "W11" 1
- (and (eq_attr "type" "!fpcc,fpalu,fpmulsgl,fpmuldbl,fpdivsgl,fpdivdbl,fpsqrtsgl,fpsqrtdbl,load,fpload,store,fpstore")
+(define_insn_reservation "W11" 5
+ (and (eq_attr "type" "fpstore_load")
+ (eq_attr "cpu" "700"))
+ "mem_700*5")
+
+(define_insn_reservation "W12" 6
+ (and (eq_attr "type" "store_fpload")
+ (eq_attr "cpu" "700"))
+ "mem_700*6")
+
+(define_insn_reservation "W13" 1
+ (and (eq_attr "type" "!fpcc,fpalu,fpmulsgl,fpmuldbl,fpdivsgl,fpdivdbl,fpsqrtsgl,fpsqrtdbl,load,fpload,store,fpstore,fpstore_load,store_fpload")
(eq_attr "cpu" "700"))
"dummy_700")
;; We have a bypass for all computations in the FP unit which feed an
;; FP store as long as the sizes are the same.
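+;; (A define_bypass latency of N means that when one of the listed producer
+;; reservations feeds one of the listed consumer reservations, the scheduler
+;; uses N cycles for that dependence instead of the producer's normal
+;; latency, provided the guard hppa_fpstore_bypass_p accepts the insn pair.)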
-(define_bypass 2 "W1,W2" "W10" "hppa_fpstore_bypass_p")
-(define_bypass 9 "W3" "W10" "hppa_fpstore_bypass_p")
-(define_bypass 11 "W4" "W10" "hppa_fpstore_bypass_p")
-(define_bypass 13 "W5" "W10" "hppa_fpstore_bypass_p")
-(define_bypass 17 "W6" "W10" "hppa_fpstore_bypass_p")
+(define_bypass 2 "W1,W2" "W10,W11" "hppa_fpstore_bypass_p")
+(define_bypass 9 "W3" "W10,W11" "hppa_fpstore_bypass_p")
+(define_bypass 11 "W4" "W10,W11" "hppa_fpstore_bypass_p")
+(define_bypass 13 "W5" "W10,W11" "hppa_fpstore_bypass_p")
+(define_bypass 17 "W6" "W10,W11" "hppa_fpstore_bypass_p")
;; We have an "anti-bypass" for FP loads which feed an FP store.
-(define_bypass 4 "W8" "W10" "hppa_fpstore_bypass_p")
+(define_bypass 4 "W8,W12" "W10,W11" "hppa_fpstore_bypass_p")
;; Function units for the 7100 and 7150. The 7100/7150 can dual-issue
;; floating point computations with non-floating point computations (fp loads
(eq_attr "cpu" "7100"))
"i_7100+mem_7100,mem_7100")
-(define_insn_reservation "X7" 1
- (and (eq_attr "type" "!fpcc,fpalu,fpmulsgl,fpmuldbl,fpdivsgl,fpsqrtsgl,fpdivdbl,fpsqrtdbl,load,fpload,store,fpstore")
+(define_insn_reservation "X7" 4
+ (and (eq_attr "type" "fpstore_load")
+ (eq_attr "cpu" "7100"))
+ "i_7100+mem_7100,mem_7100*3")
+
+(define_insn_reservation "X8" 4
+ (and (eq_attr "type" "store_fpload")
+ (eq_attr "cpu" "7100"))
+ "i_7100+mem_7100,mem_7100*3")
+
+(define_insn_reservation "X9" 1
+ (and (eq_attr "type" "!fpcc,fpalu,fpmulsgl,fpmuldbl,fpdivsgl,fpsqrtsgl,fpdivdbl,fpsqrtdbl,load,fpload,store,fpstore,fpstore_load,store_fpload")
(eq_attr "cpu" "7100"))
"i_7100")
;; We have a bypass for all computations in the FP unit which feed an
;; FP store as long as the sizes are the same.
-(define_bypass 1 "X0" "X6" "hppa_fpstore_bypass_p")
-(define_bypass 7 "X1" "X6" "hppa_fpstore_bypass_p")
-(define_bypass 14 "X2" "X6" "hppa_fpstore_bypass_p")
+(define_bypass 1 "X0" "X6,X7" "hppa_fpstore_bypass_p")
+(define_bypass 7 "X1" "X6,X7" "hppa_fpstore_bypass_p")
+(define_bypass 14 "X2" "X6,X7" "hppa_fpstore_bypass_p")
;; We have an "anti-bypass" for FP loads which feed an FP store.
-(define_bypass 3 "X4" "X6" "hppa_fpstore_bypass_p")
+(define_bypass 3 "X4,X8" "X6,X7" "hppa_fpstore_bypass_p")
;; The 7100LC has three floating-point units: ALU, MUL, and DIV.
;; There's no value in modeling the ALU and MUL separately though
(eq_attr "cpu" "7100LC"))
"i1_7100lc+mem_7100lc,mem_7100lc")
-(define_insn_reservation "Y6" 1
+(define_insn_reservation "Y6" 4
+ (and (eq_attr "type" "fpstore_load")
+ (eq_attr "cpu" "7100LC"))
+ "i1_7100lc+mem_7100lc,mem_7100lc*3")
+
+(define_insn_reservation "Y7" 4
+ (and (eq_attr "type" "store_fpload")
+ (eq_attr "cpu" "7100LC"))
+ "i1_7100lc+mem_7100lc,mem_7100lc*3")
+
+(define_insn_reservation "Y8" 1
(and (eq_attr "type" "shift,nullshift")
(eq_attr "cpu" "7100LC,7200,7300"))
"i1_7100lc")
-(define_insn_reservation "Y7" 1
+(define_insn_reservation "Y9" 1
(and (eq_attr "type" "!fpcc,fpalu,fpmulsgl,fpmuldbl,fpdivsgl,fpsqrtsgl,fpdivdbl,fpsqrtdbl,load,fpload,store,fpstore,shift,nullshift")
(eq_attr "cpu" "7100LC,7200,7300"))
"(i0_7100lc|i1_7100lc)")
;; The 7200 has a store-load penalty
-(define_insn_reservation "Y8" 2
+(define_insn_reservation "Y10" 2
(and (eq_attr "type" "store")
(eq_attr "cpu" "7200"))
"i1_7100lc,mem_7100lc")
-(define_insn_reservation "Y9" 2
+(define_insn_reservation "Y11" 2
(and (eq_attr "type" "fpstore")
(eq_attr "cpu" "7200"))
"i1_7100lc,mem_7100lc")
+(define_insn_reservation "Y12" 4
+ (and (eq_attr "type" "fpstore_load")
+ (eq_attr "cpu" "7200"))
+ "i1_7100lc,mem_7100lc,i1_7100lc+mem_7100lc")
+
+(define_insn_reservation "Y13" 4
+ (and (eq_attr "type" "store_fpload")
+ (eq_attr "cpu" "7200"))
+ "i1_7100lc,mem_7100lc,i1_7100lc+mem_7100lc")
+
;; The 7300 has no penalty for store-store or store-load
-(define_insn_reservation "Y10" 2
+(define_insn_reservation "Y14" 2
(and (eq_attr "type" "store")
(eq_attr "cpu" "7300"))
"i1_7100lc")
-(define_insn_reservation "Y11" 2
+(define_insn_reservation "Y15" 2
(and (eq_attr "type" "fpstore")
(eq_attr "cpu" "7300"))
"i1_7100lc")
+(define_insn_reservation "Y16" 4
+ (and (eq_attr "type" "fpstore_load")
+ (eq_attr "cpu" "7300"))
+ "i1_7100lc,i1_7100lc+mem_7100lc")
+
+(define_insn_reservation "Y17" 4
+ (and (eq_attr "type" "store_fpload")
+ (eq_attr "cpu" "7300"))
+ "i1_7100lc,i1_7100lc+mem_7100lc")
+
;; We have an "anti-bypass" for FP loads which feed an FP store.
-(define_bypass 3 "Y3" "Y5,Y9,Y11" "hppa_fpstore_bypass_p")
+(define_bypass 3 "Y3,Y7,Y13,Y17" "Y5,Y6,Y11,Y12,Y15,Y16" "hppa_fpstore_bypass_p")
;; Scheduling for the PA8000 is somewhat different than scheduling for a
;; traditional architecture.
(eq_attr "cpu" "8000"))
"im_8000,rm_8000+store_8000")
+(define_insn_reservation "Z2" 0
+ (and (eq_attr "type" "fpstore_load,store_fpload")
+ (eq_attr "cpu" "8000"))
+ "im_8000,rm_8000+store_8000,im_8000,rm_8000")
+
;; We can issue and retire two non-memory operations per cycle with
;; a few exceptions (branches). This group catches those we want
;; to assume have zero latency.
-(define_insn_reservation "Z2" 0
+(define_insn_reservation "Z3" 0
(and
- (eq_attr "type" "!load,fpload,store,fpstore,uncond_branch,btable_branch,branch,cbranch,fbranch,call,dyncall,multi,milli,parallel_branch,fpcc,fpalu,fpmulsgl,fpmuldbl,fpsqrtsgl,fpsqrtdbl,fpdivsgl,fpdivdbl")
+ (eq_attr "type" "!load,fpload,store,fpstore,uncond_branch,btable_branch,branch,cbranch,fbranch,call,dyncall,multi,milli,parallel_branch,fpcc,fpalu,fpmulsgl,fpmuldbl,fpsqrtsgl,fpsqrtdbl,fpdivsgl,fpdivdbl,fpstore_load,store_fpload")
(eq_attr "cpu" "8000"))
"inm_8000,rnm_8000")
;; Branches use both slots in the non-memory issue and
;; retirement unit.
-(define_insn_reservation "Z3" 0
+(define_insn_reservation "Z4" 0
(and
(eq_attr "type" "uncond_branch,btable_branch,branch,cbranch,fbranch,call,dyncall,multi,milli,parallel_branch")
(eq_attr "cpu" "8000"))
;; They can issue/retire two at a time in the non-memory
;; units. We fix their latency at 2 cycles and they
;; are fully pipelined.
-(define_insn_reservation "Z4" 1
+(define_insn_reservation "Z5" 1
(and
(eq_attr "type" "fpcc,fpalu,fpmulsgl,fpmuldbl")
(eq_attr "cpu" "8000"))
;; The fdivsqrt units are not pipelined and have a very long latency.
;; To keep the DFA from exploding, we do not show all the
;; reservations for the divsqrt unit.
-(define_insn_reservation "Z5" 17
+(define_insn_reservation "Z6" 17
(and
(eq_attr "type" "fpdivsgl,fpsqrtsgl")
(eq_attr "cpu" "8000"))
"inm_8000,fdivsqrt_8000*6,rnm_8000")
-(define_insn_reservation "Z6" 31
+(define_insn_reservation "Z7" 31
(and
(eq_attr "type" "fpdivdbl,fpsqrtdbl")
(eq_attr "cpu" "8000"))
"inm_8000,fdivsqrt_8000*6,rnm_8000")
+;; Operand and operator predicates and constraints
+(include "predicates.md")
+(include "constraints.md")
\f
;; Compare instructions.
;; This controls RTL generation and register allocation.
-;; We generate RTL for comparisons and branches by having the cmpxx
-;; patterns store away the operands. Then, the scc and bcc patterns
-;; emit RTL for both the compare and the branch.
-;;
-
-(define_expand "cmpdi"
- [(set (reg:CC 0)
- (compare:CC (match_operand:DI 0 "reg_or_0_operand" "")
- (match_operand:DI 1 "register_operand" "")))]
- "TARGET_64BIT"
-
- "
-{
- hppa_compare_op0 = operands[0];
- hppa_compare_op1 = operands[1];
- hppa_branch_type = CMP_SI;
- DONE;
-}")
-
-(define_expand "cmpsi"
- [(set (reg:CC 0)
- (compare:CC (match_operand:SI 0 "reg_or_0_operand" "")
- (match_operand:SI 1 "arith5_operand" "")))]
- ""
- "
-{
- hppa_compare_op0 = operands[0];
- hppa_compare_op1 = operands[1];
- hppa_branch_type = CMP_SI;
- DONE;
-}")
-
-(define_expand "cmpsf"
- [(set (reg:CCFP 0)
- (compare:CCFP (match_operand:SF 0 "reg_or_0_operand" "")
- (match_operand:SF 1 "reg_or_0_operand" "")))]
- "! TARGET_SOFT_FLOAT"
- "
-{
- hppa_compare_op0 = operands[0];
- hppa_compare_op1 = operands[1];
- hppa_branch_type = CMP_SF;
- DONE;
-}")
-
-(define_expand "cmpdf"
- [(set (reg:CCFP 0)
- (compare:CCFP (match_operand:DF 0 "reg_or_0_operand" "")
- (match_operand:DF 1 "reg_or_0_operand" "")))]
- "! TARGET_SOFT_FLOAT"
- "
-{
- hppa_compare_op0 = operands[0];
- hppa_compare_op1 = operands[1];
- hppa_branch_type = CMP_DF;
- DONE;
-}")
-
(define_insn ""
[(set (reg:CCFP 0)
(match_operator:CCFP 2 "comparison_operator"
;; scc insns.
-(define_expand "seq"
- [(set (match_operand:SI 0 "register_operand" "")
- (eq:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- /* set up operands from compare. */
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
- /* fall through and generate default code */
-}")
-
-(define_expand "sne"
- [(set (match_operand:SI 0 "register_operand" "")
- (ne:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "slt"
- [(set (match_operand:SI 0 "register_operand" "")
- (lt:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sgt"
- [(set (match_operand:SI 0 "register_operand" "")
- (gt:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sle"
- [(set (match_operand:SI 0 "register_operand" "")
- (le:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sge"
- [(set (match_operand:SI 0 "register_operand" "")
- (ge:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sltu"
- [(set (match_operand:SI 0 "register_operand" "")
- (ltu:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sgtu"
- [(set (match_operand:SI 0 "register_operand" "")
- (gtu:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sleu"
- [(set (match_operand:SI 0 "register_operand" "")
- (leu:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sgeu"
- [(set (match_operand:SI 0 "register_operand" "")
- (geu:SI (match_dup 1)
- (match_dup 2)))]
+(define_expand "cstoresi4"
+ [(set (match_operand:SI 0 "register_operand")
+ (match_operator:SI 1 "ordered_comparison_operator"
+ [(match_operand:SI 2 "reg_or_0_operand" "")
+ (match_operand:SI 3 "arith5_operand" "")]))]
"!TARGET_64BIT"
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
+ "")
;; Instruction canonicalization puts immediate operands second, which
;; is the reverse of what we want.
(define_expand "movsicc"
[(set (match_operand:SI 0 "register_operand" "")
(if_then_else:SI
- (match_operator 1 "comparison_operator"
- [(match_dup 4)
- (match_dup 5)])
+ (match_operand 1 "comparison_operator" "")
(match_operand:SI 2 "reg_or_cint_move_operand" "")
(match_operand:SI 3 "reg_or_cint_move_operand" "")))]
""
"
{
- enum rtx_code code = GET_CODE (operands[1]);
-
- if (hppa_branch_type != CMP_SI)
+ if (GET_MODE (XEXP (operands[1], 0)) != SImode
+ || GET_MODE (XEXP (operands[1], 0)) != GET_MODE (XEXP (operands[1], 1)))
FAIL;
-
- if (GET_MODE (hppa_compare_op0) != GET_MODE (hppa_compare_op1)
- || GET_MODE (hppa_compare_op0) != GET_MODE (operands[0]))
- FAIL;
-
- /* operands[1] is currently the result of compare_from_rtx. We want to
- emit a compare of the original operands. */
- operands[1] = gen_rtx_fmt_ee (code, SImode, hppa_compare_op0, hppa_compare_op1);
- operands[4] = hppa_compare_op0;
- operands[5] = hppa_compare_op1;
}")
;; We used to accept any register for op1.
(define_expand "movdicc"
[(set (match_operand:DI 0 "register_operand" "")
(if_then_else:DI
- (match_operator 1 "comparison_operator"
- [(match_dup 4)
- (match_dup 5)])
+ (match_operand 1 "comparison_operator" "")
(match_operand:DI 2 "reg_or_cint_move_operand" "")
(match_operand:DI 3 "reg_or_cint_move_operand" "")))]
"TARGET_64BIT"
"
{
- enum rtx_code code = GET_CODE (operands[1]);
-
- if (hppa_branch_type != CMP_SI)
+ if (GET_MODE (XEXP (operands[1], 0)) != DImode
+ || GET_MODE (XEXP (operands[1], 0)) != GET_MODE (XEXP (operands[1], 1)))
FAIL;
-
- if (GET_MODE (hppa_compare_op0) != GET_MODE (hppa_compare_op1)
- || GET_MODE (hppa_compare_op0) != GET_MODE (operands[0]))
- FAIL;
-
- /* operands[1] is currently the result of compare_from_rtx. We want to
- emit a compare of the original operands. */
- operands[1] = gen_rtx_fmt_ee (code, DImode, hppa_compare_op0, hppa_compare_op1);
- operands[4] = hppa_compare_op0;
- operands[5] = hppa_compare_op1;
}")
; We need the first constraint alternative in order to avoid
;; Conditional Branches
-(define_expand "beq"
- [(set (pc)
- (if_then_else (eq (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (EQ, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- /* set up operands from compare. */
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
- /* fall through and generate default code */
-}")
-
-(define_expand "bne"
- [(set (pc)
- (if_then_else (ne (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (NE, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bgt"
- [(set (pc)
- (if_then_else (gt (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (GT, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "blt"
- [(set (pc)
- (if_then_else (lt (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (LT, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bge"
- [(set (pc)
- (if_then_else (ge (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (GE, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "ble"
+(define_expand "cbranchdi4"
[(set (pc)
- (if_then_else (le (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+ [(match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "register_operand" "")])
+ (label_ref (match_operand 3 "" ""))
(pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (LE, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bgtu"
- [(set (pc)
- (if_then_else (gtu (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bltu"
- [(set (pc)
- (if_then_else (ltu (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bgeu"
- [(set (pc)
- (if_then_else (geu (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bleu"
- [(set (pc)
- (if_then_else (leu (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bltgt"
- [(set (pc)
- (if_then_else (ltgt (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (LTGT, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
-
-(define_expand "bunle"
- [(set (pc)
- (if_then_else (unle (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNLE, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
-
-(define_expand "bunlt"
- [(set (pc)
- (if_then_else (unlt (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNLT, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
-
-(define_expand "bunge"
- [(set (pc)
- (if_then_else (unge (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNGE, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
+ "TARGET_64BIT"
+ "")
-(define_expand "bungt"
+(define_expand "cbranchsi4"
[(set (pc)
- (if_then_else (ungt (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+ [(match_operand:SI 1 "reg_or_0_operand" "")
+ (match_operand:SI 2 "arith5_operand" "")])
+ (label_ref (match_operand 3 "" ""))
(pc)))]
""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNGT, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
+ "")
-(define_expand "buneq"
+(define_expand "cbranchsf4"
[(set (pc)
- (if_then_else (uneq (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand:SF 1 "reg_or_0_operand" "")
+ (match_operand:SF 2 "reg_or_0_operand" "")])
+ (label_ref (match_operand 3 "" ""))
(pc)))]
""
"
{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNEQ, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
+ emit_bcond_fp (operands);
DONE;
}")
-(define_expand "bunordered"
- [(set (pc)
- (if_then_else (unordered (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNORDERED, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
-(define_expand "bordered"
+(define_expand "cbranchdf4"
[(set (pc)
- (if_then_else (ordered (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand:DF 1 "reg_or_0_operand" "")
+ (match_operand:DF 2 "reg_or_0_operand" "")])
+ (label_ref (match_operand 3 "" ""))
(pc)))]
""
"
{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (ORDERED, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
+ emit_bcond_fp (operands);
DONE;
}")
""
"*
{
- return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn);
+ return output_cbranch (operands, 0, insn);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
(cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 8184))
+ (const_int MAX_12BIT_OFFSET))
(const_int 4)
(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 262100))
+ (const_int MAX_17BIT_OFFSET))
(const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
(eq (symbol_ref "flag_pic") (const_int 0))
(const_int 20)]
(const_int 28)))])
""
"*
{
- return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn);
+ return output_cbranch (operands, 1, insn);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
(cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 8184))
+ (const_int MAX_12BIT_OFFSET))
(const_int 4)
(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 262100))
+ (const_int MAX_17BIT_OFFSET))
(const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
(eq (symbol_ref "flag_pic") (const_int 0))
(const_int 20)]
(const_int 28)))])
"TARGET_64BIT"
"*
{
- return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn);
+ return output_cbranch (operands, 0, insn);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
(cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 8184))
+ (const_int MAX_12BIT_OFFSET))
(const_int 4)
(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 262100))
+ (const_int MAX_17BIT_OFFSET))
(const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
(eq (symbol_ref "flag_pic") (const_int 0))
(const_int 20)]
(const_int 28)))])
"TARGET_64BIT"
"*
{
- return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn);
+ return output_cbranch (operands, 1, insn);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
(cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 8184))
+ (const_int MAX_12BIT_OFFSET))
(const_int 4)
(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 262100))
+ (const_int MAX_17BIT_OFFSET))
(const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
(eq (symbol_ref "flag_pic") (const_int 0))
(const_int 20)]
(const_int 28)))])
"TARGET_64BIT"
"*
{
- return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn);
+ return output_cbranch (operands, 0, insn);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
(cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 8184))
+ (const_int MAX_12BIT_OFFSET))
(const_int 4)
(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 262100))
+ (const_int MAX_17BIT_OFFSET))
(const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
(eq (symbol_ref "flag_pic") (const_int 0))
(const_int 20)]
(const_int 28)))])
"TARGET_64BIT"
"*
{
- return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn);
+ return output_cbranch (operands, 1, insn);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
(cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 8184))
+ (const_int MAX_12BIT_OFFSET))
(const_int 4)
(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 262100))
+ (const_int MAX_17BIT_OFFSET))
(const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
(eq (symbol_ref "flag_pic") (const_int 0))
(const_int 20)]
(const_int 28)))])
""
"*
{
- return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn, 0);
+ return output_bb (operands, 0, insn, 0);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
"TARGET_64BIT"
"*
{
- return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn, 0);
+ return output_bb (operands, 0, insn, 0);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
""
"*
{
- return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn, 0);
+ return output_bb (operands, 1, insn, 0);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
"TARGET_64BIT"
"*
{
- return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn, 0);
+ return output_bb (operands, 1, insn, 0);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
""
"*
{
- return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn, 1);
+ return output_bb (operands, 0, insn, 1);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
"TARGET_64BIT"
"*
{
- return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn, 1);
+ return output_bb (operands, 0, insn, 1);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
""
"*
{
- return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn, 1);
+ return output_bb (operands, 1, insn, 1);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
"TARGET_64BIT"
"*
{
- return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn, 1);
+ return output_bb (operands, 1, insn, 1);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
;; Branch on Variable Bit patterns.
(define_insn ""
""
"*
{
- return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn, 0);
+ return output_bvb (operands, 0, insn, 0);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
"TARGET_64BIT"
"*
{
- return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn, 0);
+ return output_bvb (operands, 0, insn, 0);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
""
"*
{
- return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn, 0);
+ return output_bvb (operands, 1, insn, 0);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
"TARGET_64BIT"
"*
{
- return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn, 0);
+ return output_bvb (operands, 1, insn, 0);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
""
"*
{
- return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn, 1);
+ return output_bvb (operands, 0, insn, 1);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
"TARGET_64BIT"
"*
{
- return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn, 1);
+ return output_bvb (operands, 0, insn, 1);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
""
"*
{
- return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn, 1);
+ return output_bvb (operands, 1, insn, 1);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
"TARGET_64BIT"
"*
{
- return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn, 1);
+ return output_bvb (operands, 1, insn, 1);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
;; Floating point branches
+
+;; ??? Nullification is handled differently from other branches.
+;; If nullification is specified, the delay slot is nullified on any
+;; taken branch regardless of branch direction.
(define_insn ""
[(set (pc) (if_then_else (ne (reg:CCFP 0) (const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
- "! TARGET_SOFT_FLOAT"
+ "!TARGET_SOFT_FLOAT"
"*
{
- if (INSN_ANNULLED_BRANCH_P (insn))
- return \"ftest\;b,n %0\";
+ int length = get_attr_length (insn);
+ rtx xoperands[1];
+ int nullify, xdelay;
+
+ if (length < 16)
+ return \"ftest\;b%* %l0\";
+
+ if (dbr_sequence_length () == 0 || INSN_ANNULLED_BRANCH_P (insn))
+ {
+ nullify = 1;
+ xdelay = 0;
+ xoperands[0] = GEN_INT (length - 8);
+ }
+ else
+ {
+ nullify = 0;
+ xdelay = 1;
+ xoperands[0] = GEN_INT (length - 4);
+ }
+
+ if (nullify)
+ output_asm_insn (\"ftest\;add,tr %%r0,%%r0,%%r0\;b,n .+%0\", xoperands);
else
- return \"ftest\;b%* %0\";
+ output_asm_insn (\"ftest\;add,tr %%r0,%%r0,%%r0\;b .+%0\", xoperands);
+ return output_lbranch (operands[0], insn, xdelay);
}"
- [(set_attr "type" "fbranch")
- (set_attr "length" "8")])
+[(set_attr "type" "fbranch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 32)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 28)]
+ (const_int 36)))])
(define_insn ""
[(set (pc) (if_then_else (ne (reg:CCFP 0) (const_int 0))
(pc)
(label_ref (match_operand 0 "" ""))))]
- "! TARGET_SOFT_FLOAT"
+ "!TARGET_SOFT_FLOAT"
"*
{
- if (INSN_ANNULLED_BRANCH_P (insn))
- return \"ftest\;add,tr %%r0,%%r0,%%r0\;b,n %0\";
- else
+ int length = get_attr_length (insn);
+ rtx xoperands[1];
+ int nullify, xdelay;
+
+ if (length < 16)
return \"ftest\;add,tr %%r0,%%r0,%%r0\;b%* %0\";
+
+ if (dbr_sequence_length () == 0 || INSN_ANNULLED_BRANCH_P (insn))
+ {
+ nullify = 1;
+ xdelay = 0;
+ xoperands[0] = GEN_INT (length - 4);
+ }
+ else
+ {
+ nullify = 0;
+ xdelay = 1;
+ xoperands[0] = GEN_INT (length);
+ }
+
+ if (nullify)
+ output_asm_insn (\"ftest\;b,n .+%0\", xoperands);
+ else
+ output_asm_insn (\"ftest\;b .+%0\", xoperands);
+ return output_lbranch (operands[0], insn, xdelay);
}"
- [(set_attr "type" "fbranch")
- (set_attr "length" "12")])
+[(set_attr "type" "fbranch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 12)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 28)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 24)]
+ (const_int 32)))])
;; Move instructions
DONE;
}")
-;; Reloading an SImode or DImode value requires a scratch register if
-;; going in to or out of float point registers.
+;; Handle SImode input reloads requiring %r1 as a scratch register.
+(define_expand "reload_insi_r1"
+ [(set (match_operand:SI 0 "register_operand" "=Z")
+ (match_operand:SI 1 "non_hard_reg_operand" ""))
+ (clobber (match_operand:SI 2 "register_operand" "=&a"))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, SImode, operands[2]))
+ DONE;
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+}")
+
+;; Handle SImode input reloads requiring a general register as a
+;; scratch register.
(define_expand "reload_insi"
[(set (match_operand:SI 0 "register_operand" "=Z")
(match_operand:SI 1 "non_hard_reg_operand" ""))
DONE;
}")
+;; Handle SImode output reloads requiring a general register as a
+;; scratch register.
(define_expand "reload_outsi"
[(set (match_operand:SI 0 "non_hard_reg_operand" "")
(match_operand:SI 1 "register_operand" "Z"))
(define_insn ""
[(set (match_operand:SI 0 "move_dest_operand"
- "=r,r,r,r,r,r,Q,!*q,!*f,*f,T")
+ "=r,r,r,r,r,r,Q,!*q,!r,!*f,*f,T,?r,?*f")
(match_operand:SI 1 "move_src_operand"
- "A,r,J,N,K,RQ,rM,!rM,!*fM,RT,*f"))]
+ "A,r,J,N,K,RQ,rM,!rM,!*q,!*fM,RT,*f,*f,r"))]
"(register_operand (operands[0], SImode)
|| reg_or_0_operand (operands[1], SImode))
- && !TARGET_SOFT_FLOAT"
+ && !TARGET_SOFT_FLOAT
+ && !TARGET_64BIT"
+ "@
+ ldw RT'%A1,%0
+ copy %1,%0
+ ldi %1,%0
+ ldil L'%1,%0
+ {zdepi|depwi,z} %Z1,%0
+ ldw%M1 %1,%0
+ stw%M0 %r1,%0
+ mtsar %r1
+ {mfctl|mfctl,w} %%sar,%0
+ fcpy,sgl %f1,%0
+ fldw%F1 %1,%0
+ fstw%F0 %1,%0
+ {fstws|fstw} %1,-16(%%sp)\n\t{ldws|ldw} -16(%%sp),%0
+ {stws|stw} %1,-16(%%sp)\n\t{fldws|fldw} -16(%%sp),%0"
+ [(set_attr "type" "load,move,move,move,shift,load,store,move,move,fpalu,fpload,fpstore,fpstore_load,store_fpload")
+ (set_attr "pa_combine_type" "addmove")
+ (set_attr "length" "4,4,4,4,4,4,4,4,4,4,4,4,8,8")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "move_dest_operand"
+ "=r,r,r,r,r,r,Q,!*q,!r,!*f,*f,T")
+ (match_operand:SI 1 "move_src_operand"
+ "A,r,J,N,K,RQ,rM,!rM,!*q,!*fM,RT,*f"))]
+ "(register_operand (operands[0], SImode)
+ || reg_or_0_operand (operands[1], SImode))
+ && !TARGET_SOFT_FLOAT
+ && TARGET_64BIT"
"@
ldw RT'%A1,%0
copy %1,%0
ldw%M1 %1,%0
stw%M0 %r1,%0
mtsar %r1
+ {mfctl|mfctl,w} %%sar,%0
fcpy,sgl %f1,%0
fldw%F1 %1,%0
fstw%F0 %1,%0"
- [(set_attr "type" "load,move,move,move,shift,load,store,move,fpalu,fpload,fpstore")
+ [(set_attr "type" "load,move,move,move,shift,load,store,move,move,fpalu,fpload,fpstore")
(set_attr "pa_combine_type" "addmove")
- (set_attr "length" "4,4,4,4,4,4,4,4,4,4,4")])
+ (set_attr "length" "4,4,4,4,4,4,4,4,4,4,4,4")])
(define_insn ""
[(set (match_operand:SI 0 "indexed_memory_operand" "=R")
(const_int 4))
(match_operand:SI 2 "register_operand" "")))
(set (mem:SI (match_dup 0))
- (match_operand:SI 3 "reg_or_0_operand" ""))]
+ (match_operand:SI 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:SI (plus:SI (mult:SI (match_dup 1) (const_int 4)) (match_dup 2)))
(set (mem:SI (match_dup 0))
(match_operand:SI 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:SI (plus:SI (mult:SI (match_dup 1) (const_int 4)) (match_dup 2)))
(set (mem:SI (match_dup 0))
(match_operand:SI 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& TARGET_64BIT
&& REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
(set (mem:SI (match_dup 0))
(match_operand:SI 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& TARGET_64BIT
&& REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
(set (mem:SI (match_dup 0))
(match_operand:SI 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
- && REG_OK_FOR_BASE_P (operands[1])
- && (TARGET_NO_SPACE_REGS
- || (!REG_POINTER (operands[1]) && REG_POINTER (operands[2])))
+ && !TARGET_DISABLE_INDEXING
+ && TARGET_NO_SPACE_REGS
+ && REG_OK_FOR_INDEX_P (operands[1])
+ && REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:SI (plus:SI (match_dup 1) (match_dup 2)))
(match_dup 3))
(set (mem:SI (match_dup 0))
(match_operand:SI 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
- && REG_OK_FOR_BASE_P (operands[2])
- && (TARGET_NO_SPACE_REGS
- || (REG_POINTER (operands[1]) && !REG_POINTER (operands[2])))
+ && !TARGET_DISABLE_INDEXING
+ && TARGET_NO_SPACE_REGS
+ && REG_OK_FOR_BASE_P (operands[1])
+ && REG_OK_FOR_INDEX_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:SI (plus:SI (match_dup 2) (match_dup 1)))
(match_dup 3))
(set (mem:SI (match_dup 0))
(match_operand:SI 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& TARGET_64BIT
- && REG_OK_FOR_BASE_P (operands[1])
- && (TARGET_NO_SPACE_REGS
- || (!REG_POINTER (operands[1]) && REG_POINTER (operands[2])))
+ && TARGET_NO_SPACE_REGS
+ && REG_OK_FOR_INDEX_P (operands[1])
+ && REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:SI (plus:DI (match_dup 1) (match_dup 2)))
(match_dup 3))
(set (mem:SI (match_dup 0))
(match_operand:SI 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& TARGET_64BIT
- && REG_OK_FOR_BASE_P (operands[2])
- && (TARGET_NO_SPACE_REGS
- || (REG_POINTER (operands[1]) && !REG_POINTER (operands[2])))
+ && TARGET_NO_SPACE_REGS
+ && REG_OK_FOR_BASE_P (operands[1])
+ && REG_OK_FOR_INDEX_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:SI (plus:DI (match_dup 2) (match_dup 1)))
(match_dup 3))
(define_insn ""
[(set (match_operand:SI 0 "move_dest_operand"
- "=r,r,r,r,r,r,Q,!*q")
+ "=r,r,r,r,r,r,Q,!*q,!r")
(match_operand:SI 1 "move_src_operand"
- "A,r,J,N,K,RQ,rM,!rM"))]
+ "A,r,J,N,K,RQ,rM,!rM,!*q"))]
"(register_operand (operands[0], SImode)
|| reg_or_0_operand (operands[1], SImode))
&& TARGET_SOFT_FLOAT"
{zdepi|depwi,z} %Z1,%0
ldw%M1 %1,%0
stw%M0 %r1,%0
- mtsar %r1"
- [(set_attr "type" "load,move,move,move,move,load,store,move")
+ mtsar %r1
+ {mfctl|mfctl,w} %%sar,%0"
+ [(set_attr "type" "load,move,move,move,move,load,store,move,move")
(set_attr "pa_combine_type" "addmove")
- (set_attr "length" "4,4,4,4,4,4,4,4")])
+ (set_attr "length" "4,4,4,4,4,4,4,4,4")])
;; Load or store with base-register modification.
(define_insn ""
"*
{
rtx xoperands[3];
- extern FILE *asm_out_file;
xoperands[0] = operands[0];
xoperands[1] = operands[1];
/* If we're trying to load the address of a label that happens to be
close, then we can use a shorter sequence. */
if (GET_CODE (operands[1]) == LABEL_REF
+ && !LABEL_REF_NONLOCAL_P (operands[1])
&& INSN_ADDRESSES_SET_P ()
&& abs (INSN_ADDRESSES (INSN_UID (XEXP (operands[1], 0)))
- INSN_ADDRESSES (INSN_UID (insn))) < 8100)
"*
{
rtx xoperands[3];
- extern FILE *asm_out_file;
xoperands[0] = operands[0];
xoperands[1] = operands[1];
/* If we're trying to load the address of a label that happens to be
close, then we can use a shorter sequence. */
if (GET_CODE (operands[1]) == LABEL_REF
+ && !LABEL_REF_NONLOCAL_P (operands[1])
&& INSN_ADDRESSES_SET_P ()
&& abs (INSN_ADDRESSES (INSN_UID (XEXP (operands[1], 0)))
- INSN_ADDRESSES (INSN_UID (insn))) < 8100)
"!is_function_label_plus_const (operands[2])"
"*
{
- if (flag_pic && symbolic_operand (operands[2], Pmode))
- abort ();
- else if (symbolic_operand (operands[2], Pmode))
+ gcc_assert (!flag_pic || !symbolic_operand (operands[2], Pmode));
+
+ if (symbolic_operand (operands[2], Pmode))
return \"ldo RR'%G2(%1),%0\";
else
return \"ldo R'%G2(%1),%0\";
DONE;
}")
+;; Handle HImode input reloads requiring a general register as a
+;; scratch register.
+(define_expand "reload_inhi"
+ [(set (match_operand:HI 0 "register_operand" "=Z")
+ (match_operand:HI 1 "non_hard_reg_operand" ""))
+ (clobber (match_operand:HI 2 "register_operand" "=&r"))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, HImode, operands[2]))
+ DONE;
+
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+}")
+
+;; Handle HImode output reloads requiring a general register as a
+;; scratch register.
+(define_expand "reload_outhi"
+ [(set (match_operand:HI 0 "non_hard_reg_operand" "")
+ (match_operand:HI 1 "register_operand" "Z"))
+ (clobber (match_operand:HI 2 "register_operand" "=&r"))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, HImode, operands[2]))
+ DONE;
+
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+}")
+
(define_insn ""
[(set (match_operand:HI 0 "move_dest_operand"
- "=r,r,r,r,r,Q,!*q,!*f")
+ "=r,r,r,r,r,Q,!*q,!r")
(match_operand:HI 1 "move_src_operand"
- "r,J,N,K,RQ,rM,!rM,!*fM"))]
- "register_operand (operands[0], HImode)
- || reg_or_0_operand (operands[1], HImode)"
+ "r,J,N,K,RQ,rM,!rM,!*q"))]
+ "(register_operand (operands[0], HImode)
+ || reg_or_0_operand (operands[1], HImode))"
"@
copy %1,%0
ldi %1,%0
ldh%M1 %1,%0
sth%M0 %r1,%0
mtsar %r1
- fcpy,sgl %f1,%0"
- [(set_attr "type" "move,move,move,shift,load,store,move,fpalu")
+ {mfctl|mfctl,w} %%sar,%0"
+ [(set_attr "type" "move,move,move,shift,load,store,move,move")
(set_attr "pa_combine_type" "addmove")
(set_attr "length" "4,4,4,4,4,4,4,4")])
DONE;
}")
+;; Handle QImode input reloads requiring a general register as a
+;; scratch register.
+(define_expand "reload_inqi"
+ [(set (match_operand:QI 0 "register_operand" "=Z")
+ (match_operand:QI 1 "non_hard_reg_operand" ""))
+ (clobber (match_operand:QI 2 "register_operand" "=&r"))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, QImode, operands[2]))
+ DONE;
+
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+}")
+
+;; Handle QImode output reloads requiring a general register as a
+;; scratch register.
+(define_expand "reload_outqi"
+ [(set (match_operand:QI 0 "non_hard_reg_operand" "")
+ (match_operand:QI 1 "register_operand" "Z"))
+ (clobber (match_operand:QI 2 "register_operand" "=&r"))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, QImode, operands[2]))
+ DONE;
+
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+}")
+
(define_insn ""
[(set (match_operand:QI 0 "move_dest_operand"
- "=r,r,r,r,r,Q,!*q,!*f")
+ "=r,r,r,r,r,Q,!*q,!r")
(match_operand:QI 1 "move_src_operand"
- "r,J,N,K,RQ,rM,!rM,!*fM"))]
- "register_operand (operands[0], QImode)
- || reg_or_0_operand (operands[1], QImode)"
+ "r,J,N,K,RQ,rM,!rM,!*q"))]
+ "(register_operand (operands[0], QImode)
+ || reg_or_0_operand (operands[1], QImode))"
"@
copy %1,%0
ldi %1,%0
ldb%M1 %1,%0
stb%M0 %r1,%0
mtsar %r1
- fcpy,sgl %f1,%0"
- [(set_attr "type" "move,move,move,shift,load,store,move,fpalu")
+ {mfctl|mfctl,w} %%sar,%0"
+ [(set_attr "type" "move,move,move,shift,load,store,move,move")
(set_attr "pa_combine_type" "addmove")
(set_attr "length" "4,4,4,4,4,4,4,4")])
;; The definition of this insn does not really explain what it does,
;; but it should suffice that anything generated as this insn will be
-;; recognized as a movstrsi operation, and that it will not successfully
+;; recognized as a movmemsi operation, and that it will not successfully
;; combine with anything.
-(define_expand "movstrsi"
+(define_expand "movmemsi"
[(parallel [(set (match_operand:BLK 0 "" "")
(match_operand:BLK 1 "" ""))
(clobber (match_dup 4))
size = INTVAL (operands[2]);
align = INTVAL (operands[3]);
- align = align > 4 ? 4 : align;
+ align = align > 4 ? 4 : (align ? align : 1);
/* If size/alignment is large, then use the library routines. */
if (size / align > 16)
FAIL;
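+  /* As a rough example of the heuristic above: with word (4-byte)
+     alignment this means the inline pattern copies at most 64 bytes
+     (size / align == 16); larger copies are left to the memcpy
+     library routine.  */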
/* This does happen, but not often enough to worry much about. */
- if (size / align < MOVE_RATIO)
+ if (size / align < MOVE_RATIO (optimize_insn_for_speed_p ()))
FAIL;
/* Fall through means we're going to use our block move pattern. */
;; operands 0 and 1 are both equivalent to symbolic MEMs. Thus, we are
;; forced to internally copy operands 0 and 1 to operands 7 and 8,
;; respectively. We then split or peephole optimize after reload.
-(define_insn "movstrsi_prereload"
+(define_insn "movmemsi_prereload"
[(set (mem:BLK (match_operand:SI 0 "register_operand" "r,r"))
(mem:BLK (match_operand:SI 1 "register_operand" "r,r")))
(clobber (match_operand:SI 2 "register_operand" "=&r,&r")) ;loop cnt/tmp
[(set_attr "type" "multi,multi")])
(define_split
- [(parallel [(set (mem:BLK (match_operand:SI 0 "register_operand" ""))
- (mem:BLK (match_operand:SI 1 "register_operand" "")))
+ [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
+ (match_operand:BLK 1 "memory_operand" ""))
(clobber (match_operand:SI 2 "register_operand" ""))
(clobber (match_operand:SI 3 "register_operand" ""))
(clobber (match_operand:SI 6 "register_operand" ""))
(clobber (match_operand:SI 8 "register_operand" ""))
(use (match_operand:SI 4 "arith_operand" ""))
(use (match_operand:SI 5 "const_int_operand" ""))])]
- "!TARGET_64BIT && reload_completed && !flag_peephole2"
- [(set (match_dup 7) (match_dup 0))
- (set (match_dup 8) (match_dup 1))
- (parallel [(set (mem:BLK (match_dup 7)) (mem:BLK (match_dup 8)))
+ "!TARGET_64BIT && reload_completed && !flag_peephole2
+ && GET_CODE (operands[0]) == MEM
+ && register_operand (XEXP (operands[0], 0), SImode)
+ && GET_CODE (operands[1]) == MEM
+ && register_operand (XEXP (operands[1], 0), SImode)"
+ [(set (match_dup 7) (match_dup 9))
+ (set (match_dup 8) (match_dup 10))
+ (parallel [(set (match_dup 0) (match_dup 1))
(clobber (match_dup 2))
(clobber (match_dup 3))
(clobber (match_dup 6))
(use (match_dup 4))
(use (match_dup 5))
(const_int 0)])]
- "")
+ "
+{
+ operands[9] = XEXP (operands[0], 0);
+ operands[10] = XEXP (operands[1], 0);
+ operands[0] = replace_equiv_address (operands[0], operands[7]);
+ operands[1] = replace_equiv_address (operands[1], operands[8]);
+}")
(define_peephole2
- [(parallel [(set (mem:BLK (match_operand:SI 0 "register_operand" ""))
- (mem:BLK (match_operand:SI 1 "register_operand" "")))
+ [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
+ (match_operand:BLK 1 "memory_operand" ""))
(clobber (match_operand:SI 2 "register_operand" ""))
(clobber (match_operand:SI 3 "register_operand" ""))
(clobber (match_operand:SI 6 "register_operand" ""))
(clobber (match_operand:SI 8 "register_operand" ""))
(use (match_operand:SI 4 "arith_operand" ""))
(use (match_operand:SI 5 "const_int_operand" ""))])]
- "!TARGET_64BIT"
- [(parallel [(set (mem:BLK (match_dup 7)) (mem:BLK (match_dup 8)))
+ "!TARGET_64BIT
+ && GET_CODE (operands[0]) == MEM
+ && register_operand (XEXP (operands[0], 0), SImode)
+ && GET_CODE (operands[1]) == MEM
+ && register_operand (XEXP (operands[1], 0), SImode)"
+ [(parallel [(set (match_dup 0) (match_dup 1))
(clobber (match_dup 2))
(clobber (match_dup 3))
(clobber (match_dup 6))
(const_int 0)])]
"
{
- if (dead_or_set_p (curr_insn, operands[0]))
- operands[7] = operands[0];
+ rtx addr = XEXP (operands[0], 0);
+ if (dead_or_set_p (curr_insn, addr))
+ operands[7] = addr;
else
- emit_insn (gen_rtx_SET (VOIDmode, operands[7], operands[0]));
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[7], addr));
+ operands[0] = replace_equiv_address (operands[0], operands[7]);
+ }
- if (dead_or_set_p (curr_insn, operands[1]))
- operands[8] = operands[1];
+ addr = XEXP (operands[1], 0);
+ if (dead_or_set_p (curr_insn, addr))
+ operands[8] = addr;
else
- emit_insn (gen_rtx_SET (VOIDmode, operands[8], operands[1]));
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[8], addr));
+ operands[1] = replace_equiv_address (operands[1], operands[8]);
+ }
}")
-(define_insn "movstrsi_postreload"
+(define_insn "movmemsi_postreload"
[(set (mem:BLK (match_operand:SI 0 "register_operand" "+r,r"))
(mem:BLK (match_operand:SI 1 "register_operand" "+r,r")))
(clobber (match_operand:SI 2 "register_operand" "=&r,&r")) ;loop cnt/tmp
"* return output_block_move (operands, !which_alternative);"
[(set_attr "type" "multi,multi")])
-(define_expand "movstrdi"
+(define_expand "movmemdi"
[(parallel [(set (match_operand:BLK 0 "" "")
(match_operand:BLK 1 "" ""))
(clobber (match_dup 4))
size = INTVAL (operands[2]);
align = INTVAL (operands[3]);
- align = align > 8 ? 8 : align;
+ align = align > 8 ? 8 : (align ? align : 1);
/* If size/alignment is large, then use the library routines. */
if (size / align > 16)
FAIL;
/* This does happen, but not often enough to worry much about. */
- if (size / align < MOVE_RATIO)
+ if (size / align < MOVE_RATIO (optimize_insn_for_speed_p ()))
FAIL;
/* Fall through means we're going to use our block move pattern. */
;; operands 0 and 1 are both equivalent to symbolic MEMs. Thus, we are
;; forced to internally copy operands 0 and 1 to operands 7 and 8,
;; respectively. We then split or peephole optimize after reload.
-(define_insn "movstrdi_prereload"
+(define_insn "movmemdi_prereload"
[(set (mem:BLK (match_operand:DI 0 "register_operand" "r,r"))
(mem:BLK (match_operand:DI 1 "register_operand" "r,r")))
(clobber (match_operand:DI 2 "register_operand" "=&r,&r")) ;loop cnt/tmp
[(set_attr "type" "multi,multi")])
(define_split
- [(parallel [(set (mem:BLK (match_operand:DI 0 "register_operand" ""))
- (mem:BLK (match_operand:DI 1 "register_operand" "")))
+ [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
+ (match_operand:BLK 1 "memory_operand" ""))
(clobber (match_operand:DI 2 "register_operand" ""))
(clobber (match_operand:DI 3 "register_operand" ""))
(clobber (match_operand:DI 6 "register_operand" ""))
(clobber (match_operand:DI 8 "register_operand" ""))
(use (match_operand:DI 4 "arith_operand" ""))
(use (match_operand:DI 5 "const_int_operand" ""))])]
- "TARGET_64BIT && reload_completed && !flag_peephole2"
- [(set (match_dup 7) (match_dup 0))
- (set (match_dup 8) (match_dup 1))
- (parallel [(set (mem:BLK (match_dup 7)) (mem:BLK (match_dup 8)))
+ "TARGET_64BIT && reload_completed && !flag_peephole2
+ && GET_CODE (operands[0]) == MEM
+ && register_operand (XEXP (operands[0], 0), DImode)
+ && GET_CODE (operands[1]) == MEM
+ && register_operand (XEXP (operands[1], 0), DImode)"
+ [(set (match_dup 7) (match_dup 9))
+ (set (match_dup 8) (match_dup 10))
+ (parallel [(set (match_dup 0) (match_dup 1))
(clobber (match_dup 2))
(clobber (match_dup 3))
(clobber (match_dup 6))
(use (match_dup 4))
(use (match_dup 5))
(const_int 0)])]
- "")
+ "
+{
+ operands[9] = XEXP (operands[0], 0);
+ operands[10] = XEXP (operands[1], 0);
+ operands[0] = replace_equiv_address (operands[0], operands[7]);
+ operands[1] = replace_equiv_address (operands[1], operands[8]);
+}")
(define_peephole2
- [(parallel [(set (mem:BLK (match_operand:DI 0 "register_operand" ""))
- (mem:BLK (match_operand:DI 1 "register_operand" "")))
+ [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
+ (match_operand:BLK 1 "memory_operand" ""))
(clobber (match_operand:DI 2 "register_operand" ""))
(clobber (match_operand:DI 3 "register_operand" ""))
(clobber (match_operand:DI 6 "register_operand" ""))
(clobber (match_operand:DI 8 "register_operand" ""))
(use (match_operand:DI 4 "arith_operand" ""))
(use (match_operand:DI 5 "const_int_operand" ""))])]
- "TARGET_64BIT"
- [(parallel [(set (mem:BLK (match_dup 7)) (mem:BLK (match_dup 8)))
+ "TARGET_64BIT
+ && GET_CODE (operands[0]) == MEM
+ && register_operand (XEXP (operands[0], 0), DImode)
+ && GET_CODE (operands[1]) == MEM
+ && register_operand (XEXP (operands[1], 0), DImode)"
+ [(parallel [(set (match_dup 0) (match_dup 1))
(clobber (match_dup 2))
(clobber (match_dup 3))
(clobber (match_dup 6))
(const_int 0)])]
"
{
- if (dead_or_set_p (curr_insn, operands[0]))
- operands[7] = operands[0];
+ rtx addr = XEXP (operands[0], 0);
+ if (dead_or_set_p (curr_insn, addr))
+ operands[7] = addr;
else
- emit_insn (gen_rtx_SET (VOIDmode, operands[7], operands[0]));
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[7], addr));
+ operands[0] = replace_equiv_address (operands[0], operands[7]);
+ }
- if (dead_or_set_p (curr_insn, operands[1]))
- operands[8] = operands[1];
+ addr = XEXP (operands[1], 0);
+ if (dead_or_set_p (curr_insn, addr))
+ operands[8] = addr;
else
- emit_insn (gen_rtx_SET (VOIDmode, operands[8], operands[1]));
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[8], addr));
+ operands[1] = replace_equiv_address (operands[1], operands[8]);
+ }
}")
-(define_insn "movstrdi_postreload"
+(define_insn "movmemdi_postreload"
[(set (mem:BLK (match_operand:DI 0 "register_operand" "+r,r"))
(mem:BLK (match_operand:DI 1 "register_operand" "+r,r")))
(clobber (match_operand:DI 2 "register_operand" "=&r,&r")) ;loop cnt/tmp
"* return output_block_move (operands, !which_alternative);"
[(set_attr "type" "multi,multi")])
-(define_expand "clrstrsi"
+(define_expand "setmemsi"
[(parallel [(set (match_operand:BLK 0 "" "")
- (const_int 0))
- (clobber (match_dup 3))
+ (match_operand 2 "const_int_operand" ""))
(clobber (match_dup 4))
+ (clobber (match_dup 5))
(use (match_operand:SI 1 "arith_operand" ""))
- (use (match_operand:SI 2 "const_int_operand" ""))])]
+ (use (match_operand:SI 3 "const_int_operand" ""))])]
"!TARGET_64BIT && optimize > 0"
"
{
int size, align;
+ /* If value to set is not zero, use the library routine. */
+ if (operands[2] != const0_rtx)
+ FAIL;
+
/* Undetermined size, use the library routine. */
if (GET_CODE (operands[1]) != CONST_INT)
FAIL;
size = INTVAL (operands[1]);
- align = INTVAL (operands[2]);
+ align = INTVAL (operands[3]);
align = align > 4 ? 4 : align;
/* If size/alignment is large, then use the library routines. */
FAIL;
/* This does happen, but not often enough to worry much about. */
- if (size / align < MOVE_RATIO)
+ if (size / align < MOVE_RATIO (optimize_insn_for_speed_p ()))
FAIL;
/* Fall through means we're going to use our block clear pattern. */
operands[0]
= replace_equiv_address (operands[0],
copy_to_mode_reg (SImode, XEXP (operands[0], 0)));
- operands[3] = gen_reg_rtx (SImode);
operands[4] = gen_reg_rtx (SImode);
+ operands[5] = gen_reg_rtx (SImode);
}")
-(define_insn "clrstrsi_prereload"
+(define_insn "clrmemsi_prereload"
[(set (mem:BLK (match_operand:SI 0 "register_operand" "r,r"))
(const_int 0))
(clobber (match_operand:SI 1 "register_operand" "=&r,&r")) ;loop cnt/tmp
[(set_attr "type" "multi,multi")])
(define_split
- [(parallel [(set (mem:BLK (match_operand:SI 0 "register_operand" ""))
+ [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
(const_int 0))
(clobber (match_operand:SI 1 "register_operand" ""))
(clobber (match_operand:SI 4 "register_operand" ""))
(use (match_operand:SI 2 "arith_operand" ""))
(use (match_operand:SI 3 "const_int_operand" ""))])]
- "!TARGET_64BIT && reload_completed && !flag_peephole2"
- [(set (match_dup 4) (match_dup 0))
- (parallel [(set (mem:BLK (match_dup 4)) (const_int 0))
+ "!TARGET_64BIT && reload_completed && !flag_peephole2
+ && GET_CODE (operands[0]) == MEM
+ && register_operand (XEXP (operands[0], 0), SImode)"
+ [(set (match_dup 4) (match_dup 5))
+ (parallel [(set (match_dup 0) (const_int 0))
(clobber (match_dup 1))
(clobber (match_dup 4))
(use (match_dup 2))
(use (match_dup 3))
(const_int 0)])]
- "")
+ "
+{
+ operands[5] = XEXP (operands[0], 0);
+ operands[0] = replace_equiv_address (operands[0], operands[4]);
+}")
(define_peephole2
- [(parallel [(set (mem:BLK (match_operand:SI 0 "register_operand" ""))
+ [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
(const_int 0))
(clobber (match_operand:SI 1 "register_operand" ""))
(clobber (match_operand:SI 4 "register_operand" ""))
(use (match_operand:SI 2 "arith_operand" ""))
(use (match_operand:SI 3 "const_int_operand" ""))])]
- "!TARGET_64BIT"
- [(parallel [(set (mem:BLK (match_dup 4)) (const_int 0))
+ "!TARGET_64BIT
+ && GET_CODE (operands[0]) == MEM
+ && register_operand (XEXP (operands[0], 0), SImode)"
+ [(parallel [(set (match_dup 0) (const_int 0))
(clobber (match_dup 1))
(clobber (match_dup 4))
(use (match_dup 2))
(const_int 0)])]
"
{
- if (dead_or_set_p (curr_insn, operands[0]))
- operands[4] = operands[0];
+ rtx addr = XEXP (operands[0], 0);
+ if (dead_or_set_p (curr_insn, addr))
+ operands[4] = addr;
else
- emit_insn (gen_rtx_SET (VOIDmode, operands[4], operands[0]));
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[4], addr));
+ operands[0] = replace_equiv_address (operands[0], operands[4]);
+ }
}")
-(define_insn "clrstrsi_postreload"
+(define_insn "clrmemsi_postreload"
[(set (mem:BLK (match_operand:SI 0 "register_operand" "+r,r"))
(const_int 0))
(clobber (match_operand:SI 1 "register_operand" "=&r,&r")) ;loop cnt/tmp
"* return output_block_clear (operands, !which_alternative);"
[(set_attr "type" "multi,multi")])
-(define_expand "clrstrdi"
+(define_expand "setmemdi"
[(parallel [(set (match_operand:BLK 0 "" "")
- (const_int 0))
- (clobber (match_dup 3))
+ (match_operand 2 "const_int_operand" ""))
(clobber (match_dup 4))
+ (clobber (match_dup 5))
(use (match_operand:DI 1 "arith_operand" ""))
- (use (match_operand:DI 2 "const_int_operand" ""))])]
+ (use (match_operand:DI 3 "const_int_operand" ""))])]
"TARGET_64BIT && optimize > 0"
"
{
int size, align;
+ /* If value to set is not zero, use the library routine. */
+ if (operands[2] != const0_rtx)
+ FAIL;
+
/* Undetermined size, use the library routine. */
if (GET_CODE (operands[1]) != CONST_INT)
FAIL;
size = INTVAL (operands[1]);
- align = INTVAL (operands[2]);
+ align = INTVAL (operands[3]);
align = align > 8 ? 8 : align;
/* If size/alignment is large, then use the library routines. */
FAIL;
/* This does happen, but not often enough to worry much about. */
- if (size / align < MOVE_RATIO)
+ if (size / align < MOVE_RATIO (optimize_insn_for_speed_p ()))
FAIL;
/* Fall through means we're going to use our block clear pattern. */
operands[0]
= replace_equiv_address (operands[0],
copy_to_mode_reg (DImode, XEXP (operands[0], 0)));
- operands[3] = gen_reg_rtx (DImode);
operands[4] = gen_reg_rtx (DImode);
+ operands[5] = gen_reg_rtx (DImode);
}")
-(define_insn "clrstrdi_prereload"
+(define_insn "clrmemdi_prereload"
[(set (mem:BLK (match_operand:DI 0 "register_operand" "r,r"))
(const_int 0))
(clobber (match_operand:DI 1 "register_operand" "=&r,&r")) ;loop cnt/tmp
[(set_attr "type" "multi,multi")])
(define_split
- [(parallel [(set (mem:BLK (match_operand:DI 0 "register_operand" ""))
+ [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
(const_int 0))
(clobber (match_operand:DI 1 "register_operand" ""))
(clobber (match_operand:DI 4 "register_operand" ""))
(use (match_operand:DI 2 "arith_operand" ""))
(use (match_operand:DI 3 "const_int_operand" ""))])]
- "TARGET_64BIT && reload_completed && !flag_peephole2"
- [(set (match_dup 4) (match_dup 0))
- (parallel [(set (mem:BLK (match_dup 4)) (const_int 0))
+ "TARGET_64BIT && reload_completed && !flag_peephole2
+ && GET_CODE (operands[0]) == MEM
+ && register_operand (XEXP (operands[0], 0), DImode)"
+ [(set (match_dup 4) (match_dup 5))
+ (parallel [(set (match_dup 0) (const_int 0))
(clobber (match_dup 1))
(clobber (match_dup 4))
(use (match_dup 2))
(use (match_dup 3))
(const_int 0)])]
- "")
+ "
+{
+ operands[5] = XEXP (operands[0], 0);
+ operands[0] = replace_equiv_address (operands[0], operands[4]);
+}")
(define_peephole2
- [(parallel [(set (mem:BLK (match_operand:DI 0 "register_operand" ""))
+ [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
(const_int 0))
(clobber (match_operand:DI 1 "register_operand" ""))
(clobber (match_operand:DI 4 "register_operand" ""))
(use (match_operand:DI 2 "arith_operand" ""))
(use (match_operand:DI 3 "const_int_operand" ""))])]
- "TARGET_64BIT"
- [(parallel [(set (mem:BLK (match_dup 4)) (const_int 0))
+ "TARGET_64BIT
+ && GET_CODE (operands[0]) == MEM
+ && register_operand (XEXP (operands[0], 0), DImode)"
+ [(parallel [(set (match_dup 0) (const_int 0))
(clobber (match_dup 1))
(clobber (match_dup 4))
(use (match_dup 2))
(const_int 0)])]
"
{
- if (dead_or_set_p (curr_insn, operands[0]))
- operands[4] = operands[0];
+ rtx addr = XEXP (operands[0], 0);
+ if (dead_or_set_p (curr_insn, addr))
+ operands[4] = addr;
else
- emit_insn (gen_rtx_SET (VOIDmode, operands[4], operands[0]));
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[4], addr));
+ operands[0] = replace_equiv_address (operands[0], operands[4]);
+ }
}")
-(define_insn "clrstrdi_postreload"
+(define_insn "clrmemdi_postreload"
[(set (mem:BLK (match_operand:DI 0 "register_operand" "+r,r"))
(const_int 0))
(clobber (match_operand:DI 1 "register_operand" "=&r,&r")) ;loop cnt/tmp
""
"
{
- if (GET_CODE (operands[1]) == CONST_DOUBLE && TARGET_64BIT)
- operands[1] = force_const_mem (DFmode, operands[1]);
+ if (GET_CODE (operands[1]) == CONST_DOUBLE
+ && operands[1] != CONST0_RTX (DFmode))
+ {
+ /* Reject CONST_DOUBLE loads to all hard registers when
+ generating 64-bit code and to floating point registers
+ when generating 32-bit code. */
+ if (REG_P (operands[0])
+ && HARD_REGISTER_P (operands[0])
+ && (TARGET_64BIT || REGNO (operands[0]) >= 32))
+ FAIL;
+
+ if (TARGET_64BIT)
+ operands[1] = force_const_mem (DFmode, operands[1]);
+ }
if (emit_move_sequence (operands, DFmode, 0))
DONE;
}")
-;; Reloading an SImode or DImode value requires a scratch register if
-;; going in to or out of float point registers.
-
+;; Handle DFmode input reloads requiring a general register as a
+;; scratch register.
(define_expand "reload_indf"
[(set (match_operand:DF 0 "register_operand" "=Z")
(match_operand:DF 1 "non_hard_reg_operand" ""))
DONE;
}")
+;; Handle DFmode output reloads requiring a general register as a
+;; scratch register.
(define_expand "reload_outdf"
[(set (match_operand:DF 0 "non_hard_reg_operand" "")
(match_operand:DF 1 "register_operand" "Z"))
(define_insn ""
[(set (match_operand:DF 0 "move_dest_operand"
- "=f,*r,Q,?o,?Q,f,*r,*r")
+ "=f,*r,Q,?o,?Q,f,*r,*r,?*r,?f")
(match_operand:DF 1 "reg_or_0_or_nonsymb_mem_operand"
- "fG,*rG,f,*r,*r,RQ,o,RQ"))]
+ "fG,*rG,f,*r,*r,RQ,o,RQ,f,*r"))]
"(register_operand (operands[0], DFmode)
|| reg_or_0_operand (operands[1], DFmode))
&& !(GET_CODE (operands[1]) == CONST_DOUBLE
&& !TARGET_SOFT_FLOAT"
"*
{
- if (FP_REG_P (operands[0]) || FP_REG_P (operands[1])
- || operands[1] == CONST0_RTX (DFmode))
+ if ((FP_REG_P (operands[0]) || FP_REG_P (operands[1])
+ || operands[1] == CONST0_RTX (DFmode))
+ && !(REG_P (operands[0]) && REG_P (operands[1])
+ && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1])))
return output_fp_move_double (operands);
return output_move_double (operands);
}"
- [(set_attr "type" "fpalu,move,fpstore,store,store,fpload,load,load")
- (set_attr "length" "4,8,4,8,16,4,8,16")])
+ [(set_attr "type" "fpalu,move,fpstore,store,store,fpload,load,load,fpstore_load,store_fpload")
+ (set_attr "length" "4,8,4,8,16,4,8,16,12,12")])
(define_insn ""
[(set (match_operand:DF 0 "indexed_memory_operand" "=R")
(set (mem:DF (match_dup 0))
(match_operand:DF 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:DF (plus:SI (mult:SI (match_dup 1) (const_int 8)) (match_dup 2)))
(set (mem:DF (match_dup 0))
(match_operand:DF 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:DF (plus:SI (mult:SI (match_dup 1) (const_int 8)) (match_dup 2)))
(set (mem:DF (match_dup 0))
(match_operand:DF 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& TARGET_64BIT
&& REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
(set (mem:DF (match_dup 0))
(match_operand:DF 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& TARGET_64BIT
&& REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
(set (mem:DF (match_dup 0))
(match_operand:DF 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
- && REG_OK_FOR_BASE_P (operands[1])
- && (TARGET_NO_SPACE_REGS
- || (!REG_POINTER (operands[1]) && REG_POINTER (operands[2])))
+ && !TARGET_DISABLE_INDEXING
+ && TARGET_NO_SPACE_REGS
+ && REG_OK_FOR_INDEX_P (operands[1])
+ && REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:DF (plus:SI (match_dup 1) (match_dup 2)))
(match_dup 3))
(set (mem:DF (match_dup 0))
(match_operand:DF 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
- && REG_OK_FOR_BASE_P (operands[2])
- && (TARGET_NO_SPACE_REGS
- || (REG_POINTER (operands[1]) && !REG_POINTER (operands[2])))
+ && !TARGET_DISABLE_INDEXING
+ && TARGET_NO_SPACE_REGS
+ && REG_OK_FOR_BASE_P (operands[1])
+ && REG_OK_FOR_INDEX_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:DF (plus:SI (match_dup 2) (match_dup 1)))
(match_dup 3))
(set (mem:DF (match_dup 0))
(match_operand:DF 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& TARGET_64BIT
- && REG_OK_FOR_BASE_P (operands[1])
- && (TARGET_NO_SPACE_REGS
- || (!REG_POINTER (operands[1]) && REG_POINTER (operands[2])))
+ && TARGET_NO_SPACE_REGS
+ && REG_OK_FOR_INDEX_P (operands[1])
+ && REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:DF (plus:DI (match_dup 1) (match_dup 2)))
(match_dup 3))
(set (mem:DF (match_dup 0))
(match_operand:DF 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& TARGET_64BIT
- && REG_OK_FOR_BASE_P (operands[2])
- && (TARGET_NO_SPACE_REGS
- || (REG_POINTER (operands[1]) && !REG_POINTER (operands[2])))
+ && TARGET_NO_SPACE_REGS
+ && REG_OK_FOR_BASE_P (operands[1])
+ && REG_OK_FOR_INDEX_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:DF (plus:DI (match_dup 2) (match_dup 1)))
(match_dup 3))
(define_insn ""
[(set (match_operand:DF 0 "move_dest_operand"
- "=!*r,*r,*r,*r,*r,Q,!*q,f,f,T")
+ "=!*r,*r,*r,*r,*r,Q,f,f,T")
(match_operand:DF 1 "move_src_operand"
- "!*r,J,N,K,RQ,*rM,!*rM,fM,RT,f"))]
+ "!*r,J,N,K,RQ,*rG,fG,RT,f"))]
"(register_operand (operands[0], DFmode)
|| reg_or_0_operand (operands[1], DFmode))
&& !TARGET_SOFT_FLOAT && TARGET_64BIT"
depdi,z %z1,%0
ldd%M1 %1,%0
std%M0 %r1,%0
- mtsar %r1
fcpy,dbl %f1,%0
fldd%F1 %1,%0
fstd%F0 %1,%0"
- [(set_attr "type" "move,move,move,shift,load,store,move,fpalu,fpload,fpstore")
+ [(set_attr "type" "move,move,move,shift,load,store,fpalu,fpload,fpstore")
(set_attr "pa_combine_type" "addmove")
- (set_attr "length" "4,4,4,4,4,4,4,4,4,4")])
+ (set_attr "length" "4,4,4,4,4,4,4,4,4")])
\f
(define_expand "movdi"
""
"
{
- if (GET_CODE (operands[1]) == CONST_DOUBLE && TARGET_64BIT)
- operands[1] = force_const_mem (DImode, operands[1]);
+ /* Except for zero, we don't support loading a CONST_INT directly
+ to a hard floating-point register since a scratch register is
+ needed for the operation. While the operation could be handled
+ before register allocation, the simplest solution is to fail. */
+ if (TARGET_64BIT
+ && GET_CODE (operands[1]) == CONST_INT
+ && operands[1] != CONST0_RTX (DImode)
+ && REG_P (operands[0])
+ && HARD_REGISTER_P (operands[0])
+ && REGNO (operands[0]) >= 32)
+ FAIL;
if (emit_move_sequence (operands, DImode, 0))
DONE;
}")
+;; Handle DImode input reloads requiring %r1 as a scratch register.
+(define_expand "reload_indi_r1"
+ [(set (match_operand:DI 0 "register_operand" "=Z")
+ (match_operand:DI 1 "non_hard_reg_operand" ""))
+ (clobber (match_operand:SI 2 "register_operand" "=&a"))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, DImode, operands[2]))
+ DONE;
+
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+}")
+
+;; Handle DImode input reloads requiring a general register as a
+;; scratch register.
(define_expand "reload_indi"
[(set (match_operand:DI 0 "register_operand" "=Z")
(match_operand:DI 1 "non_hard_reg_operand" ""))
DONE;
}")
+;; Handle DImode output reloads requiring a general register as a
+;; scratch register.
(define_expand "reload_outdi"
[(set (match_operand:DI 0 "non_hard_reg_operand" "")
(match_operand:DI 1 "register_operand" "Z"))
rtx op0 = operands[0];
rtx op1 = operands[1];
- if (GET_CODE (op1) == CONST_INT)
+ switch (GET_CODE (op1))
{
+ case CONST_INT:
+#if HOST_BITS_PER_WIDE_INT <= 32
operands[0] = operand_subword (op0, 1, 0, DImode);
output_asm_insn (\"ldil L'%1,%0\", operands);
output_asm_insn (\"ldi -1,%0\", operands);
else
output_asm_insn (\"ldi 0,%0\", operands);
- return \"\";
- }
- else if (GET_CODE (op1) == CONST_DOUBLE)
- {
+#else
+ operands[0] = operand_subword (op0, 1, 0, DImode);
+ operands[1] = GEN_INT (INTVAL (op1) & 0xffffffff);
+ output_asm_insn (\"ldil L'%1,%0\", operands);
+
+ operands[0] = operand_subword (op0, 0, 0, DImode);
+ operands[1] = GEN_INT (INTVAL (op1) >> 32);
+ output_asm_insn (singlemove_string (operands), operands);
+#endif
+ break;
+
+ case CONST_DOUBLE:
operands[0] = operand_subword (op0, 1, 0, DImode);
operands[1] = GEN_INT (CONST_DOUBLE_LOW (op1));
output_asm_insn (\"ldil L'%1,%0\", operands);
operands[0] = operand_subword (op0, 0, 0, DImode);
operands[1] = GEN_INT (CONST_DOUBLE_HIGH (op1));
output_asm_insn (singlemove_string (operands), operands);
- return \"\";
+ break;
+
+ default:
+ gcc_unreachable ();
}
- else
- abort ();
+ return \"\";
}"
[(set_attr "type" "move")
- (set_attr "length" "8")])
+ (set_attr "length" "12")])
(define_insn ""
[(set (match_operand:DI 0 "move_dest_operand"
- "=r,o,Q,r,r,r,*f,*f,T")
+ "=r,o,Q,r,r,r,*f,*f,T,?r,?*f")
(match_operand:DI 1 "general_operand"
- "rM,r,r,o*R,Q,i,*fM,RT,*f"))]
+ "rM,r,r,o*R,Q,i,*fM,RT,*f,*f,r"))]
"(register_operand (operands[0], DImode)
|| reg_or_0_operand (operands[1], DImode))
&& !TARGET_64BIT
&& !TARGET_SOFT_FLOAT"
"*
{
- if (FP_REG_P (operands[0]) || FP_REG_P (operands[1])
- || (operands[1] == CONST0_RTX (DImode)))
+ if ((FP_REG_P (operands[0]) || FP_REG_P (operands[1])
+ || operands[1] == CONST0_RTX (DImode))
+ && !(REG_P (operands[0]) && REG_P (operands[1])
+ && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1])))
return output_fp_move_double (operands);
return output_move_double (operands);
}"
- [(set_attr "type" "move,store,store,load,load,multi,fpalu,fpload,fpstore")
- (set_attr "length" "8,8,16,8,16,16,4,4,4")])
+ [(set_attr "type"
+ "move,store,store,load,load,multi,fpalu,fpload,fpstore,fpstore_load,store_fpload")
+ (set_attr "length" "8,8,16,8,16,16,4,4,4,12,12")])
(define_insn ""
[(set (match_operand:DI 0 "move_dest_operand"
- "=r,r,r,r,r,r,Q,!*q,!*f,*f,T")
+ "=r,r,r,r,r,r,Q,!*q,!r,!*f,*f,T")
(match_operand:DI 1 "move_src_operand"
- "A,r,J,N,K,RQ,rM,!rM,!*fM,RT,*f"))]
+ "A,r,J,N,K,RQ,rM,!rM,!*q,!*fM,RT,*f"))]
"(register_operand (operands[0], DImode)
|| reg_or_0_operand (operands[1], DImode))
&& !TARGET_SOFT_FLOAT && TARGET_64BIT"
ldd%M1 %1,%0
std%M0 %r1,%0
mtsar %r1
+ {mfctl|mfctl,w} %%sar,%0
fcpy,dbl %f1,%0
fldd%F1 %1,%0
fstd%F0 %1,%0"
- [(set_attr "type" "load,move,move,move,shift,load,store,move,fpalu,fpload,fpstore")
+ [(set_attr "type" "load,move,move,move,shift,load,store,move,move,fpalu,fpload,fpstore")
(set_attr "pa_combine_type" "addmove")
- (set_attr "length" "4,4,4,4,4,4,4,4,4,4,4")])
+ (set_attr "length" "4,4,4,4,4,4,4,4,4,4,4,4")])
(define_insn ""
[(set (match_operand:DI 0 "indexed_memory_operand" "=R")
(set (mem:DI (match_dup 0))
(match_operand:DI 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& TARGET_64BIT
&& REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
(set (mem:DI (match_dup 0))
(match_operand:DI 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& TARGET_64BIT
&& REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
(set (mem:DI (match_dup 0))
(match_operand:DI 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& TARGET_64BIT
- && REG_OK_FOR_BASE_P (operands[1])
- && (TARGET_NO_SPACE_REGS
- || (!REG_POINTER (operands[1]) && REG_POINTER (operands[2])))
+ && TARGET_NO_SPACE_REGS
+ && REG_OK_FOR_INDEX_P (operands[1])
+ && REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:DI (plus:DI (match_dup 1) (match_dup 2)))
(match_dup 3))
(set (mem:DI (match_dup 0))
(match_operand:DI 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& TARGET_64BIT
- && REG_OK_FOR_BASE_P (operands[2])
- && (TARGET_NO_SPACE_REGS
- || (REG_POINTER (operands[1]) && !REG_POINTER (operands[2])))
+ && TARGET_NO_SPACE_REGS
+ && REG_OK_FOR_BASE_P (operands[1])
+ && REG_OK_FOR_INDEX_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:DI (plus:DI (match_dup 2) (match_dup 1)))
(match_dup 3))
"!TARGET_64BIT"
"*
{
- /* Don't output a 64 bit constant, since we can't trust the assembler to
+ /* Don't output a 64-bit constant, since we can't trust the assembler to
handle it correctly. */
if (GET_CODE (operands[2]) == CONST_DOUBLE)
operands[2] = GEN_INT (CONST_DOUBLE_LOW (operands[2]));
+ else if (HOST_BITS_PER_WIDE_INT > 32
+ && GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffffffff);
if (which_alternative == 1)
output_asm_insn (\"copy %1,%0\", operands);
return \"ldo R'%G2(%R1),%R0\";
""
"
{
+ /* Reject CONST_DOUBLE loads to floating point registers. */
+ if (GET_CODE (operands[1]) == CONST_DOUBLE
+ && operands[1] != CONST0_RTX (SFmode)
+ && REG_P (operands[0])
+ && HARD_REGISTER_P (operands[0])
+ && REGNO (operands[0]) >= 32)
+ FAIL;
+
if (emit_move_sequence (operands, SFmode, 0))
DONE;
}")
-;; Reloading an SImode or DImode value requires a scratch register if
-;; going in to or out of float point registers.
-
+;; Handle SFmode input reloads requiring a general register as a
+;; scratch register.
(define_expand "reload_insf"
[(set (match_operand:SF 0 "register_operand" "=Z")
(match_operand:SF 1 "non_hard_reg_operand" ""))
DONE;
}")
+;; Handle SFmode output reloads requiring a general register as a
+;; scratch register.
(define_expand "reload_outsf"
[(set (match_operand:SF 0 "non_hard_reg_operand" "")
(match_operand:SF 1 "register_operand" "Z"))
(define_insn ""
[(set (match_operand:SF 0 "move_dest_operand"
+ "=f,!*r,f,*r,Q,Q,?*r,?f")
+ (match_operand:SF 1 "reg_or_0_or_nonsymb_mem_operand"
+ "fG,!*rG,RQ,RQ,f,*rG,f,*r"))]
+ "(register_operand (operands[0], SFmode)
+ || reg_or_0_operand (operands[1], SFmode))
+ && !TARGET_SOFT_FLOAT
+ && !TARGET_64BIT"
+ "@
+ fcpy,sgl %f1,%0
+ copy %r1,%0
+ fldw%F1 %1,%0
+ ldw%M1 %1,%0
+ fstw%F0 %1,%0
+ stw%M0 %r1,%0
+ {fstws|fstw} %1,-16(%%sp)\n\t{ldws|ldw} -16(%%sp),%0
+ {stws|stw} %1,-16(%%sp)\n\t{fldws|fldw} -16(%%sp),%0"
+ [(set_attr "type" "fpalu,move,fpload,load,fpstore,store,fpstore_load,store_fpload")
+ (set_attr "pa_combine_type" "addmove")
+ (set_attr "length" "4,4,4,4,4,4,8,8")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "move_dest_operand"
"=f,!*r,f,*r,Q,Q")
(match_operand:SF 1 "reg_or_0_or_nonsymb_mem_operand"
"fG,!*rG,RQ,RQ,f,*rG"))]
"(register_operand (operands[0], SFmode)
|| reg_or_0_operand (operands[1], SFmode))
- && !TARGET_SOFT_FLOAT"
+ && !TARGET_SOFT_FLOAT
+ && TARGET_64BIT"
"@
fcpy,sgl %f1,%0
copy %r1,%0
(set (mem:SF (match_dup 0))
(match_operand:SF 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:SF (plus:SI (mult:SI (match_dup 1) (const_int 4)) (match_dup 2)))
(set (mem:SF (match_dup 0))
(match_operand:SF 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:SF (plus:SI (mult:SI (match_dup 1) (const_int 4)) (match_dup 2)))
(set (mem:SF (match_dup 0))
(match_operand:SF 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& TARGET_64BIT
&& REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
(set (mem:SF (match_dup 0))
(match_operand:SF 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& TARGET_64BIT
&& REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
(set (mem:SF (match_dup 0))
(match_operand:SF 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
- && REG_OK_FOR_BASE_P (operands[1])
- && (TARGET_NO_SPACE_REGS
- || (!REG_POINTER (operands[1]) && REG_POINTER (operands[2])))
+ && !TARGET_DISABLE_INDEXING
+ && TARGET_NO_SPACE_REGS
+ && REG_OK_FOR_INDEX_P (operands[1])
+ && REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:SF (plus:SI (match_dup 1) (match_dup 2)))
(match_dup 3))
(set (mem:SF (match_dup 0))
(match_operand:SF 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
- && REG_OK_FOR_BASE_P (operands[2])
- && (TARGET_NO_SPACE_REGS
- || (REG_POINTER (operands[1]) && !REG_POINTER (operands[2])))
+ && !TARGET_DISABLE_INDEXING
+ && TARGET_NO_SPACE_REGS
+ && REG_OK_FOR_BASE_P (operands[1])
+ && REG_OK_FOR_INDEX_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:SF (plus:SI (match_dup 2) (match_dup 1)))
(match_dup 3))
(set (mem:SF (match_dup 0))
(match_operand:SF 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& TARGET_64BIT
- && REG_OK_FOR_BASE_P (operands[1])
- && (TARGET_NO_SPACE_REGS
- || (!REG_POINTER (operands[1]) && REG_POINTER (operands[2])))
+ && TARGET_NO_SPACE_REGS
+ && REG_OK_FOR_INDEX_P (operands[1])
+ && REG_OK_FOR_BASE_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:SF (plus:DI (match_dup 1) (match_dup 2)))
(match_dup 3))
(set (mem:SF (match_dup 0))
(match_operand:SF 3 "register_operand" ""))]
"!TARGET_SOFT_FLOAT
+ && !TARGET_DISABLE_INDEXING
&& TARGET_64BIT
- && REG_OK_FOR_BASE_P (operands[2])
- && (TARGET_NO_SPACE_REGS
- || (REG_POINTER (operands[1]) && !REG_POINTER (operands[2])))
+ && TARGET_NO_SPACE_REGS
+ && REG_OK_FOR_BASE_P (operands[1])
+ && REG_OK_FOR_INDEX_P (operands[2])
&& FP_REGNO_P (REGNO (operands[3]))"
[(set (mem:SF (plus:DI (match_dup 2) (match_dup 1)))
(match_dup 3))
[(set_attr "type" "binary")
(set_attr "length" "4")])
+(define_expand "addvdi3"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (plus:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "arith11_operand" "")))
+ (trap_if (ne (plus:TI (sign_extend:TI (match_dup 1))
+ (sign_extend:TI (match_dup 2)))
+ (sign_extend:TI (plus:DI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))])]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (plus:DI (match_operand:DI 1 "reg_or_0_operand" "%rM,rM")
+ (match_operand:DI 2 "arith11_operand" "r,I")))
+ (trap_if (ne (plus:TI (sign_extend:TI (match_dup 1))
+ (sign_extend:TI (match_dup 2)))
+ (sign_extend:TI (plus:DI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ "TARGET_64BIT"
+ "@
+ add,tsv,* %2,%1,%0
+ addi,tsv,* %2,%1,%0"
+ [(set_attr "type" "binary,binary")
+ (set_attr "length" "4,4")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "reg_or_0_operand" "%rM")
+ (match_operand:DI 2 "arith11_operand" "rI")))
+ (trap_if (ne (plus:TI (sign_extend:TI (match_dup 1))
+ (sign_extend:TI (match_dup 2)))
+ (sign_extend:TI (plus:DI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ "!TARGET_64BIT"
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if (INTVAL (operands[2]) >= 0)
+ return \"addi %2,%R1,%R0\;{addco|add,c,tsv} %1,%%r0,%0\";
+ else
+ return \"addi %2,%R1,%R0\;{subbo|sub,b,tsv} %1,%%r0,%0\";
+ }
+ else
+ return \"add %R2,%R1,%R0\;{addco|add,c,tsv} %2,%1,%0\";
+}"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
;; define_splits to optimize cases of adding a constant integer
;; to a register when the constant does not fit in 14 bits.
(define_split
if (intval % 2 == 0 && cint_ok_for_move (intval / 2))
{
operands[2] = GEN_INT (intval / 2);
- operands[3] = GEN_INT (2);
+ operands[3] = const2_rtx;
}
else if (intval % 4 == 0 && cint_ok_for_move (intval / 4))
{
(set_attr "pa_combine_type" "addmove")
(set_attr "length" "4,4")])
+(define_insn "addvsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (plus:SI (match_operand:SI 1 "reg_or_0_operand" "%rM,rM")
+ (match_operand:SI 2 "arith11_operand" "r,I")))
+ (trap_if (ne (plus:DI (sign_extend:DI (match_dup 1))
+ (sign_extend:DI (match_dup 2)))
+ (sign_extend:DI (plus:SI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ ""
+ "@
+ {addo|add,tsv} %2,%1,%0
+ {addio|addi,tsv} %2,%1,%0"
+ [(set_attr "type" "binary,binary")
+ (set_attr "length" "4,4")])
+
(define_expand "subdi3"
[(set (match_operand:DI 0 "register_operand" "")
- (minus:DI (match_operand:DI 1 "register_operand" "")
- (match_operand:DI 2 "register_operand" "")))]
+ (minus:DI (match_operand:DI 1 "arith11_operand" "")
+ (match_operand:DI 2 "reg_or_0_operand" "")))]
""
"")
(define_insn ""
- [(set (match_operand:DI 0 "register_operand" "=r")
- (minus:DI (match_operand:DI 1 "register_operand" "r")
- (match_operand:DI 2 "register_operand" "r")))]
- "!TARGET_64BIT"
- "sub %R1,%R2,%R0\;{subb|sub,b} %1,%2,%0"
- [(set_attr "type" "binary")
- (set_attr "length" "8")])
-
-(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r,r,!q")
(minus:DI (match_operand:DI 1 "arith11_operand" "r,I,!U")
- (match_operand:DI 2 "register_operand" "r,r,!r")))]
+ (match_operand:DI 2 "reg_or_0_operand" "rM,rM,!rM")))]
"TARGET_64BIT"
"@
sub %1,%2,%0
[(set_attr "type" "binary,binary,move")
(set_attr "length" "4,4,4")])
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,&r")
+ (minus:DI (match_operand:DI 1 "arith11_operand" "r,I")
+ (match_operand:DI 2 "reg_or_0_operand" "rM,rM")))]
+ "!TARGET_64BIT"
+ "*
+{
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ if (INTVAL (operands[1]) >= 0)
+ return \"subi %1,%R2,%R0\;{subb|sub,b} %%r0,%2,%0\";
+ else
+ return \"ldi -1,%0\;subi %1,%R2,%R0\;{subb|sub,b} %0,%2,%0\";
+ }
+ else
+ return \"sub %R1,%R2,%R0\;{subb|sub,b} %1,%2,%0\";
+}"
+ [(set_attr "type" "binary")
+ (set (attr "length")
+ (if_then_else (eq_attr "alternative" "0")
+ (const_int 8)
+ (if_then_else (ge (symbol_ref "INTVAL (operands[1])")
+ (const_int 0))
+ (const_int 8)
+ (const_int 12))))])
+
+(define_expand "subvdi3"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (minus:DI (match_operand:DI 1 "arith11_operand" "")
+ (match_operand:DI 2 "reg_or_0_operand" "")))
+ (trap_if (ne (minus:TI (sign_extend:TI (match_dup 1))
+ (sign_extend:TI (match_dup 2)))
+ (sign_extend:TI (minus:DI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))])]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (minus:DI (match_operand:DI 1 "arith11_operand" "r,I")
+ (match_operand:DI 2 "reg_or_0_operand" "rM,rM")))
+ (trap_if (ne (minus:TI (sign_extend:TI (match_dup 1))
+ (sign_extend:TI (match_dup 2)))
+ (sign_extend:TI (minus:DI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ "TARGET_64BIT"
+ "@
+ {subo|sub,tsv} %1,%2,%0
+ {subio|subi,tsv} %1,%2,%0"
+ [(set_attr "type" "binary,binary")
+ (set_attr "length" "4,4")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,&r")
+ (minus:DI (match_operand:DI 1 "arith11_operand" "r,I")
+ (match_operand:DI 2 "reg_or_0_operand" "rM,rM")))
+ (trap_if (ne (minus:TI (sign_extend:TI (match_dup 1))
+ (sign_extend:TI (match_dup 2)))
+ (sign_extend:TI (minus:DI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ "!TARGET_64BIT"
+ "*
+{
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ if (INTVAL (operands[1]) >= 0)
+ return \"subi %1,%R2,%R0\;{subbo|sub,b,tsv} %%r0,%2,%0\";
+ else
+ return \"ldi -1,%0\;subi %1,%R2,%R0\;{subbo|sub,b,tsv} %0,%2,%0\";
+ }
+ else
+ return \"sub %R1,%R2,%R0\;{subbo|sub,b,tsv} %1,%2,%0\";
+}"
+ [(set_attr "type" "binary,binary")
+ (set (attr "length")
+ (if_then_else (eq_attr "alternative" "0")
+ (const_int 8)
+ (if_then_else (ge (symbol_ref "INTVAL (operands[1])")
+ (const_int 0))
+ (const_int 8)
+ (const_int 12))))])
+
(define_expand "subsi3"
[(set (match_operand:SI 0 "register_operand" "")
(minus:SI (match_operand:SI 1 "arith11_operand" "")
[(set_attr "type" "binary,binary,move")
(set_attr "length" "4,4,4")])
+(define_insn "subvsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "arith11_operand" "rM,I")
+ (match_operand:SI 2 "reg_or_0_operand" "rM,rM")))
+ (trap_if (ne (minus:DI (sign_extend:DI (match_dup 1))
+ (sign_extend:DI (match_dup 2)))
+ (sign_extend:DI (minus:SI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ ""
+ "@
+ {subo|sub,tsv} %1,%2,%0
+ {subio|subi,tsv} %1,%2,%0"
+ [(set_attr "type" "binary,binary")
+ (set_attr "length" "4,4")])
+
;; Clobbering a "register_operand" instead of a match_scratch
;; in operand3 of millicode calls avoids spilling %r1 and
;; produces better code.
(clobber (reg:SI 26))
(clobber (reg:SI 25))
(clobber (match_dup 4))])
- (set (match_operand:SI 0 "general_operand" "") (reg:SI 29))]
+ (set (match_operand:SI 0 "move_dest_operand" "") (reg:SI 29))]
""
"
{
GEN_INT (32)));
emit_move_insn (op2shifted, gen_rtx_LSHIFTRT (DImode, operands[2],
GEN_INT (32)));
- op1r = gen_rtx_SUBREG (SImode, operands[1], 4);
- op2r = gen_rtx_SUBREG (SImode, operands[2], 4);
- op1l = gen_rtx_SUBREG (SImode, op1shifted, 4);
- op2l = gen_rtx_SUBREG (SImode, op2shifted, 4);
+ op1r = force_reg (SImode, gen_rtx_SUBREG (SImode, operands[1], 4));
+ op2r = force_reg (SImode, gen_rtx_SUBREG (SImode, operands[2], 4));
+ op1l = force_reg (SImode, gen_rtx_SUBREG (SImode, op1shifted, 4));
+ op2l = force_reg (SImode, gen_rtx_SUBREG (SImode, op2shifted, 4));
/* Emit multiplies for the cross products. */
emit_insn (gen_umulsidi3 (cross_product1, op2r, op1l));
(clobber (reg:SI 26))
(clobber (reg:SI 25))
(clobber (match_dup 5))])
- (set (match_operand:SI 0 "general_operand" "") (reg:SI 29))]
+ (set (match_operand:SI 0 "move_dest_operand" "") (reg:SI 29))]
""
"
{
(clobber (reg:SI 26))
(clobber (reg:SI 25))
(clobber (match_dup 5))])
- (set (match_operand:SI 0 "general_operand" "") (reg:SI 29))]
+ (set (match_operand:SI 0 "move_dest_operand" "") (reg:SI 29))]
""
"
{
(clobber (reg:SI 26))
(clobber (reg:SI 25))
(clobber (match_dup 5))])
- (set (match_operand:SI 0 "general_operand" "") (reg:SI 29))]
+ (set (match_operand:SI 0 "move_dest_operand" "") (reg:SI 29))]
""
"
{
(clobber (reg:SI 26))
(clobber (reg:SI 25))
(clobber (match_dup 5))])
- (set (match_operand:SI 0 "general_operand" "") (reg:SI 29))]
+ (set (match_operand:SI 0 "move_dest_operand" "") (reg:SI 29))]
""
"
{
(define_expand "anddi3"
[(set (match_operand:DI 0 "register_operand" "")
- (and:DI (match_operand:DI 1 "and_operand" "")
+ (and:DI (match_operand:DI 1 "register_operand" "")
(match_operand:DI 2 "and_operand" "")))]
""
"
{
- if (TARGET_64BIT)
- {
- /* One operand must be a register operand. */
- if (!register_operand (operands[1], DImode)
- && !register_operand (operands[2], DImode))
- FAIL;
- }
- else
- {
- /* Both operands must be register operands. */
- if (!register_operand (operands[1], DImode)
- || !register_operand (operands[2], DImode))
- FAIL;
- }
+ /* Both operands must be register operands. */
+ if (!TARGET_64BIT && !register_operand (operands[2], DImode))
+ FAIL;
}")
(define_insn ""
(define_expand "iordi3"
[(set (match_operand:DI 0 "register_operand" "")
- (ior:DI (match_operand:DI 1 "ior_operand" "")
+ (ior:DI (match_operand:DI 1 "register_operand" "")
(match_operand:DI 2 "ior_operand" "")))]
""
"
{
- if (TARGET_64BIT)
- {
- /* One operand must be a register operand. */
- if (!register_operand (operands[1], DImode)
- && !register_operand (operands[2], DImode))
- FAIL;
- }
- else
- {
- /* Both operands must be register operands. */
- if (!register_operand (operands[1], DImode)
- || !register_operand (operands[2], DImode))
- FAIL;
- }
+ /* Both operands must be register operands. */
+ if (!TARGET_64BIT && !register_operand (operands[2], DImode))
+ FAIL;
}")
(define_insn ""
[(set_attr "type" "unary")
(set_attr "length" "4")])
+(define_expand "negvdi2"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (neg:DI (match_operand:DI 1 "register_operand" "")))
+ (trap_if (ne (neg:TI (sign_extend:TI (match_dup 1)))
+ (sign_extend:TI (neg:DI (match_dup 1))))
+ (const_int 0))])]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operand:DI 1 "register_operand" "r")))
+ (trap_if (ne (neg:TI (sign_extend:TI (match_dup 1)))
+ (sign_extend:TI (neg:DI (match_dup 1))))
+ (const_int 0))]
+ "!TARGET_64BIT"
+ "sub %%r0,%R1,%R0\;{subbo|sub,b,tsv} %%r0,%1,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operand:DI 1 "register_operand" "r")))
+ (trap_if (ne (neg:TI (sign_extend:TI (match_dup 1)))
+ (sign_extend:TI (neg:DI (match_dup 1))))
+ (const_int 0))]
+ "TARGET_64BIT"
+ "sub,tsv %%r0,%1,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "4")])
+
(define_insn "negsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (match_operand:SI 1 "register_operand" "r")))]
[(set_attr "type" "unary")
(set_attr "length" "4")])
+(define_insn "negvsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "register_operand" "r")))
+ (trap_if (ne (neg:DI (sign_extend:DI (match_dup 1)))
+ (sign_extend:DI (neg:SI (match_dup 1))))
+ (const_int 0))]
+ ""
+ "{subo|sub,tsv} %%r0,%1,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "4")])
+
(define_expand "one_cmpldi2"
[(set (match_operand:DI 0 "register_operand" "")
(not:DI (match_operand:DI 1 "register_operand" "")))]
;; Processors prior to PA 2.0 don't have a fneg instruction. Fast
;; negation can be done by subtracting from plus zero. However, this
;; violates the IEEE standard when negating plus and minus zero.
+;; The slow path toggles the sign bit in the general registers.
(define_expand "negdf2"
- [(parallel [(set (match_operand:DF 0 "register_operand" "")
- (neg:DF (match_operand:DF 1 "register_operand" "")))
- (use (match_dup 2))])]
- "! TARGET_SOFT_FLOAT"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (neg:DF (match_operand:DF 1 "register_operand" "")))]
+ "!TARGET_SOFT_FLOAT"
{
if (TARGET_PA_20 || flag_unsafe_math_optimizations)
emit_insn (gen_negdf2_fast (operands[0], operands[1]));
else
- {
- operands[2] = force_reg (DFmode,
- CONST_DOUBLE_FROM_REAL_VALUE (dconstm1, DFmode));
- emit_insn (gen_muldf3 (operands[0], operands[1], operands[2]));
- }
+ emit_insn (gen_negdf2_slow (operands[0], operands[1]));
DONE;
})
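+;; The slow sequence copies the most significant word into a general
+;; register and flips its sign bit: and,< nullifies the next insn
+;; when the value is already negative, so exactly one of the two depi
+;; instructions (set bit 0 or clear bit 0) is executed.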
+(define_insn "negdf2_slow"
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (neg:DF (match_operand:DF 1 "register_operand" "r")))]
+ "!TARGET_SOFT_FLOAT && !TARGET_PA_20"
+ "*
+{
+ if (rtx_equal_p (operands[0], operands[1]))
+ return \"and,< %1,%1,%0\;depi,tr 1,0,1,%0\;depi 0,0,1,%0\";
+ else
+ return \"and,< %1,%1,%0\;depi,tr 1,0,1,%0\;depi 0,0,1,%0\;copy %R1,%R0\";
+}"
+ [(set_attr "type" "multi")
+ (set (attr "length")
+ (if_then_else (ne (symbol_ref "rtx_equal_p (operands[0], operands[1])")
+ (const_int 0))
+ (const_int 12)
+ (const_int 16)))])
+
(define_insn "negdf2_fast"
[(set (match_operand:DF 0 "register_operand" "=f")
(neg:DF (match_operand:DF 1 "register_operand" "f")))]
- "! TARGET_SOFT_FLOAT && (TARGET_PA_20 || flag_unsafe_math_optimizations)"
+ "!TARGET_SOFT_FLOAT"
"*
{
if (TARGET_PA_20)
(set_attr "length" "4")])
(define_expand "negsf2"
- [(parallel [(set (match_operand:SF 0 "register_operand" "")
- (neg:SF (match_operand:SF 1 "register_operand" "")))
- (use (match_dup 2))])]
- "! TARGET_SOFT_FLOAT"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (neg:SF (match_operand:SF 1 "register_operand" "")))]
+ "!TARGET_SOFT_FLOAT"
{
if (TARGET_PA_20 || flag_unsafe_math_optimizations)
emit_insn (gen_negsf2_fast (operands[0], operands[1]));
else
- {
- operands[2] = force_reg (SFmode,
- CONST_DOUBLE_FROM_REAL_VALUE (dconstm1, SFmode));
- emit_insn (gen_mulsf3 (operands[0], operands[1], operands[2]));
- }
+ emit_insn (gen_negsf2_slow (operands[0], operands[1]));
DONE;
})
+(define_insn "negsf2_slow"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (neg:SF (match_operand:SF 1 "register_operand" "r")))]
+ "!TARGET_SOFT_FLOAT && !TARGET_PA_20"
+ "and,< %1,%1,%0\;depi,tr 1,0,1,%0\;depi 0,0,1,%0"
+ [(set_attr "type" "multi")
+ (set_attr "length" "12")])
+
(define_insn "negsf2_fast"
[(set (match_operand:SF 0 "register_operand" "=f")
(neg:SF (match_operand:SF 1 "register_operand" "f")))]
- "! TARGET_SOFT_FLOAT && (TARGET_PA_20 || flag_unsafe_math_optimizations)"
+ "!TARGET_SOFT_FLOAT"
"*
{
if (TARGET_PA_20)
(match_operand:SI 2 "register_operand" "q")))
(match_operand:SI 3 "register_operand" "0")))]
; accept ...0001...1, can this be generalized?
- "exact_log2 (INTVAL (operands[1]) + 1) >= 0"
+ "exact_log2 (INTVAL (operands[1]) + 1) > 0"
"*
{
int x = INTVAL (operands[1]);
(match_operand:DI 2 "register_operand" "q")))
(match_operand:DI 3 "register_operand" "0")))]
; accept ...0001...1, can this be generalized?
- "TARGET_64BIT && exact_log2 (INTVAL (operands[1]) + 1) >= 0"
+ "TARGET_64BIT && exact_log2 (INTVAL (operands[1]) + 1) > 0"
"*
{
int x = INTVAL (operands[1]);
(and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "const_int_operand" ""))
(match_operand:SI 3 "const_int_operand" "")))]
- "exact_log2 (1 + (INTVAL (operands[3]) >> (INTVAL (operands[2]) & 31))) >= 0"
+ "exact_log2 (1 + (INTVAL (operands[3]) >> (INTVAL (operands[2]) & 31))) > 0"
"*
{
int cnt = INTVAL (operands[2]) & 31;
\f
;; Unconditional and other jump instructions.
-;; This can only be used in a leaf function, so we do
-;; not need to use the PIC register when generating PIC code.
-(define_insn "return"
- [(return)
- (use (reg:SI 2))
- (const_int 0)]
- "hppa_can_use_return_insn_p ()"
- "*
-{
- if (TARGET_PA_20)
- return \"bve%* (%%r2)\";
- return \"bv%* %%r0(%%r2)\";
-}"
- [(set_attr "type" "branch")
- (set_attr "length" "4")])
-
-;; Emit a different pattern for functions which have non-trivial
-;; epilogues so as not to confuse jump and reorg.
+;; This is used for most returns.
(define_insn "return_internal"
[(return)
- (use (reg:SI 2))
- (const_int 1)]
+ (use (reg:SI 2))]
""
"*
{
(use (reg:SI 2))]
"!TARGET_NO_SPACE_REGS
&& !TARGET_PA_20
- && flag_pic && current_function_calls_eh_return"
+ && flag_pic && crtl->calls_eh_return"
"ldsid (%%sr0,%%r2),%%r1\;mtsp %%r1,%%sr0\;be%* 0(%%sr0,%%r2)"
[(set_attr "type" "branch")
(set_attr "length" "12")])
""
"
{
- /* Try to use the trivial return first. Else use the full
- epilogue. */
- if (hppa_can_use_return_insn_p ())
- emit_jump_insn (gen_return ());
+ rtx x;
+
+ /* Try to use the trivial return first. Else use the full epilogue. */
+ if (reload_completed
+ && !frame_pointer_needed
+ && !df_regs_ever_live_p (2)
+ && (compute_frame_size (get_frame_size (), 0) ? 0 : 1))
+ x = gen_return_internal ();
else
{
- rtx x;
-
hppa_expand_epilogue ();
/* EH returns bypass the normal return stub. Thus, we must do an
using space registers. */
if (!TARGET_NO_SPACE_REGS
&& !TARGET_PA_20
- && flag_pic && current_function_calls_eh_return)
+ && flag_pic && crtl->calls_eh_return)
x = gen_return_external_pic ();
else
x = gen_return_internal ();
-
- emit_jump_insn (x);
}
+ emit_jump_insn (x);
DONE;
}")
(set_attr "length" "4")])
(define_insn "blockage"
- [(unspec_volatile [(const_int 2)] 0)]
+ [(unspec_volatile [(const_int 2)] UNSPECV_BLOCKAGE)]
""
""
[(set_attr "length" "0")])
"*
{
/* An unconditional branch which can reach its target. */
- if (get_attr_length (insn) != 24
- && get_attr_length (insn) != 16)
+ if (get_attr_length (insn) < 16)
return \"b%* %l0\";
- return output_lbranch (operands[0], insn);
+ return output_lbranch (operands[0], insn, 1);
}"
[(set_attr "type" "uncond_branch")
(set_attr "pa_combine_type" "uncond_branch")
(cond [(eq (symbol_ref "jump_in_call_delay (insn)") (const_int 1))
(if_then_else (lt (abs (minus (match_dup 0)
(plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8))
- (ge (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 262100))
- (if_then_else (eq (symbol_ref "flag_pic") (const_int 0))
- (const_int 16)
- (const_int 24))]
- (const_int 4)))])
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (const_int 8))
+ (lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 4)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 20)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 16)]
+ (const_int 24)))])
;;; Hope this is only within a function...
(define_insn "indirect_jump"
[(set_attr "type" "branch")
(set_attr "length" "4")])
+;;; In principle, an indirect jump could be optimized to a direct
+;;; jump.  However, GAS for the SOM target doesn't allow branching to
+;;; a label inside a function, and we also don't correctly compute
+;;; branch distances for labels outside the current function.  Thus,
+;;; we use an indirect jump that can't be optimized to a direct jump
+;;; for all targets.  We assume that the branch target is in the same
+;;; space (i.e., a nested function jumping to a label in an outer
+;;; function in the same translation unit).
+(define_expand "nonlocal_goto"
+ [(use (match_operand 0 "general_operand" ""))
+ (use (match_operand 1 "general_operand" ""))
+ (use (match_operand 2 "general_operand" ""))
+ (use (match_operand 3 "general_operand" ""))]
+ ""
+{
+ rtx lab = operands[1];
+ rtx stack = operands[2];
+ rtx fp = operands[3];
+
+ lab = copy_to_reg (lab);
+
+ emit_clobber (gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)));
+ emit_clobber (gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx));
+
+ /* Restore the frame pointer. The virtual_stack_vars_rtx is saved
+ instead of the hard_frame_pointer_rtx in the save area. As a
+ result, an extra instruction is needed to adjust for the offset
+ of the virtual stack variables and the frame pointer. */
+ if (GET_CODE (fp) != REG)
+ fp = force_reg (Pmode, fp);
+ emit_move_insn (virtual_stack_vars_rtx, fp);
+
+ emit_stack_restore (SAVE_NONLOCAL, stack, NULL_RTX);
+
+ emit_use (hard_frame_pointer_rtx);
+ emit_use (stack_pointer_rtx);
+
+ /* Nonlocal goto jumps are only used between functions in the same
+ translation unit. Thus, we can avoid the extra overhead of an
+ interspace jump. */
+ emit_jump_insn (gen_indirect_goto (lab));
+ emit_barrier ();
+ DONE;
+})
+
+(define_insn "indirect_goto"
+ [(unspec [(match_operand 0 "register_operand" "=r")] UNSPEC_GOTO)]
+ "GET_MODE (operands[0]) == word_mode"
+ "bv%* %%r0(%0)"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
;;; This jump is used in branch tables where the insn length is fixed.
;;; The length of this insn is adjusted if the delay slot is not filled.
(define_insn "short_jump"
operands[0] = index;
}
- /* In 64bit mode we must make sure to wipe the upper bits of the register
- just in case the addition overflowed or we had random bits in the
- high part of the register. */
- if (TARGET_64BIT)
- {
- rtx index = gen_reg_rtx (DImode);
-
- emit_insn (gen_extendsidi2 (index, operands[0]));
- operands[0] = gen_rtx_SUBREG (SImode, index, 4);
- }
-
if (!INT_5_BITS (operands[2]))
operands[2] = force_reg (SImode, operands[2]);
then be worthwhile to split the casesi patterns to improve scheduling.
However, it's not clear that all this extra complexity is worth
the effort. */
- emit_insn (gen_cmpsi (operands[0], operands[2]));
- emit_jump_insn (gen_bgtu (operands[4]));
+ {
+ rtx test = gen_rtx_GTU (VOIDmode, operands[0], operands[2]);
+ emit_jump_insn (gen_cbranchsi4 (test, operands[0], operands[2], operands[4]));
+ }
+
+ /* In 64-bit mode we must make sure to wipe the upper bits of the register
+ just in case the addition overflowed or we had random bits in the
+ high part of the register. */
+ if (TARGET_64BIT)
+ {
+ rtx index = gen_reg_rtx (DImode);
+
+ emit_insn (gen_extendsidi2 (index, operands[0]));
+ operands[0] = index;
+ }
if (TARGET_BIG_SWITCH)
{
if (TARGET_64BIT)
- {
- rtx tmp1 = gen_reg_rtx (DImode);
- rtx tmp2 = gen_reg_rtx (DImode);
-
- emit_jump_insn (gen_casesi64p (operands[0], operands[3],
- tmp1, tmp2));
- }
+ emit_jump_insn (gen_casesi64p (operands[0], operands[3]));
+ else if (flag_pic)
+ emit_jump_insn (gen_casesi32p (operands[0], operands[3]));
else
- {
- rtx tmp1 = gen_reg_rtx (SImode);
-
- if (flag_pic)
- {
- rtx tmp2 = gen_reg_rtx (SImode);
-
- emit_jump_insn (gen_casesi32p (operands[0], operands[3],
- tmp1, tmp2));
- }
- else
- emit_jump_insn (gen_casesi32 (operands[0], operands[3], tmp1));
- }
+ emit_jump_insn (gen_casesi32 (operands[0], operands[3]));
}
else
emit_jump_insn (gen_casesi0 (operands[0], operands[3]));
(mult:SI (match_operand:SI 0 "register_operand" "r")
(const_int 4))
(label_ref (match_operand 1 "" "")))))
- (clobber (match_operand:SI 2 "register_operand" "=&r"))]
- "!TARGET_64BIT && TARGET_BIG_SWITCH"
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "!flag_pic"
"ldil L'%l1,%2\;ldo R'%l1(%2),%2\;{ldwx|ldw},s %0(%2),%2\;bv,n %%r0(%2)"
[(set_attr "type" "multi")
(set_attr "length" "16")])
(mult:SI (match_operand:SI 0 "register_operand" "r")
(const_int 4))
(label_ref (match_operand 1 "" "")))))
- (clobber (match_operand:SI 2 "register_operand" "=&a"))
- (clobber (match_operand:SI 3 "register_operand" "=&r"))]
- "!TARGET_64BIT && TARGET_BIG_SWITCH"
- "{bl .+8,%2\;depi 0,31,2,%2|mfia %2}\;ldo {16|20}(%2),%2\;\
+ (clobber (match_scratch:SI 2 "=&r"))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ "flag_pic"
+ "{bl .+8,%2\;depi 0,31,2,%2|mfia %2}\;ldo {%l1-.|%l1+4-.}(%2),%2\;\
{ldwx|ldw},s %0(%2),%3\;{addl|add,l} %2,%3,%3\;bv,n %%r0(%3)"
[(set_attr "type" "multi")
(set (attr "length")
;;; 64-bit code, 32-bit relative branch table.
(define_insn "casesi64p"
[(set (pc) (mem:DI (plus:DI
- (mult:DI (sign_extend:DI
- (match_operand:SI 0 "register_operand" "r"))
+ (mult:DI (match_operand:DI 0 "register_operand" "r")
(const_int 8))
(label_ref (match_operand 1 "" "")))))
- (clobber (match_operand:DI 2 "register_operand" "=&r"))
- (clobber (match_operand:DI 3 "register_operand" "=&r"))]
- "TARGET_64BIT && TARGET_BIG_SWITCH"
- "mfia %2\;ldo 24(%2),%2\;ldw,s %0(%2),%3\;extrd,s %3,63,32,%3\;\
+ (clobber (match_scratch:DI 2 "=&r"))
+ (clobber (match_scratch:DI 3 "=&r"))]
+ ""
+ "mfia %2\;ldo %l1+4-.(%2),%2\;ldw,s %0(%2),%3\;extrd,s %3,63,32,%3\;\
add,l %2,%3,%3\;bv,n %%r0(%3)"
[(set_attr "type" "multi")
(set_attr "length" "24")])
""
"
{
- rtx op, call_insn;
+ rtx op;
rtx nb = operands[1];
if (TARGET_PORTABLE_RUNTIME)
the only method that we have for doing DImode multiplication
is with a libcall. This could be trouble if we haven't
allocated enough space for the outgoing arguments. */
- if (INTVAL (nb) > current_function_outgoing_args_size)
- abort ();
+ gcc_assert (INTVAL (nb) <= crtl->outgoing_args_size);
emit_move_insn (arg_pointer_rtx,
gen_rtx_PLUS (word_mode, stack_pointer_rtx,
need to have a use of the PIC register in the return pattern and
the final save/restore operation is not needed.
- I elected to just clobber %r4 in the PIC patterns and use it instead
+ I elected to just use register %r4 in the PIC patterns instead
of trying to force hppa_pic_save_rtx () to a callee saved register.
This might have required a new register class and constraint. It
was also simpler to just handle the restore from a register than a
generic pseudo. */
if (TARGET_64BIT)
{
+ rtx r4 = gen_rtx_REG (word_mode, 4);
if (GET_CODE (op) == SYMBOL_REF)
- call_insn = emit_call_insn (gen_call_symref_64bit (op, nb));
+ emit_call_insn (gen_call_symref_64bit (op, nb, r4));
else
{
op = force_reg (word_mode, op);
- call_insn = emit_call_insn (gen_call_reg_64bit (op, nb));
+ emit_call_insn (gen_call_reg_64bit (op, nb, r4));
}
}
else
if (GET_CODE (op) == SYMBOL_REF)
{
if (flag_pic)
- call_insn = emit_call_insn (gen_call_symref_pic (op, nb));
+ {
+ rtx r4 = gen_rtx_REG (word_mode, 4);
+ emit_call_insn (gen_call_symref_pic (op, nb, r4));
+ }
else
- call_insn = emit_call_insn (gen_call_symref (op, nb));
+ emit_call_insn (gen_call_symref (op, nb));
}
else
{
rtx tmpreg = gen_rtx_REG (word_mode, 22);
-
emit_move_insn (tmpreg, force_reg (word_mode, op));
if (flag_pic)
- call_insn = emit_call_insn (gen_call_reg_pic (nb));
+ {
+ rtx r4 = gen_rtx_REG (word_mode, 4);
+ emit_call_insn (gen_call_reg_pic (nb, r4));
+ }
else
- call_insn = emit_call_insn (gen_call_reg (nb));
+ emit_call_insn (gen_call_reg (nb));
}
}
(set (attr "length") (symbol_ref "attr_length_call (insn, 0)"))])
(define_insn "call_symref_pic"
- [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+ [(set (match_operand:SI 2 "register_operand" "=&r") (reg:SI 19))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" "i"))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
(use (reg:SI 19))
(use (const_int 0))]
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
- "*
-{
- output_arg_descriptor (insn);
- return output_call (insn, operands[0], 0);
-}"
- [(set_attr "type" "call")
- (set (attr "length")
- (plus (symbol_ref "attr_length_call (insn, 0)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
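+;;
+;; Operand 2 is %r4, passed in by the call expander.  For a returning
+;; call, the second split below therefore produces, roughly,
+;;
+;;	copy %r19,%r4		; save the PIC register
+;;	...			; the call sequence proper
+;;	copy %r4,%r19		; restore the PIC register
+;;
+;; while the first (noreturn) split omits the final copy.  The 64-bit
+;; and indirect call variants further down follow the same scheme, with
+;; %r27 in place of %r19 for TARGET_64BIT.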
(define_split
- [(parallel [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+ [(parallel [(set (match_operand:SI 2 "register_operand" "") (reg:SI 19))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" ""))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
(use (reg:SI 19))
(use (const_int 0))])]
- "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:SI 4) (reg:SI 19))
+ "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 2) (reg:SI 19))
(parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 0))])
- (set (reg:SI 19) (reg:SI 4))]
+ (use (const_int 0))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+(define_split
+ [(parallel [(set (match_operand:SI 2 "register_operand" "") (reg:SI 19))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" ""))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
(use (reg:SI 19))
(use (const_int 0))])]
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT && reload_completed"
- [(parallel [(call (mem:SI (match_dup 0))
+ [(set (match_dup 2) (reg:SI 19))
+ (parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 0))])]
+ (use (const_int 0))])
+ (set (reg:SI 19) (match_dup 2))]
"")
(define_insn "*call_symref_pic_post_reload"
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
(define_insn "call_symref_64bit"
- [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+ [(set (match_operand:DI 2 "register_operand" "=&r") (reg:DI 27))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" "i"))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 2))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 0))]
"TARGET_64BIT"
- "*
-{
- output_arg_descriptor (insn);
- return output_call (insn, operands[0], 0);
-}"
- [(set_attr "type" "call")
- (set (attr "length")
- (plus (symbol_ref "attr_length_call (insn, 0)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
(define_split
- [(parallel [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+ [(parallel [(set (match_operand:DI 2 "register_operand" "") (reg:DI 27))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" ""))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 2))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 0))])]
- "TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:DI 4) (reg:DI 27))
+ "TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 2) (reg:DI 27))
(parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 0))])
- (set (reg:DI 27) (reg:DI 4))]
+ (use (const_int 0))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+(define_split
+ [(parallel [(set (match_operand:DI 2 "register_operand" "") (reg:DI 27))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" ""))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 2))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 0))])]
"TARGET_64BIT && reload_completed"
- [(parallel [(call (mem:SI (match_dup 0))
+ [(set (match_dup 2) (reg:DI 27))
+ (parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 0))])]
+ (use (const_int 0))])
+ (set (reg:DI 27) (match_dup 2))]
"")
(define_insn "*call_symref_64bit_post_reload"
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
(define_insn "call_reg_pic"
- [(call (mem:SI (reg:SI 22))
+ [(set (match_operand:SI 1 "register_operand" "=&r") (reg:SI 19))
+ (call (mem:SI (reg:SI 22))
(match_operand 0 "" "i"))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 1))
(use (reg:SI 19))
(use (const_int 1))]
"!TARGET_64BIT"
- "*
-{
- return output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
-}"
- [(set_attr "type" "dyncall")
- (set (attr "length")
- (plus (symbol_ref "attr_length_indirect_call (insn)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
(define_split
- [(parallel [(call (mem:SI (reg:SI 22))
+ [(parallel [(set (match_operand:SI 1 "register_operand" "") (reg:SI 19))
+ (call (mem:SI (reg:SI 22))
(match_operand 0 "" ""))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 1))
(use (reg:SI 19))
(use (const_int 1))])]
- "!TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:SI 4) (reg:SI 19))
+ "!TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 1) (reg:SI 19))
(parallel [(call (mem:SI (reg:SI 22))
(match_dup 0))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 1))])
- (set (reg:SI 19) (reg:SI 4))]
+ (use (const_int 1))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(call (mem:SI (reg:SI 22))
+(define_split
+ [(parallel [(set (match_operand:SI 1 "register_operand" "") (reg:SI 19))
+ (call (mem:SI (reg:SI 22))
(match_operand 0 "" ""))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 1))
(use (reg:SI 19))
(use (const_int 1))])]
"!TARGET_64BIT && reload_completed"
- [(parallel [(call (mem:SI (reg:SI 22))
+ [(set (match_dup 1) (reg:SI 19))
+ (parallel [(call (mem:SI (reg:SI 22))
(match_dup 0))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 1))])]
+ (use (const_int 1))])
+ (set (reg:SI 19) (match_dup 1))]
"")
(define_insn "*call_reg_pic_post_reload"
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
(define_insn "call_reg_64bit"
- [(call (mem:SI (match_operand:DI 0 "register_operand" "r"))
+ [(set (match_operand:DI 2 "register_operand" "=&r") (reg:DI 27))
+ (call (mem:SI (match_operand:DI 0 "register_operand" "r"))
(match_operand 1 "" "i"))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 2))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 1))]
"TARGET_64BIT"
- "*
-{
- return output_indirect_call (insn, operands[0]);
-}"
- [(set_attr "type" "dyncall")
- (set (attr "length")
- (plus (symbol_ref "attr_length_indirect_call (insn)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
(define_split
- [(parallel [(call (mem:SI (match_operand 0 "register_operand" ""))
+ [(parallel [(set (match_operand:DI 2 "register_operand" "") (reg:DI 27))
+ (call (mem:SI (match_operand 0 "register_operand" ""))
(match_operand 1 "" ""))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 2))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 1))])]
- "TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:DI 4) (reg:DI 27))
+ "TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 2) (reg:DI 27))
(parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 1))])
- (set (reg:DI 27) (reg:DI 4))]
+ (use (const_int 1))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(call (mem:SI (match_operand 0 "register_operand" ""))
+(define_split
+ [(parallel [(set (match_operand:DI 2 "register_operand" "") (reg:DI 27))
+ (call (mem:SI (match_operand 0 "register_operand" ""))
(match_operand 1 "" ""))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 2))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 1))])]
"TARGET_64BIT && reload_completed"
- [(parallel [(call (mem:SI (match_dup 0))
+ [(set (match_dup 2) (reg:DI 27))
+ (parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 1))])]
+ (use (const_int 1))])
+ (set (reg:DI 27) (match_dup 2))]
"")
(define_insn "*call_reg_64bit_post_reload"
[(call (mem:SI (match_operand:DI 0 "register_operand" "r"))
(match_operand 1 "" "i"))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
""
"
{
- rtx op, call_insn;
+ rtx op;
rtx dst = operands[0];
rtx nb = operands[2];
the only method that we have for doing DImode multiplication
is with a libcall. This could be trouble if we haven't
allocated enough space for the outgoing arguments. */
- if (INTVAL (nb) > current_function_outgoing_args_size)
- abort ();
+ gcc_assert (INTVAL (nb) <= crtl->outgoing_args_size);
emit_move_insn (arg_pointer_rtx,
gen_rtx_PLUS (word_mode, stack_pointer_rtx,
need to have a use of the PIC register in the return pattern and
the final save/restore operation is not needed.
- I elected to just clobber %r4 in the PIC patterns and use it instead
+ I elected to just use register %r4 in the PIC patterns instead
of trying to force hppa_pic_save_rtx () to a callee saved register.
This might have required a new register class and constraint. It
was also simpler to just handle the restore from a register than a
generic pseudo. */
if (TARGET_64BIT)
{
+ rtx r4 = gen_rtx_REG (word_mode, 4);
if (GET_CODE (op) == SYMBOL_REF)
- call_insn = emit_call_insn (gen_call_val_symref_64bit (dst, op, nb));
+ emit_call_insn (gen_call_val_symref_64bit (dst, op, nb, r4));
else
{
op = force_reg (word_mode, op);
- call_insn = emit_call_insn (gen_call_val_reg_64bit (dst, op, nb));
+ emit_call_insn (gen_call_val_reg_64bit (dst, op, nb, r4));
}
}
else
if (GET_CODE (op) == SYMBOL_REF)
{
if (flag_pic)
- call_insn = emit_call_insn (gen_call_val_symref_pic (dst, op, nb));
+ {
+ rtx r4 = gen_rtx_REG (word_mode, 4);
+ emit_call_insn (gen_call_val_symref_pic (dst, op, nb, r4));
+ }
else
- call_insn = emit_call_insn (gen_call_val_symref (dst, op, nb));
+ emit_call_insn (gen_call_val_symref (dst, op, nb));
}
else
{
rtx tmpreg = gen_rtx_REG (word_mode, 22);
-
emit_move_insn (tmpreg, force_reg (word_mode, op));
if (flag_pic)
- call_insn = emit_call_insn (gen_call_val_reg_pic (dst, nb));
+ {
+ rtx r4 = gen_rtx_REG (word_mode, 4);
+ emit_call_insn (gen_call_val_reg_pic (dst, nb, r4));
+ }
else
- call_insn = emit_call_insn (gen_call_val_reg (dst, nb));
+ emit_call_insn (gen_call_val_reg (dst, nb));
}
}
(set (attr "length") (symbol_ref "attr_length_call (insn, 0)"))])
(define_insn "call_val_symref_pic"
- [(set (match_operand 0 "" "")
+ [(set (match_operand:SI 3 "register_operand" "=&r") (reg:SI 19))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand 1 "call_operand_address" ""))
(match_operand 2 "" "i")))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 3))
(use (reg:SI 19))
(use (const_int 0))]
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
- "*
-{
- output_arg_descriptor (insn);
- return output_call (insn, operands[1], 0);
-}"
- [(set_attr "type" "call")
- (set (attr "length")
- (plus (symbol_ref "attr_length_call (insn, 0)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
(define_split
- [(parallel [(set (match_operand 0 "" "")
+ [(parallel [(set (match_operand:SI 3 "register_operand" "") (reg:SI 19))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand 1 "call_operand_address" ""))
(match_operand 2 "" "")))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 3))
(use (reg:SI 19))
(use (const_int 0))])]
- "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:SI 4) (reg:SI 19))
+ "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 3) (reg:SI 19))
(parallel [(set (match_dup 0)
(call (mem:SI (match_dup 1))
(match_dup 2)))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 0))])
- (set (reg:SI 19) (reg:SI 4))]
+ (use (const_int 0))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(set (match_operand 0 "" "")
+(define_split
+ [(parallel [(set (match_operand:SI 3 "register_operand" "") (reg:SI 19))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand 1 "call_operand_address" ""))
(match_operand 2 "" "")))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 3))
(use (reg:SI 19))
(use (const_int 0))])]
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT && reload_completed"
- [(parallel [(set (match_dup 0)
+ [(set (match_dup 3) (reg:SI 19))
+ (parallel [(set (match_dup 0)
(call (mem:SI (match_dup 1))
(match_dup 2)))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 0))])]
+ (use (const_int 0))])
+ (set (reg:SI 19) (match_dup 3))]
"")
(define_insn "*call_val_symref_pic_post_reload"
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
(define_insn "call_val_symref_64bit"
- [(set (match_operand 0 "" "")
+ [(set (match_operand:DI 3 "register_operand" "=&r") (reg:DI 27))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand 1 "call_operand_address" ""))
(match_operand 2 "" "i")))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 0))]
"TARGET_64BIT"
- "*
-{
- output_arg_descriptor (insn);
- return output_call (insn, operands[1], 0);
-}"
- [(set_attr "type" "call")
- (set (attr "length")
- (plus (symbol_ref "attr_length_call (insn, 0)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
(define_split
- [(parallel [(set (match_operand 0 "" "")
+ [(parallel [(set (match_operand:DI 3 "register_operand" "") (reg:DI 27))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand 1 "call_operand_address" ""))
(match_operand 2 "" "")))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 0))])]
- "TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:DI 4) (reg:DI 27))
+ "TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 3) (reg:DI 27))
(parallel [(set (match_dup 0)
(call (mem:SI (match_dup 1))
(match_dup 2)))
	      (clobber (reg:DI 1))
	      (clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 0))])
- (set (reg:DI 27) (reg:DI 4))]
+ (use (const_int 0))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(set (match_operand 0 "" "")
+(define_split
+ [(parallel [(set (match_operand:DI 3 "register_operand" "") (reg:DI 27))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand 1 "call_operand_address" ""))
(match_operand 2 "" "")))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 0))])]
"TARGET_64BIT && reload_completed"
- [(parallel [(set (match_dup 0)
+ [(set (match_dup 3) (reg:DI 27))
+ (parallel [(set (match_dup 0)
(call (mem:SI (match_dup 1))
(match_dup 2)))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 0))])]
+ (use (const_int 0))])
+ (set (reg:DI 27) (match_dup 3))]
"")
(define_insn "*call_val_symref_64bit_post_reload"
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
(define_insn "call_val_reg_pic"
- [(set (match_operand 0 "" "")
+ [(set (match_operand:SI 2 "register_operand" "=&r") (reg:SI 19))
+ (set (match_operand 0 "" "")
(call (mem:SI (reg:SI 22))
(match_operand 1 "" "i")))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
(use (reg:SI 19))
(use (const_int 1))]
"!TARGET_64BIT"
- "*
-{
- return output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
-}"
- [(set_attr "type" "dyncall")
- (set (attr "length")
- (plus (symbol_ref "attr_length_indirect_call (insn)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
(define_split
- [(parallel [(set (match_operand 0 "" "")
+ [(parallel [(set (match_operand:SI 2 "register_operand" "") (reg:SI 19))
+ (set (match_operand 0 "" "")
(call (mem:SI (reg:SI 22))
(match_operand 1 "" "")))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
(use (reg:SI 19))
(use (const_int 1))])]
- "!TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:SI 4) (reg:SI 19))
+ "!TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 2) (reg:SI 19))
(parallel [(set (match_dup 0)
(call (mem:SI (reg:SI 22))
(match_dup 1)))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 1))])
- (set (reg:SI 19) (reg:SI 4))]
+ (use (const_int 1))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(set (match_operand 0 "" "")
+(define_split
+ [(parallel [(set (match_operand:SI 2 "register_operand" "") (reg:SI 19))
+ (set (match_operand 0 "" "")
(call (mem:SI (reg:SI 22))
(match_operand 1 "" "")))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
(use (reg:SI 19))
(use (const_int 1))])]
"!TARGET_64BIT && reload_completed"
- [(parallel [(set (match_dup 0)
+ [(set (match_dup 2) (reg:SI 19))
+ (parallel [(set (match_dup 0)
(call (mem:SI (reg:SI 22))
(match_dup 1)))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 1))])]
+ (use (const_int 1))])
+ (set (reg:SI 19) (match_dup 2))]
"")
(define_insn "*call_val_reg_pic_post_reload"
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
(define_insn "call_val_reg_64bit"
- [(set (match_operand 0 "" "")
+ [(set (match_operand:DI 3 "register_operand" "=&r") (reg:DI 27))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand:DI 1 "register_operand" "r"))
(match_operand 2 "" "i")))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 1))]
"TARGET_64BIT"
- "*
-{
- return output_indirect_call (insn, operands[1]);
-}"
- [(set_attr "type" "dyncall")
- (set (attr "length")
- (plus (symbol_ref "attr_length_indirect_call (insn)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
(define_split
- [(parallel [(set (match_operand 0 "" "")
+ [(parallel [(set (match_operand:DI 3 "register_operand" "") (reg:DI 27))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand:DI 1 "register_operand" ""))
(match_operand 2 "" "")))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 1))])]
- "TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:DI 4) (reg:DI 27))
+ "TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 3) (reg:DI 27))
(parallel [(set (match_dup 0)
(call (mem:SI (match_dup 1))
(match_dup 2)))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 1))])
- (set (reg:DI 27) (reg:DI 4))]
+ (use (const_int 1))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(set (match_operand 0 "" "")
+(define_split
+ [(parallel [(set (match_operand:DI 3 "register_operand" "") (reg:DI 27))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand:DI 1 "register_operand" ""))
(match_operand 2 "" "")))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 1))])]
"TARGET_64BIT && reload_completed"
- [(parallel [(set (match_dup 0)
+ [(set (match_dup 3) (reg:DI 27))
+ (parallel [(set (match_dup 0)
(call (mem:SI (match_dup 1))
(match_dup 2)))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 1))])]
+ (use (const_int 1))])
+ (set (reg:DI 27) (match_dup 3))]
"")
(define_insn "*call_val_reg_64bit_post_reload"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:DI 1 "register_operand" "r"))
(match_operand 2 "" "i")))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
the only method that we have for doing DImode multiplication
is with a libcall. This could be trouble if we haven't
allocated enough space for the outgoing arguments. */
- if (INTVAL (nb) > current_function_outgoing_args_size)
- abort ();
+ gcc_assert (INTVAL (nb) <= crtl->outgoing_args_size);
emit_move_insn (arg_pointer_rtx,
gen_rtx_PLUS (word_mode, stack_pointer_rtx,
the only method that we have for doing DImode multiplication
is with a libcall. This could be trouble if we haven't
allocated enough space for the outgoing arguments. */
- if (INTVAL (nb) > current_function_outgoing_args_size)
- abort ();
+ gcc_assert (INTVAL (nb) <= crtl->outgoing_args_size);
emit_move_insn (arg_pointer_rtx,
gen_rtx_PLUS (word_mode, stack_pointer_rtx,
[(set (pc) (match_operand 0 "pmode_register_operand" "a"))
(clobber (reg:SI 2))]
"!TARGET_64BIT"
- "ldsid (%%sr0,%0),%%r2\; mtsp %%r2,%%sr0\; be%* 0(%%sr0,%0)"
+ "ldsid (%%sr0,%0),%%r2\;mtsp %%r2,%%sr0\;be%* 0(%%sr0,%0)"
[(set_attr "type" "branch")
(set_attr "length" "12")])
(set_attr "length" "4")])
(define_expand "builtin_longjmp"
- [(unspec_volatile [(match_operand 0 "register_operand" "r")] 3)]
+ [(unspec_volatile [(match_operand 0 "register_operand" "r")] UNSPECV_LONGJMP)]
""
"
{
(POINTER_SIZE * 2) / BITS_PER_UNIT));
rtx pv = gen_rtx_REG (Pmode, 1);
+ emit_clobber (gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)));
+ emit_clobber (gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx));
+
+ /* Restore the frame pointer. The virtual_stack_vars_rtx is saved
+ instead of the hard_frame_pointer_rtx in the save area. We need
+ to adjust for the offset between these two values when we have
+ a nonlocal_goto pattern. When we don't have a nonlocal_goto
+ pattern, the receiver performs the adjustment. */
+#ifdef HAVE_nonlocal_goto
+ if (HAVE_nonlocal_goto)
+ emit_move_insn (virtual_stack_vars_rtx, force_reg (Pmode, fp));
+ else
+#endif
+ emit_move_insn (hard_frame_pointer_rtx, fp);
+
/* This bit is the same as expand_builtin_longjmp. */
- emit_move_insn (hard_frame_pointer_rtx, fp);
emit_stack_restore (SAVE_NONLOCAL, stack, NULL_RTX);
- emit_insn (gen_rtx_USE (VOIDmode, hard_frame_pointer_rtx));
- emit_insn (gen_rtx_USE (VOIDmode, stack_pointer_rtx));
+ emit_use (hard_frame_pointer_rtx);
+ emit_use (stack_pointer_rtx);
/* Load the label we are jumping through into r1 so that we know
where to look for it when we get back to setjmp's function for
(if_then_else (eq_attr "alternative" "0")
;; Loop counter in register case
;; Short branch has length of 4
-;; Long branch has length of 8
- (if_then_else (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8))
+;; Long branch has length of 8, 20, 24 or 28
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28))
;; Loop counter in FP reg case.
;; Extra goo to deal with additional reload insns.
(if_then_else (eq_attr "alternative" "1")
(if_then_else (lt (match_dup 3) (pc))
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 24))))
- (const_int 8184))
- (const_int 24)
- (const_int 28))
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 24)
- (const_int 28)))
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 24))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 24)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 24))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 28)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 44)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 40)]
+ (const_int 48))
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 24)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 28)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 44)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 40)]
+ (const_int 48)))
+
;; Loop counter in memory case.
;; Extra goo to deal with additional reload insns.
(if_then_else (lt (match_dup 3) (pc))
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
- (const_int 8184))
- (const_int 12)
- (const_int 16))
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 12)
- (const_int 16))))))])
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 12)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 16)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 32)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 28)]
+ (const_int 36))
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 12)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 16)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 32)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 28)]
+ (const_int 36))))))])
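+
+;; In the length computation above (and in the similar insns that
+;; follow), 4 and 8 are the short 12-bit and 17-bit reachable branch
+;; forms; the larger values allow for the sequence emitted when the
+;; target is out of range, which totals 20 bytes for non-PIC code, 24
+;; for the portable runtime and 28 for PIC in the register alternative,
+;; with correspondingly larger totals for the FP-register and memory
+;; alternatives that need extra reload copies.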
(define_insn ""
[(set (pc)
(if_then_else (eq_attr "alternative" "0")
;; Loop counter in register case
;; Short branch has length of 4
-;; Long branch has length of 8
- (if_then_else (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8))
+;; Long branch has length of 8, 20, 24 or 28
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28))
;; Loop counter in FP reg case.
;; Extra goo to deal with additional reload insns.
(if_then_else (eq_attr "alternative" "1")
(if_then_else (lt (match_dup 3) (pc))
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
- (const_int 8184))
- (const_int 12)
- (const_int 16))
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 12)
- (const_int 16)))
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 12)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 16)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 32)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 28)]
+ (const_int 36))
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 12)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 16)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 32)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 28)]
+ (const_int 36)))
+
;; Loop counter in memory or sar case.
;; Extra goo to deal with additional reload insns.
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 8)
- (const_int 12)))))])
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 8)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 12)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 28)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 24)]
+ (const_int 32)))))])
;; Handle negated branch.
(define_insn ""
;; Loop counter in register case
;; Short branch has length of 4
-;; Long branch has length of 8
+;; Long branch has length of 8, 20, 24 or 28
- (if_then_else (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8))
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28))
;; Loop counter in FP reg case.
;; Extra goo to deal with additional reload insns.
(if_then_else (eq_attr "alternative" "1")
(if_then_else (lt (match_dup 3) (pc))
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
- (const_int 8184))
- (const_int 12)
- (const_int 16))
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 12)
- (const_int 16)))
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 12)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 16)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 32)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 28)]
+ (const_int 36))
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 12)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 16)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 32)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 28)]
+ (const_int 36)))
+
;; Loop counter in memory or SAR case.
;; Extra goo to deal with additional reload insns.
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 8)
- (const_int 12)))))])
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 8)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 12)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 28)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 24)]
+ (const_int 32)))))])
(define_insn ""
[(set (pc) (label_ref (match_operand 3 "" "" )))
"(reload_completed && operands[0] == operands[1]) || operands[0] == operands[2]"
"*
{
- return output_parallel_addb (operands, get_attr_length (insn));
+ return output_parallel_addb (operands, insn);
}"
- [(set_attr "type" "parallel_branch")
- (set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+[(set_attr "type" "parallel_branch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc) (label_ref (match_operand 2 "" "" )))
"reload_completed"
"*
{
- return output_parallel_movb (operands, get_attr_length (insn));
+ return output_parallel_movb (operands, insn);
}"
- [(set_attr "type" "parallel_branch")
- (set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+[(set_attr "type" "parallel_branch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc) (label_ref (match_operand 2 "" "" )))
"reload_completed"
"*
{
- return output_parallel_movb (operands, get_attr_length (insn));
+ return output_parallel_movb (operands, insn);
}"
- [(set_attr "type" "parallel_branch")
- (set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+[(set_attr "type" "parallel_branch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc) (label_ref (match_operand 2 "" "" )))
"reload_completed"
"*
{
- return output_parallel_movb (operands, get_attr_length (insn));
+ return output_parallel_movb (operands, insn);
}"
- [(set_attr "type" "parallel_branch")
- (set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+[(set_attr "type" "parallel_branch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc) (label_ref (match_operand 2 "" "" )))
"reload_completed"
"*
{
- return output_parallel_movb (operands, get_attr_length (insn));
+ return output_parallel_movb (operands, insn);
}"
- [(set_attr "type" "parallel_branch")
- (set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+[(set_attr "type" "parallel_branch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (ne (symbol_ref "TARGET_PORTABLE_RUNTIME") (const_int 0))
+ (const_int 24)
+ (eq (symbol_ref "flag_pic") (const_int 0))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (match_operand 0 "register_operand" "=f")
[(set_attr "type" "fpalu")
(set_attr "length" "4")])
-;; Clean up turds left by reload.
-(define_peephole
- [(set (match_operand 0 "move_dest_operand" "")
- (match_operand 1 "register_operand" "fr"))
- (set (match_operand 2 "register_operand" "fr")
- (match_dup 0))]
- "!TARGET_SOFT_FLOAT
- && GET_CODE (operands[0]) == MEM
- && ! MEM_VOLATILE_P (operands[0])
- && GET_MODE (operands[0]) == GET_MODE (operands[1])
- && GET_MODE (operands[0]) == GET_MODE (operands[2])
- && GET_MODE (operands[0]) == DFmode
- && GET_CODE (operands[1]) == REG
- && GET_CODE (operands[2]) == REG
- && ! side_effects_p (XEXP (operands[0], 0))
- && REGNO_REG_CLASS (REGNO (operands[1]))
- == REGNO_REG_CLASS (REGNO (operands[2]))"
- "*
-{
- rtx xoperands[2];
-
- if (FP_REG_P (operands[1]))
- output_asm_insn (output_fp_move_double (operands), operands);
- else
- output_asm_insn (output_move_double (operands), operands);
-
- if (rtx_equal_p (operands[1], operands[2]))
- return \"\";
-
- xoperands[0] = operands[2];
- xoperands[1] = operands[1];
-
- if (FP_REG_P (xoperands[1]))
- output_asm_insn (output_fp_move_double (xoperands), xoperands);
- else
- output_asm_insn (output_move_double (xoperands), xoperands);
-
- return \"\";
-}")
-
-(define_peephole
- [(set (match_operand 0 "register_operand" "fr")
- (match_operand 1 "move_src_operand" ""))
- (set (match_operand 2 "register_operand" "fr")
- (match_dup 1))]
- "!TARGET_SOFT_FLOAT
- && GET_CODE (operands[1]) == MEM
- && ! MEM_VOLATILE_P (operands[1])
- && GET_MODE (operands[0]) == GET_MODE (operands[1])
- && GET_MODE (operands[0]) == GET_MODE (operands[2])
- && GET_MODE (operands[0]) == DFmode
- && GET_CODE (operands[0]) == REG
- && GET_CODE (operands[2]) == REG
- && ! side_effects_p (XEXP (operands[1], 0))
- && REGNO_REG_CLASS (REGNO (operands[0]))
- == REGNO_REG_CLASS (REGNO (operands[2]))"
- "*
-{
- rtx xoperands[2];
-
- if (FP_REG_P (operands[0]))
- output_asm_insn (output_fp_move_double (operands), operands);
- else
- output_asm_insn (output_move_double (operands), operands);
-
- xoperands[0] = operands[2];
- xoperands[1] = operands[0];
-
- if (FP_REG_P (xoperands[1]))
- output_asm_insn (output_fp_move_double (xoperands), xoperands);
- else
- output_asm_insn (output_move_double (xoperands), xoperands);
-
- return \"\";
-}")
-
-;; Flush the I and D cache lines from the start address (operand0)
-;; to the end address (operand1). No lines are flushed if the end
-;; address is less than the start address (unsigned).
+;; The following two patterns are used by the trampoline code for nested
+;; functions. They flush the I and D cache lines from the start address
+;; (operand 0) to the end address (operand 1).  No lines are flushed if the
+;; end address is less than the start address (unsigned).
;;
-;; Because the range of memory flushed is variable and the size of
-;; a MEM can only be a CONST_INT, the patterns specify that they
-;; perform an unspecified volatile operation on all memory.
+;; Because the range of memory flushed is variable and the size of a MEM
+;; can only be a CONST_INT, the patterns specify that they perform an
+;; unspecified volatile operation on all memory.
;;
;; The address range for an icache flush must lie within a single
;; space on targets with non-equivalent space registers.
;;
-;; This is used by the trampoline code for nested functions.
-;;
;; Operand 0 contains the start address.
;; Operand 1 contains the end address.
;; Operand 2 contains the line length to use.
-;; Operand 3 contains the start address (clobbered).
-;; Operands 4 and 5 (icacheflush) are clobbered scratch registers.
-(define_insn "dcacheflush"
+(define_insn "dcacheflush<P:mode>"
[(const_int 1)
- (unspec_volatile [(mem:BLK (scratch))] 0)
+ (unspec_volatile [(mem:BLK (scratch))] UNSPECV_DCACHE)
(use (match_operand 0 "pmode_register_operand" "r"))
(use (match_operand 1 "pmode_register_operand" "r"))
(use (match_operand 2 "pmode_register_operand" "r"))
- (clobber (match_scratch 3 "=&0"))]
+ (clobber (match_scratch:P 3 "=&0"))]
""
- "*
-{
- if (TARGET_64BIT)
- return \"cmpb,*<<=,n %3,%1,.\;fdc,m %2(%3)\;sync\";
- else
- return \"cmpb,<<=,n %3,%1,.\;fdc,m %2(%3)\;sync\";
-}"
+ "cmpb,<dwc><<=,n %3,%1,.\;fdc,m %2(%3)\;sync"
[(set_attr "type" "multi")
(set_attr "length" "12")])
-(define_insn "icacheflush"
+(define_insn "icacheflush<P:mode>"
[(const_int 2)
- (unspec_volatile [(mem:BLK (scratch))] 0)
+ (unspec_volatile [(mem:BLK (scratch))] UNSPECV_ICACHE)
(use (match_operand 0 "pmode_register_operand" "r"))
(use (match_operand 1 "pmode_register_operand" "r"))
(use (match_operand 2 "pmode_register_operand" "r"))
- (clobber (match_scratch 3 "=&0"))
+ (clobber (match_operand 3 "pmode_register_operand" "=&r"))
(clobber (match_operand 4 "pmode_register_operand" "=&r"))
- (clobber (match_operand 5 "pmode_register_operand" "=&r"))]
+ (clobber (match_scratch:P 5 "=&0"))]
""
- "*
-{
- if (TARGET_64BIT)
- return \"mfsp %%sr0,%5\;ldsid (%3),%4\;mtsp %4,%%sr0\;cmpb,*<<=,n %3,%1,.\;fic,m %2(%%sr0,%3)\;sync\;mtsp %5,%%sr0\;nop\;nop\;nop\;nop\;nop\;nop\";
- else
- return \"mfsp %%sr0,%5\;ldsid (%3),%4\;mtsp %4,%%sr0\;cmpb,<<=,n %3,%1,.\;fic,m %2(%%sr0,%3)\;sync\;mtsp %5,%%sr0\;nop\;nop\;nop\;nop\;nop\;nop\";
-}"
+ "mfsp %%sr0,%4\;ldsid (%5),%3\;mtsp %3,%%sr0\;cmpb,<dwc><<=,n %5,%1,.\;fic,m %2(%%sr0,%5)\;sync\;mtsp %4,%%sr0\;nop\;nop\;nop\;nop\;nop\;nop"
[(set_attr "type" "multi")
(set_attr "length" "52")])
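+
+;; In both flush loops above, the cmpb branches back to itself (".")
+;; with the fdc,m/fic,m in its delay slot, so each taken iteration
+;; flushes the cache line at the current address (%3 resp. %5) and
+;; post-increments that address by the line length.  Since the branch
+;; is backward, the ",n" completer nullifies the delay slot only when
+;; the branch is not taken, so nothing is flushed once the current
+;; address exceeds the end address.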
;; An out-of-line prologue.
(define_insn "outline_prologue_call"
- [(unspec_volatile [(const_int 0)] 0)
+ [(unspec_volatile [(const_int 0)] UNSPECV_OPC)
(clobber (reg:SI 31))
(clobber (reg:SI 22))
(clobber (reg:SI 21))
""
"*
{
- extern int frame_pointer_needed;
-
+
/* We need two different versions depending on whether or not we
need a frame pointer. Also note that we return to the instruction
immediately after the branch rather than two instructions after the
;; An out-of-line epilogue.
(define_insn "outline_epilogue_call"
- [(unspec_volatile [(const_int 1)] 0)
+ [(unspec_volatile [(const_int 1)] UNSPECV_OEC)
(use (reg:SI 29))
(use (reg:SI 28))
(clobber (reg:SI 31))
""
"*
{
- extern int frame_pointer_needed;
/* We need two different versions depending on whether or not we
need a frame pointer. Also note that we return to the instruction
;; reliably compared to another function pointer. */
(define_expand "canonicalize_funcptr_for_compare"
[(set (reg:SI 26) (match_operand:SI 1 "register_operand" ""))
- (parallel [(set (reg:SI 29) (unspec:SI [(reg:SI 26)] 0))
+ (parallel [(set (reg:SI 29) (unspec:SI [(reg:SI 26)] UNSPEC_CFFC))
(clobber (match_dup 2))
(clobber (reg:SI 26))
(clobber (reg:SI 22))
}
}")
-(define_insn ""
- [(set (reg:SI 29) (unspec:SI [(reg:SI 26)] 0))
+(define_insn "*$$sh_func_adrs"
+ [(set (reg:SI 29) (unspec:SI [(reg:SI 26)] UNSPEC_CFFC))
(clobber (match_operand:SI 0 "register_operand" "=a"))
(clobber (reg:SI 26))
(clobber (reg:SI 22))
}
DONE;
}")
+
+(define_expand "prefetch"
+ [(match_operand 0 "address_operand" "")
+ (match_operand 1 "const_int_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_PA_20"
+{
+ operands[0] = copy_addr_to_reg (operands[0]);
+ emit_insn (gen_prefetch_20 (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_insn "prefetch_20"
+ [(prefetch (match_operand 0 "pmode_register_operand" "r")
+ (match_operand:SI 1 "const_int_operand" "n")
+ (match_operand:SI 2 "const_int_operand" "n"))]
+ "TARGET_PA_20"
+{
+ /* The SL cache-control completer indicates good spatial locality but
+ poor temporal locality. The ldw instruction with a target of general
+ register 0 prefetches a cache line for a read. The ldd instruction
+ prefetches a cache line for a write. */
+ static const char * const instr[2][2] = {
+ {
+ "ldw,sl 0(%0),%%r0",
+ "ldd,sl 0(%0),%%r0"
+ },
+ {
+ "ldw 0(%0),%%r0",
+ "ldd 0(%0),%%r0"
+ }
+ };
+ int read_or_write = INTVAL (operands[1]) == 0 ? 0 : 1;
+ int locality = INTVAL (operands[2]) == 0 ? 0 : 1;
+
+ return instr [locality][read_or_write];
+}
+ [(set_attr "type" "load")
+ (set_attr "length" "4")])
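+
+;; In the standard prefetch pattern, operand 1 is 0 for a read prefetch
+;; and nonzero for a write prefetch, and operand 2 gives the expected
+;; degree of temporal locality (0 through 3).  The insn above only
+;; distinguishes "none" from "some" temporal locality, selecting the
+;; ,sl completer for the former.  For example, __builtin_prefetch (p, 0, 0)
+;; emits "ldw,sl 0(%reg),%r0" and __builtin_prefetch (p, 1, 3) emits
+;; "ldd 0(%reg),%r0".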
+
+;; TLS Support
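+;;
+;; The patterns below cover the four TLS access models: tgd_* implement
+;; global-dynamic, tld_* and tld_offset_load local-dynamic, tie_*
+;; initial-exec and tle_load local-exec.  tp_load reads the thread
+;; pointer, which is kept in %cr27.  The _pic variants go through the
+;; PIC register %r19 rather than %r27.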
+(define_insn "tgd_load"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "tgd_symbolic_operand" "")] UNSPEC_TLSGD))
+ (clobber (reg:SI 1))
+ (use (reg:SI 27))]
+ ""
+ "*
+{
+ return \"addil LR'%1-$tls_gdidx$,%%r27\;ldo RR'%1-$tls_gdidx$(%%r1),%0\";
+}"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
+(define_insn "tgd_load_pic"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "tgd_symbolic_operand" "")] UNSPEC_TLSGD_PIC))
+ (clobber (reg:SI 1))
+ (use (reg:SI 19))]
+ ""
+ "*
+{
+ return \"addil LT'%1-$tls_gdidx$,%%r19\;ldo RT'%1-$tls_gdidx$(%%r1),%0\";
+}"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
+(define_insn "tld_load"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "tld_symbolic_operand" "")] UNSPEC_TLSLDM))
+ (clobber (reg:SI 1))
+ (use (reg:SI 27))]
+ ""
+ "*
+{
+ return \"addil LR'%1-$tls_ldidx$,%%r27\;ldo RR'%1-$tls_ldidx$(%%r1),%0\";
+}"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
+(define_insn "tld_load_pic"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "tld_symbolic_operand" "")] UNSPEC_TLSLDM_PIC))
+ (clobber (reg:SI 1))
+ (use (reg:SI 19))]
+ ""
+ "*
+{
+ return \"addil LT'%1-$tls_ldidx$,%%r19\;ldo RT'%1-$tls_ldidx$(%%r1),%0\";
+}"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
+(define_insn "tld_offset_load"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (unspec:SI [(match_operand 1 "tld_symbolic_operand" "")]
+ UNSPEC_TLSLDO)
+ (match_operand:SI 2 "register_operand" "r")))
+ (clobber (reg:SI 1))]
+ ""
+ "*
+{
+ return \"addil LR'%1-$tls_dtpoff$,%2\;ldo RR'%1-$tls_dtpoff$(%%r1),%0\";
+}"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
+(define_insn "tp_load"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(const_int 0)] UNSPEC_TP))]
+ ""
+ "mfctl %%cr27,%0"
+ [(set_attr "type" "multi")
+ (set_attr "length" "4")])
+
+(define_insn "tie_load"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "tie_symbolic_operand" "")] UNSPEC_TLSIE))
+ (clobber (reg:SI 1))
+ (use (reg:SI 27))]
+ ""
+ "*
+{
+ return \"addil LR'%1-$tls_ieoff$,%%r27\;ldw RR'%1-$tls_ieoff$(%%r1),%0\";
+}"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
+(define_insn "tie_load_pic"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "tie_symbolic_operand" "")] UNSPEC_TLSIE_PIC))
+ (clobber (reg:SI 1))
+ (use (reg:SI 19))]
+ ""
+ "*
+{
+ return \"addil LT'%1-$tls_ieoff$,%%r19\;ldw RT'%1-$tls_ieoff$(%%r1),%0\";
+}"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
+(define_insn "tle_load"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (unspec:SI [(match_operand 1 "tle_symbolic_operand" "")]
+ UNSPEC_TLSLE)
+ (match_operand:SI 2 "register_operand" "r")))
+ (clobber (reg:SI 1))]
+ ""
+ "addil LR'%1-$tls_leoff$,%2\;ldo RR'%1-$tls_leoff$(%%r1),%0"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])