;;- Machine description for HP PA-RISC architecture for GCC compiler
;; Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
-;; 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+;; 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010
+;; Free Software Foundation, Inc.
;; Contributed by the Center for Software Science at the University
;; of Utah.
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
-;; the Free Software Foundation; either version 2, or (at your option)
+;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;; GCC is distributed in the hope that it will be useful,
;; GNU General Public License for more details.
;; You should have received a copy of the GNU General Public License
-;; along with GCC; see the file COPYING. If not, write to
-;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
-;; Boston, MA 02110-1301, USA.
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
;; This gcc Version 2 machine description is inspired by sparc.md and
;; mips.md.
(UNSPEC_TLSLDBASE 7)
(UNSPEC_TLSIE 8)
(UNSPEC_TLSLE 9)
+ (UNSPEC_TLSGD_PIC 10)
+ (UNSPEC_TLSLDM_PIC 11)
+ (UNSPEC_TLSIE_PIC 12)
])
;; UNSPEC_VOLATILE:
(MAX_17BIT_OFFSET 262100) ; 17-bit branch
])
+;; Mode and code iterators
+
+;; This mode iterator allows :P to be used for patterns that operate on
+;; pointer-sized quantities. Exactly one of the two alternatives will match.
+(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
+
+;; This attribute defines the condition prefix for word and double word
+;; add, compare, subtract and logical instructions.
+;; NOTE(review): presumably the "*" form selects the PA 2.0 doubleword
+;; condition-completer spelling in output templates -- confirm against
+;; the patterns that substitute <dwc>.
+(define_mode_attr dwc [(SI "") (DI "*")])
+
;; Insn type. Used to default other attribute values.
;; type "unary" insns have one input operand (1) and one output operand (0)
(eq_attr "cpu" "8000"))
"inm_8000,fdivsqrt_8000*6,rnm_8000")
+;; Operand and operator predicates and constraints
+
(include "predicates.md")
+(include "constraints.md")
\f
;; Compare instructions.
;; This controls RTL generation and register allocation.
-;; We generate RTL for comparisons and branches by having the cmpxx
-;; patterns store away the operands. Then, the scc and bcc patterns
-;; emit RTL for both the compare and the branch.
-;;
-
-(define_expand "cmpdi"
- [(set (reg:CC 0)
- (compare:CC (match_operand:DI 0 "reg_or_0_operand" "")
- (match_operand:DI 1 "register_operand" "")))]
- "TARGET_64BIT"
-
- "
-{
- hppa_compare_op0 = operands[0];
- hppa_compare_op1 = operands[1];
- hppa_branch_type = CMP_SI;
- DONE;
-}")
-
-(define_expand "cmpsi"
- [(set (reg:CC 0)
- (compare:CC (match_operand:SI 0 "reg_or_0_operand" "")
- (match_operand:SI 1 "arith5_operand" "")))]
- ""
- "
-{
- hppa_compare_op0 = operands[0];
- hppa_compare_op1 = operands[1];
- hppa_branch_type = CMP_SI;
- DONE;
-}")
-
-(define_expand "cmpsf"
- [(set (reg:CCFP 0)
- (compare:CCFP (match_operand:SF 0 "reg_or_0_operand" "")
- (match_operand:SF 1 "reg_or_0_operand" "")))]
- "! TARGET_SOFT_FLOAT"
- "
-{
- hppa_compare_op0 = operands[0];
- hppa_compare_op1 = operands[1];
- hppa_branch_type = CMP_SF;
- DONE;
-}")
-
-(define_expand "cmpdf"
- [(set (reg:CCFP 0)
- (compare:CCFP (match_operand:DF 0 "reg_or_0_operand" "")
- (match_operand:DF 1 "reg_or_0_operand" "")))]
- "! TARGET_SOFT_FLOAT"
- "
-{
- hppa_compare_op0 = operands[0];
- hppa_compare_op1 = operands[1];
- hppa_branch_type = CMP_DF;
- DONE;
-}")
-
(define_insn ""
[(set (reg:CCFP 0)
(match_operator:CCFP 2 "comparison_operator"
;; scc insns.
-(define_expand "seq"
- [(set (match_operand:SI 0 "register_operand" "")
- (eq:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- /* set up operands from compare. */
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
- /* fall through and generate default code */
-}")
-
-(define_expand "sne"
- [(set (match_operand:SI 0 "register_operand" "")
- (ne:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "slt"
- [(set (match_operand:SI 0 "register_operand" "")
- (lt:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sgt"
- [(set (match_operand:SI 0 "register_operand" "")
- (gt:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sle"
- [(set (match_operand:SI 0 "register_operand" "")
- (le:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sge"
- [(set (match_operand:SI 0 "register_operand" "")
- (ge:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sltu"
- [(set (match_operand:SI 0 "register_operand" "")
- (ltu:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sgtu"
- [(set (match_operand:SI 0 "register_operand" "")
- (gtu:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sleu"
- [(set (match_operand:SI 0 "register_operand" "")
- (leu:SI (match_dup 1)
- (match_dup 2)))]
+;; cstoresi4 replaces the individual seq/sne/slt/.../sgeu scc expanders:
+;; the middle end now passes the comparison operator and both of its
+;; operands explicitly (operands 1-3), so the hppa_compare_op0/op1
+;; globals and the hppa_branch_type check are no longer needed.
+(define_expand "cstoresi4"
+ [(set (match_operand:SI 0 "register_operand")
+ (match_operator:SI 1 "ordered_comparison_operator"
+ [(match_operand:SI 2 "reg_or_0_operand" "")
+ (match_operand:SI 3 "arith5_operand" "")]))]
 "!TARGET_64BIT"
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sgeu"
- [(set (match_operand:SI 0 "register_operand" "")
- (geu:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
+ "")
;; Instruction canonicalization puts immediate operands second, which
;; is the reverse of what we want.
+;; The comparison now arrives ready-made in operand 1; FAIL unless both
+;; of its operands are SImode, so only integer conditions are accepted.
(define_expand "movsicc"
 [(set (match_operand:SI 0 "register_operand" "")
 (if_then_else:SI
- (match_operator 1 "comparison_operator"
- [(match_dup 4)
- (match_dup 5)])
+ (match_operand 1 "comparison_operator" "")
 (match_operand:SI 2 "reg_or_cint_move_operand" "")
 (match_operand:SI 3 "reg_or_cint_move_operand" "")))]
 ""
 "
{
- enum rtx_code code = GET_CODE (operands[1]);
-
- if (hppa_branch_type != CMP_SI)
- FAIL;
-
- if (GET_MODE (hppa_compare_op0) != GET_MODE (hppa_compare_op1)
- || GET_MODE (hppa_compare_op0) != GET_MODE (operands[0]))
+ if (GET_MODE (XEXP (operands[1], 0)) != SImode
+ || GET_MODE (XEXP (operands[1], 0)) != GET_MODE (XEXP (operands[1], 1)))
 FAIL;
-
- /* operands[1] is currently the result of compare_from_rtx. We want to
- emit a compare of the original operands. */
- operands[1] = gen_rtx_fmt_ee (code, SImode, hppa_compare_op0, hppa_compare_op1);
- operands[4] = hppa_compare_op0;
- operands[5] = hppa_compare_op1;
}")
;; We used to accept any register for op1.
+;; DImode twin of movsicc: the comparison arrives in operand 1 and must
+;; have DImode operands of matching mode, else FAIL.
(define_expand "movdicc"
 [(set (match_operand:DI 0 "register_operand" "")
 (if_then_else:DI
- (match_operator 1 "comparison_operator"
- [(match_dup 4)
- (match_dup 5)])
+ (match_operand 1 "comparison_operator" "")
 (match_operand:DI 2 "reg_or_cint_move_operand" "")
 (match_operand:DI 3 "reg_or_cint_move_operand" "")))]
 "TARGET_64BIT"
 "
{
- enum rtx_code code = GET_CODE (operands[1]);
-
- if (hppa_branch_type != CMP_SI)
- FAIL;
-
- if (GET_MODE (hppa_compare_op0) != GET_MODE (hppa_compare_op1)
- || GET_MODE (hppa_compare_op0) != GET_MODE (operands[0]))
+ if (GET_MODE (XEXP (operands[1], 0)) != DImode
+ || GET_MODE (XEXP (operands[1], 0)) != GET_MODE (XEXP (operands[1], 1)))
 FAIL;
-
- /* operands[1] is currently the result of compare_from_rtx. We want to
- emit a compare of the original operands. */
- operands[1] = gen_rtx_fmt_ee (code, DImode, hppa_compare_op0, hppa_compare_op1);
- operands[4] = hppa_compare_op0;
- operands[5] = hppa_compare_op1;
}")
; We need the first constraint alternative in order to avoid
;; Conditional Branches
-(define_expand "beq"
- [(set (pc)
- (if_then_else (eq (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (EQ, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- /* set up operands from compare. */
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
- /* fall through and generate default code */
-}")
-
-(define_expand "bne"
- [(set (pc)
- (if_then_else (ne (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (NE, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bgt"
+(define_expand "cbranchdi4"
[(set (pc)
- (if_then_else (gt (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+ [(match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "register_operand" "")])
+ (label_ref (match_operand 3 "" ""))
(pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (GT, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "blt"
- [(set (pc)
- (if_then_else (lt (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (LT, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bge"
- [(set (pc)
- (if_then_else (ge (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (GE, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "ble"
- [(set (pc)
- (if_then_else (le (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (LE, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bgtu"
- [(set (pc)
- (if_then_else (gtu (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bltu"
- [(set (pc)
- (if_then_else (ltu (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bgeu"
- [(set (pc)
- (if_then_else (geu (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bleu"
- [(set (pc)
- (if_then_else (leu (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bltgt"
- [(set (pc)
- (if_then_else (ltgt (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (LTGT, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
-
-(define_expand "bunle"
- [(set (pc)
- (if_then_else (unle (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNLE, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
-
-(define_expand "bunlt"
- [(set (pc)
- (if_then_else (unlt (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNLT, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
-
-(define_expand "bunge"
- [(set (pc)
- (if_then_else (unge (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNGE, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
+ "TARGET_64BIT"
+ "")
-(define_expand "bungt"
+;; cbranchsi4 replaces the old beq/bne/.../bleu branch expanders: the
+;; comparison operator is operand 0, its operands are 1 and 2, and the
+;; branch target label is operand 3.
+(define_expand "cbranchsi4"
 [(set (pc)
- (if_then_else (ungt (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+ [(match_operand:SI 1 "reg_or_0_operand" "")
+ (match_operand:SI 2 "arith5_operand" "")])
+ (label_ref (match_operand 3 "" ""))
 (pc)))]
 ""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNGT, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
+ "")
-(define_expand "buneq"
+;; SFmode conditional branch. The whole operands array (operator, two
+;; inputs, label) is handed to emit_bcond_fp, which presumably now emits
+;; both the FP compare and the branch itself -- confirm against the
+;; matching change to emit_bcond_fp in pa.c.
+(define_expand "cbranchsf4"
 [(set (pc)
- (if_then_else (uneq (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand:SF 1 "reg_or_0_operand" "")
+ (match_operand:SF 2 "reg_or_0_operand" "")])
+ (label_ref (match_operand 3 "" ""))
 (pc)))]
 ""
 "
{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNEQ, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
+ emit_bcond_fp (operands);
 DONE;
}")
-(define_expand "bunordered"
- [(set (pc)
- (if_then_else (unordered (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNORDERED, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
-(define_expand "bordered"
+;; DFmode conditional branch; identical to cbranchsf4 apart from the
+;; operand mode -- emit_bcond_fp does all the work.
+(define_expand "cbranchdf4"
 [(set (pc)
- (if_then_else (ordered (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand:DF 1 "reg_or_0_operand" "")
+ (match_operand:DF 2 "reg_or_0_operand" "")])
+ (label_ref (match_operand 3 "" ""))
 (pc)))]
 ""
 "
{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (ORDERED, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
+ emit_bcond_fp (operands);
 DONE;
}")
DONE;
}")
-(define_insn ""
- [(set (match_operand:HI 0 "move_dest_operand"
- "=r,r,r,r,r,Q,!*q,!r,!*f,?r,?*f")
- (match_operand:HI 1 "move_src_operand"
- "r,J,N,K,RQ,rM,!rM,!*q,!*fM,*f,r"))]
- "(register_operand (operands[0], HImode)
- || reg_or_0_operand (operands[1], HImode))
- && !TARGET_SOFT_FLOAT
- && !TARGET_64BIT"
- "@
- copy %1,%0
- ldi %1,%0
- ldil L'%1,%0
- {zdepi|depwi,z} %Z1,%0
- ldh%M1 %1,%0
- sth%M0 %r1,%0
- mtsar %r1
- {mfctl|mfctl,w} %sar,%0
- fcpy,sgl %f1,%0
- {fstws|fstw} %1,-16(%%sp)\n\t{ldws|ldw} -16(%%sp),%0
- {stws|stw} %1,-16(%%sp)\n\t{fldws|fldw} -16(%%sp),%0"
- [(set_attr "type" "move,move,move,shift,load,store,move,move,move,fpstore_load,store_fpload")
- (set_attr "pa_combine_type" "addmove")
- (set_attr "length" "4,4,4,4,4,4,4,4,4,8,8")])
+;; Handle HImode input reloads requiring a general register as a
+;; scratch register.
+;; The expander tries emit_move_sequence first; if that emits the whole
+;; sequence we are done. Otherwise a plain register copy suffices, and
+;; the clobber of the scratch is deliberately not emitted.
+(define_expand "reload_inhi"
+ [(set (match_operand:HI 0 "register_operand" "=Z")
+ (match_operand:HI 1 "non_hard_reg_operand" ""))
+ (clobber (match_operand:HI 2 "register_operand" "=&r"))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, HImode, operands[2]))
+ DONE;
-(define_insn ""
- [(set (match_operand:HI 0 "move_dest_operand"
- "=r,r,r,r,r,Q,!*q,!r,!*f")
- (match_operand:HI 1 "move_src_operand"
- "r,J,N,K,RQ,rM,!rM,!*q,!*fM"))]
- "(register_operand (operands[0], HImode)
- || reg_or_0_operand (operands[1], HImode))
- && !TARGET_SOFT_FLOAT
- && TARGET_64BIT"
- "@
- copy %1,%0
- ldi %1,%0
- ldil L'%1,%0
- {zdepi|depwi,z} %Z1,%0
- ldh%M1 %1,%0
- sth%M0 %r1,%0
- mtsar %r1
- {mfctl|mfctl,w} %sar,%0
- fcpy,sgl %f1,%0"
- [(set_attr "type" "move,move,move,shift,load,store,move,move,move")
- (set_attr "pa_combine_type" "addmove")
- (set_attr "length" "4,4,4,4,4,4,4,4,4")])
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+}")
+
+;; Handle HImode output reloads requiring a general register as a
+;; scratch register. Mirror image of reload_inhi above.
+(define_expand "reload_outhi"
+ [(set (match_operand:HI 0 "non_hard_reg_operand" "")
+ (match_operand:HI 1 "register_operand" "Z"))
+ (clobber (match_operand:HI 2 "register_operand" "=&r"))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, HImode, operands[2]))
+ DONE;
+
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+}")
(define_insn ""
[(set (match_operand:HI 0 "move_dest_operand"
(match_operand:HI 1 "move_src_operand"
"r,J,N,K,RQ,rM,!rM,!*q"))]
"(register_operand (operands[0], HImode)
- || reg_or_0_operand (operands[1], HImode))
- && TARGET_SOFT_FLOAT"
+ || reg_or_0_operand (operands[1], HImode))"
"@
copy %1,%0
ldi %1,%0
DONE;
}")
-(define_insn ""
- [(set (match_operand:QI 0 "move_dest_operand"
- "=r,r,r,r,r,Q,!*q,!r,!*f,?r,?*f")
- (match_operand:QI 1 "move_src_operand"
- "r,J,N,K,RQ,rM,!rM,!*q,!*fM,*f,r"))]
- "(register_operand (operands[0], QImode)
- || reg_or_0_operand (operands[1], QImode))
- && !TARGET_SOFT_FLOAT
- && !TARGET_64BIT"
- "@
- copy %1,%0
- ldi %1,%0
- ldil L'%1,%0
- {zdepi|depwi,z} %Z1,%0
- ldb%M1 %1,%0
- stb%M0 %r1,%0
- mtsar %r1
- {mfctl|mfctl,w} %%sar,%0
- fcpy,sgl %f1,%0
- {fstws|fstw} %1,-16(%%sp)\n\t{ldws|ldw} -16(%%sp),%0
- {stws|stw} %1,-16(%%sp)\n\t{fldws|fldw} -16(%%sp),%0"
- [(set_attr "type" "move,move,move,shift,load,store,move,move,move,fpstore_load,store_fpload")
- (set_attr "pa_combine_type" "addmove")
- (set_attr "length" "4,4,4,4,4,4,4,4,4,8,8")])
+;; Handle QImode input reloads requiring a general register as a
+;; scratch register.
+;; Same scheme as the HImode reload expanders: let emit_move_sequence
+;; try first, else fall back to a bare copy with no clobber emitted.
+(define_expand "reload_inqi"
+ [(set (match_operand:QI 0 "register_operand" "=Z")
+ (match_operand:QI 1 "non_hard_reg_operand" ""))
+ (clobber (match_operand:QI 2 "register_operand" "=&r"))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, QImode, operands[2]))
+ DONE;
-(define_insn ""
- [(set (match_operand:QI 0 "move_dest_operand"
- "=r,r,r,r,r,Q,!*q,!r,!*f")
- (match_operand:QI 1 "move_src_operand"
- "r,J,N,K,RQ,rM,!rM,!*q,!*fM"))]
- "(register_operand (operands[0], QImode)
- || reg_or_0_operand (operands[1], QImode))
- && !TARGET_SOFT_FLOAT
- && TARGET_64BIT"
- "@
- copy %1,%0
- ldi %1,%0
- ldil L'%1,%0
- {zdepi|depwi,z} %Z1,%0
- ldb%M1 %1,%0
- stb%M0 %r1,%0
- mtsar %r1
- {mfctl|mfctl,w} %%sar,%0
- fcpy,sgl %f1,%0"
- [(set_attr "type" "move,move,move,shift,load,store,move,move,move")
- (set_attr "pa_combine_type" "addmove")
- (set_attr "length" "4,4,4,4,4,4,4,4,4")])
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+}")
+
+;; Handle QImode output reloads requiring a general register as a
+;; scratch register. Mirror image of reload_inqi above.
+(define_expand "reload_outqi"
+ [(set (match_operand:QI 0 "non_hard_reg_operand" "")
+ (match_operand:QI 1 "register_operand" "Z"))
+ (clobber (match_operand:QI 2 "register_operand" "=&r"))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, QImode, operands[2]))
+ DONE;
+
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+}")
(define_insn ""
[(set (match_operand:QI 0 "move_dest_operand"
(match_operand:QI 1 "move_src_operand"
"r,J,N,K,RQ,rM,!rM,!*q"))]
"(register_operand (operands[0], QImode)
- || reg_or_0_operand (operands[1], QImode))
- && TARGET_SOFT_FLOAT"
+ || reg_or_0_operand (operands[1], QImode))"
"@
copy %1,%0
ldi %1,%0
size = INTVAL (operands[2]);
align = INTVAL (operands[3]);
- align = align > 4 ? 4 : align;
+ align = align > 4 ? 4 : (align ? align : 1);
/* If size/alignment is large, then use the library routines. */
if (size / align > 16)
FAIL;
/* This does happen, but not often enough to worry much about. */
- if (size / align < MOVE_RATIO)
+ if (size / align < MOVE_RATIO (optimize_insn_for_speed_p ()))
FAIL;
/* Fall through means we're going to use our block move pattern. */
size = INTVAL (operands[2]);
align = INTVAL (operands[3]);
- align = align > 8 ? 8 : align;
+ align = align > 8 ? 8 : (align ? align : 1);
/* If size/alignment is large, then use the library routines. */
if (size / align > 16)
FAIL;
/* This does happen, but not often enough to worry much about. */
- if (size / align < MOVE_RATIO)
+ if (size / align < MOVE_RATIO (optimize_insn_for_speed_p ()))
FAIL;
/* Fall through means we're going to use our block move pattern. */
FAIL;
/* This does happen, but not often enough to worry much about. */
- if (size / align < MOVE_RATIO)
+ if (size / align < MOVE_RATIO (optimize_insn_for_speed_p ()))
FAIL;
/* Fall through means we're going to use our block clear pattern. */
FAIL;
/* This does happen, but not often enough to worry much about. */
- if (size / align < MOVE_RATIO)
+ if (size / align < MOVE_RATIO (optimize_insn_for_speed_p ()))
FAIL;
/* Fall through means we're going to use our block clear pattern. */
/* Except for zero, we don't support loading a CONST_INT directly
to a hard floating-point register since a scratch register is
needed for the operation. While the operation could be handled
- before no_new_pseudos is true, the simplest solution is to fail. */
+ before register allocation, the simplest solution is to fail. */
if (TARGET_64BIT
&& GET_CODE (operands[1]) == CONST_INT
&& operands[1] != CONST0_RTX (DImode)
;; Processors prior to PA 2.0 don't have a fneg instruction. Fast
;; negation can be done by subtracting from plus zero. However, this
;; violates the IEEE standard when negating plus and minus zero.
+;; The slow path toggles the sign bit in the general registers.
+;; The strict-IEEE path used to multiply by -1.0; it now uses the
+;; sign-bit-toggling negdf2_slow insn instead, so no constant register
+;; (and no FP multiply) is needed.
(define_expand "negdf2"
- [(parallel [(set (match_operand:DF 0 "register_operand" "")
- (neg:DF (match_operand:DF 1 "register_operand" "")))
- (use (match_dup 2))])]
- "! TARGET_SOFT_FLOAT"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (neg:DF (match_operand:DF 1 "register_operand" "")))]
+ "!TARGET_SOFT_FLOAT"
{
 if (TARGET_PA_20 || flag_unsafe_math_optimizations)
 emit_insn (gen_negdf2_fast (operands[0], operands[1]));
 else
- {
- operands[2] = force_reg (DFmode,
- CONST_DOUBLE_FROM_REAL_VALUE (dconstm1, DFmode));
- emit_insn (gen_muldf3 (operands[0], operands[1], operands[2]));
- }
+ emit_insn (gen_negdf2_slow (operands[0], operands[1]));
 DONE;
})
+;; Slow path for DFmode negation on pre-2.0 PA: toggle the sign bit of
+;; the high word in a general register (see the comment above -- this
+;; keeps signed zeros correct, unlike subtracting from +0.0). When the
+;; destination differs from the source the low word must also be copied,
+;; hence the extra "copy %R1,%R0" and the 12- vs 16-byte length.
+(define_insn "negdf2_slow"
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (neg:DF (match_operand:DF 1 "register_operand" "r")))]
+ "!TARGET_SOFT_FLOAT && !TARGET_PA_20"
+ "*
+{
+ if (rtx_equal_p (operands[0], operands[1]))
+ return \"and,< %1,%1,%0\;depi,tr 1,0,1,%0\;depi 0,0,1,%0\";
+ else
+ return \"and,< %1,%1,%0\;depi,tr 1,0,1,%0\;depi 0,0,1,%0\;copy %R1,%R0\";
+}"
+ [(set_attr "type" "multi")
+ (set (attr "length")
+ (if_then_else (ne (symbol_ref "rtx_equal_p (operands[0], operands[1])")
+ (const_int 0))
+ (const_int 12)
+ (const_int 16)))])
+
(define_insn "negdf2_fast"
[(set (match_operand:DF 0 "register_operand" "=f")
(neg:DF (match_operand:DF 1 "register_operand" "f")))]
- "! TARGET_SOFT_FLOAT && (TARGET_PA_20 || flag_unsafe_math_optimizations)"
+ "!TARGET_SOFT_FLOAT"
"*
{
if (TARGET_PA_20)
(set_attr "length" "4")])
+;; SFmode twin of negdf2: the strict-IEEE path now uses negsf2_slow
+;; instead of multiplying by -1.0.
(define_expand "negsf2"
- [(parallel [(set (match_operand:SF 0 "register_operand" "")
- (neg:SF (match_operand:SF 1 "register_operand" "")))
- (use (match_dup 2))])]
- "! TARGET_SOFT_FLOAT"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (neg:SF (match_operand:SF 1 "register_operand" "")))]
+ "!TARGET_SOFT_FLOAT"
{
 if (TARGET_PA_20 || flag_unsafe_math_optimizations)
 emit_insn (gen_negsf2_fast (operands[0], operands[1]));
 else
- {
- operands[2] = force_reg (SFmode,
- CONST_DOUBLE_FROM_REAL_VALUE (dconstm1, SFmode));
- emit_insn (gen_mulsf3 (operands[0], operands[1], operands[2]));
- }
+ emit_insn (gen_negsf2_slow (operands[0], operands[1]));
 DONE;
})
+;; Slow path for SFmode negation: toggle the sign bit in a general
+;; register (three insns, 12 bytes); single word, so no low-word copy
+;; is ever needed.
+(define_insn "negsf2_slow"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (neg:SF (match_operand:SF 1 "register_operand" "r")))]
+ "!TARGET_SOFT_FLOAT && !TARGET_PA_20"
+ "and,< %1,%1,%0\;depi,tr 1,0,1,%0\;depi 0,0,1,%0"
+ [(set_attr "type" "multi")
+ (set_attr "length" "12")])
+
(define_insn "negsf2_fast"
[(set (match_operand:SF 0 "register_operand" "=f")
(neg:SF (match_operand:SF 1 "register_operand" "f")))]
- "! TARGET_SOFT_FLOAT && (TARGET_PA_20 || flag_unsafe_math_optimizations)"
+ "!TARGET_SOFT_FLOAT"
"*
{
if (TARGET_PA_20)
\f
;; Unconditional and other jump instructions.
-;; This can only be used in a leaf function, so we do
-;; not need to use the PIC register when generating PIC code.
-(define_insn "return"
- [(return)
- (use (reg:SI 2))
- (const_int 0)]
- "hppa_can_use_return_insn_p ()"
- "*
-{
- if (TARGET_PA_20)
- return \"bve%* (%%r2)\";
- return \"bv%* %%r0(%%r2)\";
-}"
- [(set_attr "type" "branch")
- (set_attr "length" "4")])
-
-;; Emit a different pattern for functions which have non-trivial
-;; epilogues so as not to confuse jump and reorg.
+;; This is used for most returns.
(define_insn "return_internal"
[(return)
- (use (reg:SI 2))
- (const_int 1)]
+ (use (reg:SI 2))]
""
"*
{
(use (reg:SI 2))]
"!TARGET_NO_SPACE_REGS
&& !TARGET_PA_20
- && flag_pic && current_function_calls_eh_return"
+ && flag_pic && crtl->calls_eh_return"
"ldsid (%%sr0,%%r2),%%r1\;mtsp %%r1,%%sr0\;be%* 0(%%sr0,%%r2)"
[(set_attr "type" "branch")
(set_attr "length" "12")])
""
"
{
- /* Try to use the trivial return first. Else use the full
- epilogue. */
- if (hppa_can_use_return_insn_p ())
- emit_jump_insn (gen_return ());
+ rtx x;
+
+ /* Try to use the trivial return first. Else use the full epilogue. */
+ if (reload_completed
+ && !frame_pointer_needed
+ && !df_regs_ever_live_p (2)
+ && (compute_frame_size (get_frame_size (), 0) ? 0 : 1))
+ x = gen_return_internal ();
else
{
- rtx x;
-
hppa_expand_epilogue ();
/* EH returns bypass the normal return stub. Thus, we must do an
using space registers. */
if (!TARGET_NO_SPACE_REGS
&& !TARGET_PA_20
- && flag_pic && current_function_calls_eh_return)
+ && flag_pic && crtl->calls_eh_return)
x = gen_return_external_pic ();
else
x = gen_return_internal ();
-
- emit_jump_insn (x);
}
+ emit_jump_insn (x);
DONE;
}")
lab = copy_to_reg (lab);
- emit_insn (gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_MEM (BLKmode,
- gen_rtx_SCRATCH (VOIDmode))));
- emit_insn (gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_MEM (BLKmode,
- hard_frame_pointer_rtx)));
+ emit_clobber (gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)));
+ emit_clobber (gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx));
/* Restore the frame pointer. The virtual_stack_vars_rtx is saved
instead of the hard_frame_pointer_rtx in the save area. As a
emit_stack_restore (SAVE_NONLOCAL, stack, NULL_RTX);
- emit_insn (gen_rtx_USE (VOIDmode, hard_frame_pointer_rtx));
- emit_insn (gen_rtx_USE (VOIDmode, stack_pointer_rtx));
+ emit_use (hard_frame_pointer_rtx);
+ emit_use (stack_pointer_rtx);
/* Nonlocal goto jumps are only used between functions in the same
translation unit. Thus, we can avoid the extra overhead of an
operands[0] = index;
}
- /* In 64bit mode we must make sure to wipe the upper bits of the register
- just in case the addition overflowed or we had random bits in the
- high part of the register. */
- if (TARGET_64BIT)
- {
- rtx index = gen_reg_rtx (DImode);
-
- emit_insn (gen_extendsidi2 (index, operands[0]));
- operands[0] = gen_rtx_SUBREG (SImode, index, 4);
- }
-
if (!INT_5_BITS (operands[2]))
operands[2] = force_reg (SImode, operands[2]);
then be worthwhile to split the casesi patterns to improve scheduling.
However, it's not clear that all this extra complexity is worth
the effort. */
- emit_insn (gen_cmpsi (operands[0], operands[2]));
- emit_jump_insn (gen_bgtu (operands[4]));
+ {
+ rtx test = gen_rtx_GTU (VOIDmode, operands[0], operands[2]);
+ emit_jump_insn (gen_cbranchsi4 (test, operands[0], operands[2], operands[4]));
+ }
+
+ /* In 64bit mode we must make sure to wipe the upper bits of the register
+ just in case the addition overflowed or we had random bits in the
+ high part of the register. */
+ if (TARGET_64BIT)
+ {
+ rtx index = gen_reg_rtx (DImode);
+
+ emit_insn (gen_extendsidi2 (index, operands[0]));
+ operands[0] = index;
+ }
if (TARGET_BIG_SWITCH)
{
if (TARGET_64BIT)
- {
- rtx tmp1 = gen_reg_rtx (DImode);
- rtx tmp2 = gen_reg_rtx (DImode);
-
- emit_jump_insn (gen_casesi64p (operands[0], operands[3],
- tmp1, tmp2));
- }
+ emit_jump_insn (gen_casesi64p (operands[0], operands[3]));
+ else if (flag_pic)
+ emit_jump_insn (gen_casesi32p (operands[0], operands[3]));
else
- {
- rtx tmp1 = gen_reg_rtx (SImode);
-
- if (flag_pic)
- {
- rtx tmp2 = gen_reg_rtx (SImode);
-
- emit_jump_insn (gen_casesi32p (operands[0], operands[3],
- tmp1, tmp2));
- }
- else
- emit_jump_insn (gen_casesi32 (operands[0], operands[3], tmp1));
- }
+ emit_jump_insn (gen_casesi32 (operands[0], operands[3]));
}
else
emit_jump_insn (gen_casesi0 (operands[0], operands[3]));
(mult:SI (match_operand:SI 0 "register_operand" "r")
(const_int 4))
(label_ref (match_operand 1 "" "")))))
- (clobber (match_operand:SI 2 "register_operand" "=&r"))]
- "!TARGET_64BIT && TARGET_BIG_SWITCH"
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "!flag_pic"
"ldil L'%l1,%2\;ldo R'%l1(%2),%2\;{ldwx|ldw},s %0(%2),%2\;bv,n %%r0(%2)"
[(set_attr "type" "multi")
(set_attr "length" "16")])
(mult:SI (match_operand:SI 0 "register_operand" "r")
(const_int 4))
(label_ref (match_operand 1 "" "")))))
- (clobber (match_operand:SI 2 "register_operand" "=&a"))
- (clobber (match_operand:SI 3 "register_operand" "=&r"))]
- "!TARGET_64BIT && TARGET_BIG_SWITCH"
- "{bl .+8,%2\;depi 0,31,2,%2|mfia %2}\;ldo {16|20}(%2),%2\;\
+ (clobber (match_scratch:SI 2 "=&r"))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ "flag_pic"
+ "{bl .+8,%2\;depi 0,31,2,%2|mfia %2}\;ldo {%l1-.|%l1+4-.}(%2),%2\;\
{ldwx|ldw},s %0(%2),%3\;{addl|add,l} %2,%3,%3\;bv,n %%r0(%3)"
[(set_attr "type" "multi")
(set (attr "length")
;;; 64-bit code, 32-bit relative branch table.
+;; Operand 0 is now DImode: the casesi expander has already sign-extended
+;; the index, so no sign_extend is needed in the pattern.  The table
+;; address is formed PC-relative with mfia/ldo, making the pattern
+;; usable regardless of TARGET_BIG_SWITCH.
(define_insn "casesi64p"
[(set (pc) (mem:DI (plus:DI
- (mult:DI (sign_extend:DI
- (match_operand:SI 0 "register_operand" "r"))
+ (mult:DI (match_operand:DI 0 "register_operand" "r")
(const_int 8))
(label_ref (match_operand 1 "" "")))))
- (clobber (match_operand:DI 2 "register_operand" "=&r"))
- (clobber (match_operand:DI 3 "register_operand" "=&r"))]
- "TARGET_64BIT && TARGET_BIG_SWITCH"
- "mfia %2\;ldo 24(%2),%2\;ldw,s %0(%2),%3\;extrd,s %3,63,32,%3\;\
+ (clobber (match_scratch:DI 2 "=&r"))
+ (clobber (match_scratch:DI 3 "=&r"))]
+ ""
+ "mfia %2\;ldo %l1+4-.(%2),%2\;ldw,s %0(%2),%3\;extrd,s %3,63,32,%3\;\
add,l %2,%3,%3\;bv,n %%r0(%3)"
[(set_attr "type" "multi")
(set_attr "length" "24")])
""
"
{
- rtx op, call_insn;
+ rtx op;
rtx nb = operands[1];
if (TARGET_PORTABLE_RUNTIME)
the only method that we have for doing DImode multiplication
is with a libcall. This could be trouble if we haven't
allocated enough space for the outgoing arguments. */
- gcc_assert (INTVAL (nb) <= current_function_outgoing_args_size);
+ gcc_assert (INTVAL (nb) <= crtl->outgoing_args_size);
emit_move_insn (arg_pointer_rtx,
gen_rtx_PLUS (word_mode, stack_pointer_rtx,
need to have a use of the PIC register in the return pattern and
the final save/restore operation is not needed.
- I elected to just clobber %r4 in the PIC patterns and use it instead
+ I elected to just use register %r4 in the PIC patterns instead
of trying to force hppa_pic_save_rtx () to a callee saved register.
This might have required a new register class and constraint. It
was also simpler to just handle the restore from a register than a
generic pseudo. */
if (TARGET_64BIT)
{
+ rtx r4 = gen_rtx_REG (word_mode, 4);
if (GET_CODE (op) == SYMBOL_REF)
- call_insn = emit_call_insn (gen_call_symref_64bit (op, nb));
+ emit_call_insn (gen_call_symref_64bit (op, nb, r4));
else
{
op = force_reg (word_mode, op);
- call_insn = emit_call_insn (gen_call_reg_64bit (op, nb));
+ emit_call_insn (gen_call_reg_64bit (op, nb, r4));
}
}
else
if (GET_CODE (op) == SYMBOL_REF)
{
if (flag_pic)
- call_insn = emit_call_insn (gen_call_symref_pic (op, nb));
+ {
+ rtx r4 = gen_rtx_REG (word_mode, 4);
+ emit_call_insn (gen_call_symref_pic (op, nb, r4));
+ }
else
- call_insn = emit_call_insn (gen_call_symref (op, nb));
+ emit_call_insn (gen_call_symref (op, nb));
}
else
{
rtx tmpreg = gen_rtx_REG (word_mode, 22);
-
emit_move_insn (tmpreg, force_reg (word_mode, op));
if (flag_pic)
- call_insn = emit_call_insn (gen_call_reg_pic (nb));
+ {
+ rtx r4 = gen_rtx_REG (word_mode, 4);
+ emit_call_insn (gen_call_reg_pic (nb, r4));
+ }
else
- call_insn = emit_call_insn (gen_call_reg (nb));
+ emit_call_insn (gen_call_reg (nb));
}
}
(set (attr "length") (symbol_ref "attr_length_call (insn, 0)"))])
+;; PIC symbolic call.  Operand 2 is the scratch that temporarily holds
+;; the PIC register (%r19) across the call.  The insn is always split
+;; after reload (see the following splits), hence the "#" template.
(define_insn "call_symref_pic"
- [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+ [(set (match_operand:SI 2 "register_operand" "=&r") (reg:SI 19))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" "i"))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
(use (reg:SI 19))
(use (const_int 0))]
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
- "*
-{
- output_arg_descriptor (insn);
- return output_call (insn, operands[0], 0);
-}"
- [(set_attr "type" "call")
- (set (attr "length")
- (plus (symbol_ref "attr_length_call (insn, 0)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
+;; Noreturn variant (tried first): the call ends its basic block, so
+;; %r19 is saved into operand 2 but no restore is emitted after the call.
(define_split
- [(parallel [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+ [(parallel [(set (match_operand:SI 2 "register_operand" "") (reg:SI 19))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" ""))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
(use (reg:SI 19))
(use (const_int 0))])]
- "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:SI 4) (reg:SI 19))
+ "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 2) (reg:SI 19))
(parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 0))])
- (set (reg:SI 19) (reg:SI 4))]
+ (use (const_int 0))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+;; General (returning) variant: save %r19 into operand 2 before the
+;; call and restore it afterwards.
+(define_split
+ [(parallel [(set (match_operand:SI 2 "register_operand" "") (reg:SI 19))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" ""))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
(use (reg:SI 19))
(use (const_int 0))])]
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT && reload_completed"
- [(parallel [(call (mem:SI (match_dup 0))
+ [(set (match_dup 2) (reg:SI 19))
+ (parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 0))])]
+ (use (const_int 0))])
+ (set (reg:SI 19) (match_dup 2))]
"")
(define_insn "*call_symref_pic_post_reload"
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
+;; 64-bit symbolic call.  Operand 2 temporarily holds the 64-bit PIC
+;; register (%r27).  Always split after reload, hence the "#" template.
(define_insn "call_symref_64bit"
- [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+ [(set (match_operand:DI 2 "register_operand" "=&r") (reg:DI 27))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" "i"))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 2))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 0))]
"TARGET_64BIT"
- "*
-{
- output_arg_descriptor (insn);
- return output_call (insn, operands[0], 0);
-}"
- [(set_attr "type" "call")
- (set (attr "length")
- (plus (symbol_ref "attr_length_call (insn, 0)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
+;; Noreturn variant (tried first): save %r27 but emit no restore since
+;; the call terminates its basic block.
(define_split
- [(parallel [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+ [(parallel [(set (match_operand:DI 2 "register_operand" "") (reg:DI 27))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" ""))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 2))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 0))])]
- "TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:DI 4) (reg:DI 27))
+ "TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 2) (reg:DI 27))
(parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 0))])
- (set (reg:DI 27) (reg:DI 4))]
+ (use (const_int 0))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+;; General (returning) variant: save %r27 before the call and restore
+;; it afterwards.
+(define_split
+ [(parallel [(set (match_operand:DI 2 "register_operand" "") (reg:DI 27))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" ""))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 2))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 0))])]
"TARGET_64BIT && reload_completed"
- [(parallel [(call (mem:SI (match_dup 0))
+ [(set (match_dup 2) (reg:DI 27))
+ (parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 0))])]
+ (use (const_int 0))])
+ (set (reg:DI 27) (match_dup 2))]
"")
(define_insn "*call_symref_64bit_post_reload"
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
+;; PIC indirect call through %r22.  Operand 1 temporarily holds the PIC
+;; register (%r19).  Always split after reload, hence the "#" template.
(define_insn "call_reg_pic"
- [(call (mem:SI (reg:SI 22))
+ [(set (match_operand:SI 1 "register_operand" "=&r") (reg:SI 19))
+ (call (mem:SI (reg:SI 22))
(match_operand 0 "" "i"))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 1))
(use (reg:SI 19))
(use (const_int 1))]
"!TARGET_64BIT"
- "*
-{
- return output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
-}"
- [(set_attr "type" "dyncall")
- (set (attr "length")
- (plus (symbol_ref "attr_length_indirect_call (insn)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
+;; Noreturn variant (tried first): save %r19 but emit no restore since
+;; the call terminates its basic block.
(define_split
- [(parallel [(call (mem:SI (reg:SI 22))
+ [(parallel [(set (match_operand:SI 1 "register_operand" "") (reg:SI 19))
+ (call (mem:SI (reg:SI 22))
(match_operand 0 "" ""))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 1))
(use (reg:SI 19))
(use (const_int 1))])]
- "!TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:SI 4) (reg:SI 19))
+ "!TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 1) (reg:SI 19))
(parallel [(call (mem:SI (reg:SI 22))
(match_dup 0))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 1))])
- (set (reg:SI 19) (reg:SI 4))]
+ (use (const_int 1))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(call (mem:SI (reg:SI 22))
+;; General (returning) variant: save %r19 before the call and restore
+;; it afterwards.
+(define_split
+ [(parallel [(set (match_operand:SI 1 "register_operand" "") (reg:SI 19))
+ (call (mem:SI (reg:SI 22))
(match_operand 0 "" ""))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 1))
(use (reg:SI 19))
(use (const_int 1))])]
"!TARGET_64BIT && reload_completed"
- [(parallel [(call (mem:SI (reg:SI 22))
+ [(set (match_dup 1) (reg:SI 19))
+ (parallel [(call (mem:SI (reg:SI 22))
(match_dup 0))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 1))])]
+ (use (const_int 1))])
+ (set (reg:SI 19) (match_dup 1))]
"")
(define_insn "*call_reg_pic_post_reload"
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
+;; 64-bit indirect call.  Operand 2 temporarily holds the PIC register
+;; (%r27); a %r1 clobber is added to match the post-reload pattern.
+;; Always split after reload, hence the "#" template.
(define_insn "call_reg_64bit"
- [(call (mem:SI (match_operand:DI 0 "register_operand" "r"))
+ [(set (match_operand:DI 2 "register_operand" "=&r") (reg:DI 27))
+ (call (mem:SI (match_operand:DI 0 "register_operand" "r"))
(match_operand 1 "" "i"))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 2))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 1))]
"TARGET_64BIT"
- "*
-{
- return output_indirect_call (insn, operands[0]);
-}"
- [(set_attr "type" "dyncall")
- (set (attr "length")
- (plus (symbol_ref "attr_length_indirect_call (insn)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
+;; Noreturn variant (tried first): save %r27 but emit no restore since
+;; the call terminates its basic block.
(define_split
- [(parallel [(call (mem:SI (match_operand 0 "register_operand" ""))
+ [(parallel [(set (match_operand:DI 2 "register_operand" "") (reg:DI 27))
+ (call (mem:SI (match_operand 0 "register_operand" ""))
(match_operand 1 "" ""))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 2))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 1))])]
- "TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:DI 4) (reg:DI 27))
+ "TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 2) (reg:DI 27))
(parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 1))])
- (set (reg:DI 27) (reg:DI 4))]
+ (use (const_int 1))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(call (mem:SI (match_operand 0 "register_operand" ""))
+;; General (returning) variant: save %r27 before the call and restore
+;; it afterwards.
+(define_split
+ [(parallel [(set (match_operand:DI 2 "register_operand" "") (reg:DI 27))
+ (call (mem:SI (match_operand 0 "register_operand" ""))
(match_operand 1 "" ""))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 2))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 1))])]
"TARGET_64BIT && reload_completed"
- [(parallel [(call (mem:SI (match_dup 0))
+ [(set (match_dup 2) (reg:DI 27))
+ (parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 1))])]
+ (use (const_int 1))])
+ (set (reg:DI 27) (match_dup 2))]
"")
(define_insn "*call_reg_64bit_post_reload"
[(call (mem:SI (match_operand:DI 0 "register_operand" "r"))
(match_operand 1 "" "i"))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
""
"
{
- rtx op, call_insn;
+ rtx op;
rtx dst = operands[0];
rtx nb = operands[2];
the only method that we have for doing DImode multiplication
is with a libcall. This could be trouble if we haven't
allocated enough space for the outgoing arguments. */
- gcc_assert (INTVAL (nb) <= current_function_outgoing_args_size);
+ gcc_assert (INTVAL (nb) <= crtl->outgoing_args_size);
emit_move_insn (arg_pointer_rtx,
gen_rtx_PLUS (word_mode, stack_pointer_rtx,
need to have a use of the PIC register in the return pattern and
the final save/restore operation is not needed.
- I elected to just clobber %r4 in the PIC patterns and use it instead
+ I elected to just use register %r4 in the PIC patterns instead
of trying to force hppa_pic_save_rtx () to a callee saved register.
This might have required a new register class and constraint. It
was also simpler to just handle the restore from a register than a
generic pseudo. */
if (TARGET_64BIT)
{
+ rtx r4 = gen_rtx_REG (word_mode, 4);
if (GET_CODE (op) == SYMBOL_REF)
- call_insn = emit_call_insn (gen_call_val_symref_64bit (dst, op, nb));
+ emit_call_insn (gen_call_val_symref_64bit (dst, op, nb, r4));
else
{
op = force_reg (word_mode, op);
- call_insn = emit_call_insn (gen_call_val_reg_64bit (dst, op, nb));
+ emit_call_insn (gen_call_val_reg_64bit (dst, op, nb, r4));
}
}
else
if (GET_CODE (op) == SYMBOL_REF)
{
if (flag_pic)
- call_insn = emit_call_insn (gen_call_val_symref_pic (dst, op, nb));
+ {
+ rtx r4 = gen_rtx_REG (word_mode, 4);
+ emit_call_insn (gen_call_val_symref_pic (dst, op, nb, r4));
+ }
else
- call_insn = emit_call_insn (gen_call_val_symref (dst, op, nb));
+ emit_call_insn (gen_call_val_symref (dst, op, nb));
}
else
{
rtx tmpreg = gen_rtx_REG (word_mode, 22);
-
emit_move_insn (tmpreg, force_reg (word_mode, op));
if (flag_pic)
- call_insn = emit_call_insn (gen_call_val_reg_pic (dst, nb));
+ {
+ rtx r4 = gen_rtx_REG (word_mode, 4);
+ emit_call_insn (gen_call_val_reg_pic (dst, nb, r4));
+ }
else
- call_insn = emit_call_insn (gen_call_val_reg (dst, nb));
+ emit_call_insn (gen_call_val_reg (dst, nb));
}
}
(set (attr "length") (symbol_ref "attr_length_call (insn, 0)"))])
+;; Value-returning PIC symbolic call.  Operand 3 temporarily holds the
+;; PIC register (%r19).  Always split after reload, hence the "#" template.
(define_insn "call_val_symref_pic"
- [(set (match_operand 0 "" "")
+ [(set (match_operand:SI 3 "register_operand" "=&r") (reg:SI 19))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand 1 "call_operand_address" ""))
(match_operand 2 "" "i")))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 3))
(use (reg:SI 19))
(use (const_int 0))]
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
- "*
-{
- output_arg_descriptor (insn);
- return output_call (insn, operands[1], 0);
-}"
- [(set_attr "type" "call")
- (set (attr "length")
- (plus (symbol_ref "attr_length_call (insn, 0)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
+;; Noreturn variant (tried first): save %r19 but emit no restore since
+;; the call terminates its basic block.
(define_split
- [(parallel [(set (match_operand 0 "" "")
+ [(parallel [(set (match_operand:SI 3 "register_operand" "") (reg:SI 19))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand 1 "call_operand_address" ""))
(match_operand 2 "" "")))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 3))
(use (reg:SI 19))
(use (const_int 0))])]
- "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:SI 4) (reg:SI 19))
+ "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 3) (reg:SI 19))
(parallel [(set (match_dup 0)
(call (mem:SI (match_dup 1))
(match_dup 2)))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 0))])
- (set (reg:SI 19) (reg:SI 4))]
+ (use (const_int 0))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(set (match_operand 0 "" "")
+;; General (returning) variant: save %r19 before the call and restore
+;; it afterwards.
+(define_split
+ [(parallel [(set (match_operand:SI 3 "register_operand" "") (reg:SI 19))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand 1 "call_operand_address" ""))
(match_operand 2 "" "")))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 3))
(use (reg:SI 19))
(use (const_int 0))])]
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT && reload_completed"
- [(parallel [(set (match_dup 0)
+ [(set (match_dup 3) (reg:SI 19))
+ (parallel [(set (match_dup 0)
(call (mem:SI (match_dup 1))
(match_dup 2)))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 0))])]
+ (use (const_int 0))])
+ (set (reg:SI 19) (match_dup 3))]
"")
(define_insn "*call_val_symref_pic_post_reload"
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
+;; Value-returning 64-bit symbolic call.  Operand 3 temporarily holds
+;; the PIC register (%r27).  Always split after reload, hence "#".
(define_insn "call_val_symref_64bit"
- [(set (match_operand 0 "" "")
+ [(set (match_operand:DI 3 "register_operand" "=&r") (reg:DI 27))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand 1 "call_operand_address" ""))
(match_operand 2 "" "i")))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 0))]
"TARGET_64BIT"
- "*
-{
- output_arg_descriptor (insn);
- return output_call (insn, operands[1], 0);
-}"
- [(set_attr "type" "call")
- (set (attr "length")
- (plus (symbol_ref "attr_length_call (insn, 0)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
+;; Noreturn variant (tried first): save %r27 but emit no restore since
+;; the call terminates its basic block.  The call parallel must keep
+;; the %r1 clobber from the unsplit pattern so the result still matches
+;; *call_val_symref_64bit_post_reload (cf. the general split below and
+;; every other 64-bit call split).
(define_split
- [(parallel [(set (match_operand 0 "" "")
+ [(parallel [(set (match_operand:DI 3 "register_operand" "") (reg:DI 27))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand 1 "call_operand_address" ""))
(match_operand 2 "" "")))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 0))])]
- "TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:DI 4) (reg:DI 27))
+ "TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 3) (reg:DI 27))
(parallel [(set (match_dup 0)
(call (mem:SI (match_dup 1))
(match_dup 2)))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 0))])
- (set (reg:DI 27) (reg:DI 4))]
+ (use (const_int 0))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(set (match_operand 0 "" "")
+;; General (returning) variant: save %r27 before the call and restore
+;; it afterwards.
+(define_split
+ [(parallel [(set (match_operand:DI 3 "register_operand" "") (reg:DI 27))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand 1 "call_operand_address" ""))
(match_operand 2 "" "")))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 0))])]
"TARGET_64BIT && reload_completed"
- [(parallel [(set (match_dup 0)
+ [(set (match_dup 3) (reg:DI 27))
+ (parallel [(set (match_dup 0)
(call (mem:SI (match_dup 1))
(match_dup 2)))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 0))])]
+ (use (const_int 0))])
+ (set (reg:DI 27) (match_dup 3))]
"")
(define_insn "*call_val_symref_64bit_post_reload"
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
+;; Value-returning PIC indirect call through %r22.  Operand 2 temporarily
+;; holds the PIC register (%r19).  Always split after reload, hence "#".
(define_insn "call_val_reg_pic"
- [(set (match_operand 0 "" "")
+ [(set (match_operand:SI 2 "register_operand" "=&r") (reg:SI 19))
+ (set (match_operand 0 "" "")
(call (mem:SI (reg:SI 22))
(match_operand 1 "" "i")))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
(use (reg:SI 19))
(use (const_int 1))]
"!TARGET_64BIT"
- "*
-{
- return output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
-}"
- [(set_attr "type" "dyncall")
- (set (attr "length")
- (plus (symbol_ref "attr_length_indirect_call (insn)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
+;; Noreturn variant (tried first): save %r19 but emit no restore since
+;; the call terminates its basic block.
(define_split
- [(parallel [(set (match_operand 0 "" "")
+ [(parallel [(set (match_operand:SI 2 "register_operand" "") (reg:SI 19))
+ (set (match_operand 0 "" "")
(call (mem:SI (reg:SI 22))
(match_operand 1 "" "")))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
(use (reg:SI 19))
(use (const_int 1))])]
- "!TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:SI 4) (reg:SI 19))
+ "!TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 2) (reg:SI 19))
(parallel [(set (match_dup 0)
(call (mem:SI (reg:SI 22))
(match_dup 1)))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 1))])
- (set (reg:SI 19) (reg:SI 4))]
+ (use (const_int 1))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(set (match_operand 0 "" "")
+;; Split for ordinary (returning) calls: copy %r19 into operand 2
+;; before the call and restore %r19 from operand 2 after it.
+(define_split
+ [(parallel [(set (match_operand:SI 2 "register_operand" "") (reg:SI 19))
+ (set (match_operand 0 "" "")
 (call (mem:SI (reg:SI 22))
 (match_operand 1 "" "")))
 (clobber (reg:SI 1))
 (clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
 (use (reg:SI 19))
 (use (const_int 1))])]
 "!TARGET_64BIT && reload_completed"
- [(parallel [(set (match_dup 0)
+ [(set (match_dup 2) (reg:SI 19))
+ (parallel [(set (match_dup 0)
 (call (mem:SI (reg:SI 22))
 (match_dup 1)))
 (clobber (reg:SI 1))
 (clobber (reg:SI 2))
 (use (reg:SI 19))
- (use (const_int 1))])]
+ (use (const_int 1))])
+ (set (reg:SI 19) (match_dup 2))]
 "")
(define_insn "*call_val_reg_pic_post_reload"
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
+;;
+;; In 64-bit mode the register saved is %r27 (operand 3 carries the
+;; save); like the 32-bit variant, the template is "#" and the insn is
+;; always split after reload.
(define_insn "call_val_reg_64bit"
- [(set (match_operand 0 "" "")
+ [(set (match_operand:DI 3 "register_operand" "=&r") (reg:DI 27))
+ (set (match_operand 0 "" "")
 (call (mem:SI (match_operand:DI 1 "register_operand" "r"))
 (match_operand 2 "" "i")))
+ (clobber (reg:DI 1))
 (clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
 (use (reg:DI 27))
 (use (reg:DI 29))
 (use (const_int 1))]
 "TARGET_64BIT"
- "*
-{
- return output_indirect_call (insn, operands[1]);
-}"
- [(set_attr "type" "dyncall")
- (set (attr "length")
- (plus (symbol_ref "attr_length_indirect_call (insn)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
+;;
+;; 64-bit noreturn variant: %r27 is saved into operand 3 before the
+;; call; no restore is emitted since control never returns.
(define_split
- [(parallel [(set (match_operand 0 "" "")
+ [(parallel [(set (match_operand:DI 3 "register_operand" "") (reg:DI 27))
+ (set (match_operand 0 "" "")
 (call (mem:SI (match_operand:DI 1 "register_operand" ""))
 (match_operand 2 "" "")))
+ (clobber (reg:DI 1))
 (clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
 (use (reg:DI 27))
 (use (reg:DI 29))
 (use (const_int 1))])]
- "TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:DI 4) (reg:DI 27))
+ "TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 3) (reg:DI 27))
 (parallel [(set (match_dup 0)
 (call (mem:SI (match_dup 1))
 (match_dup 2)))
+ (clobber (reg:DI 1))
 (clobber (reg:DI 2))
 (use (reg:DI 27))
 (use (reg:DI 29))
- (use (const_int 1))])
- (set (reg:DI 27) (reg:DI 4))]
+ (use (const_int 1))])]
 "")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(set (match_operand 0 "" "")
+;; Split for ordinary (returning) 64-bit calls: copy %r27 into
+;; operand 3 before the call and restore %r27 from operand 3 after it.
+(define_split
+ [(parallel [(set (match_operand:DI 3 "register_operand" "") (reg:DI 27))
+ (set (match_operand 0 "" "")
 (call (mem:SI (match_operand:DI 1 "register_operand" ""))
 (match_operand 2 "" "")))
+ (clobber (reg:DI 1))
 (clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
 (use (reg:DI 27))
 (use (reg:DI 29))
 (use (const_int 1))])]
 "TARGET_64BIT && reload_completed"
- [(parallel [(set (match_dup 0)
+ [(set (match_dup 3) (reg:DI 27))
+ (parallel [(set (match_dup 0)
 (call (mem:SI (match_dup 1))
 (match_dup 2)))
+ (clobber (reg:DI 1))
 (clobber (reg:DI 2))
 (use (reg:DI 27))
 (use (reg:DI 29))
- (use (const_int 1))])]
+ (use (const_int 1))])
+ (set (reg:DI 27) (match_dup 3))]
 "")
(define_insn "*call_val_reg_64bit_post_reload"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:DI 1 "register_operand" "r"))
(match_operand 2 "" "i")))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
the only method that we have for doing DImode multiplication
is with a libcall. This could be trouble if we haven't
allocated enough space for the outgoing arguments. */
- gcc_assert (INTVAL (nb) <= current_function_outgoing_args_size);
+ gcc_assert (INTVAL (nb) <= crtl->outgoing_args_size);
emit_move_insn (arg_pointer_rtx,
gen_rtx_PLUS (word_mode, stack_pointer_rtx,
the only method that we have for doing DImode multiplication
is with a libcall. This could be trouble if we haven't
allocated enough space for the outgoing arguments. */
- gcc_assert (INTVAL (nb) <= current_function_outgoing_args_size);
+ gcc_assert (INTVAL (nb) <= crtl->outgoing_args_size);
emit_move_insn (arg_pointer_rtx,
gen_rtx_PLUS (word_mode, stack_pointer_rtx,
(POINTER_SIZE * 2) / BITS_PER_UNIT));
rtx pv = gen_rtx_REG (Pmode, 1);
- emit_insn (gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_MEM (BLKmode,
- gen_rtx_SCRATCH (VOIDmode))));
- emit_insn (gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_MEM (BLKmode,
- hard_frame_pointer_rtx)));
+ emit_clobber (gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)));
+ emit_clobber (gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx));
/* Restore the frame pointer. The virtual_stack_vars_rtx is saved
instead of the hard_frame_pointer_rtx in the save area. We need
/* This bit is the same as expand_builtin_longjmp. */
emit_stack_restore (SAVE_NONLOCAL, stack, NULL_RTX);
- emit_insn (gen_rtx_USE (VOIDmode, hard_frame_pointer_rtx));
- emit_insn (gen_rtx_USE (VOIDmode, stack_pointer_rtx));
+ emit_use (hard_frame_pointer_rtx);
+ emit_use (stack_pointer_rtx);
/* Load the label we are jumping through into r1 so that we know
where to look for it when we get back to setjmp's function for
[(set_attr "type" "fpalu")
(set_attr "length" "4")])
-;; Flush the I and D cache lines from the start address (operand0)
-;; to the end address (operand1). No lines are flushed if the end
-;; address is less than the start address (unsigned).
+;; The following two patterns are used by the trampoline code for nested
+;; functions. They flush the I and D cache lines from the start address
+;; (operand0) to the end address (operand1). No lines are flushed if the
+;; end address is less than the start address (unsigned).
 ;;
-;; Because the range of memory flushed is variable and the size of
-;; a MEM can only be a CONST_INT, the patterns specify that they
-;; perform an unspecified volatile operation on all memory.
+;; Because the range of memory flushed is variable and the size of a MEM
+;; can only be a CONST_INT, the patterns specify that they perform an
+;; unspecified volatile operation on all memory.
 ;;
 ;; The address range for an icache flush must lie within a single
 ;; space on targets with non-equivalent space registers.
 ;;
-;; This is used by the trampoline code for nested functions.
-;;
 ;; Operand 0 contains the start address.
 ;; Operand 1 contains the end address.
 ;; Operand 2 contains the line length to use.
-;; Operands 3 and 4 (icacheflush) are clobbered scratch registers.
+;; The pattern is instantiated once per pointer mode via the P mode
+;; iterator; the <dwc> mode attribute expands to the "*" doubleword
+;; completer for DImode and to nothing for SImode, replacing the old
+;; runtime TARGET_64BIT test in the output template.
-(define_insn "dcacheflush"
+(define_insn "dcacheflush<P:mode>"
 [(const_int 1)
 (unspec_volatile [(mem:BLK (scratch))] UNSPECV_DCACHE)
 (use (match_operand 0 "pmode_register_operand" "r"))
 (use (match_operand 1 "pmode_register_operand" "r"))
 (use (match_operand 2 "pmode_register_operand" "r"))
- (clobber (match_scratch 3 "=&0"))]
+ (clobber (match_scratch:P 3 "=&0"))]
 ""
- "*
-{
- if (TARGET_64BIT)
- return \"cmpb,*<<=,n %3,%1,.\;fdc,m %2(%3)\;sync\";
- else
- return \"cmpb,<<=,n %3,%1,.\;fdc,m %2(%3)\;sync\";
-}"
+ "cmpb,<dwc><<=,n %3,%1,.\;fdc,m %2(%3)\;sync"
 [(set_attr "type" "multi")
 (set_attr "length" "12")])
+;; The icache variant additionally switches %sr0 to the target space
+;; (mfsp/ldsid/mtsp around the fic) and restores it afterwards, so the
+;; flush hits the correct space; the trailing nops pad the sequence.
-(define_insn "icacheflush"
+(define_insn "icacheflush<P:mode>"
 [(const_int 2)
 (unspec_volatile [(mem:BLK (scratch))] UNSPECV_ICACHE)
 (use (match_operand 0 "pmode_register_operand" "r"))
 (use (match_operand 2 "pmode_register_operand" "r"))
 (clobber (match_operand 3 "pmode_register_operand" "=&r"))
 (clobber (match_operand 4 "pmode_register_operand" "=&r"))
- (clobber (match_scratch 5 "=&0"))]
+ (clobber (match_scratch:P 5 "=&0"))]
 ""
- "*
-{
- if (TARGET_64BIT)
- return \"mfsp %%sr0,%4\;ldsid (%5),%3\;mtsp %3,%%sr0\;cmpb,*<<=,n %5,%1,.\;fic,m %2(%%sr0,%5)\;sync\;mtsp %4,%%sr0\;nop\;nop\;nop\;nop\;nop\;nop\";
- else
- return \"mfsp %%sr0,%4\;ldsid (%5),%3\;mtsp %3,%%sr0\;cmpb,<<=,n %5,%1,.\;fic,m %2(%%sr0,%5)\;sync\;mtsp %4,%%sr0\;nop\;nop\;nop\;nop\;nop\;nop\";
-}"
+ "mfsp %%sr0,%4\;ldsid (%5),%3\;mtsp %3,%%sr0\;cmpb,<dwc><<=,n %5,%1,.\;fic,m %2(%%sr0,%5)\;sync\;mtsp %4,%%sr0\;nop\;nop\;nop\;nop\;nop\;nop"
 [(set_attr "type" "multi")
 (set_attr "length" "52")])
""
"*
{
- extern int frame_pointer_needed;
-
+
/* We need two different versions depending on whether or not we
need a frame pointer. Also note that we return to the instruction
immediately after the branch rather than two instructions after the
""
"*
{
- extern int frame_pointer_needed;
/* We need two different versions depending on whether or not we
need a frame pointer. Also note that we return to the instruction
(match_operand 2 "const_int_operand" "")]
"TARGET_PA_20"
{
- int locality = INTVAL (operands[2]);
-
- gcc_assert (locality >= 0 && locality <= 3);
-
- /* Change operand[0] to a MEM as we don't have the infrastructure
- to output all the supported address modes for ldw/ldd when we use
- the address directly. However, we do have it for MEMs. */
- operands[0] = gen_rtx_MEM (QImode, operands[0]);
-
- /* If the address isn't valid for the prefetch, replace it. */
- if (locality)
- {
- if (!prefetch_nocc_operand (operands[0], QImode))
- operands[0]
- = replace_equiv_address (operands[0],
- copy_to_mode_reg (Pmode,
- XEXP (operands[0], 0)));
- emit_insn (gen_prefetch_nocc (operands[0], operands[1], operands[2]));
- }
- else
- {
- if (!prefetch_cc_operand (operands[0], QImode))
- operands[0]
- = replace_equiv_address (operands[0],
- copy_to_mode_reg (Pmode,
- XEXP (operands[0], 0)));
- emit_insn (gen_prefetch_cc (operands[0], operands[1], operands[2]));
- }
+ operands[0] = copy_addr_to_reg (operands[0]);
+ emit_insn (gen_prefetch_20 (operands[0], operands[1], operands[2]));
DONE;
})
-(define_insn "prefetch_cc"
- [(prefetch (match_operand:QI 0 "prefetch_cc_operand" "RW")
+;; prefetch_20 replaces the old prefetch_cc/prefetch_nocc pair.  The
+;; address is now always a plain register (the expander copies it with
+;; copy_addr_to_reg), so one pattern covers both locality hints.
+(define_insn "prefetch_20"
+ [(prefetch (match_operand 0 "pmode_register_operand" "r")
 (match_operand:SI 1 "const_int_operand" "n")
 (match_operand:SI 2 "const_int_operand" "n"))]
- "TARGET_PA_20 && operands[2] == const0_rtx"
+ "TARGET_PA_20"
 {
- /* The SL cache-control completor indicates good spatial locality but
+ /* The SL cache-control completer indicates good spatial locality but
 poor temporal locality. The ldw instruction with a target of general
 register 0 prefetches a cache line for a read. The ldd instruction
 prefetches a cache line for a write. */
- static const char * const instr[2] = {
- "ldw%M0,sl %0,%%r0",
- "ldd%M0,sl %0,%%r0"
- };
- int read_or_write = INTVAL (operands[1]);
-
- gcc_assert (read_or_write >= 0 && read_or_write <= 1);
-
- return instr [read_or_write];
-}
- [(set_attr "type" "load")
- (set_attr "length" "4")])
-
-(define_insn "prefetch_nocc"
- [(prefetch (match_operand:QI 0 "prefetch_nocc_operand" "A,RQ")
- (match_operand:SI 1 "const_int_operand" "n,n")
- (match_operand:SI 2 "const_int_operand" "n,n"))]
- "TARGET_PA_20 && operands[2] != const0_rtx"
-{
- /* The ldw instruction with a target of general register 0 prefetches
- a cache line for a read. The ldd instruction prefetches a cache line
- for a write. */
 static const char * const instr[2][2] = {
 {
- "ldw RT'%A0,%%r0",
- "ldd RT'%A0,%%r0",
+ "ldw,sl 0(%0),%%r0",
+ "ldd,sl 0(%0),%%r0"
 },
 {
- "ldw%M0 %0,%%r0",
- "ldd%M0 %0,%%r0",
+ "ldw 0(%0),%%r0",
+ "ldd 0(%0),%%r0"
 }
 };
- int read_or_write = INTVAL (operands[1]);
+ /* instr is indexed [locality][read_or_write]; row 0 (no temporal
+    locality) uses the ,sl completer, column 1 uses ldd (write).  */
+ int read_or_write = INTVAL (operands[1]) == 0 ? 0 : 1;
+ int locality = INTVAL (operands[2]) == 0 ? 0 : 1;
- gcc_assert (which_alternative == 0 || which_alternative == 1);
- gcc_assert (read_or_write >= 0 && read_or_write <= 1);
-
- return instr [which_alternative][read_or_write];
+ return instr [locality][read_or_write];
 }
 [(set_attr "type" "load")
 (set_attr "length" "4")])
-
;; TLS Support
+;; The runtime flag_pic test is replaced by two patterns: tgd_load
+;; addresses the global-dynamic TLS index ($tls_gdidx$) relative to
+;; %r27, while tgd_load_pic (UNSPEC_TLSGD_PIC) addresses it relative
+;; to the PIC register %r19.
(define_insn "tgd_load"
 [(set (match_operand:SI 0 "register_operand" "=r")
 (unspec:SI [(match_operand 1 "tgd_symbolic_operand" "")] UNSPEC_TLSGD))
- (clobber (reg:SI 1))]
+ (clobber (reg:SI 1))
+ (use (reg:SI 27))]
 ""
 "*
 {
- if (flag_pic)
- return \"addil LT'%1-$tls_gdidx$,%%r19\;ldo RT'%1-$tls_gdidx$(%%r1),%0\";
- else
- return \"addil LR'%1-$tls_gdidx$,%%r27\;ldo RR'%1-$tls_gdidx$(%%r1),%0\";
+ return \"addil LR'%1-$tls_gdidx$,%%r27\;ldo RR'%1-$tls_gdidx$(%%r1),%0\";
+}"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
+(define_insn "tgd_load_pic"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "tgd_symbolic_operand" "")] UNSPEC_TLSGD_PIC))
+ (clobber (reg:SI 1))
+ (use (reg:SI 19))]
+ ""
+ "*
+{
+ return \"addil LT'%1-$tls_gdidx$,%%r19\;ldo RT'%1-$tls_gdidx$(%%r1),%0\";
+}"
 [(set_attr "type" "multi")
 (set_attr "length" "8")])
+;; Local-dynamic TLS module index ($tls_ldidx$): tld_load addresses it
+;; relative to %r27; tld_load_pic (UNSPEC_TLSLDM_PIC) relative to the
+;; PIC register %r19.
(define_insn "tld_load"
 [(set (match_operand:SI 0 "register_operand" "=r")
 (unspec:SI [(match_operand 1 "tld_symbolic_operand" "")] UNSPEC_TLSLDM))
- (clobber (reg:SI 1))]
+ (clobber (reg:SI 1))
+ (use (reg:SI 27))]
 ""
 "*
 {
- if (flag_pic)
- return \"addil LT'%1-$tls_ldidx$,%%r19\;ldo RT'%1-$tls_ldidx$(%%r1),%0\";
- else
- return \"addil LR'%1-$tls_ldidx$,%%r27\;ldo RR'%1-$tls_ldidx$(%%r1),%0\";
+ return \"addil LR'%1-$tls_ldidx$,%%r27\;ldo RR'%1-$tls_ldidx$(%%r1),%0\";
+}"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
+(define_insn "tld_load_pic"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "tld_symbolic_operand" "")] UNSPEC_TLSLDM_PIC))
+ (clobber (reg:SI 1))
+ (use (reg:SI 19))]
+ ""
+ "*
+{
+ return \"addil LT'%1-$tls_ldidx$,%%r19\;ldo RT'%1-$tls_ldidx$(%%r1),%0\";
+}"
 [(set_attr "type" "multi")
 (set_attr "length" "8")])
+;; Initial-exec TLS: unlike the ldo-based patterns above, these use ldw
+;; to load the $tls_ieoff$ slot.  tie_load addresses it relative to
+;; %r27; tie_load_pic (UNSPEC_TLSIE_PIC) relative to the PIC register
+;; %r19.
(define_insn "tie_load"
 [(set (match_operand:SI 0 "register_operand" "=r")
 (unspec:SI [(match_operand 1 "tie_symbolic_operand" "")] UNSPEC_TLSIE))
- (clobber (reg:SI 1))]
+ (clobber (reg:SI 1))
+ (use (reg:SI 27))]
 ""
 "*
 {
- if (flag_pic)
- return \"addil LT'%1-$tls_ieoff$,%%r19\;ldw RT'%1-$tls_ieoff$(%%r1),%0\";
- else
- return \"addil LR'%1-$tls_ieoff$,%%r27\;ldw RR'%1-$tls_ieoff$(%%r1),%0\";
+ return \"addil LR'%1-$tls_ieoff$,%%r27\;ldw RR'%1-$tls_ieoff$(%%r1),%0\";
+}"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
+(define_insn "tie_load_pic"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "tie_symbolic_operand" "")] UNSPEC_TLSIE_PIC))
+ (clobber (reg:SI 1))
+ (use (reg:SI 19))]
+ ""
+ "*
+{
+ return \"addil LT'%1-$tls_ieoff$,%%r19\;ldw RT'%1-$tls_ieoff$(%%r1),%0\";
+}"
 [(set_attr "type" "multi")
 (set_attr "length" "8")])