;; Machine description of the Mitsubishi M32R cpu for GNU C compiler
-;; Copyright (C) 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
+;; Copyright (C) 1996, 1997, 1998, 1999, 2001 Free Software Foundation, Inc.
;; This file is part of GNU CC.
(define_attr "m32r" "no,yes"
(const (symbol_ref "(TARGET_M32R != 0)")))
-
-
\f
;; ::::::::::::::::::::
;; ::
3 0
[(eq_attr "insn_size" "short")])
-
-\f
-;; Instruction grouping
-
\f
;; Expand prologue as RTL
(define_expand "prologue"
;; ??? Do we need a const_double constraint here for large unsigned values?
(define_insn "*movsi_insn"
- [(set (match_operand:SI 0 "move_dest_operand" "=r,r,r,r,r,r,r,r,r,T,U,m")
+ [(set (match_operand:SI 0 "move_dest_operand" "=r,r,r,r,r,r,r,r,r,T,S,m")
(match_operand:SI 1 "move_src_operand" "r,I,J,MQ,L,n,T,U,m,r,r,r"))]
"register_operand (operands[0], SImode) || register_operand (operands[1], SImode)"
"*
return \"mv %0,%1\";
case MEM:
+ if (GET_CODE (XEXP (operands[1], 0)) == POST_INC
+ && XEXP (XEXP (operands[1], 0), 0) == stack_pointer_rtx)
+ return \"pop %0\";
+
return \"ld %0,%1\";
case CONST_INT:
else if (GET_CODE (operands[0]) == MEM
&& (GET_CODE (operands[1]) == REG || GET_CODE (operands[1]) == SUBREG))
- return \"st %1,%0\";
+ {
+ if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
+ && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx)
+ return \"push %1\";
+
+ return \"st %1,%0\";
+ }
abort ();
}"
}")
+;; SF moves gain extra alternatives: the source now distinguishes the
+;; m32r-specific memory constraints (U, S) from general "m", and a matching
+;; store alternative is added, so the 2-byte ld/st encodings get correct
+;; "length" attributes.  NOTE(review): U/S are presumably the post-inc /
+;; pre-dec address forms (see the push/pop handling added to *movsi_insn)
+;; -- confirm against the EXTRA_CONSTRAINT definitions in m32r.h.
(define_insn "*movsf_insn"
-  [(set (match_operand:SF 0 "move_dest_operand" "=r,r,r,r,T,m")
-	(match_operand:SF 1 "move_src_operand" "r,F,T,m,r,r"))]
+  [(set (match_operand:SF 0 "move_dest_operand" "=r,r,r,r,r,T,S,m")
+	(match_operand:SF 1 "move_src_operand" "r,F,U,S,m,r,r,r"))]
  "register_operand (operands[0], SFmode) || register_operand (operands[1], SFmode)"
  "@
   mv %0,%1
   #
   ld %0,%1
   ld %0,%1
+   ld %0,%1
+   st %1,%0
   st %1,%0
   st %1,%0"
  ;; ??? Length of alternative 1 is either 2, 4 or 8.
-  [(set_attr "type" "int2,multi,load2,load4,store2,store4")
-   (set_attr "length" "2,8,2,4,2,4")])
+  [(set_attr "type" "int2,multi,load2,load2,load4,store2,store2,store4")
+   (set_attr "length" "2,8,2,2,4,2,2,4")])
(define_split
[(set (match_operand:SF 0 "register_operand" "")
(define_insn "zero_extendqihi2"
[(set (match_operand:HI 0 "register_operand" "=r,r,r")
- (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "r,T,m")))]
+ (zero_extend:HI (match_operand:QI 1 "extend_operand" "r,T,m")))]
""
"@
and3 %0,%1,%#255
(define_insn "zero_extendqisi2"
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
- (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,T,m")))]
+ (zero_extend:SI (match_operand:QI 1 "extend_operand" "r,T,m")))]
""
"@
and3 %0,%1,%#255
(define_insn "zero_extendhisi2"
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
- (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "r,T,m")))]
+ (zero_extend:SI (match_operand:HI 1 "extend_operand" "r,T,m")))]
""
"@
and3 %0,%1,%#65535
[(set_attr "type" "int4,load2,load4")
(set_attr "length" "4,2,4")])
\f
-;; Sign extension instructions.
-;; ??? See v850.md.
-
-;; These patterns originally accepted general_operands, however, slightly
-;; better code is generated by only accepting register_operands, and then
-;; letting combine generate the lds[hb] insns.
-;; [This comment copied from sparc.md, I think.]
+;; Signed conversions from a smaller integer to a larger integer
+;;
+;; The old scheme (define_expand emitting an explicit shift pair, plus a
+;; separate memory-only *sign_extend*_insn) is replaced by a single named
+;; insn per conversion.  The register alternative ("0", source tied to
+;; dest) is emitted as "#" and split after reload into a shift-left /
+;; arithmetic-shift-right pair on the SImode view of the register; memory
+;; sources use the sign-extending ldb/ldh loads directly.
+(define_insn "extendqihi2"
+  [(set (match_operand:HI 0 "register_operand" "=r,r,r")
+	(sign_extend:HI (match_operand:QI 1 "extend_operand" "0,T,m")))]
+  ""
+  "@
+   #
+   ldb %0,%1
+   ldb %0,%1"
+  [(set_attr "type" "multi,load2,load4")
+   (set_attr "length" "2,2,4")])
-(define_expand "extendqihi2"
+;; Post-reload split for the register-register alternative above:
+;; sign-extend in place via (x << 24) >> 24 on the SImode low part.
+;; NOTE(review): gen_rtx (CONST_INT, VOIDmode, n) is normally spelled
+;; GEN_INT (n) so the shared CONST_INT rtxs are reused -- confirm intent.
+(define_split
  [(set (match_operand:HI 0 "register_operand" "")
	(sign_extend:HI (match_operand:QI 1 "register_operand" "")))]
-  ""
+  "reload_completed"
+  [(match_dup 2)
+   (match_dup 3)]
  "
{
-  rtx temp = gen_reg_rtx (SImode);
-  rtx shift_24 = GEN_INT (24);
-  int op1_subword = 0;
-  int op0_subword = 0;
+  rtx op0 = gen_lowpart (SImode, operands[0]);
+  rtx shift = gen_rtx (CONST_INT, VOIDmode, 24);
-  if (GET_CODE (operand1) == SUBREG)
-    {
-      op1_subword = SUBREG_WORD (operand1);
-      operand1 = XEXP (operand1, 0);
-    }
-  if (GET_CODE (operand0) == SUBREG)
-    {
-      op0_subword = SUBREG_WORD (operand0);
-      operand0 = XEXP (operand0, 0);
-    }
-  emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1,
-						op1_subword),
-			  shift_24));
-  if (GET_MODE (operand0) != SImode)
-    operand0 = gen_rtx_SUBREG (SImode, operand0, op0_subword);
-  emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
-  DONE;
+  operands[2] = gen_ashlsi3 (op0, op0, shift);
+  operands[3] = gen_ashrsi3 (op0, op0, shift);
}")
-(define_insn "*sign_extendqihi2_insn"
-  [(set (match_operand:HI 0 "register_operand" "=r,r")
-	(sign_extend:HI (match_operand:QI 1 "memory_operand" "T,m")))]
+(define_insn "extendqisi2"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+	(sign_extend:SI (match_operand:QI 1 "extend_operand" "0,T,m")))]
  ""
-  "ldb %0,%1"
-  [(set_attr "type" "load2,load4")
-   (set_attr "length" "2,4")])
+  "@
+   #
+   ldb %0,%1
+   ldb %0,%1"
+  [(set_attr "type" "multi,load2,load4")
+   (set_attr "length" "4,2,4")])
-(define_expand "extendqisi2"
+(define_split
  [(set (match_operand:SI 0 "register_operand" "")
	(sign_extend:SI (match_operand:QI 1 "register_operand" "")))]
-  ""
+  "reload_completed"
+  [(match_dup 2)
+   (match_dup 3)]
  "
{
-  rtx temp = gen_reg_rtx (SImode);
-  rtx shift_24 = GEN_INT (24);
-  int op1_subword = 0;
-
-  if (GET_CODE (operand1) == SUBREG)
-    {
-      op1_subword = SUBREG_WORD (operand1);
-      operand1 = XEXP (operand1, 0);
-    }
+  rtx op0 = gen_lowpart (SImode, operands[0]);
+  rtx shift = gen_rtx (CONST_INT, VOIDmode, 24);
-  emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1, op1_subword),
-			  shift_24));
-  emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
-  DONE;
+  operands[2] = gen_ashlsi3 (op0, op0, shift);
+  operands[3] = gen_ashrsi3 (op0, op0, shift);
}")
-(define_insn "*sign_extendqisi2_insn"
-  [(set (match_operand:SI 0 "register_operand" "=r,r")
-	(sign_extend:SI (match_operand:QI 1 "memory_operand" "T,m")))]
+(define_insn "extendhisi2"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+	(sign_extend:SI (match_operand:HI 1 "extend_operand" "0,T,m")))]
  ""
-  "ldb %0,%1"
-  [(set_attr "type" "load2,load4")
-   (set_attr "length" "2,4")])
+  "@
+   #
+   ldh %0,%1
+   ldh %0,%1"
+  [(set_attr "type" "multi,load2,load4")
+   (set_attr "length" "4,2,4")])
-(define_expand "extendhisi2"
+;; Same in-place trick for HI -> SI, with a shift count of 16.
+(define_split
  [(set (match_operand:SI 0 "register_operand" "")
	(sign_extend:SI (match_operand:HI 1 "register_operand" "")))]
-  ""
+  "reload_completed"
+  [(match_dup 2)
+   (match_dup 3)]
  "
{
-  rtx temp = gen_reg_rtx (SImode);
-  rtx shift_16 = GEN_INT (16);
-  int op1_subword = 0;
+  rtx op0 = gen_lowpart (SImode, operands[0]);
+  rtx shift = gen_rtx (CONST_INT, VOIDmode, 16);
-  if (GET_CODE (operand1) == SUBREG)
-    {
-      op1_subword = SUBREG_WORD (operand1);
-      operand1 = XEXP (operand1, 0);
-    }
-
-  emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1, op1_subword),
-			  shift_16));
-  emit_insn (gen_ashrsi3 (operand0, temp, shift_16));
-  DONE;
+  operands[2] = gen_ashlsi3 (op0, op0, shift);
+  operands[3] = gen_ashrsi3 (op0, op0, shift);
}")
-
-(define_insn "*sign_extendhisi2_insn"
-  [(set (match_operand:SI 0 "register_operand" "=r,r")
-	(sign_extend:SI (match_operand:HI 1 "memory_operand" "T,m")))]
-  ""
-  "ldh %0,%1"
-  [(set_attr "type" "load2,load4")
-   (set_attr "length" "2,4")])
\f
;; Arithmetic instructions.
(define_insn "andsi3"
  [(set (match_operand:SI 0 "register_operand" "=r,r")
	(and:SI (match_operand:SI 1 "register_operand" "%0,r")
-	        (match_operand:SI 2 "nonmemory_operand" "r,K")))]
+	        (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
  ""
-  "@
-   and %0,%2
-   and3 %0,%1,%#%2\\t; %X2"
+  "*
+{
+  /* If we are worried about space, see if we can break this up into two
+     short instructions, which might eliminate a NOP being inserted. */
+  if (optimize_size
+      && m32r_not_same_reg (operands[0], operands[1])
+      && GET_CODE (operands[2]) == CONST_INT
+      && INT8_P (INTVAL (operands[2])))
+    return \"#\";
+
+  else if (GET_CODE (operands[2]) == CONST_INT)
+    return \"and3 %0,%1,%#%X2\";
+
+  return \"and %0,%2\";
+}"
  [(set_attr "type" "int2,int4")
   (set_attr "length" "2,4")])
+;; -Os split: when the destination differs from operand 1 and the
+;; immediate fits in 8 bits, emit a move-immediate plus a 2-byte
+;; register-register AND instead of one 4-byte and3, so both halves are
+;; short insns (see the "#" case in the output template above).
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+	(and:SI (match_operand:SI 1 "register_operand" "")
+		(match_operand:SI 2 "int8_operand" "")))]
+  "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
+  [(set (match_dup 0) (match_dup 2))
+   (set (match_dup 0) (and:SI (match_dup 1) (match_dup 0)))]
+  "")
+
(define_insn "iorsi3"
  [(set (match_operand:SI 0 "register_operand" "=r,r")
	(ior:SI (match_operand:SI 1 "register_operand" "%0,r")
-	        (match_operand:SI 2 "nonmemory_operand" "r,K")))]
+	        (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
  ""
-  "@
-   or %0,%2
-   or3 %0,%1,%#%2\\t; %X2"
+  "*
+{
+  /* If we are worried about space, see if we can break this up into two
+     short instructions, which might eliminate a NOP being inserted. */
+  if (optimize_size
+      && m32r_not_same_reg (operands[0], operands[1])
+      && GET_CODE (operands[2]) == CONST_INT
+      && INT8_P (INTVAL (operands[2])))
+    return \"#\";
+
+  else if (GET_CODE (operands[2]) == CONST_INT)
+    return \"or3 %0,%1,%#%X2\";
+
+  return \"or %0,%2\";
+}"
  [(set_attr "type" "int2,int4")
   (set_attr "length" "2,4")])
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+	(ior:SI (match_operand:SI 1 "register_operand" "")
+		(match_operand:SI 2 "int8_operand" "")))]
+  "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
+  [(set (match_dup 0) (match_dup 2))
+   (set (match_dup 0) (ior:SI (match_dup 1) (match_dup 0)))]
+  "")
+
(define_insn "xorsi3"
  [(set (match_operand:SI 0 "register_operand" "=r,r")
	(xor:SI (match_operand:SI 1 "register_operand" "%0,r")
-	        (match_operand:SI 2 "nonmemory_operand" "r,K")))]
+	        (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
  ""
-  "@
-   xor %0,%2
-   xor3 %0,%1,%#%2\\t; %X2"
+  "*
+{
+  /* If we are worried about space, see if we can break this up into two
+     short instructions, which might eliminate a NOP being inserted. */
+  if (optimize_size
+      && m32r_not_same_reg (operands[0], operands[1])
+      && GET_CODE (operands[2]) == CONST_INT
+      && INT8_P (INTVAL (operands[2])))
+    return \"#\";
+
+  else if (GET_CODE (operands[2]) == CONST_INT)
+    return \"xor3 %0,%1,%#%X2\";
+
+  return \"xor %0,%2\";
+}"
  [(set_attr "type" "int2,int4")
   (set_attr "length" "2,4")])
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+	(xor:SI (match_operand:SI 1 "register_operand" "")
+		(match_operand:SI 2 "int8_operand" "")))]
+  "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
+  [(set (match_dup 0) (match_dup 2))
+   (set (match_dup 0) (xor:SI (match_dup 1) (match_dup 0)))]
+  "")
+
(define_insn "negsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (match_operand:SI 1 "register_operand" "r")))]
(define_expand "cmpsi"
[(set (reg:SI 17)
- (compare:SI (match_operand:SI 0 "register_operand" "")
- (match_operand:SI 1 "nonmemory_operand" "")))]
+ (compare:CC (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "reg_or_cmp_int16_operand" "")))]
""
"
{
DONE;
}")
-
;; The cmp_xxx_insn patterns set the condition bit to the result of the
;; comparison. There isn't a "compare equal" instruction so cmp_eqsi_insn
;; is quite inefficient. However, it is rarely used.
(define_insn "cmp_ltusi_insn"
[(set (reg:SI 17)
(ltu:SI (match_operand:SI 0 "register_operand" "r,r")
- (match_operand:SI 1 "reg_or_uint16_operand" "r,K")))]
+ (match_operand:SI 1 "reg_or_int16_operand" "r,J")))]
""
"@
cmpu %0,%1
""
"
{
- operands[1] = gen_compare ((int)EQ, m32r_compare_op0, m32r_compare_op1, FALSE);
+ operands[1] = gen_compare (EQ, m32r_compare_op0, m32r_compare_op1, FALSE);
}")
(define_expand "bne"
""
"
{
- operands[1] = gen_compare ((int)NE, m32r_compare_op0, m32r_compare_op1, FALSE);
+ operands[1] = gen_compare (NE, m32r_compare_op0, m32r_compare_op1, FALSE);
}")
(define_expand "bgt"
""
"
{
- operands[1] = gen_compare ((int)GT, m32r_compare_op0, m32r_compare_op1, FALSE);
+ operands[1] = gen_compare (GT, m32r_compare_op0, m32r_compare_op1, FALSE);
}")
(define_expand "ble"
""
"
{
- operands[1] = gen_compare ((int)LE, m32r_compare_op0, m32r_compare_op1, FALSE);
+ operands[1] = gen_compare (LE, m32r_compare_op0, m32r_compare_op1, FALSE);
}")
(define_expand "bge"
""
"
{
- operands[1] = gen_compare ((int)GE, m32r_compare_op0, m32r_compare_op1, FALSE);
+ operands[1] = gen_compare (GE, m32r_compare_op0, m32r_compare_op1, FALSE);
}")
(define_expand "blt"
""
"
{
- operands[1] = gen_compare ((int)LT, m32r_compare_op0, m32r_compare_op1, FALSE);
+ operands[1] = gen_compare (LT, m32r_compare_op0, m32r_compare_op1, FALSE);
}")
(define_expand "bgtu"
""
"
{
- operands[1] = gen_compare ((int)GTU, m32r_compare_op0, m32r_compare_op1, FALSE);
+ operands[1] = gen_compare (GTU, m32r_compare_op0, m32r_compare_op1, FALSE);
}")
(define_expand "bleu"
""
"
{
- operands[1] = gen_compare ((int)LEU, m32r_compare_op0, m32r_compare_op1, FALSE);
+ operands[1] = gen_compare (LEU, m32r_compare_op0, m32r_compare_op1, FALSE);
}")
(define_expand "bgeu"
""
"
{
- operands[1] = gen_compare ((int)GEU, m32r_compare_op0, m32r_compare_op1, FALSE);
+ operands[1] = gen_compare (GEU, m32r_compare_op0, m32r_compare_op1, FALSE);
}")
(define_expand "bltu"
""
"
{
- operands[1] = gen_compare ((int)LTU, m32r_compare_op0, m32r_compare_op1, FALSE);
+ operands[1] = gen_compare (LTU, m32r_compare_op0, m32r_compare_op1, FALSE);
}")
;; Now match both normal and inverted jump.
""
"*
{
- char *br,*invbr;
+ const char *br,*invbr;
char asmtext[40];
switch (GET_CODE (operands[1]))
""
"*
{
- char *br,*invbr;
+ const char *br,*invbr;
char asmtext[40];
switch (GET_CODE (operands[1]))
(const_int 4)
(const_int 8)))])
\f
+;; S<cc> operations to set a register to 1/0 based on a comparison
+
+(define_expand "seq"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (! register_operand (op1, mode))
+    op1 = force_reg (mode, op1);
+
+  if (GET_CODE (op2) == CONST_INT && INTVAL (op2) == 0)
+    {
+      emit_insn (gen_seq_zero_insn (op0, op1));
+      DONE;
+    }
+
+  if (! reg_or_eq_int16_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_seq_insn (op0, op1, op2));
+  DONE;
+}")
+
+;; x == 0 is computed as (unsigned) x < 1 via cmp_ltusi_insn; the
+;; condition bit (reg 17) is then copied into the result with movcc_insn.
+(define_insn "seq_zero_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(eq:SI (match_operand:SI 1 "register_operand" "r")
+	       (const_int 0)))
+   (clobber (reg:SI 17))]
+  "TARGET_M32R"
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "6")])
+
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+	(eq:SI (match_operand:SI 1 "register_operand" "")
+	       (const_int 0)))
+   (clobber (reg:SI 17))]
+  "TARGET_M32R"
+  [(match_dup 3)]
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = operands[1];
+
+  start_sequence ();
+  emit_insn (gen_cmp_ltusi_insn (op1, GEN_INT (1)));
+  emit_insn (gen_movcc_insn (op0));
+  operands[3] = gen_sequence ();
+  end_sequence ();
+}")
+
+(define_insn "seq_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,??r,r")
+	(eq:SI (match_operand:SI 1 "register_operand" "r,r,r,r")
+	       (match_operand:SI 2 "reg_or_eq_int16_operand" "r,r,r,PK")))
+   (clobber (reg:SI 17))
+   (clobber (match_scratch:SI 3 "=1,2,&r,r"))]
+  "TARGET_M32R"
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "8,8,10,10")])
+
+;; Reduce x == y to (x - y) == 0: subtract via an add of -y when y is a
+;; suitable constant, otherwise xor, then reuse the unsigned "< 1" trick.
+;; The scratch (operand 3) may be tied to operand 1 or 2 (alternatives 0/1).
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+	(eq:SI (match_operand:SI 1 "register_operand" "")
+	       (match_operand:SI 2 "reg_or_eq_int16_operand" "")))
+   (clobber (reg:SI 17))
+   (clobber (match_scratch:SI 3 ""))]
+  "TARGET_M32R && reload_completed"
+  [(match_dup 4)]
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = operands[1];
+  rtx op2 = operands[2];
+  rtx op3 = operands[3];
+  HOST_WIDE_INT value;
+
+  if (GET_CODE (op2) == REG && GET_CODE (op3) == REG
+      && REGNO (op2) == REGNO (op3))
+    {
+      op1 = operands[2];
+      op2 = operands[1];
+    }
+
+  start_sequence ();
+  if (GET_CODE (op1) == REG && GET_CODE (op3) == REG
+      && REGNO (op1) != REGNO (op3))
+    {
+      emit_move_insn (op3, op1);
+      op1 = op3;
+    }
+
+  if (GET_CODE (op2) == CONST_INT && (value = INTVAL (op2)) != 0
+      && CMP_INT16_P (value))
+    emit_insn (gen_addsi3 (op3, op1, GEN_INT (-value)));
+  else
+    emit_insn (gen_xorsi3 (op3, op1, op2));
+
+  emit_insn (gen_cmp_ltusi_insn (op3, GEN_INT (1)));
+  emit_insn (gen_movcc_insn (op0));
+  operands[4] = gen_sequence ();
+  end_sequence ();
+}")
+
+;; x != y is computed as (x ^ y) != 0.  NOTE(review): when op2 is
+;; const 0 the expander reaches the final FAIL, leaving sne x,0 to the
+;; generic expanders -- confirm this is intentional and not an inverted
+;; condition.
+(define_expand "sne"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (GET_CODE (op2) != CONST_INT
+      || (INTVAL (op2) != 0 && UINT16_P (INTVAL (op2))))
+    {
+      rtx reg;
+
+      if (reload_completed || reload_in_progress)
+	FAIL;
+
+      reg = gen_reg_rtx (SImode);
+      emit_insn (gen_xorsi3 (reg, op1, op2));
+      op1 = reg;
+
+      if (! register_operand (op1, mode))
+	op1 = force_reg (mode, op1);
+
+      emit_insn (gen_sne_zero_insn (op0, op1));
+      DONE;
+    }
+  else
+    FAIL;
+}")
+
+;; x != 0 via unsigned compare: 0 <u x sets the condition bit exactly
+;; when x is non-zero; the bit is then copied to the result.
+(define_insn "sne_zero_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(ne:SI (match_operand:SI 1 "register_operand" "r")
+	       (const_int 0)))
+   (clobber (reg:SI 17))
+   (clobber (match_scratch:SI 2 "=&r"))]
+  ""
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "6")])
+
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+	(ne:SI (match_operand:SI 1 "register_operand" "")
+	       (const_int 0)))
+   (clobber (reg:SI 17))
+   (clobber (match_scratch:SI 2 ""))]
+  "reload_completed"
+  [(set (match_dup 2)
+	(const_int 0))
+   (set (reg:SI 17)
+	(ltu:SI (match_dup 2)
+		(match_dup 1)))
+   (set (match_dup 0)
+	(reg:SI 17))]
+  "")
+
+;; Signed less-than is the native compare: the split just sets the
+;; condition register to op1 < op2 and copies it into the result.
+(define_expand "slt"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (! register_operand (op1, mode))
+    op1 = force_reg (mode, op1);
+
+  if (! reg_or_int16_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_slt_insn (op0, op1, op2));
+  DONE;
+}")
+
+(define_insn "slt_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+	(lt:SI (match_operand:SI 1 "register_operand" "r,r")
+	       (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
+   (clobber (reg:SI 17))]
+  ""
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "4,6")])
+
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+	(lt:SI (match_operand:SI 1 "register_operand" "")
+	       (match_operand:SI 2 "reg_or_int16_operand" "")))
+   (clobber (reg:SI 17))]
+  ""
+  [(set (reg:SI 17)
+	(lt:SI (match_dup 1)
+	       (match_dup 2)))
+   (set (match_dup 0)
+	(reg:SI 17))]
+  "")
+
+;; x <= C is rewritten as x < C+1 when C+1 is still representable
+;; (C == INT_MAX makes the result trivially 1); x <= y in registers is
+;; computed as y < x with the 1/0 result inverted by the splits below.
+(define_expand "sle"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (! register_operand (op1, mode))
+    op1 = force_reg (mode, op1);
+
+  if (GET_CODE (op2) == CONST_INT)
+    {
+      HOST_WIDE_INT value = INTVAL (op2);
+      if (value >= 2147483647)
+	{
+	  emit_move_insn (op0, GEN_INT (1));
+	  DONE;
+	}
+
+      op2 = GEN_INT (value+1);
+      if (value < -32768 || value >= 32767)
+	op2 = force_reg (mode, op2);
+
+      emit_insn (gen_slt_insn (op0, op1, op2));
+      DONE;
+    }
+
+  if (! register_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_sle_insn (op0, op1, op2));
+  DONE;
+}")
+
+(define_insn "sle_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(le:SI (match_operand:SI 1 "register_operand" "r")
+	       (match_operand:SI 2 "register_operand" "r")))
+   (clobber (reg:SI 17))]
+  ""
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "8")])
+
+;; Invert op2 < op1 with "xor 1" when optimizing for speed.
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+	(le:SI (match_operand:SI 1 "register_operand" "")
+	       (match_operand:SI 2 "register_operand" "")))
+   (clobber (reg:SI 17))]
+  "!optimize_size"
+  [(set (reg:SI 17)
+	(lt:SI (match_dup 2)
+	       (match_dup 1)))
+   (set (match_dup 0)
+	(reg:SI 17))
+   (set (match_dup 0)
+	(xor:SI (match_dup 0)
+		(const_int 1)))]
+  "")
+
+;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
+;; xor reg,reg,1 which might eliminate a NOP being inserted.
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+	(le:SI (match_operand:SI 1 "register_operand" "")
+	       (match_operand:SI 2 "register_operand" "")))
+   (clobber (reg:SI 17))]
+  "optimize_size"
+  [(set (reg:SI 17)
+	(lt:SI (match_dup 2)
+	       (match_dup 1)))
+   (set (match_dup 0)
+	(reg:SI 17))
+   (set (match_dup 0)
+	(plus:SI (match_dup 0)
+		 (const_int -1)))
+   (set (match_dup 0)
+	(neg:SI (match_dup 0)))]
+  "")
+
+;; x > y is y < x with the operands swapped into slt_insn; no dedicated
+;; pattern is needed.
+(define_expand "sgt"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (! register_operand (op1, mode))
+    op1 = force_reg (mode, op1);
+
+  if (! register_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_slt_insn (op0, op2, op1));
+  DONE;
+}")
+
+;; x >= y is the inverse of x < y: compute the lt, then invert the 1/0
+;; result (xor 1 for speed, negate-of-decrement for size; see splits).
+(define_expand "sge"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (! register_operand (op1, mode))
+    op1 = force_reg (mode, op1);
+
+  if (! reg_or_int16_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_sge_insn (op0, op1, op2));
+  DONE;
+}")
+
+(define_insn "sge_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+	(ge:SI (match_operand:SI 1 "register_operand" "r,r")
+	       (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
+   (clobber (reg:SI 17))]
+  ""
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "8,10")])
+
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+	(ge:SI (match_operand:SI 1 "register_operand" "")
+	       (match_operand:SI 2 "reg_or_int16_operand" "")))
+   (clobber (reg:SI 17))]
+  "!optimize_size"
+  [(set (reg:SI 17)
+	(lt:SI (match_dup 1)
+	       (match_dup 2)))
+   (set (match_dup 0)
+	(reg:SI 17))
+   (set (match_dup 0)
+	(xor:SI (match_dup 0)
+		(const_int 1)))]
+  "")
+
+;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
+;; xor reg,reg,1 which might eliminate a NOP being inserted.
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+	(ge:SI (match_operand:SI 1 "register_operand" "")
+	       (match_operand:SI 2 "reg_or_int16_operand" "")))
+   (clobber (reg:SI 17))]
+  "optimize_size"
+  [(set (reg:SI 17)
+	(lt:SI (match_dup 1)
+	       (match_dup 2)))
+   (set (match_dup 0)
+	(reg:SI 17))
+   (set (match_dup 0)
+	(plus:SI (match_dup 0)
+		 (const_int -1)))
+   (set (match_dup 0)
+	(neg:SI (match_dup 0)))]
+  "")
+
+;; Unsigned less-than: direct ltu compare into the condition register,
+;; then copy the bit out.
+(define_expand "sltu"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (! register_operand (op1, mode))
+    op1 = force_reg (mode, op1);
+
+  if (! reg_or_int16_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_sltu_insn (op0, op1, op2));
+  DONE;
+}")
+
+(define_insn "sltu_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+	(ltu:SI (match_operand:SI 1 "register_operand" "r,r")
+		(match_operand:SI 2 "reg_or_int16_operand" "r,J")))
+   (clobber (reg:SI 17))]
+  ""
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "6,8")])
+
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+	(ltu:SI (match_operand:SI 1 "register_operand" "")
+		(match_operand:SI 2 "reg_or_int16_operand" "")))
+   (clobber (reg:SI 17))]
+  ""
+  [(set (reg:SI 17)
+	(ltu:SI (match_dup 1)
+		(match_dup 2)))
+   (set (match_dup 0)
+	(reg:SI 17))]
+  "")
+
+;; Unsigned <=: constants become an sltu against C+1 when in range;
+;; register operands use sleu_insn, split as inverted op2 <u op1.
+(define_expand "sleu"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (GET_CODE (op2) == CONST_INT)
+    {
+      HOST_WIDE_INT value = INTVAL (op2);
+      if (value >= 2147483647)
+	{
+	  emit_move_insn (op0, GEN_INT (1));
+	  DONE;
+	}
+
+      op2 = GEN_INT (value+1);
+      if (value < 0 || value >= 32767)
+	op2 = force_reg (mode, op2);
+
+      emit_insn (gen_sltu_insn (op0, op1, op2));
+      DONE;
+    }
+
+  if (! register_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_sleu_insn (op0, op1, op2));
+  DONE;
+}")
+
+(define_insn "sleu_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(leu:SI (match_operand:SI 1 "register_operand" "r")
+		(match_operand:SI 2 "register_operand" "r")))
+   (clobber (reg:SI 17))]
+  ""
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "8")])
+
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+	(leu:SI (match_operand:SI 1 "register_operand" "")
+		(match_operand:SI 2 "register_operand" "")))
+   (clobber (reg:SI 17))]
+  "!optimize_size"
+  [(set (reg:SI 17)
+	(ltu:SI (match_dup 2)
+		(match_dup 1)))
+   (set (match_dup 0)
+	(reg:SI 17))
+   (set (match_dup 0)
+	(xor:SI (match_dup 0)
+		(const_int 1)))]
+  "")
+
+;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
+;; xor reg,reg,1 which might eliminate a NOP being inserted.
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+	(leu:SI (match_operand:SI 1 "register_operand" "")
+		(match_operand:SI 2 "register_operand" "")))
+   (clobber (reg:SI 17))]
+  "optimize_size"
+  [(set (reg:SI 17)
+	(ltu:SI (match_dup 2)
+		(match_dup 1)))
+   (set (match_dup 0)
+	(reg:SI 17))
+   (set (match_dup 0)
+	(plus:SI (match_dup 0)
+		 (const_int -1)))
+   (set (match_dup 0)
+	(neg:SI (match_dup 0)))]
+  "")
+
+;; x >u y is y <u x with the operands swapped into sltu_insn.
+(define_expand "sgtu"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (! register_operand (op1, mode))
+    op1 = force_reg (mode, op1);
+
+  if (! register_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_sltu_insn (op0, op2, op1));
+  DONE;
+}")
+
+;; Unsigned >=: inverse of ltu; compute op1 <u op2 and invert the 1/0
+;; result (xor 1 for speed, negate-of-decrement for size).
+(define_expand "sgeu"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (! register_operand (op1, mode))
+    op1 = force_reg (mode, op1);
+
+  if (! reg_or_int16_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_sgeu_insn (op0, op1, op2));
+  DONE;
+}")
+
+(define_insn "sgeu_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+	(geu:SI (match_operand:SI 1 "register_operand" "r,r")
+		(match_operand:SI 2 "reg_or_int16_operand" "r,J")))
+   (clobber (reg:SI 17))]
+  ""
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "8,10")])
+
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+	(geu:SI (match_operand:SI 1 "register_operand" "")
+		(match_operand:SI 2 "reg_or_int16_operand" "")))
+   (clobber (reg:SI 17))]
+  "!optimize_size"
+  [(set (reg:SI 17)
+	(ltu:SI (match_dup 1)
+		(match_dup 2)))
+   (set (match_dup 0)
+	(reg:SI 17))
+   (set (match_dup 0)
+	(xor:SI (match_dup 0)
+		(const_int 1)))]
+  "")
+
+;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
+;; xor reg,reg,1 which might eliminate a NOP being inserted.
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+	(geu:SI (match_operand:SI 1 "register_operand" "")
+		(match_operand:SI 2 "reg_or_int16_operand" "")))
+   (clobber (reg:SI 17))]
+  "optimize_size"
+  [(set (reg:SI 17)
+	(ltu:SI (match_dup 1)
+		(match_dup 2)))
+   (set (match_dup 0)
+	(reg:SI 17))
+   (set (match_dup 0)
+	(plus:SI (match_dup 0)
+		 (const_int -1)))
+   (set (match_dup 0)
+	(neg:SI (match_dup 0)))]
+  "")
+
+;; Copy the condition bit (reg 17, the cbr control register) into a GPR;
+;; used by all of the s<cc> splits above.
+(define_insn "movcc_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(reg:SI 17))]
+  ""
+  "mvfc %0, cbr"
+  [(set_attr "type" "misc")
+   (set_attr "length" "2")])
+
+\f
;; Unconditional and other jump instructions.
(define_insn "jump"
"jmp %a0"
[(set_attr "type" "uncond_branch")
(set_attr "length" "2")])
+
+;; Bare return when no epilogue work is needed; direct_return () (defined
+;; in the backend C file) gates when a lone "jmp lr" suffices.
+(define_insn "return"
+  [(return)]
+  "direct_return ()"
+  "jmp lr"
+  [(set_attr "type" "uncond_branch")
+   (set_attr "length" "2")])
(define_insn "tablejump"
[(set (pc) (match_operand:SI 0 "address_operand" "p"))
[(set_attr "type" "int2")
(set_attr "length" "2")])
\f
+;; Speed up fabs and provide correct sign handling for -0
+;;
+;; The sign bit of the high word is cleared by shifting it out left and
+;; shifting back right logically (x << 1 >> 1), which maps +0 and -0 both
+;; to +0.  NOTE(review): for SFmode, gen_highpart (SImode, reg) on a
+;; 4-byte value yields the whole register -- confirm that is the intent.
+(define_insn "absdf2"
+  [(set (match_operand:DF 0 "register_operand" "=r")
+	(abs:DF (match_operand:DF 1 "register_operand" "0")))]
+  ""
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "4")])
+
+(define_split
+  [(set (match_operand:DF 0 "register_operand" "")
+	(abs:DF (match_operand:DF 1 "register_operand" "")))]
+  "reload_completed"
+  [(set (match_dup 2)
+	(ashift:SI (match_dup 2)
+		   (const_int 1)))
+   (set (match_dup 2)
+	(lshiftrt:SI (match_dup 2)
+		     (const_int 1)))]
+  "operands[2] = gen_highpart (SImode, operands[0]);")
+
+(define_insn "abssf2"
+  [(set (match_operand:SF 0 "register_operand" "=r")
+	(abs:SF (match_operand:SF 1 "register_operand" "0")))]
+  ""
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "4")])
+
+(define_split
+  [(set (match_operand:SF 0 "register_operand" "")
+	(abs:SF (match_operand:SF 1 "register_operand" "")))]
+  "reload_completed"
+  [(set (match_dup 2)
+	(ashift:SI (match_dup 2)
+		   (const_int 1)))
+   (set (match_dup 2)
+	(lshiftrt:SI (match_dup 2)
+		     (const_int 1)))]
+  "operands[2] = gen_highpart (SImode, operands[0]);")
+\f
;; Conditional move instructions
;; Based on those done for the d10v
-
(define_expand "movsicc"
[
(set (match_operand:SI 0 "register_operand" "r")
FAIL;
/* Generate the comparision that will set the carry flag. */
- operands[1] = gen_compare ((int)GET_CODE (operands[1]), m32r_compare_op0,
+ operands[1] = gen_compare (GET_CODE (operands[1]), m32r_compare_op0,
m32r_compare_op1, TRUE);
/* See other movsicc pattern below for reason why. */
- emit_insn (gen_blockage());
+ emit_insn (gen_blockage ());
}")
;; Generate the conditional instructions based on how the carry flag is examined.
(define_insn "*movsicc_internal"
- [(set (match_operand:SI 0 "register_operand" "r")
+ [(set (match_operand:SI 0 "register_operand" "=r")
(if_then_else:SI (match_operand 1 "carry_compare_operand" "")
(match_operand:SI 2 "conditional_move_operand" "O")
(match_operand:SI 3 "conditional_move_operand" "O")
]
)
-(define_insn "movcc_insn"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (reg:SI 17))]
- ""
- "mvfc %0, cbr"
- [(set_attr "type" "misc")
- (set_attr "length" "2")]
-)
-
\f
;; Split up troublesome insns for better scheduling.
-\f
-;; Peepholes go at the end.
+;; FIXME: Peepholes go at the end.
;; ??? Setting the type attribute may not be useful, but for completeness
;; we do it.
;; Insn generated by block moves

+;; The address registers are now marked in/out ("+r") since the pattern
+;; also sets them to their post-move values, and the output routine no
+;; longer returns the template string -- presumably
+;; m32r_output_block_move's return type changed to void in the backend C
+;; file; confirm there.
(define_insn "movstrsi_internal"
-  [(set (mem:BLK (match_operand:SI 0 "register_operand" "r"))   ;; destination
-	(mem:BLK (match_operand:SI 1 "register_operand" "r")))  ;; source
+  [(set (mem:BLK (match_operand:SI 0 "register_operand" "+r"))  ;; destination
+	(mem:BLK (match_operand:SI 1 "register_operand" "+r"))) ;; source
   (use (match_operand:SI 2 "m32r_block_immediate_operand" "J"));; # bytes to move
   (set (match_dup 0) (plus:SI (match_dup 0) (minus:SI (match_dup 2) (const_int 4))))
   (set (match_dup 1) (plus:SI (match_dup 1) (match_dup 2)))
   (clobber (match_scratch:SI 3 "=&r"))			     ;; temp 1
   (clobber (match_scratch:SI 4 "=&r"))]		     ;; temp 2
  ""
-  "* return m32r_output_block_move (insn, operands);"
+  "* m32r_output_block_move (insn, operands); return \"\"; "
  [(set_attr "type" "store8")
   (set_attr "length" "72")]) ;; Maximum