1 ;; Machine description of the Renesas M32R cpu for GNU C compiler
2 ;; Copyright (C) 1996, 1997, 1998, 1999, 2001, 2003, 2004, 2005
;; Free Software Foundation, Inc.
5 ;; This file is part of GCC.
7 ;; GCC is free software; you can redistribute it and/or modify it
8 ;; under the terms of the GNU General Public License as published
9 ;; by the Free Software Foundation; either version 2, or (at your
10 ;; option) any later version.
12 ;; GCC is distributed in the hope that it will be useful, but WITHOUT
13 ;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 ;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 ;; License for more details.
17 ;; You should have received a copy of the GNU General Public License
18 ;; along with GCC; see the file COPYING. If not, write to
19 ;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
20 ;; Boston, MA 02110-1301, USA.
;; See file "rtl.def" for documentation on define_insn, match_*, et al.
;; UNSPEC_VOLATILE usage
(define_constants
  [(UNSPECV_BLOCKAGE 0)
   (UNSPECV_FLUSH_ICACHE 1)])

;; UNSPEC usage
(define_constants
  [(UNSPEC_LOAD_SDA_BASE 2)
   (UNSPEC_SET_CBIT 3)
   (UNSPEC_PIC_LOAD_ADDR 4)])
;; Insn type.  Used to default other attribute values.
(define_attr "type"
  "int2,int4,load2,load4,load8,store2,store4,store8,shift2,shift4,mul2,div4,uncond_branch,branch,call,multi,misc"
  (const_string "misc"))
(define_attr "length" ""
  (cond [(eq_attr "type" "int2,load2,store2,shift2,mul2")
	 (const_int 2)

	 (eq_attr "type" "int4,load4,store4,shift4,div4")
	 (const_int 4)

	 (eq_attr "type" "multi")
	 (const_int 8)

	 (eq_attr "type" "uncond_branch,branch,call")
	 (const_int 4)]

	(const_int 4)))
59 ;; The length here is the length of a single asm. Unfortunately it might be
60 ;; 2 or 4 so we must allow for 4. That's ok though.
61 (define_asm_attributes
62 [(set_attr "length" "4")
63 (set_attr "type" "multi")])
65 ;; Whether an instruction is short (16-bit) or long (32-bit).
66 (define_attr "insn_size" "short,long"
67 (if_then_else (eq_attr "type" "int2,load2,store2,shift2,mul2")
68 (const_string "short")
69 (const_string "long")))
71 ;; The target CPU we're compiling for.
72 (define_attr "cpu" "m32r,m32r2,m32rx"
73 (cond [(ne (symbol_ref "TARGET_M32RX") (const_int 0))
74 (const_string "m32rx")
75 (ne (symbol_ref "TARGET_M32R2") (const_int 0))
76 (const_string "m32r2")]
77 (const_string "m32r")))
;; Defines the pipeline that an instruction can execute on.
;; For the M32R, a short instruction can execute on either of the two pipes.
81 ;; For the M32Rx, the restrictions are modelled in the second
82 ;; condition of this attribute definition.
83 (define_attr "m32r_pipeline" "either,s,o,long"
84 (cond [(and (eq_attr "cpu" "m32r")
85 (eq_attr "insn_size" "short"))
86 (const_string "either")
87 (eq_attr "insn_size" "!short")
88 (const_string "long")]
89 (cond [(eq_attr "type" "int2")
90 (const_string "either")
	 (eq_attr "type" "load2,store2,shift2,uncond_branch,branch,call")
	 (const_string "o")
	 (eq_attr "type" "mul2")
	 (const_string "s")]
	(const_string "long"))))
97 ;; ::::::::::::::::::::
99 ;; :: Pipeline description
101 ;; ::::::::::::::::::::
103 ;; This model is based on Chapter 2, Appendix 3 and Appendix 4 of the
104 ;; "M32R-FPU Software Manual", Revision 1.01, plus additional information
105 ;; obtained by our best friend and mine, Google.
;; The pipeline is modelled as a fetch unit and a core with a memory unit
;; and two execution units, where "fetch" models IF and D, "memory" models
;; MEM1 and MEM2, and "EXEC" models E, E1, E2, EM, and EA.  Writeback and
110 ;; bypasses are not modelled.
111 (define_automaton "m32r")
;; We pretend there are two short (16-bit) instruction fetchers.  The
114 ;; "s" short fetcher cannot be reserved until the "o" short fetcher is
115 ;; reserved. Some instructions reserve both the left and right fetchers.
116 ;; These fetch units are a hack to get GCC to better pack the instructions
117 ;; for the M32Rx processor, which has two execution pipes.
;; In reality there is only one decoder, which can decode either two 16-bit
;; instructions or a single 32-bit instruction.
122 ;; Note, "fetch" models both the IF and the D pipeline stages.
124 ;; The m32rx core has two execution pipes. We name them o_E and s_E.
125 ;; In addition, there's a memory unit.
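;;
;; As an illustrative sketch (register numbers chosen arbitrarily, not taken
;; from these patterns): on the M32Rx two independent short insns such as
;;         add r1,r2        ; int2  - runs on either pipe
;;         ld  r3,@r4       ; load2 - O pipe only
;; can be fetched and issued together, whereas a 32-bit insn such as
;; "add3 r1,r2,#100" occupies both fetch slots and both pipes.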
127 (define_cpu_unit "o_IF,s_IF,o_E,s_E,memory" "m32r")
129 ;; Prevent the s pipe from being reserved before the o pipe.
130 (absence_set "s_IF" "o_IF")
131 (absence_set "s_E" "o_E")
133 ;; On the M32Rx, long instructions execute on both pipes, so reserve
134 ;; both fetch slots and both pipes.
135 (define_reservation "long_IF" "o_IF+s_IF")
136 (define_reservation "long_E" "o_E+s_E")
138 ;; ::::::::::::::::::::
140 ;; Simple instructions do 4 stages: IF D E WB. WB is not modelled.
141 ;; Hence, ready latency is 1.
142 (define_insn_reservation "short_left" 1
143 (and (eq_attr "m32r_pipeline" "o")
144 (and (eq_attr "insn_size" "short")
	    (eq_attr "type" "!load2")))
  "o_IF,o_E")
148 (define_insn_reservation "short_right" 1
149 (and (eq_attr "m32r_pipeline" "s")
150 (and (eq_attr "insn_size" "short")
	    (eq_attr "type" "!load2")))
  "s_IF,s_E")
154 (define_insn_reservation "short_either" 1
155 (and (eq_attr "m32r_pipeline" "either")
156 (and (eq_attr "insn_size" "short")
	    (eq_attr "type" "!load2")))
  "o_IF|s_IF,o_E|s_E")
160 (define_insn_reservation "long_m32r" 1
161 (and (eq_attr "cpu" "m32r")
162 (and (eq_attr "insn_size" "long")
	    (eq_attr "type" "!load4,load8")))
  "long_IF,long_E")
166 (define_insn_reservation "long_m32rx" 2
167 (and (eq_attr "m32r_pipeline" "long")
168 (and (eq_attr "insn_size" "long")
	    (eq_attr "type" "!load4,load8")))
  "long_IF,long_E")
172 ;; Load/store instructions do 6 stages: IF D E MEM1 MEM2 WB.
173 ;; MEM1 may require more than one cycle depending on locality. We
174 ;; optimistically assume all memory is nearby, i.e. MEM1 takes only
175 ;; one cycle. Hence, ready latency is 3.
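;;
;; For example (an illustrative schedule, not a guarantee): in
;;         ld  r4,@r5
;;         add r6,r4
;; the add consumes the load result, so the scheduler sees a 3 cycle latency
;; and tries to place independent insns between the two.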
177 ;; The M32Rx can do short load/store only on the left pipe.
178 (define_insn_reservation "short_load_left" 3
179 (and (eq_attr "m32r_pipeline" "o")
180 (and (eq_attr "insn_size" "short")
	    (eq_attr "type" "load2")))
  "o_IF,o_E,memory*2")
184 (define_insn_reservation "short_load" 3
185 (and (eq_attr "m32r_pipeline" "either")
186 (and (eq_attr "insn_size" "short")
187 (eq_attr "type" "load2")))
188 "s_IF|o_IF,s_E|o_E,memory*2")
190 (define_insn_reservation "long_load" 3
191 (and (eq_attr "cpu" "m32r")
192 (and (eq_attr "insn_size" "long")
193 (eq_attr "type" "load4,load8")))
194 "long_IF,long_E,memory*2")
196 (define_insn_reservation "long_load_m32rx" 3
197 (and (eq_attr "m32r_pipeline" "long")
198 (eq_attr "type" "load4,load8"))
199 "long_IF,long_E,memory*2")
202 (include "predicates.md")
204 ;; Expand prologue as RTL
205 (define_expand "prologue"
210 m32r_expand_prologue ();
215 ;; Move instructions.
217 ;; For QI and HI moves, the register must contain the full properly
218 ;; sign-extended value. nonzero_bits assumes this [otherwise
219 ;; SHORT_IMMEDIATES_SIGN_EXTEND must be used, but the comment for it
220 ;; says it's a kludge and the .md files should be fixed instead].
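;;
;; For example (illustrative): after "ldb r4,@r5" the register holds the
;; loaded byte sign-extended to all 32 bits, which is what nonzero_bits and
;; the QImode/HImode patterns below rely on.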
222 (define_expand "movqi"
223 [(set (match_operand:QI 0 "general_operand" "")
224 (match_operand:QI 1 "general_operand" ""))]
228 /* Fixup PIC cases. */
231 if (symbolic_operand (operands[1], QImode))
233 if (reload_in_progress || reload_completed)
234 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
236 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
240 /* Everything except mem = const or mem = mem can be done easily.
241 Objects in the small data area are handled too. */
243 if (GET_CODE (operands[0]) == MEM)
244 operands[1] = force_reg (QImode, operands[1]);
247 (define_insn "*movqi_insn"
248 [(set (match_operand:QI 0 "move_dest_operand" "=r,r,r,r,r,T,m")
249 (match_operand:QI 1 "move_src_operand" "r,I,JQR,T,m,r,r"))]
250 "register_operand (operands[0], QImode) || register_operand (operands[1], QImode)"
259 [(set_attr "type" "int2,int2,int4,load2,load4,store2,store4")
260 (set_attr "length" "2,2,4,2,4,2,4")])
262 (define_expand "movhi"
263 [(set (match_operand:HI 0 "general_operand" "")
264 (match_operand:HI 1 "general_operand" ""))]
268 /* Fixup PIC cases. */
271 if (symbolic_operand (operands[1], HImode))
273 if (reload_in_progress || reload_completed)
274 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
276 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
280 /* Everything except mem = const or mem = mem can be done easily. */
282 if (GET_CODE (operands[0]) == MEM)
283 operands[1] = force_reg (HImode, operands[1]);
286 (define_insn "*movhi_insn"
287 [(set (match_operand:HI 0 "move_dest_operand" "=r,r,r,r,r,r,T,m")
288 (match_operand:HI 1 "move_src_operand" "r,I,JQR,K,T,m,r,r"))]
289 "register_operand (operands[0], HImode) || register_operand (operands[1], HImode)"
299 [(set_attr "type" "int2,int2,int4,int4,load2,load4,store2,store4")
300 (set_attr "length" "2,2,4,4,2,4,2,4")])
302 (define_expand "movsi_push"
303 [(set (mem:SI (pre_dec:SI (match_operand:SI 0 "register_operand" "")))
304 (match_operand:SI 1 "register_operand" ""))]
308 (define_expand "movsi_pop"
309 [(set (match_operand:SI 0 "register_operand" "")
310 (mem:SI (post_inc:SI (match_operand:SI 1 "register_operand" ""))))]
314 (define_expand "movsi"
315 [(set (match_operand:SI 0 "general_operand" "")
316 (match_operand:SI 1 "general_operand" ""))]
320 /* Fixup PIC cases. */
323 if (symbolic_operand (operands[1], SImode))
325 if (reload_in_progress || reload_completed)
326 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
328 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
332 /* Everything except mem = const or mem = mem can be done easily. */
334 if (GET_CODE (operands[0]) == MEM)
335 operands[1] = force_reg (SImode, operands[1]);
337 /* Small Data Area reference? */
338 if (small_data_operand (operands[1], SImode))
340 emit_insn (gen_movsi_sda (operands[0], operands[1]));
  /* If medium or large code model, symbols have to be loaded with
     seth/add3.  */
346 if (addr32_operand (operands[1], SImode))
348 emit_insn (gen_movsi_addr32 (operands[0], operands[1]));
353 ;; ??? Do we need a const_double constraint here for large unsigned values?
354 (define_insn "*movsi_insn"
355 [(set (match_operand:SI 0 "move_dest_operand" "=r,r,r,r,r,r,r,r,r,T,S,m")
356 (match_operand:SI 1 "move_src_operand" "r,I,J,MQ,L,n,T,U,m,r,r,r"))]
357 "register_operand (operands[0], SImode) || register_operand (operands[1], SImode)"
360 if (GET_CODE (operands[0]) == REG || GET_CODE (operands[1]) == SUBREG)
362 switch (GET_CODE (operands[1]))
374 if (GET_CODE (XEXP (operands[1], 0)) == POST_INC
375 && XEXP (XEXP (operands[1], 0), 0) == stack_pointer_rtx)
381 value = INTVAL (operands[1]);
383 return \"ldi %0,%#%1\\t; %X1\";
385 if (UINT24_P (value))
386 return \"ld24 %0,%#%1\\t; %X1\";
388 if (UPPER16_P (value))
389 return \"seth %0,%#%T1\\t; %X1\";
397 return \"ld24 %0,%#%1\";
403 else if (GET_CODE (operands[0]) == MEM
404 && (GET_CODE (operands[1]) == REG || GET_CODE (operands[1]) == SUBREG))
406 if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
407 && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx)
415 [(set_attr "type" "int2,int2,int4,int4,int4,multi,load2,load2,load4,store2,store2,store4")
416 (set_attr "length" "2,2,4,4,4,8,2,2,4,2,2,4")])
; Try to use a four byte / two byte pair for constants not loadable with
; a single ldi, ld24, or seth instruction.
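;
; Illustrative examples of the strategy (register number and assembler
; spelling are assumptions, not literal output of these templates):
;   0xff000000  ->  ld24 r4,#0x00ffffff / not  r4,r4       ; invert a 24-bit value
;   0x02000000  ->  ld24 r4,#0x00800000 / slli r4,#2       ; shift a 24-bit value
;   0x12345678  ->  seth r4,#0x1234     / or3  r4,r4,#0x5678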
422 [(set (match_operand:SI 0 "register_operand" "")
423 (match_operand:SI 1 "two_insn_const_operand" ""))]
425 [(set (match_dup 0) (match_dup 2))
426 (set (match_dup 0) (ior:SI (match_dup 0) (match_dup 3)))]
429 unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
430 unsigned HOST_WIDE_INT tmp;
433 /* In all cases we will emit two instructions. However we try to
434 use 2 byte instructions wherever possible. We can assume the
435 constant isn't loadable with any of ldi, ld24, or seth. */
437 /* See if we can load a 24 bit unsigned value and invert it. */
438 if (UINT24_P (~ val))
440 emit_insn (gen_movsi (operands[0], GEN_INT (~ val)));
441 emit_insn (gen_one_cmplsi2 (operands[0], operands[0]));
445 /* See if we can load a 24 bit unsigned value and shift it into place.
     0x01fffffe is the largest 24-bit unsigned value shifted left by one.  */
447 for (shift = 1, tmp = 0x01fffffe;
451 if ((val & ~tmp) == 0)
453 emit_insn (gen_movsi (operands[0], GEN_INT (val >> shift)));
454 emit_insn (gen_ashlsi3 (operands[0], operands[0], GEN_INT (shift)));
459 /* Can't use any two byte insn, fall back to seth/or3. Use ~0xffff instead
     of 0xffff0000, since the latter fails on a 64-bit host.  */
461 operands[2] = GEN_INT ((val) & ~0xffff);
462 operands[3] = GEN_INT ((val) & 0xffff);
466 [(set (match_operand:SI 0 "register_operand" "")
467 (match_operand:SI 1 "seth_add3_operand" ""))]
470 (high:SI (match_dup 1)))
472 (lo_sum:SI (match_dup 0)
476 ;; Small data area support.
477 ;; The address of _SDA_BASE_ is loaded into a register and all objects in
478 ;; the small data area are indexed off that. This is done for each reference
479 ;; but cse will clean things up for us. We let the compiler choose the
480 ;; register to use so we needn't allocate (and maybe even fix) a special
481 ;; register to use. Since the load and store insns have a 16 bit offset the
482 ;; total size of the data area can be 64K. However, if the data area lives
483 ;; above 16M (24 bits), _SDA_BASE_ will have to be loaded with seth/add3 which
484 ;; would then yield 3 instructions to reference an object [though there would
485 ;; be no net loss if two or more objects were referenced]. The 3 insns can be
486 ;; reduced back to 2 if the size of the small data area were reduced to 32K
487 ;; [then seth + ld/st would work for any object in the area]. Doing this
488 ;; would require special handling of _SDA_BASE_ (its value would be
489 ;; (.sdata + 32K) & 0xffff0000) and reloc computations would be different
490 ;; [I think]. What to do about this is deferred until later and for now we
491 ;; require .sdata to be in the first 16M.
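;;
;; As an illustrative sketch (the register number and the reloc spelling are
;; assumptions, not what these templates literally print), a reference to a
;; small data object _x typically becomes:
;;         ld24 r4,#_SDA_BASE_           ; materialize the base
;;         ld   r5,@(sda(_x),r4)         ; 16 bit displacement load
;; with cse sharing the ld24 between nearby references.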
493 (define_expand "movsi_sda"
495 (unspec:SI [(const_int 0)] UNSPEC_LOAD_SDA_BASE))
496 (set (match_operand:SI 0 "register_operand" "")
497 (lo_sum:SI (match_dup 2)
498 (match_operand:SI 1 "small_data_operand" "")))]
502 if (reload_in_progress || reload_completed)
503 operands[2] = operands[0];
505 operands[2] = gen_reg_rtx (SImode);
508 (define_insn "*load_sda_base_32"
509 [(set (match_operand:SI 0 "register_operand" "=r")
510 (unspec:SI [(const_int 0)] UNSPEC_LOAD_SDA_BASE))]
512 "seth %0,%#shigh(_SDA_BASE_)\;add3 %0,%0,%#low(_SDA_BASE_)"
513 [(set_attr "type" "multi")
514 (set_attr "length" "8")])
516 (define_insn "*load_sda_base"
517 [(set (match_operand:SI 0 "register_operand" "=r")
518 (unspec:SI [(const_int 0)] UNSPEC_LOAD_SDA_BASE))]
520 "ld24 %0,#_SDA_BASE_"
521 [(set_attr "type" "int4")
522 (set_attr "length" "4")])
524 ;; 32 bit address support.
526 (define_expand "movsi_addr32"
528 ; addr32_operand isn't used because it's too restrictive,
529 ; seth_add3_operand is more general and thus safer.
530 (high:SI (match_operand:SI 1 "seth_add3_operand" "")))
531 (set (match_operand:SI 0 "register_operand" "")
532 (lo_sum:SI (match_dup 2) (match_dup 1)))]
536 if (reload_in_progress || reload_completed)
537 operands[2] = operands[0];
539 operands[2] = gen_reg_rtx (SImode);
542 (define_insn "set_hi_si"
543 [(set (match_operand:SI 0 "register_operand" "=r")
544 (high:SI (match_operand 1 "symbolic_operand" "")))]
546 "seth %0,%#shigh(%1)"
547 [(set_attr "type" "int4")
548 (set_attr "length" "4")])
550 (define_insn "lo_sum_si"
551 [(set (match_operand:SI 0 "register_operand" "=r")
552 (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
553 (match_operand:SI 2 "immediate_operand" "in")))]
556 [(set_attr "type" "int4")
557 (set_attr "length" "4")])
559 (define_expand "movdi"
560 [(set (match_operand:DI 0 "general_operand" "")
561 (match_operand:DI 1 "general_operand" ""))]
565 /* Fixup PIC cases. */
568 if (symbolic_operand (operands[1], DImode))
570 if (reload_in_progress || reload_completed)
571 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
573 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
577 /* Everything except mem = const or mem = mem can be done easily. */
579 if (GET_CODE (operands[0]) == MEM)
580 operands[1] = force_reg (DImode, operands[1]);
583 (define_insn "*movdi_insn"
584 [(set (match_operand:DI 0 "move_dest_operand" "=r,r,r,r,m")
585 (match_operand:DI 1 "move_double_src_operand" "r,nG,F,m,r"))]
586 "register_operand (operands[0], DImode) || register_operand (operands[1], DImode)"
588 [(set_attr "type" "multi,multi,multi,load8,store8")
589 (set_attr "length" "4,4,16,6,6")])
592 [(set (match_operand:DI 0 "move_dest_operand" "")
593 (match_operand:DI 1 "move_double_src_operand" ""))]
596 "operands[2] = gen_split_move_double (operands);")
598 ;; Floating point move insns.
600 (define_expand "movsf"
601 [(set (match_operand:SF 0 "general_operand" "")
602 (match_operand:SF 1 "general_operand" ""))]
606 /* Fixup PIC cases. */
609 if (symbolic_operand (operands[1], SFmode))
611 if (reload_in_progress || reload_completed)
612 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
614 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
618 /* Everything except mem = const or mem = mem can be done easily. */
620 if (GET_CODE (operands[0]) == MEM)
621 operands[1] = force_reg (SFmode, operands[1]);
624 (define_insn "*movsf_insn"
625 [(set (match_operand:SF 0 "move_dest_operand" "=r,r,r,r,r,T,S,m")
626 (match_operand:SF 1 "move_src_operand" "r,F,U,S,m,r,r,r"))]
627 "register_operand (operands[0], SFmode) || register_operand (operands[1], SFmode)"
637 ;; ??? Length of alternative 1 is either 2, 4 or 8.
638 [(set_attr "type" "int2,multi,load2,load2,load4,store2,store2,store4")
639 (set_attr "length" "2,8,2,2,4,2,2,4")])
642 [(set (match_operand:SF 0 "register_operand" "")
643 (match_operand:SF 1 "const_double_operand" ""))]
645 [(set (match_dup 2) (match_dup 3))]
648 operands[2] = operand_subword (operands[0], 0, 0, SFmode);
649 operands[3] = operand_subword (operands[1], 0, 0, SFmode);
652 (define_expand "movdf"
653 [(set (match_operand:DF 0 "general_operand" "")
654 (match_operand:DF 1 "general_operand" ""))]
658 /* Fixup PIC cases. */
661 if (symbolic_operand (operands[1], DFmode))
663 if (reload_in_progress || reload_completed)
664 operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
666 operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
670 /* Everything except mem = const or mem = mem can be done easily. */
672 if (GET_CODE (operands[0]) == MEM)
673 operands[1] = force_reg (DFmode, operands[1]);
676 (define_insn "*movdf_insn"
677 [(set (match_operand:DF 0 "move_dest_operand" "=r,r,r,m")
678 (match_operand:DF 1 "move_double_src_operand" "r,F,m,r"))]
679 "register_operand (operands[0], DFmode) || register_operand (operands[1], DFmode)"
681 [(set_attr "type" "multi,multi,load8,store8")
682 (set_attr "length" "4,16,6,6")])
685 [(set (match_operand:DF 0 "move_dest_operand" "")
686 (match_operand:DF 1 "move_double_src_operand" ""))]
689 "operands[2] = gen_split_move_double (operands);")
691 ;; Zero extension instructions.
693 (define_insn "zero_extendqihi2"
694 [(set (match_operand:HI 0 "register_operand" "=r,r,r")
695 (zero_extend:HI (match_operand:QI 1 "extend_operand" "r,T,m")))]
701 [(set_attr "type" "int4,load2,load4")
702 (set_attr "length" "4,2,4")])
704 (define_insn "zero_extendqisi2"
705 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
706 (zero_extend:SI (match_operand:QI 1 "extend_operand" "r,T,m")))]
712 [(set_attr "type" "int4,load2,load4")
713 (set_attr "length" "4,2,4")])
715 (define_insn "zero_extendhisi2"
716 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
717 (zero_extend:SI (match_operand:HI 1 "extend_operand" "r,T,m")))]
723 [(set_attr "type" "int4,load2,load4")
724 (set_attr "length" "4,2,4")])
726 ;; Signed conversions from a smaller integer to a larger integer
727 (define_insn "extendqihi2"
728 [(set (match_operand:HI 0 "register_operand" "=r,r,r")
729 (sign_extend:HI (match_operand:QI 1 "extend_operand" "0,T,m")))]
735 [(set_attr "type" "multi,load2,load4")
736 (set_attr "length" "2,2,4")])
739 [(set (match_operand:HI 0 "register_operand" "")
740 (sign_extend:HI (match_operand:QI 1 "register_operand" "")))]
746 rtx op0 = gen_lowpart (SImode, operands[0]);
747 rtx shift = GEN_INT (24);
749 operands[2] = gen_ashlsi3 (op0, op0, shift);
750 operands[3] = gen_ashrsi3 (op0, op0, shift);
753 (define_insn "extendqisi2"
754 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
755 (sign_extend:SI (match_operand:QI 1 "extend_operand" "0,T,m")))]
761 [(set_attr "type" "multi,load2,load4")
762 (set_attr "length" "4,2,4")])
765 [(set (match_operand:SI 0 "register_operand" "")
766 (sign_extend:SI (match_operand:QI 1 "register_operand" "")))]
772 rtx shift = GEN_INT (24);
774 operands[2] = gen_ashlsi3 (operands[0], operands[0], shift);
775 operands[3] = gen_ashrsi3 (operands[0], operands[0], shift);
778 (define_insn "extendhisi2"
779 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
780 (sign_extend:SI (match_operand:HI 1 "extend_operand" "0,T,m")))]
786 [(set_attr "type" "multi,load2,load4")
787 (set_attr "length" "4,2,4")])
790 [(set (match_operand:SI 0 "register_operand" "")
791 (sign_extend:SI (match_operand:HI 1 "register_operand" "")))]
797 rtx shift = GEN_INT (16);
799 operands[2] = gen_ashlsi3 (operands[0], operands[0], shift);
800 operands[3] = gen_ashrsi3 (operands[0], operands[0], shift);
803 ;; Arithmetic instructions.
805 ; ??? Adding an alternative to split add3 of small constants into two
806 ; insns yields better instruction packing but slower code. Adds of small
; values are done a lot.
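;
; For instance (illustrative only): "add3 r4,r5,#4" is one 32-bit insn,
; while the split form "mv r4,r5 / addi r4,#4" is two 16-bit insns that
; pack better on the M32Rx but lengthen the dependence chain.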
809 (define_insn "addsi3"
810 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
811 (plus:SI (match_operand:SI 1 "register_operand" "%0,0,r")
812 (match_operand:SI 2 "nonmemory_operand" "r,I,J")))]
818 [(set_attr "type" "int2,int2,int4")
819 (set_attr "length" "2,2,4")])
822 ; [(set (match_operand:SI 0 "register_operand" "")
823 ; (plus:SI (match_operand:SI 1 "register_operand" "")
824 ; (match_operand:SI 2 "int8_operand" "")))]
826 ; && REGNO (operands[0]) != REGNO (operands[1])
827 ; && INT8_P (INTVAL (operands[2]))
828 ; && INTVAL (operands[2]) != 0"
829 ; [(set (match_dup 0) (match_dup 1))
830 ; (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 2)))]
833 (define_insn "adddi3"
834 [(set (match_operand:DI 0 "register_operand" "=r")
835 (plus:DI (match_operand:DI 1 "register_operand" "%0")
836 (match_operand:DI 2 "register_operand" "r")))
837 (clobber (reg:CC 17))]
840 [(set_attr "type" "multi")
841 (set_attr "length" "6")])
843 ;; ??? The cmp clears the condition bit. Can we speed up somehow?
845 [(set (match_operand:DI 0 "register_operand" "")
846 (plus:DI (match_operand:DI 1 "register_operand" "")
847 (match_operand:DI 2 "register_operand" "")))
848 (clobber (reg:CC 17))]
850 [(parallel [(set (reg:CC 17)
852 (use (match_dup 4))])
853 (parallel [(set (match_dup 4)
854 (plus:SI (match_dup 4)
855 (plus:SI (match_dup 5)
856 (ne:SI (reg:CC 17) (const_int 0)))))
858 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])
859 (parallel [(set (match_dup 6)
860 (plus:SI (match_dup 6)
861 (plus:SI (match_dup 7)
862 (ne:SI (reg:CC 17) (const_int 0)))))
864 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])]
867 operands[4] = operand_subword (operands[0], (WORDS_BIG_ENDIAN != 0), 0, DImode);
868 operands[5] = operand_subword (operands[2], (WORDS_BIG_ENDIAN != 0), 0, DImode);
869 operands[6] = operand_subword (operands[0], (WORDS_BIG_ENDIAN == 0), 0, DImode);
870 operands[7] = operand_subword (operands[2], (WORDS_BIG_ENDIAN == 0), 0, DImode);
873 (define_insn "*clear_c"
876 (use (match_operand:SI 0 "register_operand" "r"))]
879 [(set_attr "type" "int2")
880 (set_attr "length" "2")])
882 (define_insn "*add_carry"
883 [(set (match_operand:SI 0 "register_operand" "=r")
884 (plus:SI (match_operand:SI 1 "register_operand" "%0")
885 (plus:SI (match_operand:SI 2 "register_operand" "r")
886 (ne:SI (reg:CC 17) (const_int 0)))))
888 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))]
891 [(set_attr "type" "int2")
892 (set_attr "length" "2")])
894 (define_insn "subsi3"
895 [(set (match_operand:SI 0 "register_operand" "=r")
896 (minus:SI (match_operand:SI 1 "register_operand" "0")
897 (match_operand:SI 2 "register_operand" "r")))]
900 [(set_attr "type" "int2")
901 (set_attr "length" "2")])
903 (define_insn "subdi3"
904 [(set (match_operand:DI 0 "register_operand" "=r")
905 (minus:DI (match_operand:DI 1 "register_operand" "0")
906 (match_operand:DI 2 "register_operand" "r")))
907 (clobber (reg:CC 17))]
910 [(set_attr "type" "multi")
911 (set_attr "length" "6")])
913 ;; ??? The cmp clears the condition bit. Can we speed up somehow?
915 [(set (match_operand:DI 0 "register_operand" "")
916 (minus:DI (match_operand:DI 1 "register_operand" "")
917 (match_operand:DI 2 "register_operand" "")))
918 (clobber (reg:CC 17))]
920 [(parallel [(set (reg:CC 17)
922 (use (match_dup 4))])
923 (parallel [(set (match_dup 4)
924 (minus:SI (match_dup 4)
925 (minus:SI (match_dup 5)
926 (ne:SI (reg:CC 17) (const_int 0)))))
928 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])
929 (parallel [(set (match_dup 6)
930 (minus:SI (match_dup 6)
931 (minus:SI (match_dup 7)
932 (ne:SI (reg:CC 17) (const_int 0)))))
934 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])]
937 operands[4] = operand_subword (operands[0], (WORDS_BIG_ENDIAN != 0), 0, DImode);
938 operands[5] = operand_subword (operands[2], (WORDS_BIG_ENDIAN != 0), 0, DImode);
939 operands[6] = operand_subword (operands[0], (WORDS_BIG_ENDIAN == 0), 0, DImode);
940 operands[7] = operand_subword (operands[2], (WORDS_BIG_ENDIAN == 0), 0, DImode);
943 (define_insn "*sub_carry"
944 [(set (match_operand:SI 0 "register_operand" "=r")
945 (minus:SI (match_operand:SI 1 "register_operand" "%0")
946 (minus:SI (match_operand:SI 2 "register_operand" "r")
947 (ne:SI (reg:CC 17) (const_int 0)))))
949 (unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))]
952 [(set_attr "type" "int2")
953 (set_attr "length" "2")])
955 ; Multiply/Divide instructions.
957 (define_insn "mulhisi3"
958 [(set (match_operand:SI 0 "register_operand" "=r")
959 (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "r"))
960 (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
962 "mullo %1,%2\;mvfacmi %0"
963 [(set_attr "type" "multi")
964 (set_attr "length" "4")])
966 (define_insn "mulsi3"
967 [(set (match_operand:SI 0 "register_operand" "=r")
968 (mult:SI (match_operand:SI 1 "register_operand" "%0")
969 (match_operand:SI 2 "register_operand" "r")))]
972 [(set_attr "type" "mul2")
973 (set_attr "length" "2")])
975 (define_insn "divsi3"
976 [(set (match_operand:SI 0 "register_operand" "=r")
977 (div:SI (match_operand:SI 1 "register_operand" "0")
978 (match_operand:SI 2 "register_operand" "r")))]
981 [(set_attr "type" "div4")
982 (set_attr "length" "4")])
984 (define_insn "udivsi3"
985 [(set (match_operand:SI 0 "register_operand" "=r")
986 (udiv:SI (match_operand:SI 1 "register_operand" "0")
987 (match_operand:SI 2 "register_operand" "r")))]
990 [(set_attr "type" "div4")
991 (set_attr "length" "4")])
993 (define_insn "modsi3"
994 [(set (match_operand:SI 0 "register_operand" "=r")
995 (mod:SI (match_operand:SI 1 "register_operand" "0")
996 (match_operand:SI 2 "register_operand" "r")))]
999 [(set_attr "type" "div4")
1000 (set_attr "length" "4")])
1002 (define_insn "umodsi3"
1003 [(set (match_operand:SI 0 "register_operand" "=r")
1004 (umod:SI (match_operand:SI 1 "register_operand" "0")
1005 (match_operand:SI 2 "register_operand" "r")))]
1008 [(set_attr "type" "div4")
1009 (set_attr "length" "4")])
1011 ;; Boolean instructions.
1013 ;; We don't define the DImode versions as expand_binop does a good enough job.
1014 ;; And if it doesn't it should be fixed.
1016 (define_insn "andsi3"
1017 [(set (match_operand:SI 0 "register_operand" "=r,r")
1018 (and:SI (match_operand:SI 1 "register_operand" "%0,r")
1019 (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
1023 /* If we are worried about space, see if we can break this up into two
1024 short instructions, which might eliminate a NOP being inserted. */
1026 && m32r_not_same_reg (operands[0], operands[1])
1027 && GET_CODE (operands[2]) == CONST_INT
1028 && INT8_P (INTVAL (operands[2])))
1031 else if (GET_CODE (operands[2]) == CONST_INT)
1032 return \"and3 %0,%1,%#%X2\";
1034 return \"and %0,%2\";
1036 [(set_attr "type" "int2,int4")
1037 (set_attr "length" "2,4")])
1040 [(set (match_operand:SI 0 "register_operand" "")
1041 (and:SI (match_operand:SI 1 "register_operand" "")
1042 (match_operand:SI 2 "int8_operand" "")))]
1043 "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
1044 [(set (match_dup 0) (match_dup 2))
1045 (set (match_dup 0) (and:SI (match_dup 0) (match_dup 1)))]
1048 (define_insn "iorsi3"
1049 [(set (match_operand:SI 0 "register_operand" "=r,r")
1050 (ior:SI (match_operand:SI 1 "register_operand" "%0,r")
1051 (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
1055 /* If we are worried about space, see if we can break this up into two
1056 short instructions, which might eliminate a NOP being inserted. */
1058 && m32r_not_same_reg (operands[0], operands[1])
1059 && GET_CODE (operands[2]) == CONST_INT
1060 && INT8_P (INTVAL (operands[2])))
1063 else if (GET_CODE (operands[2]) == CONST_INT)
1064 return \"or3 %0,%1,%#%X2\";
1066 return \"or %0,%2\";
1068 [(set_attr "type" "int2,int4")
1069 (set_attr "length" "2,4")])
1072 [(set (match_operand:SI 0 "register_operand" "")
1073 (ior:SI (match_operand:SI 1 "register_operand" "")
1074 (match_operand:SI 2 "int8_operand" "")))]
1075 "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
1076 [(set (match_dup 0) (match_dup 2))
1077 (set (match_dup 0) (ior:SI (match_dup 0) (match_dup 1)))]
1080 (define_insn "xorsi3"
1081 [(set (match_operand:SI 0 "register_operand" "=r,r")
1082 (xor:SI (match_operand:SI 1 "register_operand" "%0,r")
1083 (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
1087 /* If we are worried about space, see if we can break this up into two
1088 short instructions, which might eliminate a NOP being inserted. */
1090 && m32r_not_same_reg (operands[0], operands[1])
1091 && GET_CODE (operands[2]) == CONST_INT
1092 && INT8_P (INTVAL (operands[2])))
1095 else if (GET_CODE (operands[2]) == CONST_INT)
1096 return \"xor3 %0,%1,%#%X2\";
1098 return \"xor %0,%2\";
1100 [(set_attr "type" "int2,int4")
1101 (set_attr "length" "2,4")])
1104 [(set (match_operand:SI 0 "register_operand" "")
1105 (xor:SI (match_operand:SI 1 "register_operand" "")
1106 (match_operand:SI 2 "int8_operand" "")))]
1107 "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
1108 [(set (match_dup 0) (match_dup 2))
1109 (set (match_dup 0) (xor:SI (match_dup 0) (match_dup 1)))]
1112 (define_insn "negsi2"
1113 [(set (match_operand:SI 0 "register_operand" "=r")
1114 (neg:SI (match_operand:SI 1 "register_operand" "r")))]
1117 [(set_attr "type" "int2")
1118 (set_attr "length" "2")])
1120 (define_insn "one_cmplsi2"
1121 [(set (match_operand:SI 0 "register_operand" "=r")
1122 (not:SI (match_operand:SI 1 "register_operand" "r")))]
1125 [(set_attr "type" "int2")
1126 (set_attr "length" "2")])
1128 ;; Shift instructions.
1130 (define_insn "ashlsi3"
1131 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
1132 (ashift:SI (match_operand:SI 1 "register_operand" "0,0,r")
1133 (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
1139 [(set_attr "type" "shift2,shift2,shift4")
1140 (set_attr "length" "2,2,4")])
1142 (define_insn "ashrsi3"
1143 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
1144 (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
1145 (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
1151 [(set_attr "type" "shift2,shift2,shift4")
1152 (set_attr "length" "2,2,4")])
1154 (define_insn "lshrsi3"
1155 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
1156 (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
1157 (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
1163 [(set_attr "type" "shift2,shift2,shift4")
1164 (set_attr "length" "2,2,4")])
1166 ;; Compare instructions.
1167 ;; This controls RTL generation and register allocation.
1169 ;; We generate RTL for comparisons and branches by having the cmpxx
1170 ;; patterns store away the operands. Then the bcc patterns
1171 ;; emit RTL for both the compare and the branch.
1173 ;; On the m32r it is more efficient to use the bxxz instructions and
;; thus merge the compare and branch into one instruction, so they are
;; preferred.
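;;
;; Illustrative sketch (register numbers assumed): for "if (a == b)" the
;; cmpsi expander merely records its operands, and the beq expander then
;; emits a single compare-and-branch
;;         beq r4,r5,.L1
;; rather than a separate cmp followed by a bc.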
1177 (define_expand "cmpsi"
1179 (compare:CC (match_operand:SI 0 "register_operand" "")
1180 (match_operand:SI 1 "reg_or_cmp_int16_operand" "")))]
1184 m32r_compare_op0 = operands[0];
1185 m32r_compare_op1 = operands[1];
1189 (define_insn "cmp_eqsi_zero_insn"
1191 (eq:CC (match_operand:SI 0 "register_operand" "r,r")
1192 (match_operand:SI 1 "reg_or_zero_operand" "r,P")))]
1193 "TARGET_M32RX || TARGET_M32R2"
1197 [(set_attr "type" "int4")
1198 (set_attr "length" "4")])
1200 ;; The cmp_xxx_insn patterns set the condition bit to the result of the
1201 ;; comparison. There isn't a "compare equal" instruction so cmp_eqsi_insn
1202 ;; is quite inefficient. However, it is rarely used.
1204 (define_insn "cmp_eqsi_insn"
1206 (eq:CC (match_operand:SI 0 "register_operand" "r,r")
1207 (match_operand:SI 1 "reg_or_cmp_int16_operand" "r,P")))
1208 (clobber (match_scratch:SI 2 "=&r,&r"))]
1212 if (which_alternative == 0)
1214 return \"mv %2,%0\;sub %2,%1\;cmpui %2,#1\";
1218 if (INTVAL (operands [1]) == 0)
1219 return \"cmpui %0, #1\";
1220 else if (REGNO (operands [2]) == REGNO (operands [0]))
1221 return \"addi %0,%#%N1\;cmpui %2,#1\";
1223 return \"add3 %2,%0,%#%N1\;cmpui %2,#1\";
1226 [(set_attr "type" "multi,multi")
1227 (set_attr "length" "8,8")])
1229 (define_insn "cmp_ltsi_insn"
1231 (lt:CC (match_operand:SI 0 "register_operand" "r,r")
1232 (match_operand:SI 1 "reg_or_int16_operand" "r,J")))]
1237 [(set_attr "type" "int2,int4")
1238 (set_attr "length" "2,4")])
1240 (define_insn "cmp_ltusi_insn"
1242 (ltu:CC (match_operand:SI 0 "register_operand" "r,r")
1243 (match_operand:SI 1 "reg_or_int16_operand" "r,J")))]
1248 [(set_attr "type" "int2,int4")
1249 (set_attr "length" "2,4")])
1251 ;; These control RTL generation for conditional jump insns.
1253 (define_expand "beq"
1255 (if_then_else (match_dup 1)
1256 (label_ref (match_operand 0 "" ""))
1261 operands[1] = gen_compare (EQ, m32r_compare_op0, m32r_compare_op1, FALSE);
1264 (define_expand "bne"
1266 (if_then_else (match_dup 1)
1267 (label_ref (match_operand 0 "" ""))
1272 operands[1] = gen_compare (NE, m32r_compare_op0, m32r_compare_op1, FALSE);
1275 (define_expand "bgt"
1277 (if_then_else (match_dup 1)
1278 (label_ref (match_operand 0 "" ""))
1283 operands[1] = gen_compare (GT, m32r_compare_op0, m32r_compare_op1, FALSE);
1286 (define_expand "ble"
1288 (if_then_else (match_dup 1)
1289 (label_ref (match_operand 0 "" ""))
1294 operands[1] = gen_compare (LE, m32r_compare_op0, m32r_compare_op1, FALSE);
1297 (define_expand "bge"
1299 (if_then_else (match_dup 1)
1300 (label_ref (match_operand 0 "" ""))
1305 operands[1] = gen_compare (GE, m32r_compare_op0, m32r_compare_op1, FALSE);
1308 (define_expand "blt"
1310 (if_then_else (match_dup 1)
1311 (label_ref (match_operand 0 "" ""))
1316 operands[1] = gen_compare (LT, m32r_compare_op0, m32r_compare_op1, FALSE);
1319 (define_expand "bgtu"
1321 (if_then_else (match_dup 1)
1322 (label_ref (match_operand 0 "" ""))
1327 operands[1] = gen_compare (GTU, m32r_compare_op0, m32r_compare_op1, FALSE);
1330 (define_expand "bleu"
1332 (if_then_else (match_dup 1)
1333 (label_ref (match_operand 0 "" ""))
1338 operands[1] = gen_compare (LEU, m32r_compare_op0, m32r_compare_op1, FALSE);
1341 (define_expand "bgeu"
1343 (if_then_else (match_dup 1)
1344 (label_ref (match_operand 0 "" ""))
1349 operands[1] = gen_compare (GEU, m32r_compare_op0, m32r_compare_op1, FALSE);
1352 (define_expand "bltu"
1354 (if_then_else (match_dup 1)
1355 (label_ref (match_operand 0 "" ""))
1360 operands[1] = gen_compare (LTU, m32r_compare_op0, m32r_compare_op1, FALSE);
1363 ;; Now match both normal and inverted jump.
1365 (define_insn "*branch_insn"
1367 (if_then_else (match_operator 1 "eqne_comparison_operator"
1368 [(reg 17) (const_int 0)])
1369 (label_ref (match_operand 0 "" ""))
1374 static char instruction[40];
1375 sprintf (instruction, \"%s%s %%l0\",
1376 (GET_CODE (operands[1]) == NE) ? \"bc\" : \"bnc\",
	   (get_attr_length (insn) == 2) ? \".s\" : \"\");
  return instruction;
1380 [(set_attr "type" "branch")
1382 ; We use 300/600 instead of 512,1024 to account for inaccurate insn
1383 ; lengths and insn alignments that are complex to track.
1384 ; It's not important that we be hyper-precise here. It may be more
1385 ; important blah blah blah when the chip supports parallel execution
1386 ; blah blah blah but until then blah blah blah this is simple and
1388 (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
1394 (define_insn "*rev_branch_insn"
1396 (if_then_else (match_operator 1 "eqne_comparison_operator"
1397 [(reg 17) (const_int 0)])
1399 (label_ref (match_operand 0 "" ""))))]
1400 ;"REVERSIBLE_CC_MODE (GET_MODE (XEXP (operands[1], 0)))"
1404 static char instruction[40];
1405 sprintf (instruction, \"%s%s %%l0\",
1406 (GET_CODE (operands[1]) == EQ) ? \"bc\" : \"bnc\",
	   (get_attr_length (insn) == 2) ? \".s\" : \"\");
  return instruction;
1410 [(set_attr "type" "branch")
1412 ; We use 300/600 instead of 512,1024 to account for inaccurate insn
1413 ; lengths and insn alignments that are complex to track.
1414 ; It's not important that we be hyper-precise here. It may be more
1415 ; important blah blah blah when the chip supports parallel execution
1416 ; blah blah blah but until then blah blah blah this is simple and
1418 (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
1424 ; reg/reg compare and branch insns
1426 (define_insn "*reg_branch_insn"
1428 (if_then_else (match_operator 1 "eqne_comparison_operator"
1429 [(match_operand:SI 2 "register_operand" "r")
1430 (match_operand:SI 3 "register_operand" "r")])
1431 (label_ref (match_operand 0 "" ""))
1436 /* Is branch target reachable with beq/bne? */
1437 if (get_attr_length (insn) == 4)
1439 if (GET_CODE (operands[1]) == EQ)
1440 return \"beq %2,%3,%l0\";
1442 return \"bne %2,%3,%l0\";
1446 if (GET_CODE (operands[1]) == EQ)
1447 return \"bne %2,%3,1f\;bra %l0\;1:\";
1449 return \"beq %2,%3,1f\;bra %l0\;1:\";
1452 [(set_attr "type" "branch")
; We use 25000/50000 instead of 32768/65536 to account for slot filling,
; which is complex to track, and for inaccurate length specs.
1455 (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
1461 (define_insn "*rev_reg_branch_insn"
1463 (if_then_else (match_operator 1 "eqne_comparison_operator"
1464 [(match_operand:SI 2 "register_operand" "r")
1465 (match_operand:SI 3 "register_operand" "r")])
1467 (label_ref (match_operand 0 "" ""))))]
1471 /* Is branch target reachable with beq/bne? */
1472 if (get_attr_length (insn) == 4)
1474 if (GET_CODE (operands[1]) == NE)
1475 return \"beq %2,%3,%l0\";
1477 return \"bne %2,%3,%l0\";
1481 if (GET_CODE (operands[1]) == NE)
1482 return \"bne %2,%3,1f\;bra %l0\;1:\";
1484 return \"beq %2,%3,1f\;bra %l0\;1:\";
1487 [(set_attr "type" "branch")
; We use 25000/50000 instead of 32768/65536 to account for slot filling,
; which is complex to track, and for inaccurate length specs.
1490 (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
1496 ; reg/zero compare and branch insns
1498 (define_insn "*zero_branch_insn"
1500 (if_then_else (match_operator 1 "signed_comparison_operator"
1501 [(match_operand:SI 2 "register_operand" "r")
1503 (label_ref (match_operand 0 "" ""))
  const char *br, *invbr;
  char asmtext[40];
1511 switch (GET_CODE (operands[1]))
1513 case EQ : br = \"eq\"; invbr = \"ne\"; break;
1514 case NE : br = \"ne\"; invbr = \"eq\"; break;
1515 case LE : br = \"le\"; invbr = \"gt\"; break;
1516 case GT : br = \"gt\"; invbr = \"le\"; break;
1517 case LT : br = \"lt\"; invbr = \"ge\"; break;
1518 case GE : br = \"ge\"; invbr = \"lt\"; break;
1520 default: gcc_unreachable ();
1523 /* Is branch target reachable with bxxz? */
1524 if (get_attr_length (insn) == 4)
1526 sprintf (asmtext, \"b%sz %%2,%%l0\", br);
1527 output_asm_insn (asmtext, operands);
1531 sprintf (asmtext, \"b%sz %%2,1f\;bra %%l0\;1:\", invbr);
1532 output_asm_insn (asmtext, operands);
1536 [(set_attr "type" "branch")
; We use 25000/50000 instead of 32768/65536 to account for slot filling,
; which is complex to track, and for inaccurate length specs.
1539 (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
1545 (define_insn "*rev_zero_branch_insn"
1547 (if_then_else (match_operator 1 "eqne_comparison_operator"
1548 [(match_operand:SI 2 "register_operand" "r")
1551 (label_ref (match_operand 0 "" ""))))]
  const char *br, *invbr;
  char asmtext[40];
1558 switch (GET_CODE (operands[1]))
1560 case EQ : br = \"eq\"; invbr = \"ne\"; break;
1561 case NE : br = \"ne\"; invbr = \"eq\"; break;
1562 case LE : br = \"le\"; invbr = \"gt\"; break;
1563 case GT : br = \"gt\"; invbr = \"le\"; break;
1564 case LT : br = \"lt\"; invbr = \"ge\"; break;
1565 case GE : br = \"ge\"; invbr = \"lt\"; break;
1567 default: gcc_unreachable ();
1570 /* Is branch target reachable with bxxz? */
1571 if (get_attr_length (insn) == 4)
1573 sprintf (asmtext, \"b%sz %%2,%%l0\", invbr);
1574 output_asm_insn (asmtext, operands);
1578 sprintf (asmtext, \"b%sz %%2,1f\;bra %%l0\;1:\", br);
1579 output_asm_insn (asmtext, operands);
1583 [(set_attr "type" "branch")
; We use 25000/50000 instead of 32768/65536 to account for slot filling,
; which is complex to track, and for inaccurate length specs.
1586 (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
1592 ;; S<cc> operations to set a register to 1/0 based on a comparison
1594 (define_expand "seq"
1595 [(match_operand:SI 0 "register_operand" "")]
1599 rtx op0 = operands[0];
1600 rtx op1 = m32r_compare_op0;
1601 rtx op2 = m32r_compare_op1;
1602 enum machine_mode mode = GET_MODE (op0);
1607 if (! register_operand (op1, mode))
1608 op1 = force_reg (mode, op1);
1610 if (TARGET_M32RX || TARGET_M32R2)
1612 if (! reg_or_zero_operand (op2, mode))
1613 op2 = force_reg (mode, op2);
1615 emit_insn (gen_seq_insn_m32rx (op0, op1, op2));
1618 if (GET_CODE (op2) == CONST_INT && INTVAL (op2) == 0)
1620 emit_insn (gen_seq_zero_insn (op0, op1));
1624 if (! reg_or_eq_int16_operand (op2, mode))
1625 op2 = force_reg (mode, op2);
1627 emit_insn (gen_seq_insn (op0, op1, op2));
1631 (define_insn "seq_insn_m32rx"
1632 [(set (match_operand:SI 0 "register_operand" "=r")
1633 (eq:SI (match_operand:SI 1 "register_operand" "%r")
1634 (match_operand:SI 2 "reg_or_zero_operand" "rP")))
1635 (clobber (reg:CC 17))]
1636 "TARGET_M32RX || TARGET_M32R2"
1638 [(set_attr "type" "multi")
1639 (set_attr "length" "6")])
1642 [(set (match_operand:SI 0 "register_operand" "")
1643 (eq:SI (match_operand:SI 1 "register_operand" "")
1644 (match_operand:SI 2 "reg_or_zero_operand" "")))
1645 (clobber (reg:CC 17))]
1646 "TARGET_M32RX || TARGET_M32R2"
1648 (eq:CC (match_dup 1)
1651 (ne:SI (reg:CC 17) (const_int 0)))]
1654 (define_insn "seq_zero_insn"
1655 [(set (match_operand:SI 0 "register_operand" "=r")
1656 (eq:SI (match_operand:SI 1 "register_operand" "r")
1658 (clobber (reg:CC 17))]
1661 [(set_attr "type" "multi")
1662 (set_attr "length" "6")])
1665 [(set (match_operand:SI 0 "register_operand" "")
1666 (eq:SI (match_operand:SI 1 "register_operand" "")
1668 (clobber (reg:CC 17))]
1673 rtx op0 = operands[0];
1674 rtx op1 = operands[1];
1677 emit_insn (gen_cmp_ltusi_insn (op1, const1_rtx));
1678 emit_insn (gen_movcc_insn (op0));
1679 operands[3] = get_insns ();
1683 (define_insn "seq_insn"
1684 [(set (match_operand:SI 0 "register_operand" "=r,r,??r,r")
1685 (eq:SI (match_operand:SI 1 "register_operand" "r,r,r,r")
1686 (match_operand:SI 2 "reg_or_eq_int16_operand" "r,r,r,PK")))
1687 (clobber (reg:CC 17))
1688 (clobber (match_scratch:SI 3 "=1,2,&r,r"))]
1691 [(set_attr "type" "multi")
1692 (set_attr "length" "8,8,10,10")])
1695 [(set (match_operand:SI 0 "register_operand" "")
1696 (eq:SI (match_operand:SI 1 "register_operand" "")
1697 (match_operand:SI 2 "reg_or_eq_int16_operand" "")))
1698 (clobber (reg:CC 17))
1699 (clobber (match_scratch:SI 3 ""))]
1700 "TARGET_M32R && reload_completed"
1704 rtx op0 = operands[0];
1705 rtx op1 = operands[1];
1706 rtx op2 = operands[2];
1707 rtx op3 = operands[3];
1708 HOST_WIDE_INT value;
1710 if (GET_CODE (op2) == REG && GET_CODE (op3) == REG
1711 && REGNO (op2) == REGNO (op3))
1718 if (GET_CODE (op1) == REG && GET_CODE (op3) == REG
1719 && REGNO (op1) != REGNO (op3))
1721 emit_move_insn (op3, op1);
1725 if (GET_CODE (op2) == CONST_INT && (value = INTVAL (op2)) != 0
1726 && CMP_INT16_P (value))
1727 emit_insn (gen_addsi3 (op3, op1, GEN_INT (-value)));
1729 emit_insn (gen_xorsi3 (op3, op1, op2));
1731 emit_insn (gen_cmp_ltusi_insn (op3, const1_rtx));
1732 emit_insn (gen_movcc_insn (op0));
1733 operands[4] = get_insns ();
1737 (define_expand "sne"
1738 [(match_operand:SI 0 "register_operand" "")]
1742 rtx op0 = operands[0];
1743 rtx op1 = m32r_compare_op0;
1744 rtx op2 = m32r_compare_op1;
1745 enum machine_mode mode = GET_MODE (op0);
1750 if (GET_CODE (op2) != CONST_INT
1751 || (INTVAL (op2) != 0 && UINT16_P (INTVAL (op2))))
1755 if (reload_completed || reload_in_progress)
1758 reg = gen_reg_rtx (SImode);
1759 emit_insn (gen_xorsi3 (reg, op1, op2));
1762 if (! register_operand (op1, mode))
1763 op1 = force_reg (mode, op1);
1765 emit_insn (gen_sne_zero_insn (op0, op1));
1772 (define_insn "sne_zero_insn"
1773 [(set (match_operand:SI 0 "register_operand" "=r")
1774 (ne:SI (match_operand:SI 1 "register_operand" "r")
1776 (clobber (reg:CC 17))
1777 (clobber (match_scratch:SI 2 "=&r"))]
1780 [(set_attr "type" "multi")
1781 (set_attr "length" "6")])
1784 [(set (match_operand:SI 0 "register_operand" "")
1785 (ne:SI (match_operand:SI 1 "register_operand" "")
1787 (clobber (reg:CC 17))
1788 (clobber (match_scratch:SI 2 ""))]
1793 (ltu:CC (match_dup 2)
1796 (ne:SI (reg:CC 17) (const_int 0)))]
1799 (define_expand "slt"
1800 [(match_operand:SI 0 "register_operand" "")]
1804 rtx op0 = operands[0];
1805 rtx op1 = m32r_compare_op0;
1806 rtx op2 = m32r_compare_op1;
1807 enum machine_mode mode = GET_MODE (op0);
1812 if (! register_operand (op1, mode))
1813 op1 = force_reg (mode, op1);
1815 if (! reg_or_int16_operand (op2, mode))
1816 op2 = force_reg (mode, op2);
1818 emit_insn (gen_slt_insn (op0, op1, op2));
1822 (define_insn "slt_insn"
1823 [(set (match_operand:SI 0 "register_operand" "=r,r")
1824 (lt:SI (match_operand:SI 1 "register_operand" "r,r")
1825 (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
1826 (clobber (reg:CC 17))]
1829 [(set_attr "type" "multi")
1830 (set_attr "length" "4,6")])
1833 [(set (match_operand:SI 0 "register_operand" "")
1834 (lt:SI (match_operand:SI 1 "register_operand" "")
1835 (match_operand:SI 2 "reg_or_int16_operand" "")))
1836 (clobber (reg:CC 17))]
1839 (lt:CC (match_dup 1)
1842 (ne:SI (reg:CC 17) (const_int 0)))]
1845 (define_expand "sle"
1846 [(match_operand:SI 0 "register_operand" "")]
1850 rtx op0 = operands[0];
1851 rtx op1 = m32r_compare_op0;
1852 rtx op2 = m32r_compare_op1;
1853 enum machine_mode mode = GET_MODE (op0);
1858 if (! register_operand (op1, mode))
1859 op1 = force_reg (mode, op1);
1861 if (GET_CODE (op2) == CONST_INT)
1863 HOST_WIDE_INT value = INTVAL (op2);
1864 if (value >= 2147483647)
1866 emit_move_insn (op0, const1_rtx);
1870 op2 = GEN_INT (value+1);
1871 if (value < -32768 || value >= 32767)
1872 op2 = force_reg (mode, op2);
1874 emit_insn (gen_slt_insn (op0, op1, op2));
1878 if (! register_operand (op2, mode))
1879 op2 = force_reg (mode, op2);
1881 emit_insn (gen_sle_insn (op0, op1, op2));
1885 (define_insn "sle_insn"
1886 [(set (match_operand:SI 0 "register_operand" "=r")
1887 (le:SI (match_operand:SI 1 "register_operand" "r")
1888 (match_operand:SI 2 "register_operand" "r")))
1889 (clobber (reg:CC 17))]
1892 [(set_attr "type" "multi")
1893 (set_attr "length" "8")])
1896 [(set (match_operand:SI 0 "register_operand" "")
1897 (le:SI (match_operand:SI 1 "register_operand" "")
1898 (match_operand:SI 2 "register_operand" "")))
1899 (clobber (reg:CC 17))]
1902 (lt:CC (match_dup 2)
1905 (ne:SI (reg:CC 17) (const_int 0)))
1907 (xor:SI (match_dup 0)
;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
;; xor reg,reg,1, since the two short insns may pack and avoid a NOP being inserted.
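;;
;; Illustrative comparison of the two forms (register number assumed):
;;         xor3 r4,r4,#1               ; one 32-bit insn
;;         addi r4,#-1 / neg r4,r4     ; two 16-bit insns that may pack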
1914 [(set (match_operand:SI 0 "register_operand" "")
1915 (le:SI (match_operand:SI 1 "register_operand" "")
1916 (match_operand:SI 2 "register_operand" "")))
1917 (clobber (reg:CC 17))]
1920 (lt:CC (match_dup 2)
1923 (ne:SI (reg:CC 17) (const_int 0)))
1925 (plus:SI (match_dup 0)
1928 (neg:SI (match_dup 0)))]
1931 (define_expand "sgt"
1932 [(match_operand:SI 0 "register_operand" "")]
1936 rtx op0 = operands[0];
1937 rtx op1 = m32r_compare_op0;
1938 rtx op2 = m32r_compare_op1;
1939 enum machine_mode mode = GET_MODE (op0);
1944 if (! register_operand (op1, mode))
1945 op1 = force_reg (mode, op1);
1947 if (! register_operand (op2, mode))
1948 op2 = force_reg (mode, op2);
1950 emit_insn (gen_slt_insn (op0, op2, op1));
1954 (define_expand "sge"
1955 [(match_operand:SI 0 "register_operand" "")]
1959 rtx op0 = operands[0];
1960 rtx op1 = m32r_compare_op0;
1961 rtx op2 = m32r_compare_op1;
1962 enum machine_mode mode = GET_MODE (op0);
1967 if (! register_operand (op1, mode))
1968 op1 = force_reg (mode, op1);
1970 if (! reg_or_int16_operand (op2, mode))
1971 op2 = force_reg (mode, op2);
1973 emit_insn (gen_sge_insn (op0, op1, op2));
1977 (define_insn "sge_insn"
1978 [(set (match_operand:SI 0 "register_operand" "=r,r")
1979 (ge:SI (match_operand:SI 1 "register_operand" "r,r")
1980 (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
1981 (clobber (reg:CC 17))]
1984 [(set_attr "type" "multi")
1985 (set_attr "length" "8,10")])
1988 [(set (match_operand:SI 0 "register_operand" "")
1989 (ge:SI (match_operand:SI 1 "register_operand" "")
1990 (match_operand:SI 2 "reg_or_int16_operand" "")))
1991 (clobber (reg:CC 17))]
1994 (lt:CC (match_dup 1)
1997 (ne:SI (reg:CC 17) (const_int 0)))
1999 (xor:SI (match_dup 0)
;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
;; xor reg,reg,1, since the two short insns may pack and avoid a NOP being inserted.
2006 [(set (match_operand:SI 0 "register_operand" "")
2007 (ge:SI (match_operand:SI 1 "register_operand" "")
2008 (match_operand:SI 2 "reg_or_int16_operand" "")))
2009 (clobber (reg:CC 17))]
2012 (lt:CC (match_dup 1)
2015 (ne:SI (reg:CC 17) (const_int 0)))
2017 (plus:SI (match_dup 0)
2020 (neg:SI (match_dup 0)))]
2023 (define_expand "sltu"
2024 [(match_operand:SI 0 "register_operand" "")]
2028 rtx op0 = operands[0];
2029 rtx op1 = m32r_compare_op0;
2030 rtx op2 = m32r_compare_op1;
2031 enum machine_mode mode = GET_MODE (op0);
2036 if (! register_operand (op1, mode))
2037 op1 = force_reg (mode, op1);
2039 if (! reg_or_int16_operand (op2, mode))
2040 op2 = force_reg (mode, op2);
2042 emit_insn (gen_sltu_insn (op0, op1, op2));
2046 (define_insn "sltu_insn"
2047 [(set (match_operand:SI 0 "register_operand" "=r,r")
2048 (ltu:SI (match_operand:SI 1 "register_operand" "r,r")
2049 (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
2050 (clobber (reg:CC 17))]
2053 [(set_attr "type" "multi")
2054 (set_attr "length" "6,8")])
2057 [(set (match_operand:SI 0 "register_operand" "")
2058 (ltu:SI (match_operand:SI 1 "register_operand" "")
2059 (match_operand:SI 2 "reg_or_int16_operand" "")))
2060 (clobber (reg:CC 17))]
2063 (ltu:CC (match_dup 1)
2066 (ne:SI (reg:CC 17) (const_int 0)))]
2069 (define_expand "sleu"
2070 [(match_operand:SI 0 "register_operand" "")]
2074 rtx op0 = operands[0];
2075 rtx op1 = m32r_compare_op0;
2076 rtx op2 = m32r_compare_op1;
2077 enum machine_mode mode = GET_MODE (op0);
2082 if (GET_CODE (op2) == CONST_INT)
2084 HOST_WIDE_INT value = INTVAL (op2);
2085 if (value >= 2147483647)
2087 emit_move_insn (op0, const1_rtx);
2091 op2 = GEN_INT (value+1);
2092 if (value < 0 || value >= 32767)
2093 op2 = force_reg (mode, op2);
2095 emit_insn (gen_sltu_insn (op0, op1, op2));
2099 if (! register_operand (op2, mode))
2100 op2 = force_reg (mode, op2);
2102 emit_insn (gen_sleu_insn (op0, op1, op2));
2106 (define_insn "sleu_insn"
2107 [(set (match_operand:SI 0 "register_operand" "=r")
2108 (leu:SI (match_operand:SI 1 "register_operand" "r")
2109 (match_operand:SI 2 "register_operand" "r")))
2110 (clobber (reg:CC 17))]
2113 [(set_attr "type" "multi")
2114 (set_attr "length" "8")])
2117 [(set (match_operand:SI 0 "register_operand" "")
2118 (leu:SI (match_operand:SI 1 "register_operand" "")
2119 (match_operand:SI 2 "register_operand" "")))
2120 (clobber (reg:CC 17))]
2123 (ltu:CC (match_dup 2)
2126 (ne:SI (reg:CC 17) (const_int 0)))
2128 (xor:SI (match_dup 0)
;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
;; xor reg,reg,1, since the two short insns may pack and avoid a NOP being inserted.
2135 [(set (match_operand:SI 0 "register_operand" "")
2136 (leu:SI (match_operand:SI 1 "register_operand" "")
2137 (match_operand:SI 2 "register_operand" "")))
2138 (clobber (reg:CC 17))]
2141 (ltu:CC (match_dup 2)
2144 (ne:SI (reg:CC 17) (const_int 0)))
2146 (plus:SI (match_dup 0)
2149 (neg:SI (match_dup 0)))]
2152 (define_expand "sgtu"
2153 [(match_operand:SI 0 "register_operand" "")]
2157 rtx op0 = operands[0];
2158 rtx op1 = m32r_compare_op0;
2159 rtx op2 = m32r_compare_op1;
2160 enum machine_mode mode = GET_MODE (op0);
2165 if (! register_operand (op1, mode))
2166 op1 = force_reg (mode, op1);
2168 if (! register_operand (op2, mode))
2169 op2 = force_reg (mode, op2);
2171 emit_insn (gen_sltu_insn (op0, op2, op1));
2175 (define_expand "sgeu"
2176 [(match_operand:SI 0 "register_operand" "")]
2180 rtx op0 = operands[0];
2181 rtx op1 = m32r_compare_op0;
2182 rtx op2 = m32r_compare_op1;
2183 enum machine_mode mode = GET_MODE (op0);
2188 if (! register_operand (op1, mode))
2189 op1 = force_reg (mode, op1);
2191 if (! reg_or_int16_operand (op2, mode))
2192 op2 = force_reg (mode, op2);
2194 emit_insn (gen_sgeu_insn (op0, op1, op2));
2198 (define_insn "sgeu_insn"
2199 [(set (match_operand:SI 0 "register_operand" "=r,r")
2200 (geu:SI (match_operand:SI 1 "register_operand" "r,r")
2201 (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
2202 (clobber (reg:CC 17))]
2205 [(set_attr "type" "multi")
2206 (set_attr "length" "8,10")])
2209 [(set (match_operand:SI 0 "register_operand" "")
2210 (geu:SI (match_operand:SI 1 "register_operand" "")
2211 (match_operand:SI 2 "reg_or_int16_operand" "")))
2212 (clobber (reg:CC 17))]
2215 (ltu:CC (match_dup 1)
2218 (ne:SI (reg:CC 17) (const_int 0)))
2220 (xor:SI (match_dup 0)
;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
;; xor reg,reg,1, since the two short insns may pack and avoid a NOP being inserted.
2227 [(set (match_operand:SI 0 "register_operand" "")
2228 (geu:SI (match_operand:SI 1 "register_operand" "")
2229 (match_operand:SI 2 "reg_or_int16_operand" "")))
2230 (clobber (reg:CC 17))]
2233 (ltu:CC (match_dup 1)
2236 (ne:SI (reg:CC 17) (const_int 0)))
2238 (plus:SI (match_dup 0)
2241 (neg:SI (match_dup 0)))]
2244 (define_insn "movcc_insn"
2245 [(set (match_operand:SI 0 "register_operand" "=r")
2246 (ne:SI (reg:CC 17) (const_int 0)))]
2249 [(set_attr "type" "misc")
2250 (set_attr "length" "2")])
2253 ;; Unconditional and other jump instructions.
2256 [(set (pc) (label_ref (match_operand 0 "" "")))]
2259 [(set_attr "type" "uncond_branch")
2260 (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
2266 (define_insn "indirect_jump"
2267 [(set (pc) (match_operand:SI 0 "address_operand" "p"))]
2270 [(set_attr "type" "uncond_branch")
2271 (set_attr "length" "2")])
2273 (define_insn "return"
2277 [(set_attr "type" "uncond_branch")
2278 (set_attr "length" "2")])
2280 (define_expand "tablejump"
2281 [(parallel [(set (pc) (match_operand 0 "register_operand" "r"))
2282 (use (label_ref (match_operand 1 "" "")))])]
2286 /* In pic mode, our address differences are against the base of the
2287 table. Add that base value back in; CSE ought to be able to combine
2288 the two address loads. */
2293 tmp = gen_rtx_LABEL_REF (Pmode, operands[1]);
2295 tmp = gen_rtx_PLUS (Pmode, tmp2, tmp);
2296 operands[0] = memory_address (Pmode, tmp);
2300 (define_insn "*tablejump_insn"
2301 [(set (pc) (match_operand:SI 0 "address_operand" "p"))
2302 (use (label_ref (match_operand 1 "" "")))]
2305 [(set_attr "type" "uncond_branch")
2306 (set_attr "length" "2")])
2308 (define_expand "call"
2309 ;; operands[1] is stack_size_rtx
2310 ;; operands[2] is next_arg_register
2311 [(parallel [(call (match_operand:SI 0 "call_operand" "")
2312 (match_operand 1 "" ""))
2313 (clobber (reg:SI 14))])]
2318 current_function_uses_pic_offset_table = 1;
2321 (define_insn "*call_via_reg"
2322 [(call (mem:SI (match_operand:SI 0 "register_operand" "r"))
2323 (match_operand 1 "" ""))
2324 (clobber (reg:SI 14))]
2327 [(set_attr "type" "call")
2328 (set_attr "length" "2")])
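;; A call through a register is a single 2-byte jump-and-link (presumably
;; "jl %0"); the return address is written to r14, which is why every call
;; pattern clobbers hard register 14.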
2330 (define_insn "*call_via_label"
2331 [(call (mem:SI (match_operand:SI 0 "call_address_operand" ""))
2332 (match_operand 1 "" ""))
2333 (clobber (reg:SI 14))]
2337 int call26_p = call26_operand (operands[0], FUNCTION_MODE);
/* We may not be able to reach the callee with a `bl' insn, so punt and
   emit the longer seth/add3/jl sequence instead.
   We do this here, rather than doing a force_reg in the define_expand,
   so these insns won't be separated, say by scheduling, which also keeps
   the length calculation for this pattern simple.  */
2346 return \"seth r14,%T0\;add3 r14,r14,%B0\;jl r14\";
2351 [(set_attr "type" "call")
2352 (set (attr "length")
2353 (if_then_else (eq (symbol_ref "call26_operand (operands[0], FUNCTION_MODE)")
2355 (const_int 12) ; 10 + 2 for nop filler
2356 ; The return address must be on a 4 byte boundary so
2357 ; there's no point in using a value of 2 here. A 2 byte
2358 ; insn may go in the left slot but we currently can't
2359 ; use such knowledge.
2362 (define_expand "call_value"
2363 ;; operand 2 is stack_size_rtx
2364 ;; operand 3 is next_arg_register
2365 [(parallel [(set (match_operand 0 "register_operand" "=r")
2366 (call (match_operand:SI 1 "call_operand" "")
2367 (match_operand 2 "" "")))
2368 (clobber (reg:SI 14))])]
2373 current_function_uses_pic_offset_table = 1;
2376 (define_insn "*call_value_via_reg"
2377 [(set (match_operand 0 "register_operand" "=r")
2378 (call (mem:SI (match_operand:SI 1 "register_operand" "r"))
2379 (match_operand 2 "" "")))
2380 (clobber (reg:SI 14))]
2383 [(set_attr "type" "call")
2384 (set_attr "length" "2")])
2386 (define_insn "*call_value_via_label"
2387 [(set (match_operand 0 "register_operand" "=r")
2388 (call (mem:SI (match_operand:SI 1 "call_address_operand" ""))
2389 (match_operand 2 "" "")))
2390 (clobber (reg:SI 14))]
2394 int call26_p = call26_operand (operands[1], FUNCTION_MODE);
2397 current_function_uses_pic_offset_table = 1;
/* We may not be able to reach the callee with a `bl' insn, so punt and
   emit the longer seth/add3/jl sequence instead.
   We do this here, rather than doing a force_reg in the define_expand,
   so these insns won't be separated, say by scheduling, which also keeps
   the length calculation for this pattern simple.  */
2406 return \"seth r14,%T1\;add3 r14,r14,%B1\;jl r14\";
2411 [(set_attr "type" "call")
2412 (set (attr "length")
2413 (if_then_else (eq (symbol_ref "call26_operand (operands[1], FUNCTION_MODE)")
2415 (const_int 12) ; 10 + 2 for nop filler
2416 ; The return address must be on a 4 byte boundary so
2417 ; there's no point in using a value of 2 here. A 2 byte
2418 ; insn may go in the left slot but we currently can't
2419 ; use such knowledge.
(define_insn "nop"
  [(const_int 0)]
  ""
  "nop"
  [(set_attr "type" "int2")
   (set_attr "length" "2")])
2429 ;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
2430 ;; all of memory. This blocks insns from being moved across this point.
2432 (define_insn "blockage"
2433 [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
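;; The movsicc expander below emits a blockage right after generating its
;; comparison, presumably so that nothing can be scheduled in between the
;; insn that sets the condition bit and the conditional move that reads it.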
2437 ;; Special pattern to flush the icache.
2439 (define_insn "flush_icache"
2440 [(unspec_volatile [(match_operand 0 "memory_operand" "m")]
2441 UNSPECV_FLUSH_ICACHE)
2442 (match_operand 1 "" "")
2443 (clobber (reg:SI 17))]
2445 "* return \"trap %#%1 ; flush-icache\";"
2446 [(set_attr "type" "int4")
2447 (set_attr "length" "4")])
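;; The flush is done by a trap whose number is taken from operand 1; the
;; memory operand only describes the region being flushed, and the condition
;; bit (reg 17) is clobbered.  This is typically needed after writing out
;; code at run time, e.g. a trampoline.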
2449 ;; Speed up fabs and provide correct sign handling for -0
2451 (define_insn "absdf2"
2452 [(set (match_operand:DF 0 "register_operand" "=r")
2453 (abs:DF (match_operand:DF 1 "register_operand" "0")))]
2456 [(set_attr "type" "multi")
2457 (set_attr "length" "4")])
(define_split
  [(set (match_operand:DF 0 "register_operand" "")
2461 (abs:DF (match_operand:DF 1 "register_operand" "")))]
2464 (ashift:SI (match_dup 2)
2467 (lshiftrt:SI (match_dup 2)
2469 "operands[2] = gen_highpart (SImode, operands[0]);")
2471 (define_insn "abssf2"
2472 [(set (match_operand:SF 0 "register_operand" "=r")
2473 (abs:SF (match_operand:SF 1 "register_operand" "0")))]
2476 [(set_attr "type" "multi")
2477 (set_attr "length" "4")])
(define_split
  [(set (match_operand:SF 0 "register_operand" "")
2481 (abs:SF (match_operand:SF 1 "register_operand" "")))]
2484 (ashift:SI (match_dup 2)
2487 (lshiftrt:SI (match_dup 2)
2489 "operands[2] = gen_highpart (SImode, operands[0]);")
2491 ;; Conditional move instructions
2492 ;; Based on those done for the d10v
2494 (define_expand "movsicc"
2496 (set (match_operand:SI 0 "register_operand" "r")
2497 (if_then_else:SI (match_operand 1 "" "")
2498 (match_operand:SI 2 "conditional_move_operand" "O")
2499 (match_operand:SI 3 "conditional_move_operand" "O")
2506 if (! zero_and_one (operands [2], operands [3]))
2509 /* Generate the comparison that will set the carry flag. */
2510 operands[1] = gen_compare (GET_CODE (operands[1]), m32r_compare_op0,
2511 m32r_compare_op1, TRUE);
2513 /* See other movsicc pattern below for reason why. */
2514 emit_insn (gen_blockage ());
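;; movsicc only handles selecting between the constants 0 and 1 (enforced by
;; zero_and_one and by the "O" constraints); for anything else the expander
;; fails and the usual compare-and-branch code is generated instead.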
2517 ;; Generate the conditional instructions based on how the carry flag is examined.
2518 (define_insn "*movsicc_internal"
2519 [(set (match_operand:SI 0 "register_operand" "=r")
2520 (if_then_else:SI (match_operand 1 "carry_compare_operand" "")
2521 (match_operand:SI 2 "conditional_move_operand" "O")
2522 (match_operand:SI 3 "conditional_move_operand" "O")
2525 "zero_and_one (operands [2], operands[3])"
2526 "* return emit_cond_move (operands, insn);"
2527 [(set_attr "type" "multi")
2528 (set_attr "length" "8")
2533 ;; Block moves, see m32r.c for more details.
2534 ;; Argument 0 is the destination
2535 ;; Argument 1 is the source
2536 ;; Argument 2 is the length
2537 ;; Argument 3 is the alignment
2539 (define_expand "movmemsi"
2540 [(parallel [(set (match_operand:BLK 0 "general_operand" "")
2541 (match_operand:BLK 1 "general_operand" ""))
2542 (use (match_operand:SI 2 "immediate_operand" ""))
2543 (use (match_operand:SI 3 "immediate_operand" ""))])]
2547 if (operands[0]) /* Avoid unused code messages. */
2549 if (m32r_expand_block_move (operands))
2556 ;; Insn generated by block moves
2558 (define_insn "movmemsi_internal"
2559 [(set (mem:BLK (match_operand:SI 0 "register_operand" "r")) ;; destination
2560 (mem:BLK (match_operand:SI 1 "register_operand" "r"))) ;; source
2561 (use (match_operand:SI 2 "m32r_block_immediate_operand" "J"));; # bytes to move
2562 (set (match_operand:SI 3 "register_operand" "=0")
2563 (plus:SI (minus (match_dup 2) (const_int 4))
2565 (set (match_operand:SI 4 "register_operand" "=1")
2566 (plus:SI (match_dup 1)
2568 (clobber (match_scratch:SI 5 "=&r")) ;; temp1
2569 (clobber (match_scratch:SI 6 "=&r"))] ;; temp2
2571 "* m32r_output_block_move (insn, operands); return \"\"; "
2572 [(set_attr "type" "store8")
2573 (set_attr "length" "72")]) ;; Maximum
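;; Operand 2 is the byte count, operands 3 and 4 are the updated destination
;; and source pointers (tied to operands 0 and 1), and the two scratch
;; registers hold the data in flight.  The 72-byte length is the worst case
;; for the sequence m32r_output_block_move emits.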
2577 /* When generating pic, we need to load the symbol offset into a register.
2578 So that the optimizer does not confuse this with a normal symbol load
2579 we use an unspec. The offset will be loaded from a constant pool entry,
2580 since that is the only type of relocation we can use. */
2582 (define_insn "pic_load_addr"
2583 [(set (match_operand:SI 0 "register_operand" "=r")
2584 (unspec:SI [(match_operand 1 "" "")] UNSPEC_PIC_LOAD_ADDR))]
2587 [(set_attr "type" "int4")])
2589 (define_insn "gotoff_load_addr"
2590 [(set (match_operand:SI 0 "register_operand" "=r")
2591 (unspec:SI [(match_operand 1 "" "")] UNSPEC_GOTOFF))]
2593 "seth %0, %#shigh(%1@GOTOFF)\;add3 %0, %0, low(%1@GOTOFF)"
2594 [(set_attr "type" "int4")
2595 (set_attr "length" "8")])
2597 ;; Load program counter insns.
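;; get_pc below obtains the PIC base address: "bl.s .+4" does not really
;; transfer control anywhere else, it just leaves the address of the
;; following code in lr, and the constant part is then added in, either with
;; ld24 (when operand 2 is nonzero) or with a seth/add3 pair.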
2599 (define_insn "get_pc"
2600 [(clobber (reg:SI 14))
2601 (set (match_operand 0 "register_operand" "=r")
2602 (unspec [(match_operand 1 "" "")] UNSPEC_GET_PC))
2603 (use (match_operand:SI 2 "immediate_operand" ""))]
2607 if (INTVAL(operands[2]))
2608 return \"bl.s .+4\;ld24 %0,%#%1\;add %0,lr\";
2610 return \"bl.s .+4\;seth %0,%#shigh(%1)\;add3 %0,%0,%#low(%1+4)\;add %0,lr\";}"
2611 [(set (attr "length") (if_then_else (ne (match_dup 2) (const_int 0))
2615 (define_expand "builtin_setjmp_receiver"
2616 [(label_ref (match_operand 0 "" ""))]
2620 m32r_load_pic_register ();
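;; When a longjmp returns to a builtin setjmp receiver, the PIC base register
;; has to be reconstructed; m32r_load_pic_register does that.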