1 ;; Machine description for the Motorola MCore
2 ;; Copyright (C) 1993, 1999, 2000, 2004, 2005, 2007
3 ;; Free Software Foundation, Inc.
4 ;; Contributed by Motorola.
6 ;; This file is part of GCC.
8 ;; GCC is free software; you can redistribute it and/or modify
9 ;; it under the terms of the GNU General Public License as published by
10 ;; the Free Software Foundation; either version 2, or (at your option)
13 ;; GCC is distributed in the hope that it will be useful,
14 ;; but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 ;; GNU General Public License for more details.
18 ;; You should have received a copy of the GNU General Public License
19 ;; along with GCC; see the file COPYING. If not, write to
20 ;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
21 ;; Boston, MA 02110-1301, USA.
23 ;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
27 ;; -------------------------------------------------------------------------
29 ;; -------------------------------------------------------------------------
33 (define_attr "type" "brcond,branch,jmp,load,store,move,alu,shift"
36 ;; If a branch destination is within -2048..2047 bytes away from the
37 ;; instruction it can be 2 bytes long. All other conditional branches
38 ;; are 10 bytes long, and all other unconditional branches are 8 bytes.
40 ;; the assembler handles the long-branch span case for us if we use
41 ;; the "jb*" mnemonics for jumps/branches. This pushes the span
42 ;; calculations and the literal table placement into the assembler,
43 ;; where their interactions can be managed in a single place.
45 ;; All MCORE instructions are two bytes long.
47 (define_attr "length" "" (const_int 2))
49 ;; Scheduling. We only model a simple load latency.
50 (define_insn_reservation "any_insn" 1
51 (eq_attr "type" "!load")
53 (define_insn_reservation "memory" 2
54 (eq_attr "type" "load")
57 (include "predicates.md")
59 ;; -------------------------------------------------------------------------
61 ;; -------------------------------------------------------------------------
65 (sign_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
67 (match_operand:SI 1 "mcore_literal_K_operand" "K")))]
70 [(set_attr "type" "shift")])
74 (zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
76 (match_operand:SI 1 "mcore_literal_K_operand" "K")))]
79 [(set_attr "type" "shift")])
81 ;;; This is created by combine.
84 (ne:CC (zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
86 (match_operand:SI 1 "mcore_literal_K_operand" "K"))
90 [(set_attr "type" "shift")])
93 ;; Created by combine from conditional patterns below (see sextb/btsti rx,31)
97 (ne:CC (lshiftrt:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
100 "GET_CODE(operands[0]) == SUBREG &&
101 GET_MODE(SUBREG_REG(operands[0])) == QImode"
103 [(set_attr "type" "shift")])
107 (ne:CC (lshiftrt:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
110 "GET_CODE(operands[0]) == SUBREG &&
111 GET_MODE(SUBREG_REG(operands[0])) == HImode"
113 [(set_attr "type" "shift")])
117 (if_then_else (ne (eq:CC (zero_extract:SI
118 (match_operand:SI 0 "mcore_arith_reg_operand" "")
120 (match_operand:SI 1 "mcore_literal_K_operand" ""))
123 (label_ref (match_operand 2 "" ""))
127 (zero_extract:SI (match_dup 0) (const_int 1) (match_dup 1)))
128 (set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
129 (label_ref (match_dup 2))
135 (if_then_else (eq (ne:CC (zero_extract:SI
136 (match_operand:SI 0 "mcore_arith_reg_operand" "")
138 (match_operand:SI 1 "mcore_literal_K_operand" ""))
141 (label_ref (match_operand 2 "" ""))
145 (zero_extract:SI (match_dup 0) (const_int 1) (match_dup 1)))
146 (set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
147 (label_ref (match_dup 2))
151 ;; XXX - disabled by nickc because it fails on libiberty/fnmatch.c
153 ;; ; Experimental - relax immediates for and, andn, or, and tst to allow
154 ;; ; any immediate value (or an immediate at all -- or, andn, & tst).
155 ;; ; This is done to allow bit field masks to fold together in combine.
156 ;; ; The reload phase will force the immediate into a register at the
157 ;; ; very end. This helps in some cases, but hurts in others: we'd
158 ;; ; really like to cse these immediates. However, there is a phase
159 ;; ; ordering problem here. cse picks up individual masks and cse's
160 ;; ; those, but not folded masks (cse happens before combine). It's
161 ;; ; not clear what the best solution is because we really want cse
162 ;; ; before combine (leaving the bit field masks alone). To pick up
163 ;; ; relaxed immediates use -mrelax-immediates. It might take some
164 ;; ; experimenting to see which does better (i.e. regular imms vs.
165 ;; ; arbitrary imms) for a particular code. BRC
169 ;; (ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
170 ;; (match_operand:SI 1 "mcore_arith_any_imm_operand" "rI"))
172 ;; "TARGET_RELAX_IMM"
177 ;; (ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
178 ;; (match_operand:SI 1 "mcore_arith_M_operand" "r"))
180 ;; "!TARGET_RELAX_IMM"
185 (ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
186 (match_operand:SI 1 "mcore_arith_M_operand" "r"))
195 (ne:CC (ne:SI (leu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "")
196 (match_operand:SI 1 "mcore_arith_reg_operand" ""))
199 (clobber (match_operand:CC 2 "mcore_arith_reg_operand" ""))])]
201 [(set (reg:CC 17) (ne:SI (match_dup 0) (const_int 0)))
202 (set (reg:CC 17) (leu:CC (match_dup 0) (match_dup 1)))])
204 ;; -------------------------------------------------------------------------
205 ;; SImode signed integer comparisons
206 ;; -------------------------------------------------------------------------
208 (define_insn "decne_t"
209 [(set (reg:CC 17) (ne:CC (plus:SI (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
213 (plus:SI (match_dup 0)
218 ;; The combiner seems to prefer the following to the former.
221 [(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
224 (plus:SI (match_dup 0)
229 (define_insn "cmpnesi_t"
230 [(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
231 (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
235 (define_insn "cmpneisi_t"
236 [(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
237 (match_operand:SI 1 "mcore_arith_K_operand" "K")))]
241 (define_insn "cmpgtsi_t"
242 [(set (reg:CC 17) (gt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
243 (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
248 [(set (reg:CC 17) (gt:CC (plus:SI
249 (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
252 (set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))]
256 (define_insn "cmpltsi_t"
257 [(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
258 (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
263 (define_insn "cmpltisi_t"
264 [(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
265 (match_operand:SI 1 "mcore_arith_J_operand" "J")))]
271 [(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
277 [(set (reg:CC 17) (lt:CC (plus:SI
278 (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
281 (set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))]
285 ;; -------------------------------------------------------------------------
286 ;; SImode unsigned integer comparisons
287 ;; -------------------------------------------------------------------------
289 (define_insn "cmpgeusi_t"
290 [(set (reg:CC 17) (geu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
291 (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
295 (define_insn "cmpgeusi_0"
296 [(set (reg:CC 17) (geu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
301 (define_insn "cmpleusi_t"
302 [(set (reg:CC 17) (leu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
303 (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
307 ;; We save the compare operands in the cmpxx patterns and use them when
308 ;; we generate the branch.
310 ;; We accept constants here, in case we can modify them to ones which
311 ;; are more efficient to load. E.g. change 'x <= 62' to 'x < 63'.
313 (define_expand "cmpsi"
314 [(set (reg:CC 17) (compare:CC (match_operand:SI 0 "mcore_compare_operand" "")
315 (match_operand:SI 1 "nonmemory_operand" "")))]
318 { arch_compare_op0 = operands[0];
319 arch_compare_op1 = operands[1];
323 ;; -------------------------------------------------------------------------
324 ;; Logical operations
325 ;; -------------------------------------------------------------------------
327 ;; Logical AND clearing a single bit. andsi3 knows that we have this
328 ;; pattern and allows the constant literal pass through.
331 ;; RBE 2/97: don't need this pattern any longer...
332 ;; RBE: I don't think we need both "S" and exact_log2() clauses.
334 ;; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
335 ;; (and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
336 ;; (match_operand:SI 2 "const_int_operand" "S")))]
337 ;; "mcore_arith_S_operand (operands[2])"
341 (define_insn "andnsi3"
342 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
343 (and:SI (not:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))
344 (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
348 (define_expand "andsi3"
349 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
350 (and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
351 (match_operand:SI 2 "nonmemory_operand" "")))]
355 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0
356 && ! mcore_arith_S_operand (operands[2]))
358 HOST_WIDE_INT not_value = ~ INTVAL (operands[2]);
360 if ( CONST_OK_FOR_I (not_value)
361 || CONST_OK_FOR_M (not_value)
362 || CONST_OK_FOR_N (not_value))
364 operands[2] = copy_to_mode_reg (SImode, GEN_INT (not_value));
365 emit_insn (gen_andnsi3 (operands[0], operands[2], operands[1]));
370 if (! mcore_arith_K_S_operand (operands[2], SImode))
371 operands[2] = copy_to_mode_reg (SImode, operands[2]);
375 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
376 (and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r,0")
377 (match_operand:SI 2 "mcore_arith_any_imm_operand" "r,K,0,S")))]
381 switch (which_alternative)
383 case 0: return \"and %0,%2\";
384 case 1: return \"andi %0,%2\";
385 case 2: return \"and %0,%1\";
386 /* case -1: return \"bclri %0,%Q2\"; will not happen */
387 case 3: return mcore_output_bclri (operands[0], INTVAL (operands[2]));
388 default: gcc_unreachable ();
392 ;; This was the old "S" which was "!(2^n)" */
393 ;; case -1: return \"bclri %0,%Q2\"; will not happen */
396 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
397 (and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r,0")
398 (match_operand:SI 2 "mcore_arith_K_S_operand" "r,K,0,S")))]
402 switch (which_alternative)
404 case 0: return \"and %0,%2\";
405 case 1: return \"andi %0,%2\";
406 case 2: return \"and %0,%1\";
407 case 3: return mcore_output_bclri (operands[0], INTVAL (operands[2]));
408 default: gcc_unreachable ();
412 ;(define_insn "iorsi3"
413 ; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
414 ; (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
415 ; (match_operand:SI 2 "mcore_arith_reg_operand" "r")))]
419 ; need an expand to resolve ambiguity betw. the two iors below.
420 (define_expand "iorsi3"
421 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
422 (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
423 (match_operand:SI 2 "nonmemory_operand" "")))]
427 if (! mcore_arith_M_operand (operands[2], SImode))
428 operands[2] = copy_to_mode_reg (SImode, operands[2]);
432 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
433 (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
434 (match_operand:SI 2 "mcore_arith_any_imm_operand" "r,M,T")))]
438 switch (which_alternative)
440 case 0: return \"or %0,%2\";
441 case 1: return \"bseti %0,%P2\";
442 case 2: return mcore_output_bseti (operands[0], INTVAL (operands[2]));
443 default: gcc_unreachable ();
448 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
449 (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
450 (match_operand:SI 2 "mcore_arith_M_operand" "r,M,T")))]
454 switch (which_alternative)
456 case 0: return \"or %0,%2\";
457 case 1: return \"bseti %0,%P2\";
458 case 2: return mcore_output_bseti (operands[0], INTVAL (operands[2]));
459 default: gcc_unreachable ();
464 ; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
465 ; (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
466 ; (match_operand:SI 2 "const_int_operand" "M")))]
467 ; "exact_log2 (INTVAL (operands[2])) >= 0"
471 ; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
472 ; (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
473 ; (match_operand:SI 2 "const_int_operand" "i")))]
474 ; "mcore_num_ones (INTVAL (operands[2])) < 3"
475 ; "* return mcore_output_bseti (operands[0], INTVAL (operands[2]));")
477 (define_insn "xorsi3"
478 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
479 (xor:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
480 (match_operand:SI 2 "mcore_arith_reg_operand" "r")))]
484 ; these patterns give better code than GCC invents if
485 ; left to its own devices
487 (define_insn "anddi3"
488 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
489 (and:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
490 (match_operand:DI 2 "mcore_arith_reg_operand" "r")))]
492 "and %0,%2\;and %R0,%R2"
493 [(set_attr "length" "4")])
495 (define_insn "iordi3"
496 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
497 (ior:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
498 (match_operand:DI 2 "mcore_arith_reg_operand" "r")))]
500 "or %0,%2\;or %R0,%R2"
501 [(set_attr "length" "4")])
503 (define_insn "xordi3"
504 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
505 (xor:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
506 (match_operand:DI 2 "mcore_arith_reg_operand" "r")))]
508 "xor %0,%2\;xor %R0,%R2"
509 [(set_attr "length" "4")])
511 ;; -------------------------------------------------------------------------
512 ;; Shifts and rotates
513 ;; -------------------------------------------------------------------------
515 ;; Only allow these if the shift count is a convenient constant.
516 (define_expand "rotlsi3"
517 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
518 (rotate:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
519 (match_operand:SI 2 "nonmemory_operand" "")))]
521 "if (! mcore_literal_K_operand (operands[2], SImode))
525 ;; We can only do constant rotates, which is what this pattern provides.
526 ;; The combiner will put it together for us when we do:
527 ;; (x << N) | (x >> (32 - N))
529 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
530 (rotate:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
531 (match_operand:SI 2 "mcore_literal_K_operand" "K")))]
534 [(set_attr "type" "shift")])
536 (define_insn "ashlsi3"
537 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
538 (ashift:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0")
539 (match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))]
544 [(set_attr "type" "shift")])
547 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
548 (ashift:SI (const_int 1)
549 (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
552 [(set_attr "type" "shift")])
554 (define_insn "ashrsi3"
555 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
556 (ashiftrt:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0")
557 (match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))]
562 [(set_attr "type" "shift")])
564 (define_insn "lshrsi3"
565 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
566 (lshiftrt:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0")
567 (match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))]
572 [(set_attr "type" "shift")])
574 ;(define_expand "ashldi3"
575 ; [(parallel[(set (match_operand:DI 0 "mcore_arith_reg_operand" "")
576 ; (ashift:DI (match_operand:DI 1 "mcore_arith_reg_operand" "")
577 ; (match_operand:DI 2 "immediate_operand" "")))
579 ; (clobber (reg:CC 17))])]
584 ; if (GET_CODE (operands[2]) != CONST_INT
585 ; || INTVAL (operands[2]) != 1)
590 ; [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
591 ; (ashift:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
593 ; (clobber (reg:CC 17))]
595 ; "lsli %R0,0\;rotli %0,0"
596 ; [(set_attr "length" "4") (set_attr "type" "shift")])
598 ;; -------------------------------------------------------------------------
599 ;; Index instructions
600 ;; -------------------------------------------------------------------------
601 ;; The second of each set of patterns is borrowed from the alpha.md file.
602 ;; These variants of the above insns can occur if the second operand
603 ;; is the frame pointer. This is a kludge, but there doesn't
604 ;; seem to be a way around it. Only recognize them while reloading.
606 ;; We must use reload_operand for some operands in case frame pointer
607 ;; elimination put a MEM with invalid address there. Otherwise,
608 ;; the result of the substitution will not match this pattern, and reload
609 ;; will not be able to correctly fix the result.
611 ;; indexing longlongs or doubles (8 bytes)
613 (define_insn "indexdi_t"
614 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
615 (plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
617 (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
620 if (! mcore_is_same_reg (operands[1], operands[2]))
622 output_asm_insn (\"ixw\\t%0,%1\", operands);
623 output_asm_insn (\"ixw\\t%0,%1\", operands);
627 output_asm_insn (\"ixh\\t%0,%1\", operands);
628 output_asm_insn (\"ixh\\t%0,%1\", operands);
632 ;; if operands[1] == operands[2], the first option above is wrong! -- dac
633 ;; was this... -- dac
634 ;; ixw %0,%1\;ixw %0,%1"
636 [(set_attr "length" "4")])
639 [(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r")
640 (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r")
642 (match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0"))
643 (match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))]
646 ixw %0,%1\;ixw %0,%1\;addu %0,%3
647 ixw %0,%1\;ixw %0,%1\;addi %0,%3
648 ixw %0,%1\;ixw %0,%1\;subi %0,%M3"
649 [(set_attr "length" "6")])
651 ;; indexing longs (4 bytes)
653 (define_insn "indexsi_t"
654 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
655 (plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
657 (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
662 [(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r")
663 (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r")
665 (match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0"))
666 (match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))]
669 ixw %0,%1\;addu %0,%3
670 ixw %0,%1\;addi %0,%3
671 ixw %0,%1\;subi %0,%M3"
672 [(set_attr "length" "4")])
674 ;; indexing shorts (2 bytes)
676 (define_insn "indexhi_t"
677 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
678 (plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
680 (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
685 [(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r")
686 (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r")
688 (match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0"))
689 (match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))]
692 ixh %0,%1\;addu %0,%3
693 ixh %0,%1\;addi %0,%3
694 ixh %0,%1\;subi %0,%M3"
695 [(set_attr "length" "4")])
698 ;; Other sizes may be handy for indexing.
699 ;; the tradeoffs to consider when adding these are
700 ;; code size, execution time [vs. mul it is easy to win],
701 ;; and register pressure -- these patterns don't use an extra
702 ;; register to build the offset from the base
703 ;; and whether the compiler will not come up with some other idiom.
706 ;; -------------------------------------------------------------------------
707 ;; Addition, Subtraction instructions
708 ;; -------------------------------------------------------------------------
710 (define_expand "addsi3"
711 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
712 (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
713 (match_operand:SI 2 "nonmemory_operand" "")))]
717 extern int flag_omit_frame_pointer;
719 /* If this is an add to the frame pointer, then accept it as is so
720 that we can later fold in the fp/sp offset from frame pointer
722 if (flag_omit_frame_pointer
723 && GET_CODE (operands[1]) == REG
724 && (REGNO (operands[1]) == VIRTUAL_STACK_VARS_REGNUM
725 || REGNO (operands[1]) == FRAME_POINTER_REGNUM))
727 emit_insn (gen_addsi3_fp (operands[0], operands[1], operands[2]));
731 /* Convert adds to subtracts if this makes loading the constant cheaper.
732 But only if we are allowed to generate new pseudos. */
733 if (! (reload_in_progress || reload_completed)
734 && GET_CODE (operands[2]) == CONST_INT
735 && INTVAL (operands[2]) < -32)
737 HOST_WIDE_INT neg_value = - INTVAL (operands[2]);
739 if ( CONST_OK_FOR_I (neg_value)
740 || CONST_OK_FOR_M (neg_value)
741 || CONST_OK_FOR_N (neg_value))
743 operands[2] = copy_to_mode_reg (SImode, GEN_INT (neg_value));
744 emit_insn (gen_subsi3 (operands[0], operands[1], operands[2]));
749 if (! mcore_addsub_operand (operands[2], SImode))
750 operands[2] = copy_to_mode_reg (SImode, operands[2]);
753 ;; RBE: for some constants which are not in the range which allows
754 ;; us to do a single operation, we will try a paired addi/addi instead
755 ;; of a movi/addi. This relieves some register pressure at the expense
756 ;; of giving away some potential constant reuse.
758 ;; RBE 6/17/97: this didn't buy us anything, but I keep the pattern
759 ;; for later reference
761 ;; (define_insn "addsi3_i2"
762 ;; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
763 ;; (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
764 ;; (match_operand:SI 2 "const_int_operand" "g")))]
765 ;; "GET_CODE(operands[2]) == CONST_INT
766 ;; && ((INTVAL (operands[2]) > 32 && INTVAL(operands[2]) <= 64)
767 ;; || (INTVAL (operands[2]) < -32 && INTVAL(operands[2]) >= -64))"
770 ;; HOST_WIDE_INT n = INTVAL(operands[2]);
773 ;; operands[2] = GEN_INT(n - 32);
774 ;; return \"addi\\t%0,32\;addi\\t%0,%2\";
779 ;; operands[2] = GEN_INT(n - 32);
780 ;; return \"subi\\t%0,32\;subi\\t%0,%2\";
783 ;; [(set_attr "length" "4")])
785 (define_insn "addsi3_i"
786 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
787 (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
788 (match_operand:SI 2 "mcore_addsub_operand" "r,J,L")))]
795 ;; This exists so that address computations based on the frame pointer
796 ;; can be folded in when frame pointer elimination occurs. Ordinarily
797 ;; this would be bad because it allows insns which would require reloading,
798 ;; but without it, we get multiple adds where one would do.
800 (define_insn "addsi3_fp"
801 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
802 (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
803 (match_operand:SI 2 "immediate_operand" "r,J,L")))]
804 "flag_omit_frame_pointer
805 && (reload_in_progress || reload_completed || REGNO (operands[1]) == FRAME_POINTER_REGNUM)"
811 ;; RBE: for some constants which are not in the range which allows
812 ;; us to do a single operation, we will try a paired addi/addi instead
813 ;; of a movi/addi. This relieves some register pressure at the expense
814 ;; of giving away some potential constant reuse.
816 ;; RBE 6/17/97: this didn't buy us anything, but I keep the pattern
817 ;; for later reference
819 ;; (define_insn "subsi3_i2"
820 ;; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
821 ;; (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
822 ;; (match_operand:SI 2 "const_int_operand" "g")))]
823 ;; "TARGET_RBETEST && GET_CODE(operands[2]) == CONST_INT
824 ;; && ((INTVAL (operands[2]) > 32 && INTVAL(operands[2]) <= 64)
825 ;; || (INTVAL (operands[2]) < -32 && INTVAL(operands[2]) >= -64))"
828 ;; HOST_WIDE_INT n = INTVAL(operands[2]);
831 ;; operands[2] = GEN_INT( n - 32);
832 ;; return \"subi\\t%0,32\;subi\\t%0,%2\";
837 ;; operands[2] = GEN_INT(n - 32);
838 ;; return \"addi\\t%0,32\;addi\\t%0,%2\";
841 ;; [(set_attr "length" "4")])
843 ;(define_insn "subsi3"
844 ; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
845 ; (minus:SI (match_operand:SI 1 "mcore_arith_K_operand" "0,0,r,K")
846 ; (match_operand:SI 2 "mcore_arith_J_operand" "r,J,0,0")))]
854 (define_insn "subsi3"
855 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
856 (minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r")
857 (match_operand:SI 2 "mcore_arith_J_operand" "r,J,0")))]
865 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
866 (minus:SI (match_operand:SI 1 "mcore_literal_K_operand" "K")
867 (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
871 (define_insn "adddi3"
872 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
873 (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
874 (match_operand:DI 2 "mcore_arith_reg_operand" "r")))
875 (clobber (reg:CC 17))]
879 if (TARGET_LITTLE_END)
880 return \"cmplt %0,%0\;addc %0,%2\;addc %R0,%R2\";
881 return \"cmplt %R0,%R0\;addc %R0,%R2\;addc %0,%2\";
883 [(set_attr "length" "6")])
885 ;; special case for "longlong += 1"
887 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
888 (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
890 (clobber (reg:CC 17))]
894 if (TARGET_LITTLE_END)
895 return \"addi %0,1\;cmpnei %0,0\;incf %R0\";
896 return \"addi %R0,1\;cmpnei %R0,0\;incf %0\";
898 [(set_attr "length" "6")])
900 ;; special case for "longlong -= 1"
902 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
903 (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
905 (clobber (reg:CC 17))]
909 if (TARGET_LITTLE_END)
910 return \"cmpnei %0,0\;decf %R0\;subi %0,1\";
911 return \"cmpnei %R0,0\;decf %0\;subi %R0,1\";
913 [(set_attr "length" "6")])
915 ;; special case for "longlong += const_int"
916 ;; we have to use a register for the const_int because we don't
917 ;; have an unsigned compare immediate... only +/- 1 get to
918 ;; play the no-extra register game because they compare with 0.
919 ;; This winds up working out for any literal that is synthesized
920 ;; with a single instruction. The more complicated ones look
921 ;; like they get broken into subregs to get initialized too soon
922 ;; for us to catch here. -- RBE 4/25/96
923 ;; only allow for-sure positive values.
926 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
927 (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
928 (match_operand:SI 2 "const_int_operand" "r")))
929 (clobber (reg:CC 17))]
930 "GET_CODE (operands[2]) == CONST_INT
931 && INTVAL (operands[2]) > 0 && ! (INTVAL (operands[2]) & 0x80000000)"
934 gcc_assert (GET_MODE (operands[2]) == SImode);
935 if (TARGET_LITTLE_END)
936 return \"addu %0,%2\;cmphs %0,%2\;incf %R0\";
937 return \"addu %R0,%2\;cmphs %R0,%2\;incf %0\";
939 [(set_attr "length" "6")])
941 ;; optimize "long long" + "unsigned long"
942 ;; won't trigger because of how the extension is expanded upstream.
944 ;; [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
945 ;; (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
946 ;; (zero_extend:DI (match_operand:SI 2 "mcore_arith_reg_operand" "r"))))
947 ;; (clobber (reg:CC 17))]
949 ;; "cmplt %R0,%R0\;addc %R0,%2\;inct %0"
950 ;; [(set_attr "length" "6")])
952 ;; optimize "long long" + "signed long"
953 ;; won't trigger because of how the extension is expanded upstream.
955 ;; [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
956 ;; (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
957 ;; (sign_extend:DI (match_operand:SI 2 "mcore_arith_reg_operand" "r"))))
958 ;; (clobber (reg:CC 17))]
960 ;; "cmplt %R0,%R0\;addc %R0,%2\;inct %0\;btsti %2,31\;dect %0"
961 ;; [(set_attr "length" "6")])
963 (define_insn "subdi3"
964 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
965 (minus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
966 (match_operand:DI 2 "mcore_arith_reg_operand" "r")))
967 (clobber (reg:CC 17))]
971 if (TARGET_LITTLE_END)
972 return \"cmphs %0,%0\;subc %0,%2\;subc %R0,%R2\";
973 return \"cmphs %R0,%R0\;subc %R0,%R2\;subc %0,%2\";
975 [(set_attr "length" "6")])
977 ;; -------------------------------------------------------------------------
978 ;; Multiplication instructions
979 ;; -------------------------------------------------------------------------
981 (define_insn "mulsi3"
982 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
983 (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
984 (match_operand:SI 2 "mcore_arith_reg_operand" "r")))]
989 ;; 32/32 signed division -- added to the MCORE instruction set spring 1997
991 ;; Different constraints based on the architecture revision...
993 (define_expand "divsi3"
994 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
995 (div:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
996 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
1000 ;; MCORE Revision 1.50: restricts the divisor to be in r1. (6/97)
1003 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1004 (div:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
1005 (match_operand:SI 2 "mcore_arith_reg_operand" "b")))]
1010 ;; 32/32 signed division -- added to the MCORE instruction set spring 1997
1012 ;; Different constraints based on the architecture revision...
1014 (define_expand "udivsi3"
1015 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1016 (udiv:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
1017 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
1021 ;; MCORE Revision 1.50: restricts the divisor to be in r1. (6/97)
1023 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1024 (udiv:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
1025 (match_operand:SI 2 "mcore_arith_reg_operand" "b")))]
1029 ;; -------------------------------------------------------------------------
1031 ;; -------------------------------------------------------------------------
1033 (define_insn "negsi2"
1034 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1035 (neg:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
1039 return \"rsubi %0,0\";
1043 (define_insn "abssi2"
1044 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1045 (abs:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
1049 (define_insn "negdi2"
1050 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
1051 (neg:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")))
1052 (clobber (reg:CC 17))]
1056 if (TARGET_LITTLE_END)
1057 return \"cmpnei %0,0\\n\\trsubi %0,0\\n\\tnot %R0\\n\\tincf %R0\";
1058 return \"cmpnei %R0,0\\n\\trsubi %R0,0\\n\\tnot %0\\n\\tincf %0\";
1060 [(set_attr "length" "8")])
1062 (define_insn "one_cmplsi2"
1063 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1064 (not:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
1068 ;; -------------------------------------------------------------------------
1069 ;; Zero extension instructions
1070 ;; -------------------------------------------------------------------------
;; zero_extendhisi2: HI -> SI zero extension; the insn below handles a
;; register source (shift pair) or a memory source (ldh), see "type".
1072 (define_expand "zero_extendhisi2"
1073 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1074 (zero_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "")))]
1079 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
1080 (zero_extend:SI (match_operand:HI 1 "general_operand" "0,m")))]
1085 [(set_attr "type" "shift,load")])
1087 ;; ldh gives us a free zero-extension. The combiner picks up on this.
1089 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1090 (zero_extend:SI (mem:HI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))]
1093 [(set_attr "type" "load")])
;; reg+offset form of the ldh zero-extending load: the displacement must
;; be non-negative, below 32, and halfword-aligned (even) per the
;; condition string.
1096 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1097 (zero_extend:SI (mem:HI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
1098 (match_operand:SI 2 "const_int_operand" "")))))]
1099 "(INTVAL (operands[2]) >= 0) &&
1100 (INTVAL (operands[2]) < 32) &&
1101 ((INTVAL (operands[2])&1) == 0)"
1103 [(set_attr "type" "load")])
;; zero_extendqisi2: QI -> SI zero extension.  Alternatives: shift pair
;; on the tied register, xtrb3 into class "b", or a ldb load.
1105 (define_expand "zero_extendqisi2"
1106 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1107 (zero_extend:SI (match_operand:QI 1 "general_operand" "")))]
1111 ;; RBE: XXX: we don't recognize that the xtrb3 kills the CC register.
1113 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b,r")
1114 (zero_extend:SI (match_operand:QI 1 "general_operand" "0,r,m")))]
1120 [(set_attr "type" "shift,shift,load")])
1122 ;; ldb gives us a free zero-extension. The combiner picks up on this.
1124 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1125 (zero_extend:SI (mem:QI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))]
1128 [(set_attr "type" "load")])
;; reg+offset form of the ldb zero-extending load; byte displacement
;; restricted to 0..15 by the condition string.
1131 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1132 (zero_extend:SI (mem:QI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
1133 (match_operand:SI 2 "const_int_operand" "")))))]
1134 "(INTVAL (operands[2]) >= 0) &&
1135 (INTVAL (operands[2]) < 16)"
1137 [(set_attr "type" "load")])
;; zero_extendqihi2: QI -> HI zero extension; same alternative layout as
;; zero_extendqisi2 (shift / xtrb3 / ldb).
1139 (define_expand "zero_extendqihi2"
1140 [(set (match_operand:HI 0 "mcore_arith_reg_operand" "")
1141 (zero_extend:HI (match_operand:QI 1 "general_operand" "")))]
1145 ;; RBE: XXX: we don't recognize that the xtrb3 kills the CC register.
1147 [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r,b,r")
1148 (zero_extend:HI (match_operand:QI 1 "general_operand" "0,r,m")))]
1154 [(set_attr "type" "shift,shift,load")])
1156 ;; ldb gives us a free zero-extension. The combiner picks up on this.
1157 ;; this doesn't catch references that are into a structure.
1158 ;; note that normally the compiler uses the above insn, unless it turns
1159 ;; out that we're dealing with a volatile...
1161 [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r")
1162 (zero_extend:HI (mem:QI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))]
1165 [(set_attr "type" "load")])
;; reg+offset ldb form; displacement restricted to 0..15.
1168 [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r")
1169 (zero_extend:HI (mem:QI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
1170 (match_operand:SI 2 "const_int_operand" "")))))]
1171 "(INTVAL (operands[2]) >= 0) &&
1172 (INTVAL (operands[2]) < 16)"
1174 [(set_attr "type" "load")])
1177 ;; -------------------------------------------------------------------------
1178 ;; Sign extension instructions
1179 ;; -------------------------------------------------------------------------
;; extendsidi2: SI -> DI sign extension, expanded by hand: copy the
;; source into the low word subreg, then fill the high word with an
;; arithmetic right shift of the low word.  Word numbering depends on
;; TARGET_LITTLE_END.
1181 (define_expand "extendsidi2"
1182 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
1183 (match_operand:SI 1 "mcore_arith_reg_operand" "r"))]
1189 if (TARGET_LITTLE_END)
1194 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], low),
1196 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], high),
1197 gen_rtx_ASHIFTRT (SImode,
1198 gen_rtx_SUBREG (SImode, operands[0], low),
;; Single-insn sign extensions; all are destructive (input tied to the
;; output register).
1204 (define_insn "extendhisi2"
1205 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1206 (sign_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "0")))]
1210 (define_insn "extendqisi2"
1211 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1212 (sign_extend:SI (match_operand:QI 1 "mcore_arith_reg_operand" "0")))]
1216 (define_insn "extendqihi2"
1217 [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r")
1218 (sign_extend:HI (match_operand:QI 1 "mcore_arith_reg_operand" "0")))]
1222 ;; -------------------------------------------------------------------------
1223 ;; Move instructions
1224 ;; -------------------------------------------------------------------------
;; movsi: force stored values into a register (no mem-to-mem moves),
;; then match the multi-alternative insn below; asm text comes from
;; mcore_output_move.
1228 (define_expand "movsi"
1229 [(set (match_operand:SI 0 "general_operand" "")
1230 (match_operand:SI 1 "general_operand" ""))]
1234 if (GET_CODE (operands[0]) == MEM)
1235 operands[1] = force_reg (SImode, operands[1]);
;; One side must be a register (enforced by the condition string).
1239 [(set (match_operand:SI 0 "mcore_general_movdst_operand" "=r,r,a,r,a,r,m")
1240 (match_operand:SI 1 "mcore_general_movsrc_operand" "r,P,i,c,R,m,r"))]
1241 "(register_operand (operands[0], SImode)
1242 || register_operand (operands[1], SImode))"
1243 "* return mcore_output_move (insn, operands, SImode);"
1244 [(set_attr "type" "move,move,move,move,load,load,store")])
;; movhi: as movsi, but constants that do not fit the I/M/N constraint
;; ranges are first loaded into an SImode pseudo and then narrowed with
;; gen_lowpart (not during reload, when no new pseudos may be created).
1250 (define_expand "movhi"
1251 [(set (match_operand:HI 0 "general_operand" "")
1252 (match_operand:HI 1 "general_operand" ""))]
1256 if (GET_CODE (operands[0]) == MEM)
1257 operands[1] = force_reg (HImode, operands[1]);
1258 else if (CONSTANT_P (operands[1])
1259 && (GET_CODE (operands[1]) != CONST_INT
1260 || (! CONST_OK_FOR_I (INTVAL (operands[1]))
1261 && ! CONST_OK_FOR_M (INTVAL (operands[1]))
1262 && ! CONST_OK_FOR_N (INTVAL (operands[1]))))
1263 && ! reload_completed && ! reload_in_progress)
1265 rtx reg = gen_reg_rtx (SImode);
1266 emit_insn (gen_movsi (reg, operands[1]));
1267 operands[1] = gen_lowpart (HImode, reg);
1272 [(set (match_operand:HI 0 "mcore_general_movdst_operand" "=r,r,a,r,r,m")
1273 (match_operand:HI 1 "mcore_general_movsrc_operand" "r,P,i,c,m,r"))]
1274 "(register_operand (operands[0], HImode)
1275 || register_operand (operands[1], HImode))"
1276 "* return mcore_output_move (insn, operands, HImode);"
1277 [(set_attr "type" "move,move,move,move,load,store")])
;; movqi: identical strategy to movhi (force mem stores through a
;; register; funnel hard constants through an SImode pseudo).
1283 (define_expand "movqi"
1284 [(set (match_operand:QI 0 "general_operand" "")
1285 (match_operand:QI 1 "general_operand" ""))]
1289 if (GET_CODE (operands[0]) == MEM)
1290 operands[1] = force_reg (QImode, operands[1]);
1291 else if (CONSTANT_P (operands[1])
1292 && (GET_CODE (operands[1]) != CONST_INT
1293 || (! CONST_OK_FOR_I (INTVAL (operands[1]))
1294 && ! CONST_OK_FOR_M (INTVAL (operands[1]))
1295 && ! CONST_OK_FOR_N (INTVAL (operands[1]))))
1296 && ! reload_completed && ! reload_in_progress)
1298 rtx reg = gen_reg_rtx (SImode);
1299 emit_insn (gen_movsi (reg, operands[1]));
1300 operands[1] = gen_lowpart (QImode, reg);
1305 [(set (match_operand:QI 0 "mcore_general_movdst_operand" "=r,r,a,r,r,m")
1306 (match_operand:QI 1 "mcore_general_movsrc_operand" "r,P,i,c,m,r"))]
1307 "(register_operand (operands[0], QImode)
1308 || register_operand (operands[1], QImode))"
1309 "* return mcore_output_move (insn, operands, QImode);"
1310 [(set_attr "type" "move,move,move,move,load,store")])
;; movdi: double-word move.  Hard constants are split into two SImode
;; word moves via simplify_gen_subreg; everything else goes to movdi_i,
;; whose asm text comes from mcore_output_movedouble (two 2-byte insns,
;; hence length 4).
1315 (define_expand "movdi"
1316 [(set (match_operand:DI 0 "general_operand" "")
1317 (match_operand:DI 1 "general_operand" ""))]
1321 if (GET_CODE (operands[0]) == MEM)
1322 operands[1] = force_reg (DImode, operands[1]);
1323 else if (GET_CODE (operands[1]) == CONST_INT
1324 && ! CONST_OK_FOR_I (INTVAL (operands[1]))
1325 && ! CONST_OK_FOR_M (INTVAL (operands[1]))
1326 && ! CONST_OK_FOR_N (INTVAL (operands[1])))
1329 for (i = 0; i < UNITS_PER_WORD * 2; i += UNITS_PER_WORD)
1330 emit_move_insn (simplify_gen_subreg (SImode, operands[0], DImode, i),
1331 simplify_gen_subreg (SImode, operands[1], DImode, i));
1336 (define_insn "movdi_i"
1337 [(set (match_operand:DI 0 "general_operand" "=r,r,r,r,a,r,m")
1338 (match_operand:DI 1 "mcore_general_movsrc_operand" "I,M,N,r,R,m,r"))]
1340 "* return mcore_output_movedouble (operands, DImode);"
1341 [(set_attr "length" "4") (set_attr "type" "move,move,move,move,load,load,store")])
;; movsf / movdf: floating moves are plain bit copies (no FP hardware
;; registers involved); stores are forced through a register first.
1345 (define_expand "movsf"
1346 [(set (match_operand:SF 0 "general_operand" "")
1347 (match_operand:SF 1 "general_operand" ""))]
1351 if (GET_CODE (operands[0]) == MEM)
1352 operands[1] = force_reg (SFmode, operands[1]);
1355 (define_insn "movsf_i"
1356 [(set (match_operand:SF 0 "general_operand" "=r,r,m")
1357 (match_operand:SF 1 "general_operand" "r,m,r"))]
1363 [(set_attr "type" "move,load,store")])
1367 (define_expand "movdf"
1368 [(set (match_operand:DF 0 "general_operand" "")
1369 (match_operand:DF 1 "general_operand" ""))]
1373 if (GET_CODE (operands[0]) == MEM)
1374 operands[1] = force_reg (DFmode, operands[1]);
;; Double-word copy; two 2-byte insns emitted by mcore_output_movedouble.
1377 (define_insn "movdf_k"
1378 [(set (match_operand:DF 0 "general_operand" "=r,r,m")
1379 (match_operand:DF 1 "general_operand" "r,m,r"))]
1381 "* return mcore_output_movedouble (operands, DFmode);"
1382 [(set_attr "length" "4") (set_attr "type" "move,load,store")])
1385 ;; Load/store multiple
1387 ;; ??? This is not currently used.
1389 [(set (match_operand:TI 0 "mcore_arith_reg_operand" "=r")
1390 (mem:TI (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
1394 ;; ??? This is not currently used.
1396 [(set (mem:TI (match_operand:SI 0 "mcore_arith_reg_operand" "r"))
1397 (match_operand:TI 1 "mcore_arith_reg_operand" "r"))]
;; load_multiple: only the ldm-shaped case is supported -- a constant
;; count of at least two registers, loaded from the stack pointer, with
;; the block ending at r15 (regno + count == 16); anything else FAILs.
;; The expander builds the PARALLEL of per-register SETs by hand.
1401 (define_expand "load_multiple"
1402 [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
1403 (match_operand:SI 1 "" ""))
1404 (use (match_operand:SI 2 "" ""))])]
1408 int regno, count, i;
1410 /* Support only loading a constant number of registers from memory and
1411 only if at least two registers. The last register must be r15. */
1412 if (GET_CODE (operands[2]) != CONST_INT
1413 || INTVAL (operands[2]) < 2
1414 || GET_CODE (operands[1]) != MEM
1415 || XEXP (operands[1], 0) != stack_pointer_rtx
1416 || GET_CODE (operands[0]) != REG
1417 || REGNO (operands[0]) + INTVAL (operands[2]) != 16)
1420 count = INTVAL (operands[2]);
1421 regno = REGNO (operands[0]);
1423 operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
1425 for (i = 0; i < count; i++)
1426 XVECEXP (operands[3], 0, i)
1427 = gen_rtx_SET (VOIDmode,
1428 gen_rtx_REG (SImode, regno + i),
1429 gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx,
;; Matching insn: recognized only when the base register is the stack
;; pointer, as required by the expander above.
1434 [(match_parallel 0 "mcore_load_multiple_operation"
1435 [(set (match_operand:SI 1 "mcore_arith_reg_operand" "=r")
1436 (mem:SI (match_operand:SI 2 "register_operand" "r")))])]
1437 "GET_CODE (operands[2]) == REG && REGNO (operands[2]) == STACK_POINTER_REGNUM"
;; store_multiple: mirror image of load_multiple -- constant count >= 2,
;; destination addressed off the stack pointer, block ending at r15.
1440 (define_expand "store_multiple"
1441 [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
1442 (match_operand:SI 1 "" ""))
1443 (use (match_operand:SI 2 "" ""))])]
1447 int regno, count, i;
1449 /* Support only storing a constant number of registers to memory and
1450 only if at least two registers. The last register must be r15. */
1451 if (GET_CODE (operands[2]) != CONST_INT
1452 || INTVAL (operands[2]) < 2
1453 || GET_CODE (operands[0]) != MEM
1454 || XEXP (operands[0], 0) != stack_pointer_rtx
1455 || GET_CODE (operands[1]) != REG
1456 || REGNO (operands[1]) + INTVAL (operands[2]) != 16)
1459 count = INTVAL (operands[2]);
1460 regno = REGNO (operands[1]);
1462 operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
1464 for (i = 0; i < count; i++)
1465 XVECEXP (operands[3], 0, i)
1466 = gen_rtx_SET (VOIDmode,
1467 gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx,
1469 gen_rtx_REG (SImode, regno + i));
;; Matching insn; base register must be the stack pointer.
1473 [(match_parallel 0 "mcore_store_multiple_operation"
1474 [(set (mem:SI (match_operand:SI 2 "register_operand" "r"))
1475 (match_operand:SI 1 "mcore_arith_reg_operand" "r"))])]
1476 "GET_CODE (operands[2]) == REG && REGNO (operands[2]) == STACK_POINTER_REGNUM"
1479 ;; ------------------------------------------------------------------------
1480 ;; Define the real conditional branch instructions.
1481 ;; ------------------------------------------------------------------------
;; branch_true / branch_false: branch to the label when the condition
;; bit (reg:CC 17) is set / clear.
1483 (define_insn "branch_true"
1484 [(set (pc) (if_then_else (ne (reg:CC 17) (const_int 0))
1485 (label_ref (match_operand 0 "" ""))
1489 [(set_attr "type" "brcond")])
1491 (define_insn "branch_false"
1492 [(set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
1493 (label_ref (match_operand 0 "" ""))
1497 [(set_attr "type" "brcond")])
;; inverse_* variants: same tests, but the label sits in the else arm
;; (fall through when the condition holds).
1499 (define_insn "inverse_branch_true"
1500 [(set (pc) (if_then_else (ne (reg:CC 17) (const_int 0))
1502 (label_ref (match_operand 0 "" ""))))]
1505 [(set_attr "type" "brcond")])
1507 (define_insn "inverse_branch_false"
1508 [(set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
1510 (label_ref (match_operand 0 "" ""))))]
1513 [(set_attr "type" "brcond")])
1515 ;; Conditional branch insns
1517 ;; At the top level, condition tests are eq/ne, because we
1518 ;; are comparing against the condition register (which
1519 ;; holds the result of the true relational test).
1521 ; There is no beq compare, so we reverse the branch arms.
;; beq: compare for EQ via mcore_gen_compare_reg, label in the else arm
;; (arms reversed, per the comment above).
1523 (define_expand "beq"
1524 [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1526 (label_ref (match_operand 0 "" ""))))]
1530 operands[1] = mcore_gen_compare_reg (EQ);
;; bne: straightforward branch-true on the NE compare result.
1533 (define_expand "bne"
1534 [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1535 (label_ref (match_operand 0 "" ""))
1540 operands[1] = mcore_gen_compare_reg (NE);
1543 ; check whether (GT A imm) can become (LE A imm) with the branch reversed.
1544 ; if so, emit a (LT A imm + 1) in place of the (LE A imm). BRC
;; bgt: try the LE rewrite first (mcore_modify_comparison), emitting a
;; reversed blt; otherwise fall back to a real GT compare.
1546 (define_expand "bgt"
1547 [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1548 (label_ref (match_operand 0 "" ""))
1553 if (mcore_modify_comparison (LE))
1555 emit_jump_insn (gen_reverse_blt (operands[0]));
1558 operands[1] = mcore_gen_compare_reg (GT);
1561 ; There is no ble compare, so we reverse the branch arms.
1562 ; reversed the condition and branch arms for ble -- the check_dbra_loop()
1563 ; transformation assumes that ble uses a branch-true with the label as
1564 ; the target. BRC
1566 ; check whether (LE A imm) can become (LT A imm + 1).
;; ble: branch-false shape (eq test) so that check_dbra_loop() sees the
;; label as the branch-true target; tries the (LT A imm+1) rewrite via
;; mcore_modify_comparison first.
1568 (define_expand "ble"
1569 [(set (pc) (if_then_else (eq (match_dup 1) (const_int 0))
1570 (label_ref (match_operand 0 "" ""))
1575 if (mcore_modify_comparison (LE))
1577 emit_jump_insn (gen_blt (operands[0]));
1580 operands[1] = mcore_gen_compare_reg (LE);
1583 ; make generating a reversed blt simple
1584 (define_expand "reverse_blt"
1585 [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1587 (label_ref (match_operand 0 "" ""))))]
1591 operands[1] = mcore_gen_compare_reg (LT);
;; blt: direct branch-true on the LT compare result.
1594 (define_expand "blt"
1595 [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1596 (label_ref (match_operand 0 "" ""))
1601 operands[1] = mcore_gen_compare_reg (LT);
1604 ; There is no bge compare, so we reverse the branch arms.
1606 (define_expand "bge"
1607 [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1609 (label_ref (match_operand 0 "" ""))))]
1613 operands[1] = mcore_gen_compare_reg (GE);
1616 ; There is no gtu compare, so we reverse the branch arms
1618 ;(define_expand "bgtu"
1619 ; [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1621 ; (label_ref (match_operand 0 "" ""))))]
1625 ; if (GET_CODE (arch_compare_op1) == CONST_INT
1626 ; && INTVAL (arch_compare_op1) == 0)
1627 ; operands[1] = mcore_gen_compare_reg (NE);
1629 ; { if (mcore_modify_comparison (GTU))
1631 ; emit_jump_insn (gen_bgeu (operands[0]));
1634 ; operands[1] = mcore_gen_compare_reg (LEU);
;; bgtu: special-cases a compare against the constant 0 -- unsigned
;; "> 0" has no invertible form here, so the branch is rebuilt by hand
;; as a branch-true on NE; otherwise a plain GTU compare is used.
1638 (define_expand "bgtu"
1639 [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1641 (label_ref (match_operand 0 "" ""))))]
1645 if (GET_CODE (arch_compare_op1) == CONST_INT
1646 && INTVAL (arch_compare_op1) == 0)
1648 /* The inverse of '> 0' for an unsigned test is
1649 '== 0' but we do not have such an instruction available.
1650 Instead we must reverse the branch (back to the normal
1651 ordering) and test '!= 0'. */
1653 operands[1] = mcore_gen_compare_reg (NE);
1655 emit_jump_insn (gen_rtx_SET (VOIDmode,
1657 gen_rtx_IF_THEN_ELSE (VOIDmode,
1658 gen_rtx_NE (VOIDmode,
1661 gen_rtx_LABEL_REF (VOIDmode,operands[0]),
1665 operands[1] = mcore_gen_compare_reg (GTU);
;; bleu: branch-true on the LEU compare result.
1669 (define_expand "bleu"
1670 [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1671 (label_ref (match_operand 0 "" ""))
1676 operands[1] = mcore_gen_compare_reg (LEU);
1679 ; There is no bltu compare, so we reverse the branch arms
1680 (define_expand "bltu"
1681 [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1683 (label_ref (match_operand 0 "" ""))))]
1687 operands[1] = mcore_gen_compare_reg (LTU);
;; bgeu: branch-true on the GEU compare result.
1690 (define_expand "bgeu"
1691 [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1692 (label_ref (match_operand 0 "" ""))
1698 operands[1] = mcore_gen_compare_reg (GEU);
1701 ;; ------------------------------------------------------------------------
1702 ;; Jump and linkage insns
1703 ;; ------------------------------------------------------------------------
;; jump_real is the actual unconditional branch; the "jump" expander
;; simply delegates to it.
1705 (define_insn "jump_real"
1707 (label_ref (match_operand 0 "" "")))]
1710 [(set_attr "type" "branch")])
1712 (define_expand "jump"
1713 [(set (pc) (label_ref (match_operand 0 "" "")))]
1717 emit_jump_insn (gen_jump_real (operand0));
;; indirect_jump: jump through a register.
1722 (define_insn "indirect_jump"
1724 (match_operand:SI 0 "mcore_arith_reg_operand" "r"))]
1727 [(set_attr "type" "jmp")])
;; call: r15 (the link register) is clobbered by every call.  Addresses
;; that are neither a register nor symbolic are forced into a register.
1729 (define_expand "call"
1730 [(parallel[(call (match_operand:SI 0 "" "")
1731 (match_operand 1 "" ""))
1732 (clobber (reg:SI 15))])]
1736 if (GET_CODE (operands[0]) == MEM
1737 && ! register_operand (XEXP (operands[0], 0), SImode)
1738 && ! mcore_symbolic_address_p (XEXP (operands[0], 0)))
1739 operands[0] = gen_rtx_MEM (GET_MODE (operands[0]),
1740 force_reg (Pmode, XEXP (operands[0], 0)));
1743 (define_insn "call_internal"
1744 [(call (mem:SI (match_operand:SI 0 "mcore_call_address_operand" "riR"))
1745 (match_operand 1 "" ""))
1746 (clobber (reg:SI 15))]
1748 "* return mcore_output_call (operands, 0);")
;; call_value: same as call but with a register return value.  NOTE:
;; the address test reads operands[0] while the replacement writes
;; operands[1] -- kept byte-identical here; flagged for review.
1750 (define_expand "call_value"
1751 [(parallel[(set (match_operand 0 "register_operand" "")
1752 (call (match_operand:SI 1 "" "")
1753 (match_operand 2 "" "")))
1754 (clobber (reg:SI 15))])]
1758 if (GET_CODE (operands[0]) == MEM
1759 && ! register_operand (XEXP (operands[0], 0), SImode)
1760 && ! mcore_symbolic_address_p (XEXP (operands[0], 0)))
1761 operands[1] = gen_rtx_MEM (GET_MODE (operands[1]),
1762 force_reg (Pmode, XEXP (operands[1], 0)))
1765 (define_insn "call_value_internal"
1766 [(set (match_operand 0 "register_operand" "=r")
1767 (call (mem:SI (match_operand:SI 1 "mcore_call_address_operand" "riR"))
1768 (match_operand 2 "" "")))
1769 (clobber (reg:SI 15))]
1771 "* return mcore_output_call (operands, 1);")
;; call_value_struct: multi-register (structure) return described by a
;; match_parallel of (reg, offset) pairs.
1773 (define_insn "call_value_struct"
1774 [(parallel [(set (match_parallel 0 ""
1775 [(expr_list (match_operand 3 "register_operand" "") (match_operand 4 "immediate_operand" ""))
1776 (expr_list (match_operand 5 "register_operand" "") (match_operand 6 "immediate_operand" ""))])
1777 (call (match_operand:SI 1 "" "")
1778 (match_operand 2 "" "")))
1779 (clobber (reg:SI 15))])]
1781 "* return mcore_output_call (operands, 1);"
1785 ;; ------------------------------------------------------------------------
1787 ;; ------------------------------------------------------------------------
;; tablejump: dispatch through a register; the (use label_ref) keeps the
;; jump table alive.
1794 (define_insn "tablejump"
1796 (match_operand:SI 0 "mcore_arith_reg_operand" "r"))
1797 (use (label_ref (match_operand 1 "" "")))]
1800 [(set_attr "type" "jmp")])
;; Function return: suppressed (zero-length) for naked functions.
1802 (define_insn "*return"
1804 "reload_completed && ! mcore_naked_function_p ()"
1806 [(set_attr "type" "jmp")])
1808 (define_insn "*no_return"
1810 "reload_completed && mcore_naked_function_p ()"
1812 [(set_attr "length" "0")]
;; Prologue/epilogue are expanded entirely in C.
1815 (define_expand "prologue"
1818 "mcore_expand_prolog (); DONE;")
1820 (define_expand "epilogue"
1823 "mcore_expand_epilog ();")
1825 ;; ------------------------------------------------------------------------
1827 ;; ------------------------------------------------------------------------
;; Materialize the condition bit (reg:CC 17) into a register, either
;; directly (ne) or inverted (eq).
1830 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1831 (ne:SI (reg:CC 17) (const_int 0)))]
1834 [(set_attr "type" "move")])
1837 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1838 (eq:SI (reg:CC 17) (const_int 0)))]
1841 [(set_attr "type" "move")])
1843 ; in 0.97 use (LE 0) with (LT 1) and complement c. BRC
;; Split: rewrite an scc on (gt x, const) as (lt x, const+1) with the
;; result complemented, per the note above.
1846 (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1847 (ne:SI (gt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
1850 (clobber (reg:SI 17))])]
1853 (lt:CC (match_dup 1) (const_int 1)))
1854 (set (match_dup 0) (eq:SI (reg:CC 17) (const_int 0)))])
;; scc expanders: each one emits a compare via mcore_gen_compare_reg and
;; then stores the condition bit (directly for ne-shaped patterns,
;; inverted for eq-shaped ones).  Where the native compare is missing,
;; the complementary compare is used with the sense flipped (e.g. sge
;; uses LT, sltu uses GEU).
1857 (define_expand "seq"
1858 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1859 (eq:SI (match_dup 1) (const_int 0)))]
1863 operands[1] = mcore_gen_compare_reg (NE);
1866 (define_expand "sne"
1867 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1868 (ne:SI (match_dup 1) (const_int 0)))]
1872 operands[1] = mcore_gen_compare_reg (NE);
1875 (define_expand "slt"
1876 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1877 (ne:SI (match_dup 1) (const_int 0)))]
1881 operands[1] = mcore_gen_compare_reg (LT);
1884 ; make generating a LT with the comparison reversed easy. BRC
1885 (define_expand "reverse_slt"
1886 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1887 (eq:SI (match_dup 1) (const_int 0)))]
1891 operands[1] = mcore_gen_compare_reg (LT);
1894 (define_expand "sge"
1895 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1896 (eq:SI (match_dup 1) (const_int 0)))]
1900 operands[1] = mcore_gen_compare_reg (LT);
1903 ; check whether (GT A imm) can become (LE A imm) with the comparison
1904 ; reversed. if so, emit a (LT A imm + 1) in place of the (LE A imm). BRC
1906 (define_expand "sgt"
1907 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1908 (ne:SI (match_dup 1) (const_int 0)))]
1912 if (mcore_modify_comparison (LE))
1914 emit_insn (gen_reverse_slt (operands[0]));
1918 operands[1] = mcore_gen_compare_reg (GT);
1921 (define_expand "sle"
1922 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1923 (eq:SI (match_dup 1) (const_int 0)))]
1927 if (mcore_modify_comparison (LE))
1929 emit_insn (gen_slt (operands[0]));
1932 operands[1] = mcore_gen_compare_reg (GT);
1935 (define_expand "sltu"
1936 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1937 (eq:SI (match_dup 1) (const_int 0)))]
1941 operands[1] = mcore_gen_compare_reg (GEU);
1944 (define_expand "sgeu"
1945 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1946 (ne:SI (match_dup 1) (const_int 0)))]
1950 operands[1] = mcore_gen_compare_reg (GEU);
1953 (define_expand "sgtu"
1954 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1955 (eq:SI (match_dup 1) (const_int 0)))]
1959 operands[1] = mcore_gen_compare_reg (LEU);
1962 (define_expand "sleu"
1963 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1964 (ne:SI (match_dup 1) (const_int 0)))]
1968 operands[1] = mcore_gen_compare_reg (LEU);
;; Add or subtract the condition bit (reg:CC 17) into a register:
;; incscc/decscc act when the bit is set, *_false when it is clear.
;; Inputs are tied to the output register.
1971 (define_insn "incscc"
1972 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1973 (plus:SI (ne (reg:CC 17) (const_int 0))
1974 (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
1978 (define_insn "incscc_false"
1979 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1980 (plus:SI (eq (reg:CC 17) (const_int 0))
1981 (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
1985 (define_insn "decscc"
1986 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1987 (minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
1988 (ne (reg:CC 17) (const_int 0))))]
1992 (define_insn "decscc_false"
1993 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1994 (minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
1995 (eq (reg:CC 17) (const_int 0))))]
1999 ;; ------------------------------------------------------------------------
2000 ;; Conditional move patterns.
2001 ;; ------------------------------------------------------------------------
;; smaxsi3: signed max as compare (lt) + conditional move on the clear
;; condition bit; the split below recreates that pair from an smax rtx.
2003 (define_expand "smaxsi3"
2005 (lt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
2006 (match_operand:SI 2 "mcore_arith_reg_operand" "")))
2007 (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2008 (if_then_else:SI (eq (reg:CC 17) (const_int 0))
2009 (match_dup 1) (match_dup 2)))]
2014 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2015 (smax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2016 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
2019 (lt:SI (match_dup 1) (match_dup 2)))
2021 (if_then_else:SI (eq (reg:CC 17) (const_int 0))
2022 (match_dup 1) (match_dup 2)))]
2025 ; no tstgt in 0.97, so just use cmplti (btsti x,31) and reverse move
;; Special case: smax against 0 via a sign-bit test, per the note above.
2028 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2029 (smax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2033 (lt:CC (match_dup 1) (const_int 0)))
2035 (if_then_else:SI (eq (reg:CC 17) (const_int 0))
2036 (match_dup 1) (const_int 0)))]
;; sminsi3: signed min -- same lt compare as smaxsi3 but the cmov fires
;; on the set condition bit (ne) instead.
2039 (define_expand "sminsi3"
2041 (lt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
2042 (match_operand:SI 2 "mcore_arith_reg_operand" "")))
2043 (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2044 (if_then_else:SI (ne (reg:CC 17) (const_int 0))
2045 (match_dup 1) (match_dup 2)))]
2050 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2051 (smin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2052 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
2055 (lt:SI (match_dup 1) (match_dup 2)))
2057 (if_then_else:SI (ne (reg:CC 17) (const_int 0))
2058 (match_dup 1) (match_dup 2)))]
2062 ; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2063 ; (smin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2067 ; (gt:CC (match_dup 1) (const_int 0)))
2068 ; (set (match_dup 0)
2069 ; (if_then_else:SI (eq (reg:CC 17) (const_int 0))
2070 ; (match_dup 1) (const_int 0)))]
2073 ; changed these unsigned patterns to use geu instead of ltu. it appears
2074 ; that the c-torture & ssrl test suites didn't catch these! only showed
2075 ; up in friedman's clib work. BRC 7/7/95
;; umaxsi3 / uminsi3: unsigned max/min built on a geu compare (see the
;; geu-vs-ltu history note above); max picks operand 2 when the bit is
;; clear, min when it is set.
2077 (define_expand "umaxsi3"
2079 (geu:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
2080 (match_operand:SI 2 "mcore_arith_reg_operand" "")))
2081 (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2082 (if_then_else:SI (eq (reg:CC 17) (const_int 0))
2083 (match_dup 2) (match_dup 1)))]
2088 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2089 (umax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2090 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
2093 (geu:SI (match_dup 1) (match_dup 2)))
2095 (if_then_else:SI (eq (reg:CC 17) (const_int 0))
2096 (match_dup 2) (match_dup 1)))]
2099 (define_expand "uminsi3"
2101 (geu:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
2102 (match_operand:SI 2 "mcore_arith_reg_operand" "")))
2103 (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2104 (if_then_else:SI (ne (reg:CC 17) (const_int 0))
2105 (match_dup 2) (match_dup 1)))]
2110 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2111 (umin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2112 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
2115 (geu:SI (match_dup 1) (match_dup 2)))
2117 (if_then_else:SI (ne (reg:CC 17) (const_int 0))
2118 (match_dup 2) (match_dup 1)))]
2121 ;; ------------------------------------------------------------------------
2122 ;; conditional move patterns really start here
2123 ;; ------------------------------------------------------------------------
2125 ;; the "movtK" patterns are experimental. they are intended to account for
2126 ;; gcc's mucking on code such as:
2128 ;; free_ent = ((block_compress) ? 257 : 256 );
2130 ;; these patterns help to get a tstne/bgeni/inct (or equivalent) sequence
2131 ;; when both arms have constants that are +/- 1 of each other.
2133 ;; note in the following patterns that the "movtK" ones should be the first
2134 ;; one defined in each sequence. this is because the general pattern also
2135 ;; matches, so use ordering to determine priority (it's easier this way than
2136 ;; adding conditions to the general patterns). BRC
2138 ;; the U and Q constraints are necessary to ensure that reload does the
2139 ;; 'right thing'. U constrains the operand to 0 and Q to 1 for use in the
2140 ;; clrt & clrf and clrt/inct & clrf/incf patterns. BRC 6/26
2142 ;; ??? there appears to be some problems with these movtK patterns for ops
2143 ;; other than eq & ne. need to fix. 6/30 BRC
2145 ;; ------------------------------------------------------------------------
2147 ;; ------------------------------------------------------------------------
2149 ; experimental conditional move with two constants +/- 1 BRC
;; movtK_1: cmov on the set condition bit where both arms are constants
;; exactly 1 apart (checked in the condition); lets mcore_output_cmov
;; emit a bgeni/inct-style sequence.
2151 (define_insn "movtK_1"
2152 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2154 (ne (reg:CC 17) (const_int 0))
2155 (match_operand:SI 1 "mcore_arith_O_operand" "O")
2156 (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
2157 " GET_CODE (operands[1]) == CONST_INT
2158 && GET_CODE (operands[2]) == CONST_INT
2159 && ( (INTVAL (operands[1]) - INTVAL (operands[2]) == 1)
2160 || (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
2161 "* return mcore_output_cmov (operands, 1, NULL);"
2162 [(set_attr "length" "4")])
;; movt0: general cmov on the set condition bit; the U/Q-style
;; constraints cover the clrt/clrf alternatives (see the notes above the
;; movtK section).
2164 (define_insn "movt0"
2165 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2167 (ne (reg:CC 17) (const_int 0))
2168 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
2169 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
2177 ;; ------------------------------------------------------------------------
2179 ;; ------------------------------------------------------------------------
2181 ; experimental conditional move with two constants +/- 1 BRC
;; movtK_2 / movf0: mirror images of movtK_1 / movt0 for the clear
;; condition bit (eq instead of ne).
2182 (define_insn "movtK_2"
2183 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2185 (eq (reg:CC 17) (const_int 0))
2186 (match_operand:SI 1 "mcore_arith_O_operand" "O")
2187 (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
2188 " GET_CODE (operands[1]) == CONST_INT
2189 && GET_CODE (operands[2]) == CONST_INT
2190 && ( (INTVAL (operands[1]) - INTVAL (operands[2]) == 1)
2191 || (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
2192 "* return mcore_output_cmov (operands, 0, NULL);"
2193 [(set_attr "length" "4")])
2195 (define_insn "movf0"
2196 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2198 (eq (reg:CC 17) (const_int 0))
2199 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
2200 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
2208 ; turns lsli rx,imm/btsti rx,31 into btsti rx,imm. not done by a peephole
2209 ; because the instructions are not adjacent (peepholes are related by posn -
2210 ; not by dataflow). BRC
;; cmov on a single extracted bit being clear: emitted as btsti plus a
;; movf/movt/clrf/clrt, one per constraint alternative.
2213 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2214 (if_then_else:SI (eq (zero_extract:SI
2215 (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
2217 (match_operand:SI 2 "mcore_literal_K_operand" "K,K,K,K"))
2219 (match_operand:SI 3 "mcore_arith_imm_operand" "r,0,U,0")
2220 (match_operand:SI 4 "mcore_arith_imm_operand" "0,r,0,U")))]
2223 btsti %1,%2\;movf %0,%3
2224 btsti %1,%2\;movt %0,%4
2225 btsti %1,%2\;clrf %0
2226 btsti %1,%2\;clrt %0"
2227 [(set_attr "length" "4")])
2229 ; turns sextb rx/btsti rx,31 into btsti rx,7. must be QImode to be safe. BRC
;; QImode-subreg variant: the sign bit of the byte is bit 7, so the
;; condition string requires a QImode SUBREG source.
2232 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2233 (if_then_else:SI (eq (lshiftrt:SI
2234 (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
2237 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
2238 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
2239 "GET_CODE (operands[1]) == SUBREG &&
2240 GET_MODE (SUBREG_REG (operands[1])) == QImode"
2242 btsti %1,7\;movf %0,%2
2243 btsti %1,7\;movt %0,%3
2245 btsti %1,7\;clrt %0"
2246 [(set_attr "length" "4")])
2249 ;; ------------------------------------------------------------------------
2251 ;; ------------------------------------------------------------------------
2253 ;; Combine creates this from an andn instruction in a scc sequence.
2254 ;; We must recognize it to get conditional moves generated.
2256 ; experimental conditional move with two constants +/- 1 BRC
;; movtK_3: like movtK_1 but the tested value is a register, so a
;; cmpnei is prepended (length 6 = cmpnei + 2-insn cmov sequence);
;; operands are shuffled into mcore_output_cmov's expected order.
2257 (define_insn "movtK_3"
2258 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2260 (ne (match_operand:SI 1 "mcore_arith_reg_operand" "r")
2262 (match_operand:SI 2 "mcore_arith_O_operand" "O")
2263 (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
2264 " GET_CODE (operands[2]) == CONST_INT
2265 && GET_CODE (operands[3]) == CONST_INT
2266 && ( (INTVAL (operands[2]) - INTVAL (operands[3]) == 1)
2267 || (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
2270 rtx out_operands[4];
2271 out_operands[0] = operands[0];
2272 out_operands[1] = operands[2];
2273 out_operands[2] = operands[3];
2274 out_operands[3] = operands[1];
2276 return mcore_output_cmov (out_operands, 1, \"cmpnei %3,0\");
2279 [(set_attr "length" "6")])
;; movt2: general register-tested cmov, cmpnei followed by
;; movt/movf/clrt/clrf per alternative.
2281 (define_insn "movt2"
2282 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2283 (if_then_else:SI (ne (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
2285 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
2286 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
2289 cmpnei %1,0\;movt %0,%2
2290 cmpnei %1,0\;movf %0,%3
2291 cmpnei %1,0\;clrt %0
2292 cmpnei %1,0\;clrf %0"
2293 [(set_attr "length" "4")])
;; NOTE(review): the two define_insn headers for the fragments below
;; (original lines ~2298-2299 and ~2317-2318) were dropped by the
;; extraction, as were several operand lines.  Comments only.
2295 ; turns lsli rx,imm/btsti rx,31 into btsti rx,imm. not done by a peephole
2296 ; because the instructions are not adjacent (peepholes are related by posn -
2297 ; not by dataflow). BRC
;; (ne (zero_extract ...)) variant of the btsti cmov: movt/movf senses are
;; swapped relative to the eq pattern earlier in the file.
2300 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2301 (if_then_else:SI (ne (zero_extract:SI
2302 (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
2304 (match_operand:SI 2 "mcore_literal_K_operand" "K,K,K,K"))
2306 (match_operand:SI 3 "mcore_arith_imm_operand" "r,0,U,0")
2307 (match_operand:SI 4 "mcore_arith_imm_operand" "0,r,0,U")))]
2310 btsti %1,%2\;movt %0,%3
2311 btsti %1,%2\;movf %0,%4
2312 btsti %1,%2\;clrt %0
2313 btsti %1,%2\;clrf %0"
2314 [(set_attr "length" "4")])
2316 ; turns sextb rx/btsti rx,31 into btsti rx,7. must be QImode to be safe. BRC
;; (ne (lshiftrt ...)) variant; QImode SUBREG guard makes the bit-7 test
;; equivalent to the sign test of the byte value.
2319 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2320 (if_then_else:SI (ne (lshiftrt:SI
2321 (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
2324 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
2325 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
2326 "GET_CODE (operands[1]) == SUBREG &&
2327 GET_MODE (SUBREG_REG (operands[1])) == QImode"
2329 btsti %1,7\;movt %0,%2
2330 btsti %1,7\;movf %0,%3
2332 btsti %1,7\;clrf %0"
2333 [(set_attr "length" "4")])
;; NOTE(review): cmovs keyed directly on the condition register (reg:CC 17).
;; The output templates of movt3 and movf1 (original lines 2359-2365 and
;; 2390-2396) are entirely missing from this extraction; comments only.
2335 ;; ------------------------------------------------------------------------
2337 ;; ------------------------------------------------------------------------
2339 ; experimental conditional move with two constants +/- 1 BRC
;; movtK_4: (eq (eq CC 0) 0) == "CC set"; constants differ by 1 so the
;; selection collapses to an incT/decT-style sequence via mcore_output_cmov.
2340 (define_insn "movtK_4"
2341 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2343 (eq (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
2344 (match_operand:SI 1 "mcore_arith_O_operand" "O")
2345 (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
2346 "GET_CODE (operands[1]) == CONST_INT &&
2347 GET_CODE (operands[2]) == CONST_INT &&
2348 ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
2349 (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
2350 "* return mcore_output_cmov(operands, 1, NULL);"
2351 [(set_attr "length" "4")])
2353 (define_insn "movt3"
2354 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2356 (eq (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
2357 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
2358 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
2366 ;; ------------------------------------------------------------------------
2368 ;; ------------------------------------------------------------------------
2370 ; experimental conditional move with two constants +/- 1 BRC
;; movtK_5: (eq (ne CC 0) 0) == "CC clear"; note the inverted first argument
;; (0) to mcore_output_cmov relative to movtK_4.
2371 (define_insn "movtK_5"
2372 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2374 (eq (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
2375 (match_operand:SI 1 "mcore_arith_O_operand" "O")
2376 (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
2377 "GET_CODE (operands[1]) == CONST_INT &&
2378 GET_CODE (operands[2]) == CONST_INT &&
2379 ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
2380 (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
2381 "* return mcore_output_cmov (operands, 0, NULL);"
2382 [(set_attr "length" "4")])
2384 (define_insn "movf1"
2385 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2387 (eq (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
2388 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
2389 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
;; NOTE(review): extraction gaps persist (original 2398, 2400, 2403, 2405,
;; 2408, 2410, 2417-2418, 2424, 2426, 2428, 2432, 2435-2436 missing).
;; Comments only.
2397 ;; ------------------------------------------------------------------------
2399 ;; ------------------------------------------------------------------------
2401 ;; Combine creates this from an andn instruction in a scc sequence.
2402 ;; We must recognize it to get conditional moves generated.
2404 ; experimental conditional move with two constants +/- 1 BRC
;; movtK_6: eq-on-register form of movtK_3; operands are permuted into
;; out_operands[] so mcore_output_cmov sees dest, the two constants, then
;; the register to test with cmpnei.
;; NOTE(review): the condition string checks operands[1]/[2] for CONST_INT
;; but the arithmetic uses operands[2]/[3] -- this asymmetry looks like a
;; latent typo in the original source (cf. movtK_3 which checks [2]/[3]);
;; confirm against a pristine copy before changing anything.
2406 (define_insn "movtK_6"
2407 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2409 (eq (match_operand:SI 1 "mcore_arith_reg_operand" "r")
2411 (match_operand:SI 2 "mcore_arith_O_operand" "O")
2412 (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
2413 "GET_CODE (operands[1]) == CONST_INT &&
2414 GET_CODE (operands[2]) == CONST_INT &&
2415 ((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) ||
2416 (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
2419 rtx out_operands[4];
2420 out_operands[0] = operands[0];
2421 out_operands[1] = operands[2];
2422 out_operands[2] = operands[3];
2423 out_operands[3] = operands[1];
2425 return mcore_output_cmov (out_operands, 0, \"cmpnei %3,0\");
2427 [(set_attr "length" "6")])
;; movf3: eq-on-register cmov; cmpnei then movf/movt/clrf/clrt (inverse
;; senses of movt2).
2429 (define_insn "movf3"
2430 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2431 (if_then_else:SI (eq (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
2433 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
2434 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
2437 cmpnei %1,0\;movf %0,%2
2438 cmpnei %1,0\;movt %0,%3
2439 cmpnei %1,0\;clrf %0
2440 cmpnei %1,0\;clrt %0"
2441 [(set_attr "length" "4")])
;; NOTE(review): more CC-register cmovs; the output templates of movf4 and
;; movt4 (original 2467-2473 and 2498-2504) are missing from this
;; extraction.  Comments only.
2443 ;; ------------------------------------------------------------------------
2445 ;; ------------------------------------------------------------------------
2447 ; experimental conditional move with two constants +/- 1 BRC
;; movtK_7: (ne (eq CC 0) 0) == "CC clear"; constants-differ-by-1 cmov.
2448 (define_insn "movtK_7"
2449 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2451 (ne (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
2452 (match_operand:SI 1 "mcore_arith_O_operand" "O")
2453 (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
2454 "GET_CODE (operands[1]) == CONST_INT &&
2455 GET_CODE (operands[2]) == CONST_INT &&
2456 ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
2457 (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
2458 "* return mcore_output_cmov (operands, 0, NULL);"
2459 [(set_attr "length" "4")])
2461 (define_insn "movf4"
2462 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2464 (ne (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
2465 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
2466 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
2474 ;; ------------------------------------------------------------------------
2476 ;; ------------------------------------------------------------------------
2478 ; experimental conditional move with two constants +/- 1 BRC
;; movtK_8: (ne (ne CC 0) 0) == "CC set"; the true-sense twin of movtK_7.
2479 (define_insn "movtK_8"
2480 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2482 (ne (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
2483 (match_operand:SI 1 "mcore_arith_O_operand" "O")
2484 (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
2485 "GET_CODE (operands[1]) == CONST_INT &&
2486 GET_CODE (operands[2]) == CONST_INT &&
2487 ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
2488 (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
2489 "* return mcore_output_cmov (operands, 1, NULL);"
2490 [(set_attr "length" "4")])
2492 (define_insn "movt4"
2493 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2495 (ne (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
2496 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
2497 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
;; NOTE(review): lt/ge conditional moves; the sign test is done with
;; btsti %reg,31 (test the sign bit) instead of a compare.  Extraction
;; gaps remain (original 2507, 2509, 2511, 2515, 2517, 2524-2525, ...).
;; Comments only.
2505 ;; Also need patterns to recognize lt/ge, since otherwise the compiler will
2506 ;; try to output not/asri/tstne/movf.
2508 ;; ------------------------------------------------------------------------
2510 ;; ------------------------------------------------------------------------
2512 ; experimental conditional move with two constants +/- 1 BRC
;; movtK_9: (lt reg 0) with two constants differing by 1; lowered through
;; mcore_output_cmov with a btsti on bit 31.
2513 (define_insn "movtK_9"
2514 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2516 (lt (match_operand:SI 1 "mcore_arith_reg_operand" "r")
2518 (match_operand:SI 2 "mcore_arith_O_operand" "O")
2519 (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
2520 "GET_CODE (operands[2]) == CONST_INT &&
2521 GET_CODE (operands[3]) == CONST_INT &&
2522 ((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) ||
2523 (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
2526 rtx out_operands[4];
2527 out_operands[0] = operands[0];
2528 out_operands[1] = operands[2];
2529 out_operands[2] = operands[3];
2530 out_operands[3] = operands[1];
2532 return mcore_output_cmov (out_operands, 1, \"btsti %3,31\");
2534 [(set_attr "length" "6")])
;; movt5: general (lt reg 0) cmov; btsti 31 then movt/movf/clrt/clrf.
2536 (define_insn "movt5"
2537 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2538 (if_then_else:SI (lt (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
2540 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
2541 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
2544 btsti %1,31\;movt %0,%2
2545 btsti %1,31\;movf %0,%3
2546 btsti %1,31\;clrt %0
2547 btsti %1,31\;clrf %0"
2548 [(set_attr "length" "4")])
2551 ;; ------------------------------------------------------------------------
2553 ;; ------------------------------------------------------------------------
2555 ; experimental conditional move with two constants +/- 1 BRC
;; movtK_10: the ge twin of movtK_9 (same btsti 31 test, inverted sense).
2556 (define_insn "movtK_10"
2557 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2559 (ge (match_operand:SI 1 "mcore_arith_reg_operand" "r")
2561 (match_operand:SI 2 "mcore_arith_O_operand" "O")
2562 (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
2563 "GET_CODE (operands[2]) == CONST_INT &&
2564 GET_CODE (operands[3]) == CONST_INT &&
2565 ((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) ||
2566 (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
2569 rtx out_operands[4];
2570 out_operands[0] = operands[0];
2571 out_operands[1] = operands[2];
2572 out_operands[2] = operands[3];
2573 out_operands[3] = operands[1];
2575 return mcore_output_cmov (out_operands, 0, \"btsti %3,31\");
2577 [(set_attr "length" "6")])
;; movf5: general (ge reg 0) cmov; btsti 31 then movf/movt/clrf/clrt.
2579 (define_insn "movf5"
2580 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2581 (if_then_else:SI (ge (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
2583 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
2584 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
2587 btsti %1,31\;movf %0,%2
2588 btsti %1,31\;movt %0,%3
2589 btsti %1,31\;clrf %0
2590 btsti %1,31\;clrt %0"
2591 [(set_attr "length" "4")])
;; NOTE(review): bitfield-extract section.  The commented-out historical
;; extv expander and the live one below both have lines dropped by the
;; extraction (e.g. the expander's condition string, braces, DONE/FAIL
;; statements).  Comments only.
2593 ;; ------------------------------------------------------------------------
2594 ;; Bitfield extract (xtrbN)
2595 ;; ------------------------------------------------------------------------
2597 ; sometimes we're better off using QI/HI mode and letting the machine indep.
2598 ; part expand insv and extv.
2600 ; e.g., sequences like:a [an insertion]
2603 ; movi r7,0x00ffffff
2605 ; stw r8,(r6) r8 dead
2610 ; stb r8,(r6) r8 dead
2612 ; it looks like always using SI mode is a win except in this type of code
2613 ; (when adjacent bit fields collapse on a byte or halfword boundary). when
2614 ; expanding with SI mode, non-adjacent bit field masks fold, but with QI/HI
2615 ; mode, they do not. one thought is to add some peepholes to cover cases
2616 ; like the above, but this is not a general solution.
2618 ; -mword-bitfields expands/inserts using SI mode. otherwise, do it with
2619 ; the smallest mode possible (using the machine indep. expansions). BRC
;; Historical (commented-out) version of the extv expander, kept for
;; reference; superseded by the live define_expand below.
2621 ;(define_expand "extv"
2622 ; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2623 ; (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2624 ; (match_operand:SI 2 "const_int_operand" "")
2625 ; (match_operand:SI 3 "const_int_operand" "")))
2626 ; (clobber (reg:CC 17))]
2630 ; if (INTVAL (operands[1]) != 8 || INTVAL (operands[2]) % 8 != 0)
2632 ; if (TARGET_W_FIELD)
2634 ; rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3])));
2635 ; rtx rshft = GEN_INT (32 - INTVAL (operands[2]));
2637 ; emit_insn (gen_rtx_SET (SImode, operands[0], operands[1]));
2638 ; emit_insn (gen_rtx_SET (SImode, operands[0],
2639 ; gen_rtx_ASHIFT (SImode, operands[0], lshft)));
2640 ; emit_insn (gen_rtx_SET (SImode, operands[0],
2641 ; gen_rtx_ASHIFTRT (SImode, operands[0], rshft)));
;; Live extv expander: byte-aligned 8-bit fields fall through to the
;; xtrb+sext insns; otherwise with -mwide-bitfields an ashift/ashiftrt
;; pair through two temporaries extracts the signed field.
2649 (define_expand "extv"
2650 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2651 (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2652 (match_operand:SI 2 "const_int_operand" "")
2653 (match_operand:SI 3 "const_int_operand" "")))
2654 (clobber (reg:CC 17))]
2658 if (INTVAL (operands[2]) == 8 && INTVAL (operands[3]) % 8 == 0)
2660 /* 8 bit field, aligned properly, use the xtrb[0123]+sext sequence. */
2661 /* not DONE, not FAIL, but let the RTL get generated.... */
2663 else if (TARGET_W_FIELD)
2665 /* Arbitrary placement; note that the tree->rtl generator will make
2666 something close to this if we return FAIL */
2667 rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3])));
2668 rtx rshft = GEN_INT (32 - INTVAL (operands[2]));
2669 rtx tmp1 = gen_reg_rtx (SImode);
2670 rtx tmp2 = gen_reg_rtx (SImode);
2672 emit_insn (gen_rtx_SET (SImode, tmp1, operands[1]));
2673 emit_insn (gen_rtx_SET (SImode, tmp2,
2674 gen_rtx_ASHIFT (SImode, tmp1, lshft)));
2675 emit_insn (gen_rtx_SET (SImode, operands[0],
2676 gen_rtx_ASHIFTRT (SImode, tmp2, rshft)));
2681 /* Let the caller choose an alternate sequence. */
;; extzv expander -- unsigned bitfield extract.  Three strategies:
;;   1. byte-aligned 8-bit field: fall through to the xtrb[0123] insns;
;;   2. narrow field whose mask fits constraint K: shift right then andi;
;;   3. -mwide-bitfields: ashift/lshiftrt pair through two temporaries.
;; NOTE(review): extraction dropped this expander's condition string,
;; braces and DONE/FAIL lines (gaps at original 2692-2694, 2696, 2699,
;; 2701, 2705-2706, ...).  Comments only.
2686 (define_expand "extzv"
2687 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2688 (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2689 (match_operand:SI 2 "const_int_operand" "")
2690 (match_operand:SI 3 "const_int_operand" "")))
2691 (clobber (reg:CC 17))]
2695 if (INTVAL (operands[2]) == 8 && INTVAL (operands[3]) % 8 == 0)
2697 /* 8 bit field, aligned properly, use the xtrb[0123] sequence. */
2698 /* Let the template generate some RTL.... */
2700 else if (CONST_OK_FOR_K ((1 << INTVAL (operands[2])) - 1))
2702 /* A narrow bit-field (<=5 bits) means we can do a shift to put
2703 it in place and then use an andi to extract it.
2704 This is as good as a shiftleft/shiftright. */
2707 rtx mask = GEN_INT ((1 << INTVAL (operands[2])) - 1);
2709 if (INTVAL (operands[3]) == 0)
2711 shifted = operands[1];
2715 rtx rshft = GEN_INT (INTVAL (operands[3]));
2716 shifted = gen_reg_rtx (SImode);
2717 emit_insn (gen_rtx_SET (SImode, shifted,
2718 gen_rtx_LSHIFTRT (SImode, operands[1], rshft)));
2720 emit_insn (gen_rtx_SET (SImode, operands[0],
2721 gen_rtx_AND (SImode, shifted, mask)));
2724 else if (TARGET_W_FIELD)
2726 /* Arbitrary pattern; play shift/shift games to get it.
2727 * this is pretty much what the caller will do if we say FAIL */
2728 rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3])));
2729 rtx rshft = GEN_INT (32 - INTVAL (operands[2]));
2730 rtx tmp1 = gen_reg_rtx (SImode);
2731 rtx tmp2 = gen_reg_rtx (SImode);
2733 emit_insn (gen_rtx_SET (SImode, tmp1, operands[1]));
2734 emit_insn (gen_rtx_SET (SImode, tmp2,
2735 gen_rtx_ASHIFT (SImode, tmp1, lshft)));
2736 emit_insn (gen_rtx_SET (SImode, operands[0],
2737 gen_rtx_LSHIFTRT (SImode, tmp2, rshft)));
2742 /* Make the compiler figure out some alternative mechanism. */
2746 /* Emit the RTL pattern; something will match it later. */
;; insv expander -- bitfield insert; all the work is delegated to
;; mcore_expand_insv() in the backend C code (DONE/FAIL lines were
;; dropped by the extraction).  Comments only.
2749 (define_expand "insv"
2750 [(set (zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "")
2751 (match_operand:SI 1 "const_int_operand" "")
2752 (match_operand:SI 2 "const_int_operand" ""))
2753 (match_operand:SI 3 "general_operand" ""))
2754 (clobber (reg:CC 17))]
2758 if (mcore_expand_insv (operands))
;; xtrb[0123] patterns: extract byte N (3 = LSB, 0 = MSB) of an SI value
;; into r1-constrained ("b") or general registers; SIGNED variants append
;; sextb, UNSIGNED variants use zextb or plain xtrb.  All define_insn
;; headers and some templates were dropped by the extraction; comments only.
2769 ;; the xtrb[0123] instructions handily get at 8-bit fields on nice boundaries.
2770 ;; but then, they do force you through r1.
2772 ;; the combiner will build such patterns for us, so we'll make them available
2775 ;; Note that we have both SIGNED and UNSIGNED versions of these...
2779 ;; These no longer worry about the clobbering of CC bit; not sure this is
2782 ;; the SIGNED versions of these
;; sign_extract byte 0 (bits 24..31): sextb alone when dest==src, else
;; xtrb0 + sextb.
2785 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b")
2786 (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 24)))]
2790 xtrb0 %0,%1\;sextb %0"
2791 [(set_attr "type" "shift")])
2794 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b")
2795 (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 16)))]
2797 "xtrb1 %0,%1\;sextb %0"
2798 [(set_attr "type" "shift")])
2801 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b")
2802 (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 8)))]
2804 "xtrb2 %0,%1\;sextb %0"
2805 [(set_attr "type" "shift")])
2808 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2809 (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0") (const_int 8) (const_int 0)))]
2812 [(set_attr "type" "shift")])
2814 ;; the UNSIGNED uses of xtrb[0123]
2817 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b")
2818 (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 24)))]
2823 [(set_attr "type" "shift")])
2826 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b")
2827 (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 16)))]
2830 [(set_attr "type" "shift")])
2833 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b")
2834 (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 8)))]
2837 [(set_attr "type" "shift")])
2839 ;; This can be peepholed if it follows a ldb ...
2841 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b")
2842 (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 0)))]
2846 xtrb3 %0,%1\;zextb %0"
2847 [(set_attr "type" "shift")])
;; Block move: movmemsi delegates to mcore_expand_block_move() (DONE/FAIL
;; lines dropped by the extraction).  The commented-out loadqi/loadhi/
;; loadsi/storeqi/storehi/storesi patterns below were intended to be
;; generated by expand_block_move but never were.  Comments only.
2850 ;; ------------------------------------------------------------------------
2851 ;; Block move - adapted from m88k.md
2852 ;; ------------------------------------------------------------------------
2854 (define_expand "movmemsi"
2855 [(parallel [(set (mem:BLK (match_operand:BLK 0 "" ""))
2856 (mem:BLK (match_operand:BLK 1 "" "")))
2857 (use (match_operand:SI 2 "general_operand" ""))
2858 (use (match_operand:SI 3 "immediate_operand" ""))])]
2862 if (mcore_expand_block_move (operands))
2868 ;; ;;; ??? These patterns are meant to be generated from expand_block_move,
2869 ;; ;;; but they currently are not.
2872 ;; [(set (match_operand:QI 0 "mcore_arith_reg_operand" "=r")
2873 ;; (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))]
2876 ;; [(set_attr "type" "load")])
2879 ;; [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r")
2880 ;; (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))]
2883 ;; [(set_attr "type" "load")])
2886 ;; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2887 ;; (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))]
2890 ;; [(set_attr "type" "load")])
2893 ;; [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m")
2894 ;; (match_operand:QI 1 "mcore_arith_reg_operand" "r"))]
2897 ;; [(set_attr "type" "store")])
2900 ;; [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m")
2901 ;; (match_operand:HI 1 "mcore_arith_reg_operand" "r"))]
2904 ;; [(set_attr "type" "store")])
2907 ;; [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m")
2908 ;; (match_operand:SI 1 "mcore_arith_reg_operand" "r"))]
2911 ;; [(set_attr "type" "store")])
;; Misc optimizing quirks.  The first pattern (its define_insn/peephole
;; header at original ~2920-2921 is missing) catches (int *)((p+=4)-4)
;; style post-increment traversal; the second is the matching define_split
;; (header also missing) that splits the copy+increment back apart when
;; the registers differ.  Comments only.
2913 ;; ------------------------------------------------------------------------
2914 ;; Misc Optimizing quirks
2915 ;; ------------------------------------------------------------------------
2917 ;; pair to catch constructs like: (int *)((p+=4)-4) which happen
2918 ;; in stdarg/varargs traversal. This changes a 3 insn sequence to a 2
2919 ;; insn sequence. -- RBE 11/30/95
2922 (set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2923 (match_operand:SI 1 "mcore_arith_reg_operand" "+r"))
2924 (set (match_dup 1) (plus:SI (match_dup 1) (match_operand 2 "mcore_arith_any_imm_operand" "")))])]
2925 "GET_CODE(operands[2]) == CONST_INT"
2927 [(set_attr "length" "4")])
2931 (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2932 (match_operand:SI 1 "mcore_arith_reg_operand" ""))
2933 (set (match_dup 1) (plus:SI (match_dup 1) (match_operand 2 "mcore_arith_any_imm_operand" "")))])]
2934 "GET_CODE(operands[2]) == CONST_INT &&
2935 operands[0] != operands[1]"
2936 [(set (match_dup 0) (match_dup 1))
2937 (set (match_dup 1) (plus:SI (match_dup 1) (match_dup 2)))])
;; Peepholes that rely on mcore_is_dead() to prove the scratch register
;; really dies.  All define_peephole headers in this run were dropped by
;; the extraction.  Comments only.
2942 ; note: in the following patterns, use mcore_is_dead() to ensure that the
2943 ; reg we may be trashing really is dead. reload doesn't always mark
2944 ; deaths, so mcore_is_dead() (see mcore.c) scans forward to find its death. BRC
2946 ;;; A peephole to convert the 3 instruction sequence generated by reload
2947 ;;; to load a FP-offset address into a 2 instruction sequence.
2948 ;;; ??? This probably never matches anymore.
;; NOTE(review): the "error\;" prefix in this template looks deliberate --
;; it would make the assembler flag the (believed-dead) pattern if it ever
;; matched; confirm against a pristine copy.
2950 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
2951 (match_operand:SI 1 "const_int_operand" "J"))
2952 (set (match_dup 0) (neg:SI (match_dup 0)))
2954 (plus:SI (match_dup 0)
2955 (match_operand:SI 2 "mcore_arith_reg_operand" "r")))]
2956 "CONST_OK_FOR_J (INTVAL (operands[1]))"
2957 "error\;mov %0,%2\;subi %0,%1")
2959 ;; Moves of inlinable constants are done late, so when a 'not' is generated
2960 ;; it is never combined with the following 'and' to generate an 'andn' b/c
2961 ;; the combiner never sees it. use a peephole to pick up this case (happens
2962 ;; mostly with bitfields) BRC
;; andn peephole: constant load + and, where the constant would be built
;; with a 'not', becomes an andn via mcore_output_andn().
2965 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
2966 (match_operand:SI 1 "const_int_operand" "i"))
2967 (set (match_operand:SI 2 "mcore_arith_reg_operand" "r")
2968 (and:SI (match_dup 2) (match_dup 0)))]
2969 "mcore_const_trick_uses_not (INTVAL (operands[1])) &&
2970 operands[0] != operands[2] &&
2971 mcore_is_dead (insn, operands[0])"
2972 "* return mcore_output_andn (insn, operands)")
2974 ; when setting or clearing just two bits, it's cheapest to use two bseti's
2975 ; or bclri's. only happens when relaxing immediates. BRC
;; two-bit ior -> two bseti instructions (condition's first clause, at
;; original 2982, is missing from this extraction).
2978 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2979 (match_operand:SI 1 "const_int_operand" ""))
2980 (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
2981 (ior:SI (match_dup 2) (match_dup 0)))]
2983 && mcore_num_ones (INTVAL (operands[1])) == 2
2984 && mcore_is_dead (insn, operands[0])"
2985 "* return mcore_output_bseti (operands[2], INTVAL (operands[1]));")
;; two-zero-bit and -> two bclri instructions.
2988 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2989 (match_operand:SI 1 "const_int_operand" ""))
2990 (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
2991 (and:SI (match_dup 2) (match_dup 0)))]
2992 "TARGET_HARDLIT && mcore_num_zeros (INTVAL (operands[1])) == 2 &&
2993 mcore_is_dead (insn, operands[0])"
2994 "* return mcore_output_bclri (operands[2], INTVAL (operands[1]));")
;; bclri peepholes for QI/HI subreg masks: a mask with a single cleared
;; bit in the low 8 (resp. 16) bits becomes one bclri; the high bits are
;; forced set (| 0xffffff00 / | 0xffff0000) because they are don't-cares
;; in the narrow mode.  The peephole headers and the and's mask operand
;; lines were dropped by the extraction; comments only.
2996 ; change an and with a mask that has a single cleared bit into a bclri. this
2997 ; handles QI and HI mode values using the knowledge that the most significant
2998 ; bits don't matter.
3001 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
3002 (match_operand:SI 1 "const_int_operand" ""))
3003 (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
3004 (and:SI (match_operand:SI 3 "mcore_arith_reg_operand" "")
3006 "GET_CODE (operands[3]) == SUBREG &&
3007 GET_MODE (SUBREG_REG (operands[3])) == QImode &&
3008 mcore_num_zeros (INTVAL (operands[1]) | 0xffffff00) == 1 &&
3009 mcore_is_dead (insn, operands[0])"
3011 if (! mcore_is_same_reg (operands[2], operands[3]))
3012 output_asm_insn (\"mov\\t%2,%3\", operands);
3013 return mcore_output_bclri (operands[2], INTVAL (operands[1]) | 0xffffff00);")
3015 /* Do not fold these together -- mode is lost at final output phase. */
;; HImode variant; additionally requires operands[2] == operands[3], which
;; makes the mov in the output body unreachable here (kept for symmetry
;; with the QImode variant above -- presumably intentional; verify).
3018 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
3019 (match_operand:SI 1 "const_int_operand" ""))
3020 (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
3021 (and:SI (match_operand:SI 3 "mcore_arith_reg_operand" "")
3023 "GET_CODE (operands[3]) == SUBREG &&
3024 GET_MODE (SUBREG_REG (operands[3])) == HImode &&
3025 mcore_num_zeros (INTVAL (operands[1]) | 0xffff0000) == 1 &&
3026 operands[2] == operands[3] &&
3027 mcore_is_dead (insn, operands[0])"
3029 if (! mcore_is_same_reg (operands[2], operands[3]))
3030 output_asm_insn (\"mov\\t%2,%3\", operands);
3031 return mcore_output_bclri (operands[2], INTVAL (operands[1]) | 0xffff0000);")
;; -mwide-bitfields store-narrowing peephole: a load/mask/store of a full
;; word whose mask clears exactly one aligned byte (or halfword) becomes a
;; movi 0 + st.b/st.h at the right offset.  Requires both scratch regs
;; dead, both mems non-volatile, and a plain REG store address.  The
;; peephole header and several body lines were dropped by the extraction;
;; comments only.
3033 ; This peephole helps when using -mwide-bitfields to widen fields so they
3034 ; collapse. This, however, has the effect that a narrower mode is not used
3037 ; e.g., sequences like:
3040 ; movi r7,0x00ffffff
3042 ; stw r8,(r6) r8 dead
3044 ; get peepholed to become:
3047 ; stb r8,(r6) r8 dead
3049 ; Do only easy addresses that have no offset. This peephole is also applied
3050 ; to halfwords. We need to check that the load is non-volatile before we get
3054 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
3055 (match_operand:SI 1 "memory_operand" ""))
3056 (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
3057 (match_operand:SI 3 "const_int_operand" ""))
3058 (set (match_dup 0) (and:SI (match_dup 0) (match_dup 2)))
3059 (set (match_operand:SI 4 "memory_operand" "") (match_dup 0))]
3060 "mcore_is_dead (insn, operands[0]) &&
3061 ! MEM_VOLATILE_P (operands[1]) &&
3062 mcore_is_dead (insn, operands[2]) &&
3063 (mcore_byte_offset (INTVAL (operands[3])) > -1 ||
3064 mcore_halfword_offset (INTVAL (operands[3])) > -1) &&
3065 ! MEM_VOLATILE_P (operands[4]) &&
3066 GET_CODE (XEXP (operands[4], 0)) == REG"
3070 enum machine_mode mode;
3071 rtx base_reg = XEXP (operands[4], 0);
3073 if ((ofs = mcore_byte_offset (INTVAL (operands[3]))) > -1)
3075 else if ((ofs = mcore_halfword_offset (INTVAL (operands[3]))) > -1)
3081 operands[4] = gen_rtx_MEM (mode,
3082 gen_rtx_PLUS (SImode, base_reg, GEN_INT(ofs)));
3084 operands[4] = gen_rtx_MEM (mode, base_reg);
3087 return \"movi %0,0\\n\\tst.b %0,%4\";
3089 return \"movi %0,0\\n\\tst.h %0,%4\";
;; Sign-test, tst, and btsti-cmov peepholes.  All headers and the output
;; templates of the first two patterns were dropped by the extraction;
;; comments only.
3092 ; from sop11. get btsti's for (LT A 0) where A is a QI or HI value
;; QImode: sextb + sign test on a dead register -> btsti bit 7.
3095 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
3096 (sign_extend:SI (match_operand:QI 1 "mcore_arith_reg_operand" "0")))
3098 (lt:CC (match_dup 0)
3100 "mcore_is_dead (insn, operands[0])"
;; HImode: sexth + sign test on a dead register -> btsti bit 15.
3104 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
3105 (sign_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "0")))
3107 (lt:CC (match_dup 0)
3109 "mcore_is_dead (insn, operands[0])"
3112 ; Pick up a tst. This combination happens because the immediate is not
3113 ; allowed to fold into one of the operands of the tst. Does not happen
3114 ; when relaxing immediates. BRC
;; copy + and-with-literal + compare -> movi of the literal then tst.
3117 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
3118 (match_operand:SI 1 "mcore_arith_reg_operand" ""))
3120 (and:SI (match_dup 0)
3121 (match_operand:SI 2 "mcore_literal_K_operand" "")))
3122 (set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0)))]
3123 "mcore_is_dead (insn, operands[0])"
3124 "movi %0,%2\;tst %1,%0")
;; btsti cmov + compare: chooses among clrf/clrt/movf/movt by inspecting
;; which operand aliases the destination register; falls through (to lines
;; missing from this extraction) when no form applies.
3127 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
3128 (if_then_else:SI (ne (zero_extract:SI
3129 (match_operand:SI 1 "mcore_arith_reg_operand" "")
3131 (match_operand:SI 2 "mcore_literal_K_operand" ""))
3133 (match_operand:SI 3 "mcore_arith_imm_operand" "")
3134 (match_operand:SI 4 "mcore_arith_imm_operand" "")))
3135 (set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0)))]
3139 unsigned int op0 = REGNO (operands[0]);
3141 if (GET_CODE (operands[3]) == REG)
3143 if (REGNO (operands[3]) == op0 && GET_CODE (operands[4]) == CONST_INT
3144 && INTVAL (operands[4]) == 0)
3145 return \"btsti %1,%2\\n\\tclrf %0\";
3146 else if (GET_CODE (operands[4]) == REG)
3148 if (REGNO (operands[4]) == op0)
3149 return \"btsti %1,%2\\n\\tmovf %0,%3\";
3150 else if (REGNO (operands[3]) == op0)
3151 return \"btsti %1,%2\\n\\tmovt %0,%4\";
3156 else if (GET_CODE (operands[3]) == CONST_INT
3157 && INTVAL (operands[3]) == 0
3158 && GET_CODE (operands[4]) == REG)
3159 return \"btsti %1,%2\\n\\tclrt %0\";
;; Commented-out experimental peepholes (constant folding across two ands;
;; long-branch inlining) followed by the live constant-pool support insns.
;; Comments only.
3164 ; experimental - do the constant folding ourselves. note that this isn't
3165 ; re-applied like we'd really want. i.e., four ands collapse into two
3166 ; instead of one. this is because peepholes are applied as a sliding
3167 ; window. the peephole does not generate new rtl's, but instead slides
3168 ; across the rtl's generating machine instructions. it would be nice
3169 ; if the peephole optimizer is changed to re-apply patterns and to gen
3170 ; new rtl's. this is more flexible. the pattern below helps when we're
3171 ; not using relaxed immediates. BRC
3174 ; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
3175 ; (match_operand:SI 1 "const_int_operand" ""))
3176 ; (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
3177 ; (and:SI (match_dup 2) (match_dup 0)))
3178 ; (set (match_dup 0)
3179 ; (match_operand:SI 3 "const_int_operand" ""))
3180 ; (set (match_dup 2)
3181 ; (and:SI (match_dup 2) (match_dup 0)))]
3182 ; "!TARGET_RELAX_IMM && mcore_is_dead (insn, operands[0]) &&
3183 ; mcore_const_ok_for_inline (INTVAL (operands[1]) & INTVAL (operands[3]))"
3186 ; rtx out_operands[2];
3187 ; out_operands[0] = operands[0];
3188 ; out_operands[1] = GEN_INT (INTVAL (operands[1]) & INTVAL (operands[3]));
3190 ; output_inline_const (SImode, out_operands);
3192 ; output_asm_insn (\"and %2,%0\", operands);
3197 ; BRC: for inlining get rid of extra test - experimental
3199 ; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
3200 ; (ne:SI (reg:CC 17) (const_int 0)))
3201 ; (set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0)))
3203 ; (if_then_else (eq (reg:CC 17) (const_int 0))
3204 ; (label_ref (match_operand 1 "" ""))
3209 ; if (get_attr_length (insn) == 10)
3211 ; output_asm_insn (\"bt 2f\\n\\tjmpi [1f]\", operands);
3212 ; output_asm_insn (\".align 2\\n1:\", operands);
3213 ; output_asm_insn (\".long %1\\n2:\", operands);
3216 ; return \"bf %l1\";
3220 ;;; Special patterns for dealing with the constant pool.
3222 ;;; 4 byte integer in line.
;; consttable_4: emits a literal word via assemble_integer.
3224 (define_insn "consttable_4"
3225 [(unspec_volatile [(match_operand:SI 0 "general_operand" "=g")] 0)]
3229 assemble_integer (operands[0], 4, BITS_PER_WORD, 1);
3232 [(set_attr "length" "4")])
3234 ;;; align to a four byte boundary.
3236 (define_insn "align_4"
3237 [(unspec_volatile [(const_int 0)] 1)]
3241 ;;; Handle extra constant pool entries created during final pass.
3243 (define_insn "consttable_end"
3244 [(unspec_volatile [(const_int 0)] 2)]
3246 "* return mcore_output_jump_label_table ();")
;; allocate_stack expander -- dynamic stack allocation (alloca) with stack
;; probing every STACK_UNITS_MAXSTEP bytes.  Three strategies, chosen by
;; the C body: no probing when mcore_stack_increment == 0; unrolled
;; probe/adjust for small constant sizes; otherwise a runtime loop that
;; probes a page at a time.  Many body lines (braces, DONE statements,
;; label declarations) were dropped by the extraction; comments only.
3249 ;; Stack allocation -- in particular, for alloca().
3250 ;; this is *not* what we use for entry into functions.
3252 ;; This is how we allocate stack space. If we are allocating a
3253 ;; constant amount of space and we know it is less than 4096
3254 ;; bytes, we need do nothing.
3256 ;; If it is more than 4096 bytes, we need to probe the stack
3259 ;; operands[1], the distance is a POSITIVE number indicating that we
3260 ;; are allocating stack space
3262 (define_expand "allocate_stack"
3265 (match_operand:SI 1 "general_operand" "")))
3266 (set (match_operand:SI 0 "register_operand" "=r")
3271 /* If he wants no probing, just do it for him. */
3272 if (mcore_stack_increment == 0)
3274 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,operands[1]));
3275 ;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
3279 /* For small constant growth, we unroll the code. */
3280 if (GET_CODE (operands[1]) == CONST_INT
3281 && INTVAL (operands[1]) < 8 * STACK_UNITS_MAXSTEP)
3283 HOST_WIDE_INT left = INTVAL(operands[1]);
3285 /* If it's a long way, get close enough for a last shot. */
3286 if (left >= STACK_UNITS_MAXSTEP)
3288 rtx tmp = gen_reg_rtx (Pmode);
3289 emit_insn (gen_movsi (tmp, GEN_INT (STACK_UNITS_MAXSTEP)));
3292 rtx memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
3294 MEM_VOLATILE_P (memref) = 1;
3295 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
3296 emit_insn (gen_movsi (memref, stack_pointer_rtx));
3297 left -= STACK_UNITS_MAXSTEP;
3299 while (left > STACK_UNITS_MAXSTEP);
3301 /* Perform the final adjustment. */
3302 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-left)));
3303 ;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
3309 rtx loop_label = gen_label_rtx ();
3310 rtx step = gen_reg_rtx (Pmode);
3311 rtx tmp = gen_reg_rtx (Pmode);
3315 emit_insn (gen_movsi (tmp, operands[1]));
3316 emit_insn (gen_movsi (step, GEN_INT (STACK_UNITS_MAXSTEP)));
3318 if (GET_CODE (operands[1]) != CONST_INT)
3320 out_label = gen_label_rtx ();
3321 emit_insn (gen_cmpsi (step, tmp)); /* quick out */
3322 emit_jump_insn (gen_bgeu (out_label));
3325 /* Run a loop that steps it incrementally. */
3326 emit_label (loop_label);
3328 /* Extend a step, probe, and adjust remaining count. */
3329 emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx, step));
3330 memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
3331 MEM_VOLATILE_P (memref) = 1;
3332 emit_insn(gen_movsi(memref, stack_pointer_rtx));
3333 emit_insn(gen_subsi3(tmp, tmp, step));
3335 /* Loop condition -- going back up. */
3336 emit_insn (gen_cmpsi (step, tmp));
3337 emit_jump_insn (gen_bltu (loop_label));
3340 emit_label (out_label);
3342 /* Bump the residual. */
3343 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
3344 ;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
3347 /* simple one-shot -- ensure register and do a subtract.
3348 * This does NOT comply with the ABI. */
3349 emit_insn (gen_movsi (tmp, operands[1]));
3350 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
3351 ;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx);