1 /* Subroutines used for code generation on intel 80960.
2 Copyright (C) 1992, 1995, 1996, 1997, 1998, 1999, 2000, 2001
3 Free Software Foundation, Inc.
4 Contributed by Steven McGeady, Intel Corp.
5 Additional Work by Glenn Colon-Bonet, Jonathan Shapiro, Andy Wilson
6 Converted to GCC 2.0 by Jim Wilson and Michael Tiemann, Cygnus Support.
8 This file is part of GNU CC.
10 GNU CC is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
15 GNU CC is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GNU CC; see the file COPYING. If not, write to
22 the Free Software Foundation, 59 Temple Place - Suite 330,
23 Boston, MA 02111-1307, USA. */
27 #include "coretypes.h"
32 #include "hard-reg-set.h"
34 #include "insn-config.h"
35 #include "conditions.h"
37 #include "insn-attr.h"
47 #include "target-def.h"
49 static void i960_output_function_prologue PARAMS ((FILE *, HOST_WIDE_INT));
50 static void i960_output_function_epilogue PARAMS ((FILE *, HOST_WIDE_INT));
51 static void i960_output_mi_thunk PARAMS ((FILE *, tree, HOST_WIDE_INT,
52 HOST_WIDE_INT, tree));
53 static bool i960_rtx_costs PARAMS ((rtx, int, int, int *));
54 static int i960_address_cost PARAMS ((rtx));
56 /* Save the operands last given to a compare for use when we
57 generate a scc or bcc insn. */
59 rtx i960_compare_op0, i960_compare_op1;
61 /* Used to implement #pragma align/noalign. Initialized by OVERRIDE_OPTIONS
64 int i960_maxbitalignment;
65 int i960_last_maxbitalignment;
67 /* Used to implement switching between MEM and ALU insn types, for better
68 C series performance. */
70 enum insn_types i960_last_insn_type;
72 /* The leaf-procedure return register. Set only if this is a leaf routine. */
74 static int i960_leaf_ret_reg;
76 /* True if replacing tail calls with jumps is OK. */
78 static int tail_call_ok;
80 /* A string containing a list of insns to emit in the epilogue so as to
81 restore all registers saved by the prologue. Created by the prologue
82 code as it saves registers away. */
84 char epilogue_string[1000];
86 /* A unique number (per function) for return labels. */
88 static int ret_label = 0;
90 /* This is true if FNDECL is either a varargs or a stdarg function.
91 This is used to help identify functions that use an argument block. */
93 #define VARARGS_STDARG_FUNCTION(FNDECL) \
94 (TYPE_ARG_TYPES (TREE_TYPE (FNDECL)) != 0 \
95 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (TREE_TYPE (FNDECL))))) \
98 /* Initialize the GCC target structure. */
99 #undef TARGET_ASM_ALIGNED_SI_OP
100 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
102 #undef TARGET_ASM_FUNCTION_PROLOGUE
103 #define TARGET_ASM_FUNCTION_PROLOGUE i960_output_function_prologue
104 #undef TARGET_ASM_FUNCTION_EPILOGUE
105 #define TARGET_ASM_FUNCTION_EPILOGUE i960_output_function_epilogue
107 #undef TARGET_ASM_OUTPUT_MI_THUNK
108 #define TARGET_ASM_OUTPUT_MI_THUNK i960_output_mi_thunk
109 #undef TARGET_CAN_ASM_OUTPUT_MI_THUNK
110 #define TARGET_CAN_ASM_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
112 #undef TARGET_RTX_COSTS
113 #define TARGET_RTX_COSTS i960_rtx_costs
114 #undef TARGET_ADDRESS_COST
115 #define TARGET_ADDRESS_COST i960_address_cost
117 struct gcc_target targetm = TARGET_INITIALIZER;
119 /* Override conflicting target switch options.
120 Doesn't actually detect if more than one -mARCH option is given, but
121 does handle the case of two blatantly conflicting -mARCH options.
123 Also initialize variables before compiling any files. */
128 if (TARGET_K_SERIES && TARGET_C_SERIES)
130 warning ("conflicting architectures defined - using C series");
131 target_flags &= ~TARGET_FLAG_K_SERIES;
133 if (TARGET_K_SERIES && TARGET_MC)
135 warning ("conflicting architectures defined - using K series");
136 target_flags &= ~TARGET_FLAG_MC;
138 if (TARGET_C_SERIES && TARGET_MC)
140 warning ("conflicting architectures defined - using C series");
141 target_flags &= ~TARGET_FLAG_MC;
143 if (TARGET_IC_COMPAT3_0)
145 flag_short_enums = 1;
146 flag_signed_char = 1;
147 target_flags |= TARGET_FLAG_CLEAN_LINKAGE;
148 if (TARGET_IC_COMPAT2_0)
150 warning ("iC2.0 and iC3.0 are incompatible - using iC3.0");
151 target_flags &= ~TARGET_FLAG_IC_COMPAT2_0;
154 if (TARGET_IC_COMPAT2_0)
156 flag_signed_char = 1;
157 target_flags |= TARGET_FLAG_CLEAN_LINKAGE;
160 if (TARGET_IC_COMPAT2_0)
162 i960_maxbitalignment = 8;
163 i960_last_maxbitalignment = 128;
167 i960_maxbitalignment = 128;
168 i960_last_maxbitalignment = 8;
171 /* Tell the compiler which flavor of TFmode we're using. */
172 real_format_for_mode[TFmode - QFmode] = &ieee_extended_intel_128_format;
175 /* Return true if OP can be used as the source of an fp move insn. */
178 fpmove_src_operand (op, mode)
180 enum machine_mode mode;
182 return (GET_CODE (op) == CONST_DOUBLE || general_operand (op, mode));
186 /* Return true if OP is a register or zero. */
189 reg_or_zero_operand (op, mode)
191 enum machine_mode mode;
193 return register_operand (op, mode) || op == const0_rtx;
197 /* Return truth value of whether OP can be used as an operands in a three
198 address arithmetic insn (such as add %o1,7,%l2) of mode MODE. */
201 arith_operand (op, mode)
203 enum machine_mode mode;
205 return (register_operand (op, mode) || literal (op, mode));
208 /* Return truth value of whether OP can be used as an operands in a three
209 address logic insn, possibly complementing OP, of mode MODE. */
212 logic_operand (op, mode)
214 enum machine_mode mode;
216 return (register_operand (op, mode)
217 || (GET_CODE (op) == CONST_INT
218 && INTVAL(op) >= -32 && INTVAL(op) < 32));
221 /* Return true if OP is a register or a valid floating point literal. */
224 fp_arith_operand (op, mode)
226 enum machine_mode mode;
228 return (register_operand (op, mode) || fp_literal (op, mode));
231 /* Return true if OP is a register or a valid signed integer literal. */
234 signed_arith_operand (op, mode)
236 enum machine_mode mode;
238 return (register_operand (op, mode) || signed_literal (op, mode));
241 /* Return truth value of whether OP is an integer which fits the
242 range constraining immediate operands in three-address insns. */
247 enum machine_mode mode ATTRIBUTE_UNUSED;
249 return ((GET_CODE (op) == CONST_INT) && INTVAL(op) >= 0 && INTVAL(op) < 32);
252 /* Return true if OP is a float constant of 1. */
255 fp_literal_one (op, mode)
257 enum machine_mode mode;
259 return (TARGET_NUMERICS && mode == GET_MODE (op) && op == CONST1_RTX (mode));
262 /* Return true if OP is a float constant of 0. */
265 fp_literal_zero (op, mode)
267 enum machine_mode mode;
269 return (TARGET_NUMERICS && mode == GET_MODE (op) && op == CONST0_RTX (mode));
272 /* Return true if OP is a valid floating point literal. */
277 enum machine_mode mode;
279 return fp_literal_zero (op, mode) || fp_literal_one (op, mode);
282 /* Return true if OP is a valid signed immediate constant. */
285 signed_literal(op, mode)
287 enum machine_mode mode ATTRIBUTE_UNUSED;
289 return ((GET_CODE (op) == CONST_INT) && INTVAL(op) > -32 && INTVAL(op) < 32);
292 /* Return truth value of statement that OP is a symbolic memory
293 operand of mode MODE. */
296 symbolic_memory_operand (op, mode)
298 enum machine_mode mode ATTRIBUTE_UNUSED;
300 if (GET_CODE (op) == SUBREG)
301 op = SUBREG_REG (op);
302 if (GET_CODE (op) != MEM)
305 return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST
306 || GET_CODE (op) == HIGH || GET_CODE (op) == LABEL_REF);
309 /* Return truth value of whether OP is EQ or NE. */
314 enum machine_mode mode ATTRIBUTE_UNUSED;
316 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
319 /* OP is an integer register or a constant. */
322 arith32_operand (op, mode)
324 enum machine_mode mode;
326 if (register_operand (op, mode))
328 return (CONSTANT_P (op));
331 /* Return true if OP is an integer constant which is a power of 2. */
334 power2_operand (op,mode)
336 enum machine_mode mode ATTRIBUTE_UNUSED;
338 if (GET_CODE (op) != CONST_INT)
341 return exact_log2 (INTVAL (op)) >= 0;
344 /* Return true if OP is an integer constant which is the complement of a
348 cmplpower2_operand (op, mode)
350 enum machine_mode mode ATTRIBUTE_UNUSED;
352 if (GET_CODE (op) != CONST_INT)
355 return exact_log2 (~ INTVAL (op)) >= 0;
358 /* If VAL has only one bit set, return the index of that bit. Otherwise
367 for (i = 0; val != 0; i++, val >>= 1)
379 /* Return nonzero if OP is a mask, i.e. all one bits are consecutive.
380 The return value indicates how many consecutive nonzero bits exist
381 if this is a mask. This is the same as the next function, except that
382 it does not indicate what the start and stop bit positions are. */
388 register int start, end = 0, i;
391 for (i = 0; val != 0; val >>= 1, i++)
401 /* Still looking for the first bit. */
405 /* We've seen the start of a bit sequence, and now a zero. There
406 must be more one bits, otherwise we would have exited the loop.
407 Therefore, it is not a mask. */
412 /* The bit string has ones from START to END bit positions only. */
413 return end - start + 1;
416 /* If VAL is a mask, then return nonzero, with S set to the starting bit
417 position and E set to the ending bit position of the mask. The return
418 value indicates how many consecutive bits exist in the mask. This is
419 the same as the previous function, except that it also indicates the
420 start and end bit positions of the mask. */
427 register int start, end, i;
431 for (i = 0; val != 0; val >>= 1, i++)
442 /* Still looking for the first bit. */
446 /* We've seen the start of a bit sequence, and now a zero. There
447 must be more one bits, otherwise we would have exited the loop.
448 Therefore, it is not a mask. */
457 /* The bit string has ones from START to END bit positions only. */
460 return ((start < 0) ? 0 : end - start + 1);
463 /* Return the machine mode to use for a comparison. */
466 select_cc_mode (op, x)
468 rtx x ATTRIBUTE_UNUSED;
470 if (op == GTU || op == LTU || op == GEU || op == LEU)
475 /* X and Y are two things to compare using CODE. Emit the compare insn and
476 return the rtx for register 36 in the proper mode. */
479 gen_compare_reg (code, x, y)
484 enum machine_mode ccmode = SELECT_CC_MODE (code, x, y);
485 enum machine_mode mode
486 = GET_MODE (x) == VOIDmode ? GET_MODE (y) : GET_MODE (x);
490 if (! arith_operand (x, mode))
491 x = force_reg (SImode, x);
492 if (! arith_operand (y, mode))
493 y = force_reg (SImode, y);
496 cc_reg = gen_rtx_REG (ccmode, 36);
497 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
498 gen_rtx_COMPARE (ccmode, x, y)));
503 /* For the i960, REG is cost 1, REG+immed CONST is cost 2, REG+REG is cost 2,
504 REG+nonimmed CONST is cost 4. REG+SYMBOL_REF, SYMBOL_REF, and similar
505 are 4. Indexed addresses are cost 6. */
507 /* ??? Try using just RTX_COST, i.e. not defining ADDRESS_COST. */
510 i960_address_cost (x)
513 if (GET_CODE (x) == REG)
516 /* This is a MEMA operand -- it's free. */
517 if (GET_CODE (x) == CONST_INT
519 && INTVAL (x) < 4096)
522 if (GET_CODE (x) == PLUS)
524 rtx base = XEXP (x, 0);
525 rtx offset = XEXP (x, 1);
527 if (GET_CODE (base) == SUBREG)
528 base = SUBREG_REG (base);
529 if (GET_CODE (offset) == SUBREG)
530 offset = SUBREG_REG (offset);
532 if (GET_CODE (base) == REG)
534 if (GET_CODE (offset) == REG)
536 if (GET_CODE (offset) == CONST_INT)
538 if ((unsigned)INTVAL (offset) < 2047)
542 if (CONSTANT_P (offset))
545 if (GET_CODE (base) == PLUS || GET_CODE (base) == MULT)
548 /* This is an invalid address. The return value doesn't matter, but
549 for convenience we make this more expensive than anything else. */
552 if (GET_CODE (x) == MULT)
555 /* Symbol_refs and other unrecognized addresses are cost 4. */
559 /* Emit insns to move operands[1] into operands[0].
561 Return 1 if we have written out everything that needs to be done to
562 do the move. Otherwise, return 0 and the caller will emit the move
566 emit_move_sequence (operands, mode)
568 enum machine_mode mode;
570 /* We can only store registers to memory. */
572 if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) != REG
573 && (operands[1] != const0_rtx || current_function_args_size
574 || current_function_stdarg
575 || rtx_equal_function_value_matters))
576 /* Here we use the same test as movsi+1 pattern -- see i960.md. */
577 operands[1] = force_reg (mode, operands[1]);
579 /* Storing multi-word values in unaligned hard registers to memory may
580 require a scratch since we have to store them a register at a time and
581 adding 4 to the memory address may not yield a valid insn. */
582 /* ??? We don't always need the scratch, but that would complicate things.
584 /* ??? We must also handle stores to pseudos here, because the pseudo may be
585 replaced with a MEM later. This would be cleaner if we didn't have
586 a separate pattern for unaligned DImode/TImode stores. */
587 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
588 && (GET_CODE (operands[0]) == MEM
589 || (GET_CODE (operands[0]) == REG
590 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
591 && GET_CODE (operands[1]) == REG
592 && REGNO (operands[1]) < FIRST_PSEUDO_REGISTER
593 && ! HARD_REGNO_MODE_OK (REGNO (operands[1]), mode))
595 emit_insn (gen_rtx_PARALLEL
598 gen_rtx_SET (VOIDmode, operands[0], operands[1]),
599 gen_rtx_CLOBBER (VOIDmode,
600 gen_rtx_SCRATCH (Pmode)))));
607 /* Output assembler to move a double word value. */
610 i960_output_move_double (dst, src)
615 if (GET_CODE (dst) == REG
616 && GET_CODE (src) == REG)
618 if ((REGNO (src) & 1)
619 || (REGNO (dst) & 1))
621 /* We normally copy the low-numbered register first. However, if
622 the second source register is the same as the first destination
623 register, we must copy in the opposite order. */
624 if (REGNO (src) + 1 == REGNO (dst))
625 return "mov %D1,%D0\n\tmov %1,%0";
627 return "mov %1,%0\n\tmov %D1,%D0";
632 else if (GET_CODE (dst) == REG
633 && GET_CODE (src) == CONST_INT
634 && CONST_OK_FOR_LETTER_P (INTVAL (src), 'I'))
637 return "mov %1,%0\n\tmov 0,%D0";
641 else if (GET_CODE (dst) == REG
642 && GET_CODE (src) == MEM)
646 /* One can optimize a few cases here, but you have to be
647 careful of clobbering registers used in the address and
651 operands[2] = gen_rtx_REG (Pmode, REGNO (dst) + 1);
652 operands[3] = gen_rtx_MEM (word_mode, operands[2]);
653 operands[4] = adjust_address (operands[3], word_mode,
656 ("lda %1,%2\n\tld %3,%0\n\tld %4,%D0", operands);
662 else if (GET_CODE (dst) == MEM
663 && GET_CODE (src) == REG)
668 operands[1] = adjust_address (dst, word_mode, UNITS_PER_WORD);
669 if (! memory_address_p (word_mode, XEXP (operands[1], 0)))
672 output_asm_insn ("st %2,%0\n\tst %D2,%1", operands);
681 /* Output assembler to move a double word zero. */
684 i960_output_move_double_zero (dst)
691 operands[1] = adjust_address (dst, word_mode, 4);
692 output_asm_insn ("st g14,%0\n\tst g14,%1", operands);
697 /* Output assembler to move a quad word value. */
700 i960_output_move_quad (dst, src)
705 if (GET_CODE (dst) == REG
706 && GET_CODE (src) == REG)
708 if ((REGNO (src) & 3)
709 || (REGNO (dst) & 3))
711 /* We normally copy starting with the low numbered register.
712 However, if there is an overlap such that the first dest reg
713 is <= the last source reg but not < the first source reg, we
714 must copy in the opposite order. */
715 if (REGNO (dst) <= REGNO (src) + 3
716 && REGNO (dst) >= REGNO (src))
717 return "mov %F1,%F0\n\tmov %E1,%E0\n\tmov %D1,%D0\n\tmov %1,%0";
719 return "mov %1,%0\n\tmov %D1,%D0\n\tmov %E1,%E0\n\tmov %F1,%F0";
724 else if (GET_CODE (dst) == REG
725 && GET_CODE (src) == CONST_INT
726 && CONST_OK_FOR_LETTER_P (INTVAL (src), 'I'))
729 return "mov %1,%0\n\tmov 0,%D0\n\tmov 0,%E0\n\tmov 0,%F0";
733 else if (GET_CODE (dst) == REG
734 && GET_CODE (src) == MEM)
738 /* One can optimize a few cases here, but you have to be
739 careful of clobbering registers used in the address and
743 operands[2] = gen_rtx_REG (Pmode, REGNO (dst) + 3);
744 operands[3] = gen_rtx_MEM (word_mode, operands[2]);
746 = adjust_address (operands[3], word_mode, UNITS_PER_WORD);
748 = adjust_address (operands[4], word_mode, UNITS_PER_WORD);
750 = adjust_address (operands[5], word_mode, UNITS_PER_WORD);
751 output_asm_insn ("lda %1,%2\n\tld %3,%0\n\tld %4,%D0\n\tld %5,%E0\n\tld %6,%F0", operands);
757 else if (GET_CODE (dst) == MEM
758 && GET_CODE (src) == REG)
763 operands[1] = adjust_address (dst, word_mode, UNITS_PER_WORD);
764 operands[2] = adjust_address (dst, word_mode, 2 * UNITS_PER_WORD);
765 operands[3] = adjust_address (dst, word_mode, 3 * UNITS_PER_WORD);
766 if (! memory_address_p (word_mode, XEXP (operands[3], 0)))
769 output_asm_insn ("st %4,%0\n\tst %D4,%1\n\tst %E4,%2\n\tst %F4,%3", operands);
778 /* Output assembler to move a quad word zero. */
781 i960_output_move_quad_zero (dst)
788 operands[1] = adjust_address (dst, word_mode, 4);
789 operands[2] = adjust_address (dst, word_mode, 8);
790 operands[3] = adjust_address (dst, word_mode, 12);
791 output_asm_insn ("st g14,%0\n\tst g14,%1\n\tst g14,%2\n\tst g14,%3", operands);
797 /* Emit insns to load a constant to non-floating point registers.
798 Uses several strategies to try to use as few insns as possible. */
801 i960_output_ldconst (dst, src)
802 register rtx dst, src;
805 register unsigned rsrc2;
806 enum machine_mode mode = GET_MODE (dst);
809 operands[0] = operands[2] = dst;
810 operands[1] = operands[3] = src;
812 /* Anything that isn't a compile time constant, such as a SYMBOL_REF,
813 must be a ldconst insn. */
815 if (GET_CODE (src) != CONST_INT && GET_CODE (src) != CONST_DOUBLE)
817 output_asm_insn ("ldconst %1,%0", operands);
820 else if (mode == TFmode)
826 if (fp_literal_zero (src, TFmode))
829 REAL_VALUE_FROM_CONST_DOUBLE (d, src);
830 REAL_VALUE_TO_TARGET_LONG_DOUBLE (d, value_long);
832 output_asm_insn ("# ldconst %1,%0",operands);
834 for (i = 0; i < 3; i++)
836 operands[0] = gen_rtx_REG (SImode, REGNO (dst) + i);
837 operands[1] = GEN_INT (value_long[i]);
838 output_asm_insn (i960_output_ldconst (operands[0], operands[1]),
844 else if (mode == DFmode)
848 if (fp_literal_zero (src, DFmode))
851 split_double (src, &first, &second);
853 output_asm_insn ("# ldconst %1,%0",operands);
855 operands[0] = gen_rtx_REG (SImode, REGNO (dst));
857 output_asm_insn (i960_output_ldconst (operands[0], operands[1]),
859 operands[0] = gen_rtx_REG (SImode, REGNO (dst) + 1);
860 operands[1] = second;
861 output_asm_insn (i960_output_ldconst (operands[0], operands[1]),
865 else if (mode == SFmode)
870 REAL_VALUE_FROM_CONST_DOUBLE (d, src);
871 REAL_VALUE_TO_TARGET_SINGLE (d, value);
873 output_asm_insn ("# ldconst %1,%0",operands);
874 operands[0] = gen_rtx_REG (SImode, REGNO (dst));
875 operands[1] = GEN_INT (value);
876 output_asm_insn (i960_output_ldconst (operands[0], operands[1]),
880 else if (mode == TImode)
882 /* ??? This is currently not handled at all. */
885 /* Note: lowest order word goes in lowest numbered reg. */
886 rsrc1 = INTVAL (src);
887 if (rsrc1 >= 0 && rsrc1 < 32)
890 output_asm_insn ("movq\t0,%0\t# ldconstq %1,%0",operands);
891 /* Go pick up the low-order word. */
893 else if (mode == DImode)
895 rtx upperhalf, lowerhalf, xoperands[2];
897 if (GET_CODE (src) == CONST_DOUBLE || GET_CODE (src) == CONST_INT)
898 split_double (src, &lowerhalf, &upperhalf);
903 /* Note: lowest order word goes in lowest numbered reg. */
904 /* Numbers from 0 to 31 can be handled with a single insn. */
905 rsrc1 = INTVAL (lowerhalf);
906 if (upperhalf == const0_rtx && rsrc1 >= 0 && rsrc1 < 32)
909 /* Output the upper half with a recursive call. */
910 xoperands[0] = gen_rtx_REG (SImode, REGNO (dst) + 1);
911 xoperands[1] = upperhalf;
912 output_asm_insn (i960_output_ldconst (xoperands[0], xoperands[1]),
914 /* The lower word is emitted as normally. */
918 rsrc1 = INTVAL (src);
924 else if (mode == HImode)
933 /* ldconst 0..31,X -> mov 0..31,X */
936 if (i960_last_insn_type == I_TYPE_REG && TARGET_C_SERIES)
941 /* ldconst 32..63,X -> add 31,nn,X */
944 if (i960_last_insn_type == I_TYPE_REG && TARGET_C_SERIES)
946 operands[1] = GEN_INT (rsrc1 - 31);
947 output_asm_insn ("addo\t31,%1,%0\t# ldconst %3,%0", operands);
953 /* ldconst -1..-31 -> sub 0,0..31,X */
956 /* return 'sub -(%1),0,%0' */
957 operands[1] = GEN_INT (- rsrc1);
958 output_asm_insn ("subo\t%1,0,%0\t# ldconst %3,%0", operands);
962 /* ldconst -32 -> not 31,X */
965 operands[1] = GEN_INT (~rsrc1);
966 output_asm_insn ("not\t%1,%0 # ldconst %3,%0", operands);
971 /* If const is a single bit. */
972 if (bitpos (rsrc1) >= 0)
974 operands[1] = GEN_INT (bitpos (rsrc1));
975 output_asm_insn ("setbit\t%1,0,%0\t# ldconst %3,%0", operands);
979 /* If const is a bit string of less than 6 bits (1..31 shifted). */
984 if (bitstr (rsrc1, &s, &e) < 6)
986 rsrc2 = ((unsigned int) rsrc1) >> s;
987 operands[1] = GEN_INT (rsrc2);
988 operands[2] = GEN_INT (s);
989 output_asm_insn ("shlo\t%2,%1,%0\t# ldconst %3,%0", operands);
994 /* Unimplemented cases:
995 const is in range 0..31 but rotated around end of word:
996 ror 31,3,g0 -> ldconst 0xe0000003,g0
998 and any 2 instruction cases that might be worthwhile */
1000 output_asm_insn ("ldconst %1,%0", operands);
1004 /* Determine if there is an opportunity for a bypass optimization.
1005 Bypass succeeds on the 960K* if the destination of the previous
1006 instruction is the second operand of the current instruction.
1007 Bypass always succeeds on the C*.
1009 Return 1 if the pattern should interchange the operands.
1011 CMPBR_FLAG is true if this is for a compare-and-branch insn.
1012 OP1 and OP2 are the two source operands of a 3 operand insn. */
1015 i960_bypass (insn, op1, op2, cmpbr_flag)
1016 register rtx insn, op1, op2;
1019 register rtx prev_insn, prev_dest;
1021 if (TARGET_C_SERIES)
1024 /* Can't do this if op1 isn't a register. */
1028 /* Can't do this for a compare-and-branch if both ops aren't regs. */
1029 if (cmpbr_flag && ! REG_P (op2))
1032 prev_insn = prev_real_insn (insn);
1034 if (prev_insn && GET_CODE (prev_insn) == INSN
1035 && GET_CODE (PATTERN (prev_insn)) == SET)
1037 prev_dest = SET_DEST (PATTERN (prev_insn));
1038 if ((GET_CODE (prev_dest) == REG && REGNO (prev_dest) == REGNO (op1))
1039 || (GET_CODE (prev_dest) == SUBREG
1040 && GET_CODE (SUBREG_REG (prev_dest)) == REG
1041 && REGNO (SUBREG_REG (prev_dest)) == REGNO (op1)))
1047 /* Output the code which declares the function name. This also handles
1048 leaf routines, which have special requirements, and initializes some
1049 global variables. */
1052 i960_function_name_declare (file, name, fndecl)
1061 /* Increment global return label. */
1065 /* Compute whether tail calls and leaf routine optimizations can be performed
1066 for this function. */
1068 if (TARGET_TAILCALL)
1073 if (TARGET_LEAFPROC)
1078 /* Even if nobody uses extra parms, can't have leafproc or tail calls if
1079 argblock, because argblock uses g14 implicitly. */
1081 if (current_function_args_size != 0 || VARARGS_STDARG_FUNCTION (fndecl))
1087 /* See if caller passes in an address to return value. */
1089 if (aggregate_value_p (DECL_RESULT (fndecl)))
1095 /* Can not use tail calls or make this a leaf routine if there is a non
1098 if (get_frame_size () != 0)
1101 /* I don't understand this condition, and do not think that it is correct.
1102 Apparently this is just checking whether the frame pointer is used, and
1103 we can't trust regs_ever_live[fp] since it is (almost?) always set. */
1106 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1107 if (GET_CODE (insn) == INSN
1108 && reg_mentioned_p (frame_pointer_rtx, insn))
1114 /* Check for CALL insns. Can not be a leaf routine if there are any. */
1117 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1118 if (GET_CODE (insn) == CALL_INSN)
1124 /* Can not be a leaf routine if any non-call clobbered registers are
1125 used in this function. */
1128 for (i = 0, j = 0; i < FIRST_PSEUDO_REGISTER; i++)
1129 if (regs_ever_live[i]
1130 && ((! call_used_regs[i]) || (i > 7 && i < 12)))
1132 /* Global registers. */
1133 if (i < 16 && i > 7 && i != 13)
1135 /* Local registers. */
1140 /* Now choose a leaf return register, if we can find one, and if it is
1141 OK for this to be a leaf routine. */
1143 i960_leaf_ret_reg = -1;
1145 if (optimize && leaf_proc_ok)
1147 for (i960_leaf_ret_reg = -1, i = 0; i < 8; i++)
1148 if (regs_ever_live[i] == 0)
1150 i960_leaf_ret_reg = i;
1151 regs_ever_live[i] = 1;
1156 /* Do this after choosing the leaf return register, so it will be listed
1157 if one was chosen. */
1159 fprintf (file, "\t# Function '%s'\n", (name[0] == '*' ? &name[1] : name));
1160 fprintf (file, "\t# Registers used: ");
1162 for (i = 0, j = 0; i < FIRST_PSEUDO_REGISTER; i++)
1164 if (regs_ever_live[i])
1166 fprintf (file, "%s%s ", reg_names[i], call_used_regs[i] ? "" : "*");
1168 if (i > 15 && j == 0)
1170 fprintf (file,"\n\t#\t\t ");
1176 fprintf (file, "\n");
1178 if (i960_leaf_ret_reg >= 0)
1180 /* Make it a leaf procedure. */
1182 if (TREE_PUBLIC (fndecl))
1183 fprintf (file,"\t.globl\t%s.lf\n", (name[0] == '*' ? &name[1] : name));
1185 fprintf (file, "\t.leafproc\t");
1186 assemble_name (file, name);
1187 fprintf (file, ",%s.lf\n", (name[0] == '*' ? &name[1] : name));
1188 ASM_OUTPUT_LABEL (file, name);
1189 fprintf (file, "\tlda Li960R%d,g14\n", ret_label);
1190 fprintf (file, "%s.lf:\n", (name[0] == '*' ? &name[1] : name));
1191 fprintf (file, "\tmov g14,g%d\n", i960_leaf_ret_reg);
1193 if (TARGET_C_SERIES)
1195 fprintf (file, "\tlda 0,g14\n");
1196 i960_last_insn_type = I_TYPE_MEM;
1200 fprintf (file, "\tmov 0,g14\n");
1201 i960_last_insn_type = I_TYPE_REG;
1206 ASM_OUTPUT_LABEL (file, name);
1207 i960_last_insn_type = I_TYPE_CTRL;
1211 /* Compute and return the frame size. */
1214 compute_frame_size (size)
1218 int outgoing_args_size = current_function_outgoing_args_size;
1220 /* The STARTING_FRAME_OFFSET is totally hidden to us as far
1221 as size is concerned. */
1222 actual_fsize = (size + 15) & -16;
1223 actual_fsize += (outgoing_args_size + 15) & -16;
1225 return actual_fsize;
1228 /* Here register group is range of registers which can be moved by
1229 one i960 instruction. */
1237 static int i960_form_reg_groups PARAMS ((int, int, int *, int, struct reg_group *));
1238 static int i960_reg_group_compare PARAMS ((const void *, const void *));
1239 static int i960_split_reg_group PARAMS ((struct reg_group *, int, int));
1240 static void i960_arg_size_and_align PARAMS ((enum machine_mode, tree, int *, int *));
1242 /* The following function forms the biggest possible register
1243 groups with registers in STATE. REGS contains the states of the
1244 registers in the range [start_reg, finish_reg). The function returns the
1245 number of groups formed. */
1247 i960_form_reg_groups (start_reg, finish_reg, regs, state, reg_groups)
1252 struct reg_group *reg_groups;
1257 for (i = start_reg; i < finish_reg; )
1259 if (regs [i] != state)
1264 else if (i % 2 != 0 || regs [i + 1] != state)
1265 reg_groups [nw].length = 1;
1266 else if (i % 4 != 0 || regs [i + 2] != state)
1267 reg_groups [nw].length = 2;
1268 else if (regs [i + 3] != state)
1269 reg_groups [nw].length = 3;
1271 reg_groups [nw].length = 4;
1272 reg_groups [nw].start_reg = i;
1273 i += reg_groups [nw].length;
1279 /* We sort register winodws in descending order by length. */
1281 i960_reg_group_compare (group1, group2)
1285 const struct reg_group *w1 = group1;
1286 const struct reg_group *w2 = group2;
1288 if (w1->length > w2->length)
1290 else if (w1->length < w2->length)
1296 /* Split the first register group in REG_GROUPS on subgroups one of
1297 which will contain SUBGROUP_LENGTH registers. The function
1298 returns new number of winodws. */
1300 i960_split_reg_group (reg_groups, nw, subgroup_length)
1301 struct reg_group *reg_groups;
1303 int subgroup_length;
1305 if (subgroup_length < reg_groups->length - subgroup_length)
1306 /* This guarantees correct alignments of the two subgroups for
1307 i960 (see spliting for the group length 2, 3, 4). More
1308 generalized algorithm would require splitting the group more
1310 subgroup_length = reg_groups->length - subgroup_length;
1311 /* More generalized algorithm would require to try merging
1312 subgroups here. But in case i960 it always results in failure
1313 because of register group alignment. */
1314 reg_groups[nw].length = reg_groups->length - subgroup_length;
1315 reg_groups[nw].start_reg = reg_groups->start_reg + subgroup_length;
1317 reg_groups->length = subgroup_length;
1318 qsort (reg_groups, nw, sizeof (struct reg_group), i960_reg_group_compare);
1322 /* Output code for the function prologue. */
1325 i960_output_function_prologue (file, size)
1329 register int i, j, nr;
1330 int n_saved_regs = 0;
1331 int n_remaining_saved_regs;
1332 HOST_WIDE_INT lvar_size;
1333 HOST_WIDE_INT actual_fsize, offset;
1335 struct reg_group *g, *l;
1337 /* -1 if reg must be saved on proc entry, 0 if available, 1 if saved
1339 int regs[FIRST_PSEUDO_REGISTER];
1340 /* All global registers (which must be saved) divided by groups. */
1341 struct reg_group global_reg_groups [16];
1342 /* All local registers (which are available) divided by groups. */
1343 struct reg_group local_reg_groups [16];
1346 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1347 if (regs_ever_live[i]
1348 && ((! call_used_regs[i]) || (i > 7 && i < 12))
1349 /* No need to save the static chain pointer. */
1350 && ! (i == STATIC_CHAIN_REGNUM && current_function_needs_context))
1353 /* Count global registers that need saving. */
1360 n_remaining_saved_regs = n_saved_regs;
1362 epilogue_string[0] = '\0';
1364 if (current_function_profile)
1366 /* When profiling, we may use registers 20 to 27 to save arguments, so
1367 they can't be used here for saving globals. J is the number of
1368 argument registers the mcount call will save. */
1369 for (j = 7; j >= 0 && ! regs_ever_live[j]; j--)
1372 for (i = 20; i <= j + 20; i++)
1376 gnw = i960_form_reg_groups (0, 16, regs, -1, global_reg_groups);
1377 lnw = i960_form_reg_groups (19, 32, regs, 0, local_reg_groups);
1378 qsort (global_reg_groups, gnw, sizeof (struct reg_group),
1379 i960_reg_group_compare);
1380 qsort (local_reg_groups, lnw, sizeof (struct reg_group),
1381 i960_reg_group_compare);
1382 for (g = global_reg_groups, l = local_reg_groups; lnw != 0 && gnw != 0;)
1384 if (g->length == l->length)
1386 fprintf (file, "\tmov%s %s,%s\n",
1387 ((g->length == 4) ? "q" :
1388 (g->length == 3) ? "t" :
1389 (g->length == 2) ? "l" : ""),
1390 reg_names[(unsigned char) g->start_reg],
1391 reg_names[(unsigned char) l->start_reg]);
1392 sprintf (tmpstr, "\tmov%s %s,%s\n",
1393 ((g->length == 4) ? "q" :
1394 (g->length == 3) ? "t" :
1395 (g->length == 2) ? "l" : ""),
1396 reg_names[(unsigned char) l->start_reg],
1397 reg_names[(unsigned char) g->start_reg]);
1398 strcat (epilogue_string, tmpstr);
1399 n_remaining_saved_regs -= g->length;
1400 for (i = 0; i < g->length; i++)
1402 regs [i + g->start_reg] = 1;
1403 regs [i + l->start_reg] = -1;
1404 regs_ever_live [i + l->start_reg] = 1;
1411 else if (g->length > l->length)
1412 gnw = i960_split_reg_group (g, gnw, l->length);
1414 lnw = i960_split_reg_group (l, lnw, g->length);
1417 actual_fsize = compute_frame_size (size) + 4 * n_remaining_saved_regs;
1419 /* ??? The 1.2.1 compiler does this also. This is meant to round the frame
1420 size up to the nearest multiple of 16. I don't know whether this is
1421 necessary, or even desirable.
1423 The frame pointer must be aligned, but the call instruction takes care of
1424 that. If we leave the stack pointer unaligned, we may save a little on
1425 dynamic stack allocation. And we don't lose, at least according to the
1427 actual_fsize = (actual_fsize + 15) & ~0xF;
1430 /* Check stack limit if necessary. */
1431 if (current_function_limit_stack)
1433 rtx min_stack = stack_limit_rtx;
1434 if (actual_fsize != 0)
1435 min_stack = plus_constant (stack_limit_rtx, -actual_fsize);
1437 /* Now, emulate a little bit of reload. We want to turn 'min_stack'
1438 into an arith_operand. Use register 20 as the temporary. */
1439 if (legitimate_address_p (Pmode, min_stack, 1)
1440 && !arith_operand (min_stack, Pmode))
1442 rtx tmp = gen_rtx_MEM (Pmode, min_stack);
1443 fputs ("\tlda\t", file);
1444 i960_print_operand (file, tmp, 0);
1445 fputs (",r4\n", file);
1446 min_stack = gen_rtx_REG (Pmode, 20);
1448 if (arith_operand (min_stack, Pmode))
1450 fputs ("\tcmpo\tsp,", file);
1451 i960_print_operand (file, min_stack, 0);
1452 fputs ("\n\tfaultge.f\n", file);
1455 warning ("stack limit expression is not supported");
1458 /* Allocate space for register save and locals. */
1459 if (actual_fsize > 0)
1461 if (actual_fsize < 32)
1462 fprintf (file, "\taddo %d,sp,sp\n", actual_fsize);
1464 fprintf (file, "\tlda\t%d(sp),sp\n", actual_fsize);
1467 /* Take hardware register save area created by the call instruction
1468 into account, but store them before the argument block area. */
1469 lvar_size = actual_fsize - compute_frame_size (0) - n_remaining_saved_regs * 4;
1470 offset = STARTING_FRAME_OFFSET + lvar_size;
1471 /* Save registers on stack if needed. */
1472 /* ??? Is it worth to use the same algorithm as one for saving
1473 global registers in local registers? */
1474 for (i = 0, j = n_remaining_saved_regs; j > 0 && i < 16; i++)
1481 if (i <= 14 && i % 2 == 0 && regs[i+1] == -1 && offset % 2 == 0)
1484 if (nr == 2 && i <= 12 && i % 4 == 0 && regs[i+2] == -1
1488 if (nr == 3 && regs[i+3] == -1)
1491 fprintf (file,"\tst%s %s,%d(fp)\n",
1494 (nr == 2) ? "l" : ""),
1495 reg_names[i], offset);
1496 sprintf (tmpstr,"\tld%s %d(fp),%s\n",
1499 (nr == 2) ? "l" : ""),
1500 offset, reg_names[i]);
1501 strcat (epilogue_string, tmpstr);
1507 if (actual_fsize == 0)
1510 fprintf (file, "\t#Prologue stats:\n");
1511 fprintf (file, "\t# Total Frame Size: %d bytes\n", actual_fsize);
1514 fprintf (file, "\t# Local Variable Size: %d bytes\n", lvar_size);
1516 fprintf (file, "\t# Register Save Size: %d regs, %d bytes\n",
1517 n_saved_regs, n_saved_regs * 4);
1518 fprintf (file, "\t#End Prologue#\n");
1521 /* Output code for the function profiler. */
/* Emits assembly that saves the live parameter registers (g0..g7) into
   locals r4..r11, preserves g14 if this function uses the arg pointer,
   calls mcount with the label address in g0, then restores everything.
   NOTE(review): this listing is elided -- some original lines are missing. */
1524 output_function_profiler (file, labelno)
1528 /* The last used parameter register. */
1530 int i, j, increment;
1531 int varargs_stdarg_function
1532 = VARARGS_STDARG_FUNCTION (current_function_decl);
1534 /* Figure out the last used parameter register. The proper thing to do
1535 is to walk incoming args of the function. A function might have live
1536 parameter registers even if it has no incoming args. Note that we
1537 don't have to save parameter registers g8 to g11 because they are
1540 /* See also output_function_prologue, which tries to use local registers
1541 for preserved call-saved global registers. */
1543 for (last_parm_reg = 7;
1544 last_parm_reg >= 0 && ! regs_ever_live[last_parm_reg];
1548 /* Save parameter registers in regs r4 (20) to r11 (27). */
/* Pick the widest move (movq/movt/movl/mov) that the remaining register
   count and the required alignment of I allow. */
1550 for (i = 0, j = 4; i <= last_parm_reg; i += increment, j += increment)
1552 if (i % 4 == 0 && (last_parm_reg - i) >= 3)
1554 else if (i % 4 == 0 && (last_parm_reg - i) >= 2)
1556 else if (i % 2 == 0 && (last_parm_reg - i) >= 1)
1561 fprintf (file, "\tmov%s g%d,r%d\n",
1562 (increment == 4 ? "q" : increment == 3 ? "t"
1563 : increment == 2 ? "l": ""), i, j);
1566 /* If this function uses the arg pointer, then save it in r3 and then
1569 if (current_function_args_size != 0 || varargs_stdarg_function)
1570 fprintf (file, "\tmov g14,r3\n\tmov 0,g14\n");
1572 /* Load location address into g0 and call mcount. */
1574 fprintf (file, "\tlda\tLP%d,g0\n\tcallx\tmcount\n", labelno);
1576 /* If this function uses the arg pointer, restore it. */
1578 if (current_function_args_size != 0 || varargs_stdarg_function)
1579 fprintf (file, "\tmov r3,g14\n");
1581 /* Restore parameter registers. */
/* Mirror of the save loop above: same grouping, reversed direction. */
1583 for (i = 0, j = 4; i <= last_parm_reg; i += increment, j += increment)
1585 if (i % 4 == 0 && (last_parm_reg - i) >= 3)
1587 else if (i % 4 == 0 && (last_parm_reg - i) >= 2)
1589 else if (i % 2 == 0 && (last_parm_reg - i) >= 1)
1594 fprintf (file, "\tmov%s r%d,g%d\n",
1595 (increment == 4 ? "q" : increment == 3 ? "t"
1596 : increment == 2 ? "l": ""), j, i);
1600 /* Output code for the function epilogue. */
/* TARGET_ASM_FUNCTION_EPILOGUE hook.  Emits the return label, the
   register-restore string accumulated by the prologue, the g14 clear for
   varargs/stdarg functions, and the final `ret'.
   NOTE(review): this listing is elided -- some original lines are missing. */
1603 i960_output_function_epilogue (file, size)
1605 HOST_WIDE_INT size ATTRIBUTE_UNUSED;
1607 if (i960_leaf_ret_reg >= 0)
1609 fprintf (file, "Li960R%d: ret\n", ret_label);
1613 if (*epilogue_string == 0)
1617 /* Emit a return insn, but only if control can fall through to here. */
/* Scan backwards over notes to see whether the last real insn already
   blocks fall-through (a barrier, label, or explicit return jump). */
1619 tmp = get_last_insn ();
1622 if (GET_CODE (tmp) == BARRIER)
1624 if (GET_CODE (tmp) == CODE_LABEL)
1626 if (GET_CODE (tmp) == JUMP_INSN)
1628 if (GET_CODE (PATTERN (tmp)) == RETURN)
1632 if (GET_CODE (tmp) == NOTE)
1634 tmp = PREV_INSN (tmp);
1639 fprintf (file, "Li960R%d: ret\n", ret_label);
1643 fprintf (file, "Li960R%d:\n", ret_label);
1645 fprintf (file, "\t#EPILOGUE#\n");
1647 /* Output the string created by the prologue which will restore all
1648 registers saved by the prologue. */
1650 if (epilogue_string[0] != '\0')
1651 fprintf (file, "%s", epilogue_string);
1653 /* Must clear g14 on return if this function set it.
1654 Only varargs/stdarg functions modify g14. */
1656 if (VARARGS_STDARG_FUNCTION (current_function_decl))
1657 fprintf (file, "\tmov 0,g14\n");
1659 fprintf (file, "\tret\n");
1660 fprintf (file, "\t#End Epilogue#\n");
1663 /* Output code for a call insn. */
/* Returns the assembler template for a call: either a tail-call `bx' when
   optimization allows it, or a `callx', with g14 (argument-block pointer)
   saved/loaded/cleared around the call as required.
   NOTE(review): this listing is elided -- some original lines are missing. */
1666 i960_output_call_insn (target, argsize_rtx, arg_pointer, insn)
1667 register rtx target, argsize_rtx, arg_pointer, insn;
1669 int argsize = INTVAL (argsize_rtx);
1670 rtx nexti = next_real_insn (insn);
1672 int varargs_stdarg_function
1673 = VARARGS_STDARG_FUNCTION (current_function_decl);
1675 operands[0] = target;
1676 operands[1] = arg_pointer;
/* g14 holds this function's argblock pointer; stash it in r3 across
   the call so the callee's use of g14 doesn't clobber it. */
1678 if (current_function_args_size != 0 || varargs_stdarg_function)
1679 output_asm_insn ("mov g14,r3", operands);
1682 output_asm_insn ("lda %a1,g14", operands);
1683 else if (current_function_args_size != 0 || varargs_stdarg_function)
1684 output_asm_insn ("mov 0,g14", operands);
1686 /* The code used to assume that calls to SYMBOL_REFs could not be more
1687 than 24 bits away (b vs bx, callj vs callx). This is not true. This
1688 feature is now implemented by relaxing in the GNU linker. It can convert
1689 bx to b if in range, and callx to calls/call/balx/bal as appropriate. */
1691 /* Nexti could be zero if the called routine is volatile. */
/* Tail-call: only when optimizing, there is nothing to restore in the
   epilogue, no stack args, and the call is immediately followed by a
   return (or nothing at all). */
1692 if (optimize && (*epilogue_string == 0) && argsize == 0 && tail_call_ok
1693 && (nexti == 0 || GET_CODE (PATTERN (nexti)) == RETURN))
1695 /* Delete following return insn. */
1696 if (nexti && no_labels_between_p (insn, nexti))
1697 delete_insn (nexti);
1698 output_asm_insn ("bx %0", operands);
1699 return "# notreached";
1702 output_asm_insn ("callx %0", operands);
1704 /* If the caller sets g14 to the address of the argblock, then the caller
1705 must clear it after the return. */
1707 if (current_function_args_size != 0 || varargs_stdarg_function)
1708 output_asm_insn ("mov r3,g14", operands);
1709 else if (argsize > 48)
1710 output_asm_insn ("mov 0,g14", operands);
1715 /* Output code for a return insn. */
/* Returns the assembler template for a `return' pattern: a branch to the
   shared epilogue label when restores are pending, an indirect `bx' for
   leaf functions, or a plain `ret'.
   NOTE(review): this listing is elided -- some original lines are missing. */
1718 i960_output_ret_insn (insn)
/* Static buffer: the returned template must outlive this call; safe
   because final emits one insn at a time. */
1721 static char lbuf[20];
1723 if (*epilogue_string != 0)
1725 if (! TARGET_CODE_ALIGN && next_real_insn (insn) == 0)
1728 sprintf (lbuf, "b Li960R%d", ret_label);
1732 /* Must clear g14 on return if this function set it.
1733 Only varargs/stdarg functions modify g14. */
1735 if (VARARGS_STDARG_FUNCTION (current_function_decl))
1736 output_asm_insn ("mov 0,g14", 0);
1738 if (i960_leaf_ret_reg >= 0)
1740 sprintf (lbuf, "bx (%s)", reg_names[i960_leaf_ret_reg]);
1746 /* Print the operand represented by rtx X formatted by code CODE. */
/* Operand-printing hook (PRINT_OPERAND).  Codes visible here: %D/%E/%F
   select the 2nd/3rd/4th register of a multi-register value; no code
   prints registers, memory, integer and FP constants; condition codes
   emit the i960 condition suffix (e/ne/g/l/ge/le), with variants for
   inverted and/or operand-swapped comparisons; branch-prediction codes
   append ".t"/".f".
   NOTE(review): this listing is elided -- the switch framework and
   several cases are missing from view. */
1749 i960_print_operand (file, x, code)
1754 enum rtx_code rtxcode = x ? GET_CODE (x) : NIL;
1761 /* Second reg of a double or quad. */
1762 fprintf (file, "%s", reg_names[REGNO (x)+1]);
1766 /* Third reg of a quad. */
1767 fprintf (file, "%s", reg_names[REGNO (x)+2]);
1771 /* Fourth reg of a quad. */
1772 fprintf (file, "%s", reg_names[REGNO (x)+3]);
1776 fprintf (file, "%s", reg_names[REGNO (x)]);
1784 else if (rtxcode == MEM)
1786 output_address (XEXP (x, 0));
1789 else if (rtxcode == CONST_INT)
1791 HOST_WIDE_INT val = INTVAL (x);
/* Small values print as decimal; large ones as hex for readability. */
1794 if (val > 9999 || val < -999)
1795 fprintf (file, "0x%x", val);
1797 fprintf (file, "%d", val);
1800 else if (rtxcode == CONST_DOUBLE)
/* FP constants use the assembler's 0f... literal syntax; 0.0 and 1.0
   get exact spellings, everything else a decimal rendering. */
1804 if (x == CONST0_RTX (GET_MODE (x)))
1806 fprintf (file, "0f0.0");
1809 else if (x == CONST1_RTX (GET_MODE (x)))
1811 fprintf (file, "0f1.0");
1815 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
1816 fprintf (file, "0f%s", dstr);
1823 /* Branch or jump, depending on assembler. */
1824 if (TARGET_ASM_COMPAT)
1831 /* Sign of condition. */
1832 if ((rtxcode == EQ) || (rtxcode == NE) || (rtxcode == GTU)
1833 || (rtxcode == LTU) || (rtxcode == GEU) || (rtxcode == LEU))
1835 else if ((rtxcode == GT) || (rtxcode == LT)
1836 || (rtxcode == GE) || (rtxcode == LE))
1843 /* Inverted condition. */
1844 rtxcode = reverse_condition (rtxcode);
1848 /* Inverted condition w/ reversed operands. */
1849 rtxcode = reverse_condition (rtxcode);
1853 /* Reversed operand condition. */
1854 rtxcode = swap_condition (rtxcode);
1858 /* Normal condition. */
/* Note: signed and unsigned comparisons share a suffix; the insn
   mnemonic (cmpi vs cmpo) carries the signedness. */
1860 if (rtxcode == EQ) { fputs ("e", file); return; }
1861 else if (rtxcode == NE) { fputs ("ne", file); return; }
1862 else if (rtxcode == GT) { fputs ("g", file); return; }
1863 else if (rtxcode == GTU) { fputs ("g", file); return; }
1864 else if (rtxcode == LT) { fputs ("l", file); return; }
1865 else if (rtxcode == LTU) { fputs ("l", file); return; }
1866 else if (rtxcode == GE) { fputs ("ge", file); return; }
1867 else if (rtxcode == GEU) { fputs ("ge", file); return; }
1868 else if (rtxcode == LE) { fputs ("le", file); return; }
1869 else if (rtxcode == LEU) { fputs ("le", file); return; }
1874 /* For conditional branches, substitute ".t" or ".f". */
1875 if (TARGET_BRANCH_PREDICT)
1877 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
1880 int pred_val = INTVAL (XEXP (x, 0));
1881 fputs ((pred_val < REG_BR_PROB_BASE / 2 ? ".f" : ".t"), file);
1887 output_addr_const (file, x);
1897 /* Print a memory address as an operand to reference that memory location.
1899 This is exactly the same as legitimate_address_p, except that it prints
1900 addresses instead of recognizing them. */
/* Decomposes ADDR into breg (base), ireg (index), scale, and offset,
   then prints the i960 form: offset(breg)[ireg*scale].
   NOTE(review): this listing is elided -- variable declarations and
   several assignments are missing from view. */
1903 i960_print_operand_addr (file, addr)
1915 if (GET_CODE (addr) == REG)
1917 else if (CONSTANT_P (addr))
1919 else if (GET_CODE (addr) == PLUS)
1923 op0 = XEXP (addr, 0);
1924 op1 = XEXP (addr, 1);
1926 if (GET_CODE (op0) == REG)
1929 if (GET_CODE (op1) == REG)
1931 else if (CONSTANT_P (op1))
/* (plus (plus ...) disp): base + scaled-index + displacement forms. */
1936 else if (GET_CODE (op0) == PLUS)
1938 if (GET_CODE (XEXP (op0, 0)) == MULT)
1940 ireg = XEXP (XEXP (op0, 0), 0);
1941 scale = XEXP (XEXP (op0, 0), 1);
1942 if (GET_CODE (XEXP (op0, 1)) == REG)
1944 breg = XEXP (op0, 1);
1950 else if (GET_CODE (XEXP (op0, 0)) == REG)
1952 breg = XEXP (op0, 0);
1953 if (GET_CODE (XEXP (op0, 1)) == REG)
1955 ireg = XEXP (op0, 1);
/* (plus (mult reg scale) x): index*scale plus base or displacement. */
1964 else if (GET_CODE (op0) == MULT)
1966 ireg = XEXP (op0, 0);
1967 scale = XEXP (op0, 1);
1968 if (GET_CODE (op1) == REG)
1970 else if (CONSTANT_P (op1))
/* Bare (mult reg scale): index*scale with no base or displacement. */
1978 else if (GET_CODE (addr) == MULT)
1980 ireg = XEXP (addr, 0);
1981 scale = XEXP (addr, 1);
/* Emit the collected pieces in assembler order. */
1987 output_addr_const (file, offset);
1989 fprintf (file, "(%s)", reg_names[REGNO (breg)]);
1991 fprintf (file, "[%s*%d]", reg_names[REGNO (ireg)], INTVAL (scale));
1994 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
1995 that is a valid memory address for an instruction.
1996 The MODE argument is the machine mode for the MEM expression
1997 that wants to use this address.
1999 On 80960, legitimate addresses are:
2001 disp (12 or 32 bit) ld foo,r0
2002 base + index ld (g0)[g1*1],r0
2003 base + displ ld 0xf00(g0),r0
2004 base + index*scale + displ ld 0xf00(g0)[g1*4],r0
2005 index*scale + base ld (g0)[g1*4],r0
2006 index*scale + displ ld 0xf00[g1*4],r0
2007 index*scale ld [g1*4],r0
2008 index + base + displ ld 0xf00(g0)[g1*1],r0
2010 In each case, scale can be 1, 2, 4, 8, or 16. */
2012 /* This is exactly the same as i960_print_operand_addr, except that
2013 it recognizes addresses instead of printing them.
2015 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
2016 convert common non-canonical forms to canonical form so that they will
2019 /* These two macros allow us to accept either a REG or a SUBREG anyplace
2020 where a register is valid. */
/* STRICT selects the reload-time predicate (hard registers only) versus
   the pre-reload one that also accepts pseudos. */
2022 #define RTX_OK_FOR_BASE_P(X, STRICT) \
2023 ((GET_CODE (X) == REG \
2024 && (STRICT ? REG_OK_FOR_BASE_P_STRICT (X) : REG_OK_FOR_BASE_P (X))) \
2025 || (GET_CODE (X) == SUBREG \
2026 && GET_CODE (SUBREG_REG (X)) == REG \
2027 && (STRICT ? REG_OK_FOR_BASE_P_STRICT (SUBREG_REG (X)) \
2028 : REG_OK_FOR_BASE_P (SUBREG_REG (X)))))
2030 #define RTX_OK_FOR_INDEX_P(X, STRICT) \
2031 ((GET_CODE (X) == REG \
2032 && (STRICT ? REG_OK_FOR_INDEX_P_STRICT (X) : REG_OK_FOR_INDEX_P (X)))\
2033 || (GET_CODE (X) == SUBREG \
2034 && GET_CODE (SUBREG_REG (X)) == REG \
2035 && (STRICT ? REG_OK_FOR_INDEX_P_STRICT (SUBREG_REG (X)) \
2036 : REG_OK_FOR_INDEX_P (SUBREG_REG (X)))))
/* Recognizer for GO_IF_LEGITIMATE_ADDRESS: nonzero iff ADDR is a valid
   i960 memory address in canonical form (see the table above).  Mirrors
   i960_print_operand_addr, which prints the same forms.
   NOTE(review): this listing is elided -- return statements and braces
   are missing from view. */
2039 legitimate_address_p (mode, addr, strict)
2040 enum machine_mode mode ATTRIBUTE_UNUSED;
2044 if (RTX_OK_FOR_BASE_P (addr, strict))
2046 else if (CONSTANT_P (addr))
2048 else if (GET_CODE (addr) == PLUS)
/* Complex (multi-part) addresses are only accepted after reload unless
   the target explicitly enables them. */
2052 if (! TARGET_COMPLEX_ADDR && ! reload_completed)
2055 op0 = XEXP (addr, 0);
2056 op1 = XEXP (addr, 1);
2058 if (RTX_OK_FOR_BASE_P (op0, strict))
2060 if (RTX_OK_FOR_INDEX_P (op1, strict))
2062 else if (CONSTANT_P (op1))
2067 else if (GET_CODE (op0) == PLUS)
2069 if (GET_CODE (XEXP (op0, 0)) == MULT)
2071 if (! (RTX_OK_FOR_INDEX_P (XEXP (XEXP (op0, 0), 0), strict)
2072 && SCALE_TERM_P (XEXP (XEXP (op0, 0), 1))))
2075 if (RTX_OK_FOR_BASE_P (XEXP (op0, 1), strict)
2076 && CONSTANT_P (op1))
2081 else if (RTX_OK_FOR_BASE_P (XEXP (op0, 0), strict))
2083 if (RTX_OK_FOR_INDEX_P (XEXP (op0, 1), strict)
2084 && CONSTANT_P (op1))
2092 else if (GET_CODE (op0) == MULT)
2094 if (! (RTX_OK_FOR_INDEX_P (XEXP (op0, 0), strict)
2095 && SCALE_TERM_P (XEXP (op0, 1))))
2098 if (RTX_OK_FOR_BASE_P (op1, strict))
2100 else if (CONSTANT_P (op1))
2108 else if (GET_CODE (addr) == MULT)
2110 if (! TARGET_COMPLEX_ADDR && ! reload_completed)
2113 return (RTX_OK_FOR_INDEX_P (XEXP (addr, 0), strict)
2114 && SCALE_TERM_P (XEXP (addr, 1)));
2120 /* Try machine-dependent ways of modifying an illegitimate address
2121 to be legitimate. If we find one, return the new, valid address.
2122 This macro is used in only one place: `memory_address' in explow.c.
2124 This converts some non-canonical addresses to canonical form so they
2125 can be recognized. */
/* LEGITIMIZE_ADDRESS hook: rewrite non-canonical address forms into the
   canonical shapes legitimate_address_p accepts, forcing SYMBOL_REFs
   into registers when needed.  Returns the (possibly new) address.
   NOTE(review): this listing is elided -- some guards and the final
   return are missing from view. */
2128 legitimize_address (x, oldx, mode)
2130 register rtx oldx ATTRIBUTE_UNUSED;
2131 enum machine_mode mode ATTRIBUTE_UNUSED;
2133 if (GET_CODE (x) == SYMBOL_REF)
2136 x = copy_to_reg (x);
2139 if (! TARGET_COMPLEX_ADDR && ! reload_completed)
2142 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
2143 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
2144 created by virtual register instantiation, register elimination, and
2145 similar optimizations. */
2146 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
2147 && GET_CODE (XEXP (x, 1)) == PLUS)
2148 x = gen_rtx_PLUS (Pmode,
2149 gen_rtx_PLUS (Pmode, XEXP (x, 0), XEXP (XEXP (x, 1), 0)),
2150 XEXP (XEXP (x, 1), 1));
2152 /* Canonicalize (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
2153 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
2154 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
2155 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
2156 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
2157 && CONSTANT_P (XEXP (x, 1)))
2159 rtx constant, other;
/* Exactly one of the two constants must be a CONST_INT so the pair can
   be folded by plus_constant below. */
2161 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2163 constant = XEXP (x, 1);
2164 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
2166 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
2168 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
2169 other = XEXP (x, 1);
2172 constant = 0, other = 0;
2175 x = gen_rtx_PLUS (Pmode,
2176 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
2177 XEXP (XEXP (XEXP (x, 0), 1), 0)),
2178 plus_constant (other, INTVAL (constant)));
2185 /* Return the most stringent alignment that we are willing to consider
2186 objects of size SIZE and known alignment ALIGN as having. */
/* NOTE(review): this listing is elided -- the parameter declarations
   and return paths are missing from view. */
2189 i960_alignment (size, align)
/* Without strict alignment, word-aligned (or IC 2.0 compatible) objects
   may be treated as having their natural size-based alignment. */
2195 if (! TARGET_STRICT_ALIGN)
2196 if (TARGET_IC_COMPAT2_0 || align >= 4)
2198 i = i960_object_bytes_bitalign (size) / BITS_PER_UNIT;
/* HARD_REGNO_MODE_OK: nonzero iff hard register REGNO can hold a value
   of MODE.  Local/global registers need even (DImode) or quad (TImode)
   alignment; 32..35 are the FP registers; 36/37 are condition-code
   registers.
   NOTE(review): this listing is elided -- the switch framework and
   several returns are missing from view. */
2209 hard_regno_mode_ok (regno, mode)
2211 enum machine_mode mode;
2217 case CCmode: case CC_UNSmode: case CC_CHKmode:
/* Double-word values must start on an even register, quad-word values
   on a multiple of four. */
2220 case DImode: case DFmode:
2221 return (regno & 1) == 0;
2223 case TImode: case TFmode:
2224 return (regno & 3) == 0;
2230 else if (regno >= 32 && regno < 36)
2234 case SFmode: case DFmode: case TFmode:
2235 case SCmode: case DCmode:
2242 else if (regno == 36)
2246 case CCmode: case CC_UNSmode: case CC_CHKmode:
2253 else if (regno == 37)
2260 /* Return the minimum alignment of an expression rtx X in bytes. This takes
2261 advantage of machine specific facts, such as knowing that the frame pointer
2262 is always 16 byte aligned. */
/* NOTE(review): this listing is elided -- the declarations, several case
   labels, and the final return are missing from view. */
2265 i960_expr_alignment (x, size)
2274 switch (GET_CODE(x))
/* CONST_INT: alignment implied by the low bits of the value itself. */
2279 if ((align & 0xf) == 0)
2281 else if ((align & 0x7) == 0)
2283 else if ((align & 0x3) == 0)
2285 else if ((align & 0x1) == 0)
/* Sum: alignment of a PLUS is the weaker of its two operands. */
2292 align = MIN (i960_expr_alignment (XEXP (x, 0), size),
2293 i960_expr_alignment (XEXP (x, 1), size));
2297 /* If this is a valid program, objects are guaranteed to be
2298 correctly aligned for whatever size the reference actually is. */
2299 align = i960_object_bytes_bitalign (size) / BITS_PER_UNIT;
/* The frame pointer is always kept 16-byte aligned on the i960. */
2303 if (REGNO (x) == FRAME_POINTER_REGNUM)
/* Shift left multiplies the alignment, capped at 16 bytes. */
2308 align = i960_expr_alignment (XEXP (x, 0), size);
2310 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2312 align = align << INTVAL (XEXP (x, 1));
2313 align = MIN (align, 16);
/* Product: alignments multiply, again capped at 16. */
2318 align = (i960_expr_alignment (XEXP (x, 0), size) *
2319 i960_expr_alignment (XEXP (x, 1), size));
2321 align = MIN (align, 16);
2330 /* Return true if it is possible to reference both BASE and OFFSET, which
2331 have alignment at least as great as 4 byte, as if they had alignment valid
2332 for an object of size SIZE. */
/* NOTE(review): this listing is elided -- declarations and returns are
   missing from view. */
2335 i960_improve_align (base, offset, size)
2342 /* We have at least a word reference to the object, so we know it has to
2343 be aligned at least to 4 bytes. */
/* Take the weaker of the two component alignments, as proved by
   i960_expr_alignment. */
2345 i = MIN (i960_expr_alignment (base, 4),
2346 i960_expr_alignment (offset, 4));
2350 /* We know the size of the request. If strict align is not enabled, we
2351 can guess that the alignment is OK for the requested size. */
2353 if (! TARGET_STRICT_ALIGN)
2354 if ((j = (i960_object_bytes_bitalign (size) / BITS_PER_UNIT)) > i)
2360 /* Return true if it is possible to access BASE and OFFSET, which have 4 byte
2361 (SImode) alignment as if they had 16 byte (TImode) alignment. */
/* Thin wrapper over i960_improve_align with a 16-byte (TImode) request. */
2364 i960_si_ti (base, offset)
2368 return i960_improve_align (base, offset, 16);
2371 /* Return true if it is possible to access BASE and OFFSET, which have 4 byte
2372 (SImode) alignment as if they had 8 byte (DImode) alignment. */
/* Thin wrapper over i960_improve_align with an 8-byte (DImode) request. */
2375 i960_si_di (base, offset)
2379 return i960_improve_align (base, offset, 8);
2382 /* Return raw values of size and alignment (in words) for the data
2383 type being accessed. These values will be rounded by the caller. */
/* Computes argument SIZE and ALIGN in words via the output parameters
   size_out / align_out.
   NOTE(review): this listing is elided -- some declarations and the
   stores through the out-parameters are missing from view. */
2386 i960_arg_size_and_align (mode, type, size_out, align_out)
2387 enum machine_mode mode;
2394 /* Use formal alignment requirements of type being passed, except make
2395 it at least a word. If we don't have a type, this is a library call,
2396 and the parm has to be of scalar type. In this case, consider its
2397 formal alignment requirement to be its size in words. */
2399 if (mode == BLKmode)
2400 size = (int_size_in_bytes (type) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2401 else if (mode == VOIDmode)
2403 /* End of parm list. */
/* A VOIDmode arg should only be the end-of-list sentinel; anything
   else is an internal inconsistency. */
2404 if (type == 0 || TYPE_MODE (type) != VOIDmode)
2409 size = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2413 else if (TYPE_ALIGN (type) >= BITS_PER_WORD)
2414 align = TYPE_ALIGN (type) / BITS_PER_WORD;
2422 /* On the 80960 the first 12 args are in registers and the rest are pushed.
2423 Any arg that is bigger than 4 words is placed on the stack and all
2424 subsequent arguments are placed on the stack.
2426 Additionally, parameters with an alignment requirement stronger than
2427 a word must be aligned appropriately. Note that this means that a
2428 64 bit object with a 32 bit alignment is not 64 bit aligned and may be
2429 passed in an odd/even register pair. */
2431 /* Update CUM to advance past an argument described by MODE and TYPE. */
/* NOTE(review): this listing is elided -- declarations and braces are
   missing from view. */
2434 i960_function_arg_advance (cum, mode, type, named)
2435 CUMULATIVE_ARGS *cum;
2436 enum machine_mode mode;
2438 int named ATTRIBUTE_UNUSED;
2442 i960_arg_size_and_align (mode, type, &size, &align);
/* Once any argument goes to the stack, all later ones do too; mirror
   of the test in i960_function_arg below. */
2444 if (size > 4 || cum->ca_nstackparms != 0
2445 || (size + ROUND_PARM (cum->ca_nregparms, align)) > NPARM_REGS
2446 || MUST_PASS_IN_STACK (mode, type))
2448 /* Indicate that all the registers are in use, even if all are not,
2449 so va_start will compute the right value. */
2450 cum->ca_nregparms = NPARM_REGS;
2451 cum->ca_nstackparms = ROUND_PARM (cum->ca_nstackparms, align) + size;
2454 cum->ca_nregparms = ROUND_PARM (cum->ca_nregparms, align) + size;
2457 /* Return the register that the argument described by MODE and TYPE is
2458 passed in, or else return 0 if it is passed on the stack. */
/* NOTE(review): this listing is elided -- declarations, braces, and the
   final return are missing from view. */
2461 i960_function_arg (cum, mode, type, named)
2462 CUMULATIVE_ARGS *cum;
2463 enum machine_mode mode;
2465 int named ATTRIBUTE_UNUSED;
/* VOIDmode marks the end of the argument list -- nothing to pass. */
2470 if (mode == VOIDmode)
2473 i960_arg_size_and_align (mode, type, &size, &align);
/* Same stack-spill condition as i960_function_arg_advance; keep the
   two in sync. */
2475 if (size > 4 || cum->ca_nstackparms != 0
2476 || (size + ROUND_PARM (cum->ca_nregparms, align)) > NPARM_REGS
2477 || MUST_PASS_IN_STACK (mode, type))
2479 cum->ca_nstackparms = ROUND_PARM (cum->ca_nstackparms, align);
2484 cum->ca_nregparms = ROUND_PARM (cum->ca_nregparms, align);
2485 ret = gen_rtx_REG (mode, cum->ca_nregparms);
2491 /* Return the number of bits that an object of size N bytes is aligned to. */
/* Maps byte size to natural bit alignment: >4 -> 64, >2 -> 32,
   >1 -> 16; smaller sizes fall through (byte alignment).
   NOTE(review): the first branch and final return are elided. */
2494 i960_object_bytes_bitalign (n)
2498 else if (n > 4) n = 64;
2499 else if (n > 2) n = 32;
2500 else if (n > 1) n = 16;
2506 /* Compute the alignment for an aggregate type TSIZE.
2507 Alignment is MAX (greatest member alignment,
2508 MIN (pragma align, structure size alignment)). */
/* ROUND_TYPE_ALIGN implementation.  Only RECORD_TYPEs with a constant
   size participate; packed structs and -mold-align keep ALIGN as is.
   NOTE(review): this listing is elided -- declarations and returns are
   missing from view. */
2511 i960_round_align (align, type)
2518 if (TARGET_OLD_ALIGN || TYPE_PACKED (type))
2520 if (TREE_CODE (type) != RECORD_TYPE)
2522 tsize = TYPE_SIZE (type);
2524 if (! tsize || TREE_CODE (tsize) != INTEGER_CST)
2527 new_align = i960_object_bytes_bitalign (TREE_INT_CST_LOW (tsize)
2529 /* Handle #pragma align. */
/* i960_maxbitalignment tracks the active #pragma align setting. */
2530 if (new_align > i960_maxbitalignment)
2531 new_align = i960_maxbitalignment;
2533 if (align < new_align)
2539 /* Do any needed setup for a varargs function. For the i960, we must
2540 create a register parameter block if one doesn't exist, and then copy
2541 all register parameters to memory. */
/* NOTE(review): this listing is elided -- some declarations, braces and
   emitted instructions are missing from view. */
2544 i960_setup_incoming_varargs (cum, mode, type, pretend_size, no_rtl)
2545 CUMULATIVE_ARGS *cum;
2546 enum machine_mode mode ATTRIBUTE_UNUSED;
2547 tree type ATTRIBUTE_UNUSED;
2548 int *pretend_size ATTRIBUTE_UNUSED;
2551 /* Note: for a varargs fn with only a va_alist argument, this is 0. */
2552 int first_reg = cum->ca_nregparms;
2554 /* Copy only unnamed register arguments to memory. If there are
2555 any stack parms, there are no unnamed arguments in registers, and
2556 an argument block was already allocated by the caller.
2557 Remember that any arg bigger than 4 words is passed on the stack as
2558 are all subsequent args.
2560 If there are no stack arguments but there are exactly NPARM_REGS
2561 registers, either there were no extra arguments or the caller
2562 allocated an argument block. */
2564 if (cum->ca_nstackparms == 0 && first_reg < NPARM_REGS && !no_rtl)
2566 rtx label = gen_label_rtx ();
2567 rtx regblock, fake_arg_pointer_rtx;
2569 /* Use a different rtx than arg_pointer_rtx so that cse and friends
2570 can go on believing that the argument pointer can never be zero. */
2571 fake_arg_pointer_rtx = gen_raw_REG (Pmode, ARG_POINTER_REGNUM);
2573 /* If the argument pointer is 0, no arguments were passed on the stack
2574 and we need to allocate a chunk to save the registers (if any
2575 arguments were passed on the stack the caller would allocate the
2576 48 bytes as well). We must allocate all 48 bytes (12*4) because
2577 va_start assumes it. */
/* Runtime test: only allocate the 48-byte block when g14 is zero. */
2578 emit_insn (gen_cmpsi (fake_arg_pointer_rtx, const0_rtx));
2579 emit_jump_insn (gen_bne (label));
2580 emit_insn (gen_rtx_SET (VOIDmode, fake_arg_pointer_rtx,
2581 stack_pointer_rtx));
2582 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
2583 memory_address (SImode,
2584 plus_constant (stack_pointer_rtx,
2588 /* ??? Note that we unnecessarily store one extra register for stdarg
2589 fns. We could optimize this, but it's kept as is for now. */
2590 regblock = gen_rtx_MEM (BLKmode,
2591 plus_constant (arg_pointer_rtx, first_reg * 4));
2592 set_mem_alias_set (regblock, get_varargs_alias_set ());
2593 set_mem_align (regblock, BITS_PER_WORD);
/* Dump all remaining (unnamed) parameter registers into the block. */
2594 move_block_from_reg (first_reg, regblock,
2595 NPARM_REGS - first_reg,
2596 (NPARM_REGS - first_reg) * UNITS_PER_WORD);
2600 /* Define the `__builtin_va_list' type for the ABI. */
/* va_list is a two-element array of unsigned: [0] holds the argument
   block base, [1] the byte offset of the next argument (see
   i960_va_start / i960_va_arg below). */
2603 i960_build_va_list ()
2605 return build_array_type (unsigned_type_node,
2606 build_index_type (size_one_node));
2609 /* Implement `va_start' for varargs and stdarg. */
/* Stores the argument-block pointer into valist[0] and the byte count of
   named arguments into valist[1].
   NOTE(review): this listing is elided -- braces are missing from view. */
2612 i960_va_start (valist, nextarg)
2614 rtx nextarg ATTRIBUTE_UNUSED;
2616 tree s, t, base, num;
2617 rtx fake_arg_pointer_rtx;
2619 /* The array type always decays to a pointer before we get here, so we
2620 can't use ARRAY_REF. */
2621 base = build1 (INDIRECT_REF, unsigned_type_node, valist);
2622 num = build1 (INDIRECT_REF, unsigned_type_node,
2623 build (PLUS_EXPR, unsigned_type_node, valist,
2624 TYPE_SIZE_UNIT (TREE_TYPE (valist))));
2626 /* Use a different rtx than arg_pointer_rtx so that cse and friends
2627 can go on believing that the argument pointer can never be zero. */
2628 fake_arg_pointer_rtx = gen_raw_REG (Pmode, ARG_POINTER_REGNUM);
/* valist[0] = argument block pointer (g14). */
2629 s = make_tree (unsigned_type_node, fake_arg_pointer_rtx);
2630 t = build (MODIFY_EXPR, unsigned_type_node, base, s);
2631 TREE_SIDE_EFFECTS (t) = 1;
2632 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* valist[1] = bytes consumed by named parameters (regs + stack). */
2634 s = build_int_2 ((current_function_args_info.ca_nregparms
2635 + current_function_args_info.ca_nstackparms) * 4, 0);
2636 t = build (MODIFY_EXPR, unsigned_type_node, num, s);
2637 TREE_SIDE_EFFECTS (t) = 1;
2638 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2641 /* Implement `va_arg'. */
/* Returns an rtx for the address of the current argument and advances
   valist[1] past it.  Arguments that would straddle the 48-byte
   register-save area are bumped to offset 48 (the stack overflow area).
   NOTE(review): this listing is elided -- some declarations, braces and
   the final return are missing from view. */
2644 i960_va_arg (valist, type)
2647 HOST_WIDE_INT siz, ali;
2648 tree base, num, pad, next, this, t1, t2, int48;
2651 /* The array type always decays to a pointer before we get here, so we
2652 can't use ARRAY_REF. */
2653 base = build1 (INDIRECT_REF, unsigned_type_node, valist);
2654 num = build1 (INDIRECT_REF, unsigned_type_node,
2655 build (PLUS_EXPR, unsigned_type_node, valist,
2656 TYPE_SIZE_UNIT (TREE_TYPE (valist))));
2658 /* Round up sizeof(type) to a word. */
2659 siz = (int_size_in_bytes (type) + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
2661 /* Round up alignment to a word. */
2662 ali = TYPE_ALIGN (type);
2663 if (ali < BITS_PER_WORD)
2664 ali = BITS_PER_WORD;
2665 ali /= BITS_PER_UNIT;
2667 /* Align NUM appropriate for the argument. */
2668 pad = fold (build (PLUS_EXPR, unsigned_type_node, num,
2669 build_int_2 (ali - 1, 0)));
2670 pad = fold (build (BIT_AND_EXPR, unsigned_type_node, pad,
2671 build_int_2 (-ali, -1)));
2672 pad = save_expr (pad);
2674 /* Increment VPAD past this argument. */
2675 next = fold (build (PLUS_EXPR, unsigned_type_node, pad,
2676 build_int_2 (siz, 0)));
2677 next = save_expr (next);
2679 /* Find the offset for the current argument. Mind peculiar overflow
2680 from registers to stack. */
2681 int48 = build_int_2 (48, 0);
2683 t2 = integer_one_node;
/* If the padded start is within the 48-byte register area but the end
   would spill past it, the argument actually lives at offset 48. */
2685 t2 = fold (build (GT_EXPR, integer_type_node, next, int48));
2686 t1 = fold (build (LE_EXPR, integer_type_node, num, int48));
2687 t1 = fold (build (TRUTH_AND_EXPR, integer_type_node, t1, t2));
2688 this = fold (build (COND_EXPR, unsigned_type_node, t1, int48, pad));
2690 /* Find the address for the current argument. */
2691 t1 = fold (build (PLUS_EXPR, unsigned_type_node, base, this));
2692 t1 = build1 (NOP_EXPR, ptr_type_node, t1);
2693 addr_rtx = expand_expr (t1, NULL_RTX, Pmode, EXPAND_NORMAL);
2695 /* Increment NUM. */
2696 t1 = build (MODIFY_EXPR, unsigned_type_node, num, next);
2697 TREE_SIDE_EFFECTS (t1) = 1;
2698 expand_expr (t1, const0_rtx, VOIDmode, EXPAND_NORMAL);
2703 /* Calculate the final size of the reg parm stack space for the current
2704 function, based on how many bytes would be allocated on the stack. */
/* NOTE(review): the return statements are elided from this view; only
   the 48-byte-threshold test is visible. */
2707 i960_final_reg_parm_stack_space (const_size, var_size)
2711 if (var_size || const_size > 48)
2717 /* Calculate the size of the reg parm stack space. This is a bit complicated
/* NOTE(review): this listing is elided -- the parameter declaration and
   return values are missing from view. */
2721 i960_reg_parm_stack_space (fndecl)
2724 /* In this case, we are called from emit_library_call, and we don't need
2725 to pretend we have more space for parameters than what's apparent. */
2729 /* In this case, we are called from locate_and_pad_parms when we're
2730 not IN_REGS, so we have an arg block. */
2731 if (fndecl != current_function_decl)
2734 /* Otherwise, we have an arg block if the current function has more than
2735 48 bytes of parameters. */
2736 if (current_function_args_size != 0 || VARARGS_STDARG_FUNCTION (fndecl))
2742 /* Return the register class of a scratch register needed to copy IN into
2743 or out of a register in CLASS in MODE. If it can be done directly,
2744 NO_REGS is returned. */
/* NOTE(review): this listing is elided -- the regno initialization and
   the NO_REGS returns are missing from view. */
2747 secondary_reload_class (class, mode, in)
2748 enum reg_class class;
2749 enum machine_mode mode;
2754 if (GET_CODE (in) == REG || GET_CODE (in) == SUBREG)
2755 regno = true_regnum (in);
2757 /* We can place anything into LOCAL_OR_GLOBAL_REGS and can put
2758 LOCAL_OR_GLOBAL_REGS into anything. */
2759 if (class == LOCAL_OR_GLOBAL_REGS || class == LOCAL_REGS
2760 || class == GLOBAL_REGS || (regno >= 0 && regno < 32))
2763 /* We can place any hard register, 0.0, and 1.0 into FP_REGS. */
2764 if (class == FP_REGS
2765 && ((regno >= 0 && regno < FIRST_PSEUDO_REGISTER)
2766 || in == CONST0_RTX (mode) || in == CONST1_RTX (mode)))
/* Everything else must be staged through a local/global register. */
2769 return LOCAL_OR_GLOBAL_REGS;
2772 /* Look at the opcode P, and set i960_last_insn_type to indicate which
2773 function unit it executed on. */
2775 /* ??? This would make more sense as an attribute. */
/* Classifies the emitted mnemonic string into I_TYPE_MEM / I_TYPE_CTRL /
   I_TYPE_REG by inspecting its leading characters; used for C-series
   MEM/ALU scheduling (see i960_last_insn_type above).
   NOTE(review): this listing is elided -- the switch framework and case
   labels are missing from view. */
2778 i960_scan_opcode (p)
2790 /* Ret is not actually of type REG, but it won't matter, because no
2791 insn will ever follow it. */
2794 i960_last_insn_type = I_TYPE_REG;
/* "bx"/"...x" forms are memory-format branches; plain b* are CTRL. */
2798 if (p[1] == 'x' || p[3] == 'x')
2799 i960_last_insn_type = I_TYPE_MEM;
2800 i960_last_insn_type = I_TYPE_CTRL;
2805 i960_last_insn_type = I_TYPE_CTRL;
2812 i960_last_insn_type = I_TYPE_MEM;
2814 i960_last_insn_type = I_TYPE_CTRL;
2816 else if (p[1] == 'm')
2819 i960_last_insn_type = I_TYPE_REG;
2820 else if (p[4] == 'b' || p[4] == 'j')
2821 i960_last_insn_type = I_TYPE_CTRL;
2823 i960_last_insn_type = I_TYPE_REG;
2826 i960_last_insn_type = I_TYPE_REG;
2830 i960_last_insn_type = I_TYPE_MEM;
2835 i960_last_insn_type = I_TYPE_MEM;
2837 i960_last_insn_type = I_TYPE_REG;
/* TARGET_ASM_OUTPUT_MI_THUNK: emit a thunk that adds DELTA to the `this'
   pointer in g0 and tail-jumps (bx) to FUNCTION.  Small deltas use a
   single addo/subo; larger ones go through r5 via ldconst.
   NOTE(review): this listing is elided -- the declaration of `d' and
   some braces are missing from view. */
2843 i960_output_mi_thunk (file, thunk, delta, vcall_offset, function)
2845 tree thunk ATTRIBUTE_UNUSED;
2846 HOST_WIDE_INT delta;
2847 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED;
/* addo/subo take a 5-bit literal, hence the +/-32 window. */
2851 if (d < 0 && d > -32)
2852 fprintf (file, "\tsubo %d,g0,g0\n", -d);
2853 else if (d > 0 && d < 32)
2854 fprintf (file, "\taddo %d,g0,g0\n", d);
2857 fprintf (file, "\tldconst %d,r5\n", d);
2858 fprintf (file, "\taddo r5,g0,g0\n");
2860 fprintf (file, "\tbx ");
2861 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
2862 fprintf (file, "\n");
2866 i960_rtx_costs (x, code, outer_code, total)
2868 int code, outer_code;
2873 /* Constants that can be (non-ldconst) insn operands are cost 0.
2874 Constants that can be non-ldconst operands in rare cases are cost 1.
2875 Other constants have higher costs.
2877 Must check for OUTER_CODE of SET for power2_operand, because
2878 reload_cse_move2add calls us with OUTER_CODE of PLUS to decide
2879 when to replace set with add. */
2882 if ((INTVAL (x) >= 0 && INTVAL (x) < 32)
2883 || (outer_code == SET && power2_operand (x, VOIDmode)))
2888 else if (INTVAL (x) >= -31 && INTVAL (x) < 0)
2898 *total = (TARGET_C_SERIES ? 6 : 8);
2902 if (x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode)
2903 || x == CONST1_RTX (DFmode) || x == CONST1_RTX (SFmode))