1 /* Subroutines used for code generation on intel 80960.
2 Copyright (C) 1992, 1995, 1996, 1997, 1998, 1999, 2000, 2001
3 Free Software Foundation, Inc.
4 Contributed by Steven McGeady, Intel Corp.
5 Additional Work by Glenn Colon-Bonet, Jonathan Shapiro, Andy Wilson
6 Converted to GCC 2.0 by Jim Wilson and Michael Tiemann, Cygnus Support.
8 This file is part of GNU CC.
10 GNU CC is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
15 GNU CC is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GNU CC; see the file COPYING. If not, write to
22 the Free Software Foundation, 59 Temple Place - Suite 330,
23 Boston, MA 02111-1307, USA. */
27 #include "coretypes.h"
32 #include "hard-reg-set.h"
34 #include "insn-config.h"
35 #include "conditions.h"
37 #include "insn-attr.h"
47 #include "target-def.h"
/* NOTE(review): this region has gaps in the embedded original line numbers,
   so some declarations/comments appear to have been lost in extraction.  */

/* Forward declarations for the static target-hook implementations below.  */
49 static void i960_output_function_prologue PARAMS ((FILE *, HOST_WIDE_INT));
50 static void i960_output_function_epilogue PARAMS ((FILE *, HOST_WIDE_INT));
51 static void i960_output_mi_thunk PARAMS ((FILE *, tree, HOST_WIDE_INT,
52 HOST_WIDE_INT, tree));
53 static bool i960_rtx_costs PARAMS ((rtx, int, int, int *));
55 /* Save the operands last given to a compare for use when we
56 generate a scc or bcc insn. */
58 rtx i960_compare_op0, i960_compare_op1;
60 /* Used to implement #pragma align/noalign. Initialized by OVERRIDE_OPTIONS
63 int i960_maxbitalignment;
64 int i960_last_maxbitalignment;
66 /* Used to implement switching between MEM and ALU insn types, for better
67 C series performance. */
69 enum insn_types i960_last_insn_type;
71 /* The leaf-procedure return register. Set only if this is a leaf routine. */
73 static int i960_leaf_ret_reg;
75 /* True if replacing tail calls with jumps is OK. */
77 static int tail_call_ok;
79 /* A string containing a list of insns to emit in the epilogue so as to
80 restore all registers saved by the prologue. Created by the prologue
81 code as it saves registers away. */
83 char epilogue_string[1000];
85 /* A unique number (per function) for return labels. */
87 static int ret_label = 0;
/* This is true if FNDECL is either a varargs or a stdarg function.
   This is used to help identify functions that use an argument block.
   A prototyped function whose last declared argument type is not
   void_type_node takes a variable number of arguments.  */

#define VARARGS_STDARG_FUNCTION(FNDECL)	\
 (TYPE_ARG_TYPES (TREE_TYPE (FNDECL)) != 0				\
  && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (TREE_TYPE (FNDECL))))	\
      != void_type_node))
97 /* Initialize the GCC target structure. */
/* Each hook macro is #undef'd before being redefined to this port's
   implementation; targetm is then built from TARGET_INITIALIZER.  */
98 #undef TARGET_ASM_ALIGNED_SI_OP
99 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
101 #undef TARGET_ASM_FUNCTION_PROLOGUE
102 #define TARGET_ASM_FUNCTION_PROLOGUE i960_output_function_prologue
103 #undef TARGET_ASM_FUNCTION_EPILOGUE
104 #define TARGET_ASM_FUNCTION_EPILOGUE i960_output_function_epilogue
106 #undef TARGET_ASM_OUTPUT_MI_THUNK
107 #define TARGET_ASM_OUTPUT_MI_THUNK i960_output_mi_thunk
108 #undef TARGET_CAN_ASM_OUTPUT_MI_THUNK
109 #define TARGET_CAN_ASM_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
111 #undef TARGET_RTX_COSTS
112 #define TARGET_RTX_COSTS i960_rtx_costs
114 struct gcc_target targetm = TARGET_INITIALIZER;
116 /* Override conflicting target switch options.
117 Doesn't actually detect if more than one -mARCH option is given, but
118 does handle the case of two blatantly conflicting -mARCH options.
120 Also initialize variables before compiling any files. */
125 if (TARGET_K_SERIES && TARGET_C_SERIES)
127 warning ("conflicting architectures defined - using C series");
128 target_flags &= ~TARGET_FLAG_K_SERIES;
130 if (TARGET_K_SERIES && TARGET_MC)
132 warning ("conflicting architectures defined - using K series");
133 target_flags &= ~TARGET_FLAG_MC;
135 if (TARGET_C_SERIES && TARGET_MC)
137 warning ("conflicting architectures defined - using C series");
138 target_flags &= ~TARGET_FLAG_MC;
140 if (TARGET_IC_COMPAT3_0)
142 flag_short_enums = 1;
143 flag_signed_char = 1;
144 target_flags |= TARGET_FLAG_CLEAN_LINKAGE;
145 if (TARGET_IC_COMPAT2_0)
147 warning ("iC2.0 and iC3.0 are incompatible - using iC3.0");
148 target_flags &= ~TARGET_FLAG_IC_COMPAT2_0;
151 if (TARGET_IC_COMPAT2_0)
153 flag_signed_char = 1;
154 target_flags |= TARGET_FLAG_CLEAN_LINKAGE;
157 if (TARGET_IC_COMPAT2_0)
159 i960_maxbitalignment = 8;
160 i960_last_maxbitalignment = 128;
164 i960_maxbitalignment = 128;
165 i960_last_maxbitalignment = 8;
168 /* Tell the compiler which flavor of TFmode we're using. */
169 real_format_for_mode[TFmode - QFmode] = &ieee_extended_intel_128_format;
172 /* Return true if OP can be used as the source of an fp move insn. */
175 fpmove_src_operand (op, mode)
177 enum machine_mode mode;
179 return (GET_CODE (op) == CONST_DOUBLE || general_operand (op, mode));
183 /* Return true if OP is a register or zero. */
186 reg_or_zero_operand (op, mode)
188 enum machine_mode mode;
190 return register_operand (op, mode) || op == const0_rtx;
194 /* Return truth value of whether OP can be used as an operands in a three
195 address arithmetic insn (such as add %o1,7,%l2) of mode MODE. */
198 arith_operand (op, mode)
200 enum machine_mode mode;
202 return (register_operand (op, mode) || literal (op, mode));
205 /* Return truth value of whether OP can be used as an operands in a three
206 address logic insn, possibly complementing OP, of mode MODE. */
209 logic_operand (op, mode)
211 enum machine_mode mode;
213 return (register_operand (op, mode)
214 || (GET_CODE (op) == CONST_INT
215 && INTVAL(op) >= -32 && INTVAL(op) < 32));
218 /* Return true if OP is a register or a valid floating point literal. */
221 fp_arith_operand (op, mode)
223 enum machine_mode mode;
225 return (register_operand (op, mode) || fp_literal (op, mode));
228 /* Return true if OP is a register or a valid signed integer literal. */
231 signed_arith_operand (op, mode)
233 enum machine_mode mode;
235 return (register_operand (op, mode) || signed_literal (op, mode));
238 /* Return truth value of whether OP is an integer which fits the
239 range constraining immediate operands in three-address insns. */
244 enum machine_mode mode ATTRIBUTE_UNUSED;
246 return ((GET_CODE (op) == CONST_INT) && INTVAL(op) >= 0 && INTVAL(op) < 32);
249 /* Return true if OP is a float constant of 1. */
252 fp_literal_one (op, mode)
254 enum machine_mode mode;
256 return (TARGET_NUMERICS && mode == GET_MODE (op) && op == CONST1_RTX (mode));
259 /* Return true if OP is a float constant of 0. */
262 fp_literal_zero (op, mode)
264 enum machine_mode mode;
266 return (TARGET_NUMERICS && mode == GET_MODE (op) && op == CONST0_RTX (mode));
269 /* Return true if OP is a valid floating point literal. */
274 enum machine_mode mode;
276 return fp_literal_zero (op, mode) || fp_literal_one (op, mode);
279 /* Return true if OP is a valid signed immediate constant. */
282 signed_literal(op, mode)
284 enum machine_mode mode ATTRIBUTE_UNUSED;
286 return ((GET_CODE (op) == CONST_INT) && INTVAL(op) > -32 && INTVAL(op) < 32);
289 /* Return truth value of statement that OP is a symbolic memory
290 operand of mode MODE. */
293 symbolic_memory_operand (op, mode)
295 enum machine_mode mode ATTRIBUTE_UNUSED;
297 if (GET_CODE (op) == SUBREG)
298 op = SUBREG_REG (op);
299 if (GET_CODE (op) != MEM)
302 return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST
303 || GET_CODE (op) == HIGH || GET_CODE (op) == LABEL_REF);
306 /* Return truth value of whether OP is EQ or NE. */
311 enum machine_mode mode ATTRIBUTE_UNUSED;
313 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
316 /* OP is an integer register or a constant. */
319 arith32_operand (op, mode)
321 enum machine_mode mode;
323 if (register_operand (op, mode))
325 return (CONSTANT_P (op));
328 /* Return true if OP is an integer constant which is a power of 2. */
331 power2_operand (op,mode)
333 enum machine_mode mode ATTRIBUTE_UNUSED;
335 if (GET_CODE (op) != CONST_INT)
338 return exact_log2 (INTVAL (op)) >= 0;
341 /* Return true if OP is an integer constant which is the complement of a
345 cmplpower2_operand (op, mode)
347 enum machine_mode mode ATTRIBUTE_UNUSED;
349 if (GET_CODE (op) != CONST_INT)
352 return exact_log2 (~ INTVAL (op)) >= 0;
/* If VAL has only one bit set, return the index of that bit.  Otherwise
   return -1.  */

int
bitpos (val)
     unsigned int val;
{
  register int i;

  for (i = 0; val != 0; i++, val >>= 1)
    {
      if (val & 1)
	{
	  /* Another bit remains above this one: not a single-bit value.  */
	  if (val != 1)
	    return -1;
	  return i;
	}
    }
  /* VAL was zero: no bit set.  */
  return -1;
}
/* Return nonzero if OP is a mask, i.e. all one bits are consecutive.
   The return value indicates how many consecutive nonzero bits exist
   if this is a mask.  This is the same as the next function, except that
   it does not indicate what the start and stop bit positions are.  */

int
is_mask (val)
     unsigned int val;
{
  register int start, end = 0, i;

  start = -1;
  for (i = 0; val != 0; val >>= 1, i++)
    {
      if (val & 1)
	{
	  if (start < 0)
	    start = i;

	  end = i;
	  continue;
	}
      /* Still looking for the first bit.  */
      if (start < 0)
	continue;

      /* We've seen the start of a bit sequence, and now a zero.  There
	 must be more one bits, otherwise we would have exited the loop.
	 Therefore, it is not a mask.  */
      if (val)
	return 0;
    }

  /* The bit string has ones from START to END bit positions only.  */
  return end - start + 1;
}
/* If VAL is a mask, then return nonzero, with S set to the starting bit
   position and E set to the ending bit position of the mask.  The return
   value indicates how many consecutive bits exist in the mask.  This is
   the same as the previous function, except that it also indicates the
   start and end bit positions of the mask.  */

int
bitstr (val, s, e)
     unsigned int val;
     int *s, *e;
{
  register int start, end, i;

  start = -1;
  end = -1;
  for (i = 0; val != 0; val >>= 1, i++)
    {
      if (val & 1)
	{
	  if (start < 0)
	    start = i;

	  end = i;
	  continue;
	}

      /* Still looking for the first bit.  */
      if (start < 0)
	continue;

      /* We've seen the start of a bit sequence, and now a zero.  There
	 must be more one bits, otherwise we would have exited the loop.
	 Therefore, it is not a mask.  */
      if (val)
	{
	  start = -1;
	  break;
	}
    }

  *s = start;
  *e = end;

  /* The bit string has ones from START to END bit positions only.  */
  return ((start < 0) ? 0 : end - start + 1);
}
460 /* Return the machine mode to use for a comparison. */
463 select_cc_mode (op, x)
465 rtx x ATTRIBUTE_UNUSED;
467 if (op == GTU || op == LTU || op == GEU || op == LEU)
472 /* X and Y are two things to compare using CODE. Emit the compare insn and
473 return the rtx for register 36 in the proper mode. */
476 gen_compare_reg (code, x, y)
481 enum machine_mode ccmode = SELECT_CC_MODE (code, x, y);
482 enum machine_mode mode
483 = GET_MODE (x) == VOIDmode ? GET_MODE (y) : GET_MODE (x);
487 if (! arith_operand (x, mode))
488 x = force_reg (SImode, x);
489 if (! arith_operand (y, mode))
490 y = force_reg (SImode, y);
493 cc_reg = gen_rtx_REG (ccmode, 36);
494 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
495 gen_rtx_COMPARE (ccmode, x, y)));
500 /* For the i960, REG is cost 1, REG+immed CONST is cost 2, REG+REG is cost 2,
501 REG+nonimmed CONST is cost 4. REG+SYMBOL_REF, SYMBOL_REF, and similar
502 are 4. Indexed addresses are cost 6. */
504 /* ??? Try using just RTX_COST, i.e. not defining ADDRESS_COST. */
507 i960_address_cost (x)
511 /* Handled before calling here. */
512 if (GET_CODE (x) == REG)
515 /* This is a MEMA operand -- it's free. */
516 if (GET_CODE (x) == CONST_INT
518 && INTVAL (x) < 4096)
521 if (GET_CODE (x) == PLUS)
523 rtx base = XEXP (x, 0);
524 rtx offset = XEXP (x, 1);
526 if (GET_CODE (base) == SUBREG)
527 base = SUBREG_REG (base);
528 if (GET_CODE (offset) == SUBREG)
529 offset = SUBREG_REG (offset);
531 if (GET_CODE (base) == REG)
533 if (GET_CODE (offset) == REG)
535 if (GET_CODE (offset) == CONST_INT)
537 if ((unsigned)INTVAL (offset) < 2047)
541 if (CONSTANT_P (offset))
544 if (GET_CODE (base) == PLUS || GET_CODE (base) == MULT)
547 /* This is an invalid address. The return value doesn't matter, but
548 for convenience we make this more expensive than anything else. */
551 if (GET_CODE (x) == MULT)
554 /* Symbol_refs and other unrecognized addresses are cost 4. */
558 /* Emit insns to move operands[1] into operands[0].
560 Return 1 if we have written out everything that needs to be done to
561 do the move. Otherwise, return 0 and the caller will emit the move
565 emit_move_sequence (operands, mode)
567 enum machine_mode mode;
569 /* We can only store registers to memory. */
571 if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) != REG
572 && (operands[1] != const0_rtx || current_function_args_size
573 || current_function_stdarg
574 || rtx_equal_function_value_matters))
575 /* Here we use the same test as movsi+1 pattern -- see i960.md. */
576 operands[1] = force_reg (mode, operands[1]);
578 /* Storing multi-word values in unaligned hard registers to memory may
579 require a scratch since we have to store them a register at a time and
580 adding 4 to the memory address may not yield a valid insn. */
581 /* ??? We don't always need the scratch, but that would complicate things.
583 /* ??? We must also handle stores to pseudos here, because the pseudo may be
584 replaced with a MEM later. This would be cleaner if we didn't have
585 a separate pattern for unaligned DImode/TImode stores. */
586 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
587 && (GET_CODE (operands[0]) == MEM
588 || (GET_CODE (operands[0]) == REG
589 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
590 && GET_CODE (operands[1]) == REG
591 && REGNO (operands[1]) < FIRST_PSEUDO_REGISTER
592 && ! HARD_REGNO_MODE_OK (REGNO (operands[1]), mode))
594 emit_insn (gen_rtx_PARALLEL
597 gen_rtx_SET (VOIDmode, operands[0], operands[1]),
598 gen_rtx_CLOBBER (VOIDmode,
599 gen_rtx_SCRATCH (Pmode)))));
606 /* Output assembler to move a double word value. */
609 i960_output_move_double (dst, src)
614 if (GET_CODE (dst) == REG
615 && GET_CODE (src) == REG)
617 if ((REGNO (src) & 1)
618 || (REGNO (dst) & 1))
620 /* We normally copy the low-numbered register first. However, if
621 the second source register is the same as the first destination
622 register, we must copy in the opposite order. */
623 if (REGNO (src) + 1 == REGNO (dst))
624 return "mov %D1,%D0\n\tmov %1,%0";
626 return "mov %1,%0\n\tmov %D1,%D0";
631 else if (GET_CODE (dst) == REG
632 && GET_CODE (src) == CONST_INT
633 && CONST_OK_FOR_LETTER_P (INTVAL (src), 'I'))
636 return "mov %1,%0\n\tmov 0,%D0";
640 else if (GET_CODE (dst) == REG
641 && GET_CODE (src) == MEM)
645 /* One can optimize a few cases here, but you have to be
646 careful of clobbering registers used in the address and
650 operands[2] = gen_rtx_REG (Pmode, REGNO (dst) + 1);
651 operands[3] = gen_rtx_MEM (word_mode, operands[2]);
652 operands[4] = adjust_address (operands[3], word_mode,
655 ("lda %1,%2\n\tld %3,%0\n\tld %4,%D0", operands);
661 else if (GET_CODE (dst) == MEM
662 && GET_CODE (src) == REG)
667 operands[1] = adjust_address (dst, word_mode, UNITS_PER_WORD);
668 if (! memory_address_p (word_mode, XEXP (operands[1], 0)))
671 output_asm_insn ("st %2,%0\n\tst %D2,%1", operands);
680 /* Output assembler to move a double word zero. */
683 i960_output_move_double_zero (dst)
690 operands[1] = adjust_address (dst, word_mode, 4);
691 output_asm_insn ("st g14,%0\n\tst g14,%1", operands);
696 /* Output assembler to move a quad word value. */
699 i960_output_move_quad (dst, src)
704 if (GET_CODE (dst) == REG
705 && GET_CODE (src) == REG)
707 if ((REGNO (src) & 3)
708 || (REGNO (dst) & 3))
710 /* We normally copy starting with the low numbered register.
711 However, if there is an overlap such that the first dest reg
712 is <= the last source reg but not < the first source reg, we
713 must copy in the opposite order. */
714 if (REGNO (dst) <= REGNO (src) + 3
715 && REGNO (dst) >= REGNO (src))
716 return "mov %F1,%F0\n\tmov %E1,%E0\n\tmov %D1,%D0\n\tmov %1,%0";
718 return "mov %1,%0\n\tmov %D1,%D0\n\tmov %E1,%E0\n\tmov %F1,%F0";
723 else if (GET_CODE (dst) == REG
724 && GET_CODE (src) == CONST_INT
725 && CONST_OK_FOR_LETTER_P (INTVAL (src), 'I'))
728 return "mov %1,%0\n\tmov 0,%D0\n\tmov 0,%E0\n\tmov 0,%F0";
732 else if (GET_CODE (dst) == REG
733 && GET_CODE (src) == MEM)
737 /* One can optimize a few cases here, but you have to be
738 careful of clobbering registers used in the address and
742 operands[2] = gen_rtx_REG (Pmode, REGNO (dst) + 3);
743 operands[3] = gen_rtx_MEM (word_mode, operands[2]);
745 = adjust_address (operands[3], word_mode, UNITS_PER_WORD);
747 = adjust_address (operands[4], word_mode, UNITS_PER_WORD);
749 = adjust_address (operands[5], word_mode, UNITS_PER_WORD);
750 output_asm_insn ("lda %1,%2\n\tld %3,%0\n\tld %4,%D0\n\tld %5,%E0\n\tld %6,%F0", operands);
756 else if (GET_CODE (dst) == MEM
757 && GET_CODE (src) == REG)
762 operands[1] = adjust_address (dst, word_mode, UNITS_PER_WORD);
763 operands[2] = adjust_address (dst, word_mode, 2 * UNITS_PER_WORD);
764 operands[3] = adjust_address (dst, word_mode, 3 * UNITS_PER_WORD);
765 if (! memory_address_p (word_mode, XEXP (operands[3], 0)))
768 output_asm_insn ("st %4,%0\n\tst %D4,%1\n\tst %E4,%2\n\tst %F4,%3", operands);
777 /* Output assembler to move a quad word zero. */
780 i960_output_move_quad_zero (dst)
787 operands[1] = adjust_address (dst, word_mode, 4);
788 operands[2] = adjust_address (dst, word_mode, 8);
789 operands[3] = adjust_address (dst, word_mode, 12);
790 output_asm_insn ("st g14,%0\n\tst g14,%1\n\tst g14,%2\n\tst g14,%3", operands);
796 /* Emit insns to load a constant to non-floating point registers.
797 Uses several strategies to try to use as few insns as possible. */
/* NOTE(review): many lines of this function were lost in extraction
   (gaps in the embedded numbering); the fragments below are kept
   byte-identical pending recovery of the original text.  */
800 i960_output_ldconst (dst, src)
801 register rtx dst, src;
804 register unsigned rsrc2;
805 enum machine_mode mode = GET_MODE (dst);
808 operands[0] = operands[2] = dst;
809 operands[1] = operands[3] = src;
811 /* Anything that isn't a compile time constant, such as a SYMBOL_REF,
812 must be a ldconst insn. */
814 if (GET_CODE (src) != CONST_INT && GET_CODE (src) != CONST_DOUBLE)
816 output_asm_insn ("ldconst %1,%0", operands);
/* TFmode constants: emitted as three SImode loads via recursion.  */
819 else if (mode == TFmode)
825 if (fp_literal_zero (src, TFmode))
828 REAL_VALUE_FROM_CONST_DOUBLE (d, src);
829 REAL_VALUE_TO_TARGET_LONG_DOUBLE (d, value_long);
831 output_asm_insn ("# ldconst %1,%0",operands);
833 for (i = 0; i < 3; i++)
835 operands[0] = gen_rtx_REG (SImode, REGNO (dst) + i);
836 operands[1] = GEN_INT (value_long[i]);
837 output_asm_insn (i960_output_ldconst (operands[0], operands[1]),
/* DFmode constants: split into two word halves and recurse.  */
843 else if (mode == DFmode)
847 if (fp_literal_zero (src, DFmode))
850 split_double (src, &first, &second);
852 output_asm_insn ("# ldconst %1,%0",operands);
854 operands[0] = gen_rtx_REG (SImode, REGNO (dst));
856 output_asm_insn (i960_output_ldconst (operands[0], operands[1]),
858 operands[0] = gen_rtx_REG (SImode, REGNO (dst) + 1);
859 operands[1] = second;
860 output_asm_insn (i960_output_ldconst (operands[0], operands[1]),
864 else if (mode == SFmode)
869 REAL_VALUE_FROM_CONST_DOUBLE (d, src);
870 REAL_VALUE_TO_TARGET_SINGLE (d, value);
872 output_asm_insn ("# ldconst %1,%0",operands);
873 operands[0] = gen_rtx_REG (SImode, REGNO (dst));
874 operands[1] = GEN_INT (value);
875 output_asm_insn (i960_output_ldconst (operands[0], operands[1]),
879 else if (mode == TImode)
881 /* ??? This is currently not handled at all. */
884 /* Note: lowest order word goes in lowest numbered reg. */
885 rsrc1 = INTVAL (src);
886 if (rsrc1 >= 0 && rsrc1 < 32)
889 output_asm_insn ("movq\t0,%0\t# ldconstq %1,%0",operands);
890 /* Go pick up the low-order word. */
892 else if (mode == DImode)
894 rtx upperhalf, lowerhalf, xoperands[2];
896 if (GET_CODE (src) == CONST_DOUBLE || GET_CODE (src) == CONST_INT)
897 split_double (src, &lowerhalf, &upperhalf);
902 /* Note: lowest order word goes in lowest numbered reg. */
903 /* Numbers from 0 to 31 can be handled with a single insn. */
904 rsrc1 = INTVAL (lowerhalf);
905 if (upperhalf == const0_rtx && rsrc1 >= 0 && rsrc1 < 32)
908 /* Output the upper half with a recursive call. */
909 xoperands[0] = gen_rtx_REG (SImode, REGNO (dst) + 1);
910 xoperands[1] = upperhalf;
911 output_asm_insn (i960_output_ldconst (xoperands[0], xoperands[1]),
913 /* The lower word is emitted as normally. */
917 rsrc1 = INTVAL (src);
923 else if (mode == HImode)
/* SImode integer strategies follow, cheapest first.  */
932 /* ldconst 0..31,X -> mov 0..31,X */
935 if (i960_last_insn_type == I_TYPE_REG && TARGET_C_SERIES)
940 /* ldconst 32..63,X -> add 31,nn,X */
943 if (i960_last_insn_type == I_TYPE_REG && TARGET_C_SERIES)
945 operands[1] = GEN_INT (rsrc1 - 31);
946 output_asm_insn ("addo\t31,%1,%0\t# ldconst %3,%0", operands);
952 /* ldconst -1..-31 -> sub 0,0..31,X */
955 /* return 'sub -(%1),0,%0' */
956 operands[1] = GEN_INT (- rsrc1);
957 output_asm_insn ("subo\t%1,0,%0\t# ldconst %3,%0", operands);
961 /* ldconst -32 -> not 31,X */
964 operands[1] = GEN_INT (~rsrc1);
965 output_asm_insn ("not\t%1,%0 # ldconst %3,%0", operands);
970 /* If const is a single bit. */
971 if (bitpos (rsrc1) >= 0)
973 operands[1] = GEN_INT (bitpos (rsrc1));
974 output_asm_insn ("setbit\t%1,0,%0\t# ldconst %3,%0", operands);
978 /* If const is a bit string of less than 6 bits (1..31 shifted). */
983 if (bitstr (rsrc1, &s, &e) < 6)
985 rsrc2 = ((unsigned int) rsrc1) >> s;
986 operands[1] = GEN_INT (rsrc2);
987 operands[2] = GEN_INT (s);
988 output_asm_insn ("shlo\t%2,%1,%0\t# ldconst %3,%0", operands);
993 /* Unimplemented cases:
994 const is in range 0..31 but rotated around end of word:
995 ror 31,3,g0 -> ldconst 0xe0000003,g0
997 and any 2 instruction cases that might be worthwhile */
999 output_asm_insn ("ldconst %1,%0", operands);
1003 /* Determine if there is an opportunity for a bypass optimization.
1004 Bypass succeeds on the 960K* if the destination of the previous
1005 instruction is the second operand of the current instruction.
1006 Bypass always succeeds on the C*.
1008 Return 1 if the pattern should interchange the operands.
1010 CMPBR_FLAG is true if this is for a compare-and-branch insn.
1011 OP1 and OP2 are the two source operands of a 3 operand insn. */
1014 i960_bypass (insn, op1, op2, cmpbr_flag)
1015 register rtx insn, op1, op2;
1018 register rtx prev_insn, prev_dest;
1020 if (TARGET_C_SERIES)
1023 /* Can't do this if op1 isn't a register. */
1027 /* Can't do this for a compare-and-branch if both ops aren't regs. */
1028 if (cmpbr_flag && ! REG_P (op2))
1031 prev_insn = prev_real_insn (insn);
1033 if (prev_insn && GET_CODE (prev_insn) == INSN
1034 && GET_CODE (PATTERN (prev_insn)) == SET)
1036 prev_dest = SET_DEST (PATTERN (prev_insn));
1037 if ((GET_CODE (prev_dest) == REG && REGNO (prev_dest) == REGNO (op1))
1038 || (GET_CODE (prev_dest) == SUBREG
1039 && GET_CODE (SUBREG_REG (prev_dest)) == REG
1040 && REGNO (SUBREG_REG (prev_dest)) == REGNO (op1)))
1046 /* Output the code which declares the function name. This also handles
1047 leaf routines, which have special requirements, and initializes some
1048 global variables. */
/* NOTE(review): many lines of this function were lost in extraction
   (gaps in the embedded numbering); the fragments below are kept
   byte-identical pending recovery of the original text.  */
1051 i960_function_name_declare (file, name, fndecl)
1060 /* Increment global return label. */
1064 /* Compute whether tail calls and leaf routine optimizations can be performed
1065 for this function. */
1067 if (TARGET_TAILCALL)
1072 if (TARGET_LEAFPROC)
1077 /* Even if nobody uses extra parms, can't have leafproc or tail calls if
1078 argblock, because argblock uses g14 implicitly. */
1080 if (current_function_args_size != 0 || VARARGS_STDARG_FUNCTION (fndecl))
1086 /* See if caller passes in an address to return value. */
1088 if (aggregate_value_p (DECL_RESULT (fndecl)))
1094 /* Can not use tail calls or make this a leaf routine if there is a non
1097 if (get_frame_size () != 0)
1100 /* I don't understand this condition, and do not think that it is correct.
1101 Apparently this is just checking whether the frame pointer is used, and
1102 we can't trust regs_ever_live[fp] since it is (almost?) always set. */
1105 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1106 if (GET_CODE (insn) == INSN
1107 && reg_mentioned_p (frame_pointer_rtx, insn))
1113 /* Check for CALL insns. Can not be a leaf routine if there are any. */
1116 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1117 if (GET_CODE (insn) == CALL_INSN)
1123 /* Can not be a leaf routine if any non-call clobbered registers are
1124 used in this function. */
1127 for (i = 0, j = 0; i < FIRST_PSEUDO_REGISTER; i++)
1128 if (regs_ever_live[i]
1129 && ((! call_used_regs[i]) || (i > 7 && i < 12)))
1131 /* Global registers. */
1132 if (i < 16 && i > 7 && i != 13)
1134 /* Local registers. */
1139 /* Now choose a leaf return register, if we can find one, and if it is
1140 OK for this to be a leaf routine. */
1142 i960_leaf_ret_reg = -1;
1144 if (optimize && leaf_proc_ok)
1146 for (i960_leaf_ret_reg = -1, i = 0; i < 8; i++)
1147 if (regs_ever_live[i] == 0)
1149 i960_leaf_ret_reg = i;
1150 regs_ever_live[i] = 1;
1155 /* Do this after choosing the leaf return register, so it will be listed
1156 if one was chosen. */
1158 fprintf (file, "\t# Function '%s'\n", (name[0] == '*' ? &name[1] : name));
1159 fprintf (file, "\t# Registers used: ");
1161 for (i = 0, j = 0; i < FIRST_PSEUDO_REGISTER; i++)
1163 if (regs_ever_live[i])
1165 fprintf (file, "%s%s ", reg_names[i], call_used_regs[i] ? "" : "*");
1167 if (i > 15 && j == 0)
1169 fprintf (file,"\n\t#\t\t ");
1175 fprintf (file, "\n");
/* Emit the .leafproc declaration and both entry points when a leaf
   return register was found.  */
1177 if (i960_leaf_ret_reg >= 0)
1179 /* Make it a leaf procedure. */
1181 if (TREE_PUBLIC (fndecl))
1182 fprintf (file,"\t.globl\t%s.lf\n", (name[0] == '*' ? &name[1] : name));
1184 fprintf (file, "\t.leafproc\t");
1185 assemble_name (file, name);
1186 fprintf (file, ",%s.lf\n", (name[0] == '*' ? &name[1] : name));
1187 ASM_OUTPUT_LABEL (file, name);
1188 fprintf (file, "\tlda Li960R%d,g14\n", ret_label);
1189 fprintf (file, "%s.lf:\n", (name[0] == '*' ? &name[1] : name));
1190 fprintf (file, "\tmov g14,g%d\n", i960_leaf_ret_reg);
1192 if (TARGET_C_SERIES)
1194 fprintf (file, "\tlda 0,g14\n");
1195 i960_last_insn_type = I_TYPE_MEM;
1199 fprintf (file, "\tmov 0,g14\n");
1200 i960_last_insn_type = I_TYPE_REG;
1205 ASM_OUTPUT_LABEL (file, name);
1206 i960_last_insn_type = I_TYPE_CTRL;
1210 /* Compute and return the frame size. */
1213 compute_frame_size (size)
1217 int outgoing_args_size = current_function_outgoing_args_size;
1219 /* The STARTING_FRAME_OFFSET is totally hidden to us as far
1220 as size is concerned. */
1221 actual_fsize = (size + 15) & -16;
1222 actual_fsize += (outgoing_args_size + 15) & -16;
1224 return actual_fsize;
1227 /* Here register group is range of registers which can be moved by
1228 one i960 instruction. */
1236 static int i960_form_reg_groups PARAMS ((int, int, int *, int, struct reg_group *));
1237 static int i960_reg_group_compare PARAMS ((const void *, const void *));
1238 static int i960_split_reg_group PARAMS ((struct reg_group *, int, int));
1239 static void i960_arg_size_and_align PARAMS ((enum machine_mode, tree, int *, int *));
1241 /* The following functions forms the biggest as possible register
1242 groups with registers in STATE. REGS contain states of the
1243 registers in range [start, finish_reg). The function returns the
1244 number of groups formed. */
1246 i960_form_reg_groups (start_reg, finish_reg, regs, state, reg_groups)
1251 struct reg_group *reg_groups;
1256 for (i = start_reg; i < finish_reg; )
1258 if (regs [i] != state)
1263 else if (i % 2 != 0 || regs [i + 1] != state)
1264 reg_groups [nw].length = 1;
1265 else if (i % 4 != 0 || regs [i + 2] != state)
1266 reg_groups [nw].length = 2;
1267 else if (regs [i + 3] != state)
1268 reg_groups [nw].length = 3;
1270 reg_groups [nw].length = 4;
1271 reg_groups [nw].start_reg = i;
1272 i += reg_groups [nw].length;
1278 /* We sort register winodws in descending order by length. */
1280 i960_reg_group_compare (group1, group2)
1284 const struct reg_group *w1 = group1;
1285 const struct reg_group *w2 = group2;
1287 if (w1->length > w2->length)
1289 else if (w1->length < w2->length)
1295 /* Split the first register group in REG_GROUPS on subgroups one of
1296 which will contain SUBGROUP_LENGTH registers. The function
1297 returns new number of winodws. */
1299 i960_split_reg_group (reg_groups, nw, subgroup_length)
1300 struct reg_group *reg_groups;
1302 int subgroup_length;
1304 if (subgroup_length < reg_groups->length - subgroup_length)
1305 /* This guarantees correct alignments of the two subgroups for
1306 i960 (see spliting for the group length 2, 3, 4). More
1307 generalized algorithm would require splitting the group more
1309 subgroup_length = reg_groups->length - subgroup_length;
1310 /* More generalized algorithm would require to try merging
1311 subgroups here. But in case i960 it always results in failure
1312 because of register group alignment. */
1313 reg_groups[nw].length = reg_groups->length - subgroup_length;
1314 reg_groups[nw].start_reg = reg_groups->start_reg + subgroup_length;
1316 reg_groups->length = subgroup_length;
1317 qsort (reg_groups, nw, sizeof (struct reg_group), i960_reg_group_compare);
1321 /* Output code for the function prologue. */
1324 i960_output_function_prologue (file, size)
1328 register int i, j, nr;
1329 int n_saved_regs = 0;
1330 int n_remaining_saved_regs;
1331 HOST_WIDE_INT lvar_size;
1332 HOST_WIDE_INT actual_fsize, offset;
1334 struct reg_group *g, *l;
1336 /* -1 if reg must be saved on proc entry, 0 if available, 1 if saved
1338 int regs[FIRST_PSEUDO_REGISTER];
1339 /* All global registers (which must be saved) divided by groups. */
1340 struct reg_group global_reg_groups [16];
1341 /* All local registers (which are available) divided by groups. */
1342 struct reg_group local_reg_groups [16];
1345 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1346 if (regs_ever_live[i]
1347 && ((! call_used_regs[i]) || (i > 7 && i < 12))
1348 /* No need to save the static chain pointer. */
1349 && ! (i == STATIC_CHAIN_REGNUM && current_function_needs_context))
1352 /* Count global registers that need saving. */
1359 n_remaining_saved_regs = n_saved_regs;
1361 epilogue_string[0] = '\0';
1363 if (current_function_profile)
1365 /* When profiling, we may use registers 20 to 27 to save arguments, so
1366 they can't be used here for saving globals. J is the number of
1367 argument registers the mcount call will save. */
1368 for (j = 7; j >= 0 && ! regs_ever_live[j]; j--)
1371 for (i = 20; i <= j + 20; i++)
1375 gnw = i960_form_reg_groups (0, 16, regs, -1, global_reg_groups);
1376 lnw = i960_form_reg_groups (19, 32, regs, 0, local_reg_groups);
1377 qsort (global_reg_groups, gnw, sizeof (struct reg_group),
1378 i960_reg_group_compare);
1379 qsort (local_reg_groups, lnw, sizeof (struct reg_group),
1380 i960_reg_group_compare);
1381 for (g = global_reg_groups, l = local_reg_groups; lnw != 0 && gnw != 0;)
1383 if (g->length == l->length)
1385 fprintf (file, "\tmov%s %s,%s\n",
1386 ((g->length == 4) ? "q" :
1387 (g->length == 3) ? "t" :
1388 (g->length == 2) ? "l" : ""),
1389 reg_names[(unsigned char) g->start_reg],
1390 reg_names[(unsigned char) l->start_reg]);
1391 sprintf (tmpstr, "\tmov%s %s,%s\n",
1392 ((g->length == 4) ? "q" :
1393 (g->length == 3) ? "t" :
1394 (g->length == 2) ? "l" : ""),
1395 reg_names[(unsigned char) l->start_reg],
1396 reg_names[(unsigned char) g->start_reg]);
1397 strcat (epilogue_string, tmpstr);
1398 n_remaining_saved_regs -= g->length;
1399 for (i = 0; i < g->length; i++)
1401 regs [i + g->start_reg] = 1;
1402 regs [i + l->start_reg] = -1;
1403 regs_ever_live [i + l->start_reg] = 1;
1410 else if (g->length > l->length)
1411 gnw = i960_split_reg_group (g, gnw, l->length);
1413 lnw = i960_split_reg_group (l, lnw, g->length);
1416 actual_fsize = compute_frame_size (size) + 4 * n_remaining_saved_regs;
1418 /* ??? The 1.2.1 compiler does this also. This is meant to round the frame
1419 size up to the nearest multiple of 16. I don't know whether this is
1420 necessary, or even desirable.
1422 The frame pointer must be aligned, but the call instruction takes care of
1423 that. If we leave the stack pointer unaligned, we may save a little on
1424 dynamic stack allocation. And we don't lose, at least according to the
1426 actual_fsize = (actual_fsize + 15) & ~0xF;
1429 /* Check stack limit if necessary. */
1430 if (current_function_limit_stack)
1432 rtx min_stack = stack_limit_rtx;
1433 if (actual_fsize != 0)
1434 min_stack = plus_constant (stack_limit_rtx, -actual_fsize);
1436 /* Now, emulate a little bit of reload. We want to turn 'min_stack'
1437 into an arith_operand. Use register 20 as the temporary. */
1438 if (legitimate_address_p (Pmode, min_stack, 1)
1439 && !arith_operand (min_stack, Pmode))
1441 rtx tmp = gen_rtx_MEM (Pmode, min_stack);
1442 fputs ("\tlda\t", file);
1443 i960_print_operand (file, tmp, 0);
1444 fputs (",r4\n", file);
1445 min_stack = gen_rtx_REG (Pmode, 20);
1447 if (arith_operand (min_stack, Pmode))
1449 fputs ("\tcmpo\tsp,", file);
1450 i960_print_operand (file, min_stack, 0);
1451 fputs ("\n\tfaultge.f\n", file);
1454 warning ("stack limit expression is not supported");
1457 /* Allocate space for register save and locals. */
1458 if (actual_fsize > 0)
1460 if (actual_fsize < 32)
1461 fprintf (file, "\taddo %d,sp,sp\n", actual_fsize);
1463 fprintf (file, "\tlda\t%d(sp),sp\n", actual_fsize);
1466 /* Take hardware register save area created by the call instruction
1467 into account, but store them before the argument block area. */
1468 lvar_size = actual_fsize - compute_frame_size (0) - n_remaining_saved_regs * 4;
1469 offset = STARTING_FRAME_OFFSET + lvar_size;
1470 /* Save registers on stack if needed. */
1471 /* ??? Is it worth to use the same algorithm as one for saving
1472 global registers in local registers? */
1473 for (i = 0, j = n_remaining_saved_regs; j > 0 && i < 16; i++)
1480 if (i <= 14 && i % 2 == 0 && regs[i+1] == -1 && offset % 2 == 0)
1483 if (nr == 2 && i <= 12 && i % 4 == 0 && regs[i+2] == -1
1487 if (nr == 3 && regs[i+3] == -1)
1490 fprintf (file,"\tst%s %s,%d(fp)\n",
1493 (nr == 2) ? "l" : ""),
1494 reg_names[i], offset);
1495 sprintf (tmpstr,"\tld%s %d(fp),%s\n",
1498 (nr == 2) ? "l" : ""),
1499 offset, reg_names[i]);
1500 strcat (epilogue_string, tmpstr);
1506 if (actual_fsize == 0)
1509 fprintf (file, "\t#Prologue stats:\n");
1510 fprintf (file, "\t# Total Frame Size: %d bytes\n", actual_fsize);
1513 fprintf (file, "\t# Local Variable Size: %d bytes\n", lvar_size);
1515 fprintf (file, "\t# Register Save Size: %d regs, %d bytes\n",
1516 n_saved_regs, n_saved_regs * 4);
1517 fprintf (file, "\t#End Prologue#\n");
1520 /* Output code for the function profiler. */
/* Emit the mcount profiling sequence: save the live parameter registers
   g0..gN into locals r4..rN+4 (using the widest mov/movl/movt/movq group
   that fits), preserve g14 in r3 for arg-block/varargs functions, load the
   profile label LP<labelno> into g0 and callx mcount, then restore
   everything in the same grouped fashion.
   NOTE(review): several original lines are elided in this excerpt (gaps in
   the retained line numbering) -- e.g. the return type, the FILE*/labelno
   parameter declarations, braces, and the last_parm_reg declaration.  */
1523 output_function_profiler (file, labelno)
1527 /* The last used parameter register. */
1529 int i, j, increment;
1530 int varargs_stdarg_function
1531 = VARARGS_STDARG_FUNCTION (current_function_decl);
1533 /* Figure out the last used parameter register. The proper thing to do
1534 is to walk incoming args of the function. A function might have live
1535 parameter registers even if it has no incoming args. Note that we
1536 don't have to save parameter registers g8 to g11 because they are
1539 /* See also output_function_prologue, which tries to use local registers
1540 for preserved call-saved global registers. */
1542 for (last_parm_reg = 7;
1543 last_parm_reg >= 0 && ! regs_ever_live[last_parm_reg];
1547 /* Save parameter registers in regs r4 (20) to r11 (27). */
/* increment picks the widest aligned group: quad (4 regs), triple (3),
   pair (2), or single, based on alignment of i and regs remaining.  */
1549 for (i = 0, j = 4; i <= last_parm_reg; i += increment, j += increment)
1551 if (i % 4 == 0 && (last_parm_reg - i) >= 3)
1553 else if (i % 4 == 0 && (last_parm_reg - i) >= 2)
1555 else if (i % 2 == 0 && (last_parm_reg - i) >= 1)
1560 fprintf (file, "\tmov%s g%d,r%d\n",
1561 (increment == 4 ? "q" : increment == 3 ? "t"
1562 : increment == 2 ? "l": ""), i, j);
1565 /* If this function uses the arg pointer, then save it in r3 and then
1568 if (current_function_args_size != 0 || varargs_stdarg_function)
1569 fprintf (file, "\tmov g14,r3\n\tmov 0,g14\n");
1571 /* Load location address into g0 and call mcount. */
1573 fprintf (file, "\tlda\tLP%d,g0\n\tcallx\tmcount\n", labelno);
1575 /* If this function uses the arg pointer, restore it. */
1577 if (current_function_args_size != 0 || varargs_stdarg_function)
1578 fprintf (file, "\tmov r3,g14\n");
1580 /* Restore parameter registers. */
1582 for (i = 0, j = 4; i <= last_parm_reg; i += increment, j += increment)
1584 if (i % 4 == 0 && (last_parm_reg - i) >= 3)
1586 else if (i % 4 == 0 && (last_parm_reg - i) >= 2)
1588 else if (i % 2 == 0 && (last_parm_reg - i) >= 1)
1593 fprintf (file, "\tmov%s r%d,g%d\n",
1594 (increment == 4 ? "q" : increment == 3 ? "t"
1595 : increment == 2 ? "l": ""), j, i);
1599 /* Output code for the function epilogue. */
/* Emit the epilogue: the shared return label Li960R<ret_label>, the
   register-restore text accumulated in epilogue_string by the prologue,
   a g14 clear for varargs/stdarg functions, and the final ret.  Leaf
   routines (i960_leaf_ret_reg >= 0) get only a bare labelled ret.
   NOTE(review): this excerpt elides lines (braces, returns, the tmp
   declaration), so control flow between the checks is not fully visible.  */
1602 i960_output_function_epilogue (file, size)
1604 HOST_WIDE_INT size ATTRIBUTE_UNUSED;
1606 if (i960_leaf_ret_reg >= 0)
1608 fprintf (file, "Li960R%d: ret\n", ret_label);
1612 if (*epilogue_string == 0)
1616 /* Emit a return insn, but only if control can fall through to here. */
/* Walk backwards over NOTEs from the last insn; a BARRIER, CODE_LABEL,
   or JUMP_INSN whose pattern is RETURN decides whether fall-through
   is possible.  */
1618 tmp = get_last_insn ();
1621 if (GET_CODE (tmp) == BARRIER)
1623 if (GET_CODE (tmp) == CODE_LABEL)
1625 if (GET_CODE (tmp) == JUMP_INSN)
1627 if (GET_CODE (PATTERN (tmp)) == RETURN)
1631 if (GET_CODE (tmp) == NOTE)
1633 tmp = PREV_INSN (tmp);
1638 fprintf (file, "Li960R%d: ret\n", ret_label);
1642 fprintf (file, "Li960R%d:\n", ret_label);
1644 fprintf (file, "\t#EPILOGUE#\n");
1646 /* Output the string created by the prologue which will restore all
1647 registers saved by the prologue. */
1649 if (epilogue_string[0] != '\0')
1650 fprintf (file, "%s", epilogue_string);
1652 /* Must clear g14 on return if this function set it.
1653 Only varargs/stdarg functions modify g14. */
1655 if (VARARGS_STDARG_FUNCTION (current_function_decl))
1656 fprintf (file, "\tmov 0,g14\n");
1658 fprintf (file, "\tret\n");
1659 fprintf (file, "\t#End Epilogue#\n");
1662 /* Output code for a call insn. */
/* Emit assembly for a call: manages the g14 argument-block-pointer
   protocol (save to r3, load arg block, clear/restore around the call),
   and converts a call immediately followed by a return into a tail-call
   branch (bx) when it is safe.  Returns the final asm template string.
   NOTE(review): elided lines here include the return type, the operands[]
   declaration, braces, and the conditions guarding lines 1681/1701.  */
1665 i960_output_call_insn (target, argsize_rtx, arg_pointer, insn)
1666 register rtx target, argsize_rtx, arg_pointer, insn;
1668 int argsize = INTVAL (argsize_rtx);
1669 rtx nexti = next_real_insn (insn);
1671 int varargs_stdarg_function
1672 = VARARGS_STDARG_FUNCTION (current_function_decl);
1674 operands[0] = target;
1675 operands[1] = arg_pointer;
/* Preserve caller's g14 in r3 while the callee may use g14 for its own
   argument block.  */
1677 if (current_function_args_size != 0 || varargs_stdarg_function)
1678 output_asm_insn ("mov g14,r3", operands);
1681 output_asm_insn ("lda %a1,g14", operands);
1682 else if (current_function_args_size != 0 || varargs_stdarg_function)
1683 output_asm_insn ("mov 0,g14", operands);
1685 /* The code used to assume that calls to SYMBOL_REFs could not be more
1686 than 24 bits away (b vs bx, callj vs callx). This is not true. This
1687 feature is now implemented by relaxing in the GNU linker. It can convert
1688 bx to b if in range, and callx to calls/call/balx/bal as appropriate. */
1690 /* Nexti could be zero if the called routine is volatile. */
/* Tail-call: only when optimizing, no epilogue-restore text, no stack
   args, and the call is followed directly by a return (or nothing).  */
1691 if (optimize && (*epilogue_string == 0) && argsize == 0 && tail_call_ok
1692 && (nexti == 0 || GET_CODE (PATTERN (nexti)) == RETURN))
1694 /* Delete following return insn. */
1695 if (nexti && no_labels_between_p (insn, nexti))
1696 delete_insn (nexti);
1697 output_asm_insn ("bx %0", operands);
1698 return "# notreached";
1701 output_asm_insn ("callx %0", operands);
1703 /* If the caller sets g14 to the address of the argblock, then the caller
1704 must clear it after the return. */
1706 if (current_function_args_size != 0 || varargs_stdarg_function)
1707 output_asm_insn ("mov r3,g14", operands);
1708 else if (argsize > 48)
1709 output_asm_insn ("mov 0,g14", operands);
1714 /* Output code for a return insn. */
/* Return the asm template for a return: branch to the shared epilogue
   label when restore text exists, clear g14 for varargs/stdarg functions,
   and for leaf routines branch through the leaf return register.
   NOTE(review): elided lines include the return type, insn's declaration,
   braces, and the plain "ret" fall-through template.  */
1717 i960_output_ret_insn (insn)
/* Static buffer: the returned template must outlive this call.  */
1720 static char lbuf[20];
1722 if (*epilogue_string != 0)
1724 if (! TARGET_CODE_ALIGN && next_real_insn (insn) == 0)
1727 sprintf (lbuf, "b Li960R%d", ret_label);
1731 /* Must clear g14 on return if this function set it.
1732 Only varargs/stdarg functions modify g14. */
1734 if (VARARGS_STDARG_FUNCTION (current_function_decl))
1735 output_asm_insn ("mov 0,g14", 0);
1737 if (i960_leaf_ret_reg >= 0)
1739 sprintf (lbuf, "bx (%s)", reg_names[i960_leaf_ret_reg]);
1745 /* Print the operand represented by rtx X formatted by code CODE. */
/* Operand codes visible here select the 2nd/3rd/4th register of a
   multi-register operand, print condition-code suffixes (with inverted
   and/or operand-swapped variants), and append branch-prediction ".t"/".f"
   suffixes.  NOTE(review): the switch/case framework dispatching on CODE
   is elided in this excerpt; only the case bodies remain.  */
1748 i960_print_operand (file, x, code)
1753 enum rtx_code rtxcode = x ? GET_CODE (x) : NIL;
1760 /* Second reg of a double or quad. */
1761 fprintf (file, "%s", reg_names[REGNO (x)+1]);
1765 /* Third reg of a quad. */
1766 fprintf (file, "%s", reg_names[REGNO (x)+2]);
1770 /* Fourth reg of a quad. */
1771 fprintf (file, "%s", reg_names[REGNO (x)+3]);
1775 fprintf (file, "%s", reg_names[REGNO (x)]);
1783 else if (rtxcode == MEM)
1785 output_address (XEXP (x, 0));
1788 else if (rtxcode == CONST_INT)
1790 HOST_WIDE_INT val = INTVAL (x);
/* Print large magnitudes in hex, small ones in decimal.
   NOTE(review): val is HOST_WIDE_INT but is printed with %x/%d, which
   truncates where HOST_WIDE_INT is wider than int -- consider the
   HOST_WIDE_INT_PRINT_* macros.  */
1793 if (val > 9999 || val < -999)
1794 fprintf (file, "0x%x", val);
1796 fprintf (file, "%d", val);
1799 else if (rtxcode == CONST_DOUBLE)
/* 0.0 and 1.0 have dedicated i960 literal encodings (0f0.0 / 0f1.0).  */
1803 if (x == CONST0_RTX (GET_MODE (x)))
1805 fprintf (file, "0f0.0");
1808 else if (x == CONST1_RTX (GET_MODE (x)))
1810 fprintf (file, "0f1.0");
1814 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
1815 fprintf (file, "0f%s", dstr);
1822 /* Branch or jump, depending on assembler. */
1823 if (TARGET_ASM_COMPAT)
1830 /* Sign of condition. */
1831 if ((rtxcode == EQ) || (rtxcode == NE) || (rtxcode == GTU)
1832 || (rtxcode == LTU) || (rtxcode == GEU) || (rtxcode == LEU))
1834 else if ((rtxcode == GT) || (rtxcode == LT)
1835 || (rtxcode == GE) || (rtxcode == LE))
1842 /* Inverted condition. */
1843 rtxcode = reverse_condition (rtxcode);
1847 /* Inverted condition w/ reversed operands. */
1848 rtxcode = reverse_condition (rtxcode);
1852 /* Reversed operand condition. */
1853 rtxcode = swap_condition (rtxcode);
1857 /* Normal condition. */
/* Note signed and unsigned comparisons map to the same suffix here
   (g/l/ge/le); the sign distinction is handled by the opcode choice.  */
1859 if (rtxcode == EQ) { fputs ("e", file); return; }
1860 else if (rtxcode == NE) { fputs ("ne", file); return; }
1861 else if (rtxcode == GT) { fputs ("g", file); return; }
1862 else if (rtxcode == GTU) { fputs ("g", file); return; }
1863 else if (rtxcode == LT) { fputs ("l", file); return; }
1864 else if (rtxcode == LTU) { fputs ("l", file); return; }
1865 else if (rtxcode == GE) { fputs ("ge", file); return; }
1866 else if (rtxcode == GEU) { fputs ("ge", file); return; }
1867 else if (rtxcode == LE) { fputs ("le", file); return; }
1868 else if (rtxcode == LEU) { fputs ("le", file); return; }
1873 /* For conditional branches, substitute ".t" or ".f". */
1874 if (TARGET_BRANCH_PREDICT)
1876 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
1879 int pred_val = INTVAL (XEXP (x, 0));
1880 fputs ((pred_val < REG_BR_PROB_BASE / 2 ? ".f" : ".t"), file);
1886 output_addr_const (file, x);
1896 /* Print a memory address as an operand to reference that memory location.
1898 This is exactly the same as legitimate_address_p, except that it prints
1899 addresses instead of recognizing them. */
/* Decomposes ADDR into base register (breg), index register (ireg),
   scale factor, and constant offset, then prints the i960 syntax
   "offset(base)[index*scale]".  NOTE(review): elided lines include the
   declarations of breg/ireg/scale/offset and several closing braces.  */
1902 i960_print_operand_addr (file, addr)
1914 if (GET_CODE (addr) == REG)
1916 else if (CONSTANT_P (addr))
1918 else if (GET_CODE (addr) == PLUS)
1922 op0 = XEXP (addr, 0);
1923 op1 = XEXP (addr, 1);
1925 if (GET_CODE (op0) == REG)
1928 if (GET_CODE (op1) == REG)
1930 else if (CONSTANT_P (op1))
1935 else if (GET_CODE (op0) == PLUS)
/* Canonical (plus (plus (mult index scale) base) disp) form.  */
1937 if (GET_CODE (XEXP (op0, 0)) == MULT)
1939 ireg = XEXP (XEXP (op0, 0), 0);
1940 scale = XEXP (XEXP (op0, 0), 1);
1941 if (GET_CODE (XEXP (op0, 1)) == REG)
1943 breg = XEXP (op0, 1);
1949 else if (GET_CODE (XEXP (op0, 0)) == REG)
1951 breg = XEXP (op0, 0);
1952 if (GET_CODE (XEXP (op0, 1)) == REG)
1954 ireg = XEXP (op0, 1);
1963 else if (GET_CODE (op0) == MULT)
1965 ireg = XEXP (op0, 0);
1966 scale = XEXP (op0, 1);
1967 if (GET_CODE (op1) == REG)
1969 else if (CONSTANT_P (op1))
1977 else if (GET_CODE (addr) == MULT)
1979 ireg = XEXP (addr, 0);
1980 scale = XEXP (addr, 1);
/* Emit the pieces that were found: disp, (base), [index*scale].  */
1986 output_addr_const (file, offset);
1988 fprintf (file, "(%s)", reg_names[REGNO (breg)]);
1990 fprintf (file, "[%s*%d]", reg_names[REGNO (ireg)], INTVAL (scale));
1993 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
1994 that is a valid memory address for an instruction.
1995 The MODE argument is the machine mode for the MEM expression
1996 that wants to use this address.
1998 On 80960, legitimate addresses are:
2000 disp (12 or 32 bit) ld foo,r0
2001 base + index ld (g0)[g1*1],r0
2002 base + displ ld 0xf00(g0),r0
2003 base + index*scale + displ ld 0xf00(g0)[g1*4],r0
2004 index*scale + base ld (g0)[g1*4],r0
2005 index*scale + displ ld 0xf00[g1*4],r0
2006 index*scale ld [g1*4],r0
2007 index + base + displ ld 0xf00(g0)[g1*1],r0
2009 In each case, scale can be 1, 2, 4, 8, or 16. */
2011 /* This is exactly the same as i960_print_operand_addr, except that
2012 it recognizes addresses instead of printing them.
2014 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
2015 convert common non-canonical forms to canonical form so that they will
2018 /* These two macros allow us to accept either a REG or a SUBREG anyplace
2019 where a register is valid. */
/* STRICT selects the reload-time predicates (hard-register checks) over
   the relaxed pre-reload ones; a SUBREG is accepted when its inner REG
   passes the same test.  */
2021 #define RTX_OK_FOR_BASE_P(X, STRICT) \
2022 ((GET_CODE (X) == REG \
2023 && (STRICT ? REG_OK_FOR_BASE_P_STRICT (X) : REG_OK_FOR_BASE_P (X))) \
2024 || (GET_CODE (X) == SUBREG \
2025 && GET_CODE (SUBREG_REG (X)) == REG \
2026 && (STRICT ? REG_OK_FOR_BASE_P_STRICT (SUBREG_REG (X)) \
2027 : REG_OK_FOR_BASE_P (SUBREG_REG (X)))))
/* Same shape as RTX_OK_FOR_BASE_P, but for index registers.  */
2029 #define RTX_OK_FOR_INDEX_P(X, STRICT) \
2030 ((GET_CODE (X) == REG \
2031 && (STRICT ? REG_OK_FOR_INDEX_P_STRICT (X) : REG_OK_FOR_INDEX_P (X)))\
2032 || (GET_CODE (X) == SUBREG \
2033 && GET_CODE (SUBREG_REG (X)) == REG \
2034 && (STRICT ? REG_OK_FOR_INDEX_P_STRICT (SUBREG_REG (X)) \
2035 : REG_OK_FOR_INDEX_P (SUBREG_REG (X)))))
/* Recognizer for the canonical i960 address forms listed in the comment
   block above (base, disp, base+index, base+index*scale+disp, etc.).
   Complex (PLUS/MULT) forms are rejected before reload unless
   TARGET_COMPLEX_ADDR is enabled.  NOTE(review): the "return 1"/"return 0"
   lines and braces are elided in this excerpt.  */
2038 legitimate_address_p (mode, addr, strict)
2039 enum machine_mode mode ATTRIBUTE_UNUSED;
2043 if (RTX_OK_FOR_BASE_P (addr, strict))
2045 else if (CONSTANT_P (addr))
2047 else if (GET_CODE (addr) == PLUS)
2051 if (! TARGET_COMPLEX_ADDR && ! reload_completed)
2054 op0 = XEXP (addr, 0);
2055 op1 = XEXP (addr, 1);
2057 if (RTX_OK_FOR_BASE_P (op0, strict))
2059 if (RTX_OK_FOR_INDEX_P (op1, strict))
2061 else if (CONSTANT_P (op1))
2066 else if (GET_CODE (op0) == PLUS)
2068 if (GET_CODE (XEXP (op0, 0)) == MULT)
2070 if (! (RTX_OK_FOR_INDEX_P (XEXP (XEXP (op0, 0), 0), strict)
2071 && SCALE_TERM_P (XEXP (XEXP (op0, 0), 1))))
2074 if (RTX_OK_FOR_BASE_P (XEXP (op0, 1), strict)
2075 && CONSTANT_P (op1))
2080 else if (RTX_OK_FOR_BASE_P (XEXP (op0, 0), strict))
2082 if (RTX_OK_FOR_INDEX_P (XEXP (op0, 1), strict)
2083 && CONSTANT_P (op1))
2091 else if (GET_CODE (op0) == MULT)
2093 if (! (RTX_OK_FOR_INDEX_P (XEXP (op0, 0), strict)
2094 && SCALE_TERM_P (XEXP (op0, 1))))
2097 if (RTX_OK_FOR_BASE_P (op1, strict))
2099 else if (CONSTANT_P (op1))
2107 else if (GET_CODE (addr) == MULT)
2109 if (! TARGET_COMPLEX_ADDR && ! reload_completed)
2112 return (RTX_OK_FOR_INDEX_P (XEXP (addr, 0), strict)
2113 && SCALE_TERM_P (XEXP (addr, 1)));
2119 /* Try machine-dependent ways of modifying an illegitimate address
2120 to be legitimate. If we find one, return the new, valid address.
2121 This macro is used in only one place: `memory_address' in explow.c.
2123 This converts some non-canonical addresses to canonical form so they
2124 can be recognized. */
/* NOTE(review): lines are elided in this excerpt; in particular the path
   taken after "constant = 0, other = 0" before the final gen_rtx_PLUS is
   not visible -- plus_constant would be called with constant == 0 there,
   so presumably an elided guard prevents that.  TODO confirm against the
   full source.  */
2127 legitimize_address (x, oldx, mode)
2129 register rtx oldx ATTRIBUTE_UNUSED;
2130 enum machine_mode mode ATTRIBUTE_UNUSED;
2132 if (GET_CODE (x) == SYMBOL_REF)
2135 x = copy_to_reg (x);
2138 if (! TARGET_COMPLEX_ADDR && ! reload_completed)
2141 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
2142 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
2143 created by virtual register instantiation, register elimination, and
2144 similar optimizations. */
2145 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
2146 && GET_CODE (XEXP (x, 1)) == PLUS)
2147 x = gen_rtx_PLUS (Pmode,
2148 gen_rtx_PLUS (Pmode, XEXP (x, 0), XEXP (XEXP (x, 1), 0)),
2149 XEXP (XEXP (x, 1), 1));
2151 /* Canonicalize (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
2152 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
2153 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
2154 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
2155 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
2156 && CONSTANT_P (XEXP (x, 1)))
2158 rtx constant, other;
/* Exactly one of the two constants must be a CONST_INT to fold.  */
2160 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2162 constant = XEXP (x, 1);
2163 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
2165 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
2167 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
2168 other = XEXP (x, 1);
2171 constant = 0, other = 0;
2174 x = gen_rtx_PLUS (Pmode,
2175 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
2176 XEXP (XEXP (XEXP (x, 0), 1), 0)),
2177 plus_constant (other, INTVAL (constant)));
2184 /* Return the most stringent alignment that we are willing to consider
2185 objects of size SIZE and known alignment ALIGN as having. */
/* NOTE(review): return statements and braces are elided; only the guard
   conditions and the size-based alignment computation survive here.  */
2188 i960_alignment (size, align)
2194 if (! TARGET_STRICT_ALIGN)
2195 if (TARGET_IC_COMPAT2_0 || align >= 4)
2197 i = i960_object_bytes_bitalign (size) / BITS_PER_UNIT;
/* Return nonzero if hard register REGNO can hold a value of MODE.
   Registers 0-31 (globals/locals): DImode/DFmode need an even regno,
   TImode/TFmode need a multiple of 4.  Registers 32-35 are the floating
   point registers; 36 appears to be the condition-code register.
   NOTE(review): the case bodies' return values and the regno < 32 branch
   header are elided in this excerpt.  */
2208 hard_regno_mode_ok (regno, mode)
2210 enum machine_mode mode;
2216 case CCmode: case CC_UNSmode: case CC_CHKmode:
2219 case DImode: case DFmode:
2220 return (regno & 1) == 0;
2222 case TImode: case TFmode:
2223 return (regno & 3) == 0;
2229 else if (regno >= 32 && regno < 36)
2233 case SFmode: case DFmode: case TFmode:
2234 case SCmode: case DCmode:
2241 else if (regno == 36)
2245 case CCmode: case CC_UNSmode: case CC_CHKmode:
2252 else if (regno == 37)
2259 /* Return the minimum alignment of an expression rtx X in bytes. This takes
2260 advantage of machine specific facts, such as knowing that the frame pointer
2261 is always 16 byte aligned. */
/* Alignments are tracked as powers of two up to 16 bytes; PLUS takes the
   minimum of its operands, ASHIFT multiplies the alignment by the shift,
   and MULT multiplies the operand alignments.  NOTE(review): case labels
   and the CONST_INT branch computing the initial align from the constant
   value are partially elided.  */
2264 i960_expr_alignment (x, size)
2273 switch (GET_CODE(x))
2278 if ((align & 0xf) == 0)
2280 else if ((align & 0x7) == 0)
2282 else if ((align & 0x3) == 0)
2284 else if ((align & 0x1) == 0)
2291 align = MIN (i960_expr_alignment (XEXP (x, 0), size),
2292 i960_expr_alignment (XEXP (x, 1), size));
2296 /* If this is a valid program, objects are guaranteed to be
2297 correctly aligned for whatever size the reference actually is. */
2298 align = i960_object_bytes_bitalign (size) / BITS_PER_UNIT;
2302 if (REGNO (x) == FRAME_POINTER_REGNUM)
2307 align = i960_expr_alignment (XEXP (x, 0), size);
2309 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2311 align = align << INTVAL (XEXP (x, 1));
2312 align = MIN (align, 16);
2317 align = (i960_expr_alignment (XEXP (x, 0), size) *
2318 i960_expr_alignment (XEXP (x, 1), size));
2320 align = MIN (align, 16);
2329 /* Return true if it is possible to reference both BASE and OFFSET, which
2330 have alignment at least as great as 4 byte, as if they had alignment valid
2331 for an object of size SIZE. */
2334 i960_improve_align (base, offset, size)
2341 /* We have at least a word reference to the object, so we know it has to
2342 be aligned at least to 4 bytes. */
/* i = provable alignment of the combined address (min of both parts).  */
2344 i = MIN (i960_expr_alignment (base, 4),
2345 i960_expr_alignment (offset, 4));
2349 /* We know the size of the request. If strict align is not enabled, we
2350 can guess that the alignment is OK for the requested size. */
2352 if (! TARGET_STRICT_ALIGN)
2353 if ((j = (i960_object_bytes_bitalign (size) / BITS_PER_UNIT)) > i)
2359 /* Return true if it is possible to access BASE and OFFSET, which have 4 byte
2360 (SImode) alignment as if they had 16 byte (TImode) alignment. */
/* Thin wrapper: delegates to i960_improve_align with size 16.  */
2363 i960_si_ti (base, offset)
2367 return i960_improve_align (base, offset, 16);
2370 /* Return true if it is possible to access BASE and OFFSET, which have 4 byte
2371 (SImode) alignment as if they had 8 byte (DImode) alignment. */
/* Thin wrapper: delegates to i960_improve_align with size 8.  */
2374 i960_si_di (base, offset)
2378 return i960_improve_align (base, offset, 8);
2381 /* Return raw values of size and alignment (in words) for the data
2382 type being accessed. These values will be rounded by the caller. */
/* Writes the results through size_out/align_out (visible in the
   signature at line 2385).  NOTE(review): braces, the abort/align
   fallthrough paths, and the stores into *size_out/*align_out are
   elided in this excerpt.  */
2385 i960_arg_size_and_align (mode, type, size_out, align_out)
2386 enum machine_mode mode;
2393 /* Use formal alignment requirements of type being passed, except make
2394 it at least a word. If we don't have a type, this is a library call,
2395 and the parm has to be of scalar type. In this case, consider its
2396 formal alignment requirement to be its size in words. */
2398 if (mode == BLKmode)
2399 size = (int_size_in_bytes (type) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2400 else if (mode == VOIDmode)
2402 /* End of parm list. */
2403 if (type == 0 || TYPE_MODE (type) != VOIDmode)
2408 size = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2412 else if (TYPE_ALIGN (type) >= BITS_PER_WORD)
2413 align = TYPE_ALIGN (type) / BITS_PER_WORD;
2421 /* On the 80960 the first 12 args are in registers and the rest are pushed.
2422 Any arg that is bigger than 4 words is placed on the stack and all
2423 subsequent arguments are placed on the stack.
2425 Additionally, parameters with an alignment requirement stronger than
2426 a word must be aligned appropriately. Note that this means that a
2427 64 bit object with a 32 bit alignment is not 64 bit aligned and may be
2428 passed in an odd/even register pair. */
2430 /* Update CUM to advance past an argument described by MODE and TYPE. */
2433 i960_function_arg_advance (cum, mode, type, named)
2434 CUMULATIVE_ARGS *cum;
2435 enum machine_mode mode;
2437 int named ATTRIBUTE_UNUSED;
2441 i960_arg_size_and_align (mode, type, &size, &align);
/* Arg goes on the stack if it is too big, if any previous arg already
   went on the stack, if it would not fit in the remaining regs, or if
   the target forces it there.  */
2443 if (size > 4 || cum->ca_nstackparms != 0
2444 || (size + ROUND_PARM (cum->ca_nregparms, align)) > NPARM_REGS
2445 || MUST_PASS_IN_STACK (mode, type))
2447 /* Indicate that all the registers are in use, even if all are not,
2448 so va_start will compute the right value. */
2449 cum->ca_nregparms = NPARM_REGS;
2450 cum->ca_nstackparms = ROUND_PARM (cum->ca_nstackparms, align) + size;
2453 cum->ca_nregparms = ROUND_PARM (cum->ca_nregparms, align) + size;
2456 /* Return the register that the argument described by MODE and TYPE is
2457 passed in, or else return 0 if it is passed on the stack. */
/* Mirrors the stack/register decision in i960_function_arg_advance;
   also rounds CUM in place as a side effect.  NOTE(review): braces, the
   "ret = 0" stack path, and the final return are elided here.  */
2460 i960_function_arg (cum, mode, type, named)
2461 CUMULATIVE_ARGS *cum;
2462 enum machine_mode mode;
2464 int named ATTRIBUTE_UNUSED;
2469 if (mode == VOIDmode)
2472 i960_arg_size_and_align (mode, type, &size, &align);
2474 if (size > 4 || cum->ca_nstackparms != 0
2475 || (size + ROUND_PARM (cum->ca_nregparms, align)) > NPARM_REGS
2476 || MUST_PASS_IN_STACK (mode, type))
2478 cum->ca_nstackparms = ROUND_PARM (cum->ca_nstackparms, align);
2483 cum->ca_nregparms = ROUND_PARM (cum->ca_nregparms, align);
2484 ret = gen_rtx_REG (mode, cum->ca_nregparms);
2490 /* Return the number of bits that an object of size N bytes is aligned to. */
/* Buckets: >8 bytes -> 128 bits (elided line, presumably), >4 -> 64,
   >2 -> 32, >1 -> 16, otherwise 8 (elided).  TODO confirm the elided
   first and last branches against the full source.  */
2493 i960_object_bytes_bitalign (n)
2497 else if (n > 4) n = 64;
2498 else if (n > 2) n = 32;
2499 else if (n > 1) n = 16;
2505 /* Compute the alignment for an aggregate type TSIZE.
2506 Alignment is MAX (greatest member alignment,
2507 MIN (pragma align, structure size alignment)). */
/* Early-outs (bodies elided): old-style alignment or packed types, and
   non-RECORD or variable-sized types, keep the incoming ALIGN.  */
2510 i960_round_align (align, type)
2517 if (TARGET_OLD_ALIGN || TYPE_PACKED (type))
2519 if (TREE_CODE (type) != RECORD_TYPE)
2521 tsize = TYPE_SIZE (type);
2523 if (! tsize || TREE_CODE (tsize) != INTEGER_CST)
2526 new_align = i960_object_bytes_bitalign (TREE_INT_CST_LOW (tsize)
2528 /* Handle #pragma align. */
2529 if (new_align > i960_maxbitalignment)
2530 new_align = i960_maxbitalignment;
2532 if (align < new_align)
2538 /* Do any needed setup for a varargs function. For the i960, we must
2539 create a register parameter block if one doesn't exist, and then copy
2540 all register parameters to memory. */
2543 i960_setup_incoming_varargs (cum, mode, type, pretend_size, no_rtl)
2544 CUMULATIVE_ARGS *cum;
2545 enum machine_mode mode ATTRIBUTE_UNUSED;
2546 tree type ATTRIBUTE_UNUSED;
2547 int *pretend_size ATTRIBUTE_UNUSED;
2550 /* Note: for a varargs fn with only a va_alist argument, this is 0. */
2551 int first_reg = cum->ca_nregparms;
2553 /* Copy only unnamed register arguments to memory. If there are
2554 any stack parms, there are no unnamed arguments in registers, and
2555 an argument block was already allocated by the caller.
2556 Remember that any arg bigger than 4 words is passed on the stack as
2557 are all subsequent args.
2559 If there are no stack arguments but there are exactly NPARM_REGS
2560 registers, either there were no extra arguments or the caller
2561 allocated an argument block. */
2563 if (cum->ca_nstackparms == 0 && first_reg < NPARM_REGS && !no_rtl)
2565 rtx label = gen_label_rtx ();
2566 rtx regblock, fake_arg_pointer_rtx;
2568 /* Use a different rtx than arg_pointer_rtx so that cse and friends
2569 can go on believing that the argument pointer can never be zero. */
2570 fake_arg_pointer_rtx = gen_raw_REG (Pmode, ARG_POINTER_REGNUM);
2572 /* If the argument pointer is 0, no arguments were passed on the stack
2573 and we need to allocate a chunk to save the registers (if any
2574 arguments were passed on the stack the caller would allocate the
2575 48 bytes as well). We must allocate all 48 bytes (12*4) because
2576 va_start assumes it. */
/* Runtime test: skip the allocation when the caller already provided
   an arg block (g14 nonzero).  The emit_label for `label' is elided in
   this excerpt.  */
2577 emit_insn (gen_cmpsi (fake_arg_pointer_rtx, const0_rtx));
2578 emit_jump_insn (gen_bne (label));
2579 emit_insn (gen_rtx_SET (VOIDmode, fake_arg_pointer_rtx,
2580 stack_pointer_rtx));
2581 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
2582 memory_address (SImode,
2583 plus_constant (stack_pointer_rtx,
2587 /* ??? Note that we unnecessarily store one extra register for stdarg
2588 fns. We could optimize this, but it's kept as for now. */
2589 regblock = gen_rtx_MEM (BLKmode,
2590 plus_constant (arg_pointer_rtx, first_reg * 4));
2591 set_mem_alias_set (regblock, get_varargs_alias_set ());
2592 set_mem_align (regblock, BITS_PER_WORD);
2593 move_block_from_reg (first_reg, regblock,
2594 NPARM_REGS - first_reg,
2595 (NPARM_REGS - first_reg) * UNITS_PER_WORD);
2599 /* Define the `__builtin_va_list' type for the ABI. */
/* va_list is an array of two unsigned words: [0] = arg block base
   address, [1] = byte offset of the next argument (see i960_va_start).  */
2602 i960_build_va_list ()
2604 return build_array_type (unsigned_type_node,
2605 build_index_type (size_one_node));
2608 /* Implement `va_start' for varargs and stdarg. */
/* Stores the argument pointer into va_list[0] (base) and the total bytes
   consumed by named args -- (nregparms + nstackparms) * 4 -- into
   va_list[1] (num).  */
2611 i960_va_start (valist, nextarg)
2613 rtx nextarg ATTRIBUTE_UNUSED;
2615 tree s, t, base, num;
2616 rtx fake_arg_pointer_rtx;
2618 /* The array type always decays to a pointer before we get here, so we
2619 can't use ARRAY_REF. */
2620 base = build1 (INDIRECT_REF, unsigned_type_node, valist);
2621 num = build1 (INDIRECT_REF, unsigned_type_node,
2622 build (PLUS_EXPR, unsigned_type_node, valist,
2623 TYPE_SIZE_UNIT (TREE_TYPE (valist))));
2625 /* Use a different rtx than arg_pointer_rtx so that cse and friends
2626 can go on believing that the argument pointer can never be zero. */
2627 fake_arg_pointer_rtx = gen_raw_REG (Pmode, ARG_POINTER_REGNUM);
2628 s = make_tree (unsigned_type_node, fake_arg_pointer_rtx);
2629 t = build (MODIFY_EXPR, unsigned_type_node, base, s);
2630 TREE_SIDE_EFFECTS (t) = 1;
2631 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2633 s = build_int_2 ((current_function_args_info.ca_nregparms
2634 + current_function_args_info.ca_nstackparms) * 4, 0);
2635 t = build (MODIFY_EXPR, unsigned_type_node, num, s);
2636 TREE_SIDE_EFFECTS (t) = 1;
2637 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2640 /* Implement `va_arg'. */
/* Fetch the next argument's address from the two-word va_list (base,
   num): align num for the argument, handle the jump from the 48-byte
   register save area to the stack overflow area, advance num past the
   argument, and return the address as an rtx.
   NOTE(review): elided lines include addr_rtx's declaration, the return
   statement, and the guard selecting t2 = integer_one_node vs the
   GT_EXPR comparison.  */
2643 i960_va_arg (valist, type)
2646 HOST_WIDE_INT siz, ali;
2647 tree base, num, pad, next, this, t1, t2, int48;
2650 /* The array type always decays to a pointer before we get here, so we
2651 can't use ARRAY_REF. */
2652 base = build1 (INDIRECT_REF, unsigned_type_node, valist);
2653 num = build1 (INDIRECT_REF, unsigned_type_node,
2654 build (PLUS_EXPR, unsigned_type_node, valist,
2655 TYPE_SIZE_UNIT (TREE_TYPE (valist))));
2657 /* Round up sizeof(type) to a word. */
2658 siz = (int_size_in_bytes (type) + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
2660 /* Round up alignment to a word. */
2661 ali = TYPE_ALIGN (type);
2662 if (ali < BITS_PER_WORD)
2663 ali = BITS_PER_WORD;
2664 ali /= BITS_PER_UNIT;
2666 /* Align NUM appropriate for the argument. */
2667 pad = fold (build (PLUS_EXPR, unsigned_type_node, num,
2668 build_int_2 (ali - 1, 0)));
2669 pad = fold (build (BIT_AND_EXPR, unsigned_type_node, pad,
2670 build_int_2 (-ali, -1)));
2671 pad = save_expr (pad);
2673 /* Increment VPAD past this argument. */
2674 next = fold (build (PLUS_EXPR, unsigned_type_node, pad,
2675 build_int_2 (siz, 0)));
2676 next = save_expr (next);
2678 /* Find the offset for the current argument. Mind peculiar overflow
2679 from registers to stack. */
/* If the argument starts in the 48-byte register area but would end
   past it, it actually lives at offset 48 (start of stack args).  */
2680 int48 = build_int_2 (48, 0);
2682 t2 = integer_one_node;
2684 t2 = fold (build (GT_EXPR, integer_type_node, next, int48));
2685 t1 = fold (build (LE_EXPR, integer_type_node, num, int48));
2686 t1 = fold (build (TRUTH_AND_EXPR, integer_type_node, t1, t2));
2687 this = fold (build (COND_EXPR, unsigned_type_node, t1, int48, pad));
2689 /* Find the address for the current argument. */
2690 t1 = fold (build (PLUS_EXPR, unsigned_type_node, base, this));
2691 t1 = build1 (NOP_EXPR, ptr_type_node, t1);
2692 addr_rtx = expand_expr (t1, NULL_RTX, Pmode, EXPAND_NORMAL);
2694 /* Increment NUM. */
2695 t1 = build (MODIFY_EXPR, unsigned_type_node, num, next);
2696 TREE_SIDE_EFFECTS (t1) = 1;
2697 expand_expr (t1, const0_rtx, VOIDmode, EXPAND_NORMAL);
2702 /* Calculate the final size of the reg parm stack space for the current
2703 function, based on how many bytes would be allocated on the stack. */
/* Return values are elided in this excerpt; the test distinguishes
   functions with variable-size or >48-byte argument areas.  */
2706 i960_final_reg_parm_stack_space (const_size, var_size)
2710 if (var_size || const_size > 48)
2716 /* Calculate the size of the reg parm stack space. This is a bit complicated
/* Three callers are distinguished: library calls, locate_and_pad_parms
   for a foreign fndecl (arg block exists), and the current function
   (arg block only if >48 bytes of args or varargs).  The returned sizes
   are on elided lines.  */
2720 i960_reg_parm_stack_space (fndecl)
2723 /* In this case, we are called from emit_library_call, and we don't need
2724 to pretend we have more space for parameters than what's apparent. */
2728 /* In this case, we are called from locate_and_pad_parms when we're
2729 not IN_REGS, so we have an arg block. */
2730 if (fndecl != current_function_decl)
2733 /* Otherwise, we have an arg block if the current function has more than
2734 48 bytes of parameters. */
2735 if (current_function_args_size != 0 || VARARGS_STDARG_FUNCTION (fndecl))
2741 /* Return the register class of a scratch register needed to copy IN into
2742 or out of a register in CLASS in MODE. If it can be done directly,
2743 NO_REGS is returned. */
2746 secondary_reload_class (class, mode, in)
2747 enum reg_class class;
2748 enum machine_mode mode;
/* regno is -1 unless IN is a (subreg of a) register; declaration and
   its initialisation to -1 are on elided lines.  */
2753 if (GET_CODE (in) == REG || GET_CODE (in) == SUBREG)
2754 regno = true_regnum (in);
2756 /* We can place anything into LOCAL_OR_GLOBAL_REGS and can put
2757 LOCAL_OR_GLOBAL_REGS into anything. */
2758 if (class == LOCAL_OR_GLOBAL_REGS || class == LOCAL_REGS
2759 || class == GLOBAL_REGS || (regno >= 0 && regno < 32))
2762 /* We can place any hard register, 0.0, and 1.0 into FP_REGS. */
2763 if (class == FP_REGS
2764 && ((regno >= 0 && regno < FIRST_PSEUDO_REGISTER)
2765 || in == CONST0_RTX (mode) || in == CONST1_RTX (mode)))
/* Everything else needs a local/global scratch register.  */
2768 return LOCAL_OR_GLOBAL_REGS;
2771 /* Look at the opcode P, and set i960_last_insn_type to indicate which
2772 function unit it executed on. */
2774 /* ??? This would make more sense as an attribute. */
/* Classifies the emitted opcode text into MEM / CTRL / REG insn types
   for the C-series MEM/ALU alternation heuristic.  NOTE(review): the
   switch on the opcode's leading characters and several case labels are
   elided in this excerpt, so the letter tests below lack context.  */
2777 i960_scan_opcode (p)
2789 /* Ret is not actually of type REG, but it won't matter, because no
2790 insn will ever follow it. */
2793 i960_last_insn_type = I_TYPE_REG;
2797 if (p[1] == 'x' || p[3] == 'x')
2798 i960_last_insn_type = I_TYPE_MEM;
2799 i960_last_insn_type = I_TYPE_CTRL;
2804 i960_last_insn_type = I_TYPE_CTRL;
2811 i960_last_insn_type = I_TYPE_MEM;
2813 i960_last_insn_type = I_TYPE_CTRL;
2815 else if (p[1] == 'm')
2818 i960_last_insn_type = I_TYPE_REG;
2819 else if (p[4] == 'b' || p[4] == 'j')
2820 i960_last_insn_type = I_TYPE_CTRL;
2822 i960_last_insn_type = I_TYPE_REG;
2825 i960_last_insn_type = I_TYPE_REG;
2829 i960_last_insn_type = I_TYPE_MEM;
2834 i960_last_insn_type = I_TYPE_MEM;
2836 i960_last_insn_type = I_TYPE_REG;
/* Emit a vtable thunk: adjust the `this' pointer in g0 by DELTA
   (subo/addo for small magnitudes, ldconst+addo otherwise), then branch
   to FUNCTION.  NOTE(review): the declaration of `d' (presumably an int
   copy of delta -- confirm against full source) and braces are elided.  */
2842 i960_output_mi_thunk (file, thunk, delta, vcall_offset, function)
2844 tree thunk ATTRIBUTE_UNUSED;
2845 HOST_WIDE_INT delta;
2846 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED;
2850 if (d < 0 && d > -32)
2851 fprintf (file, "\tsubo %d,g0,g0\n", -d);
2852 else if (d > 0 && d < 32)
2853 fprintf (file, "\taddo %d,g0,g0\n", d);
2856 fprintf (file, "\tldconst %d,r5\n", d);
2857 fprintf (file, "\taddo r5,g0,g0\n");
2859 fprintf (file, "\tbx ");
2860 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
2861 fprintf (file, "\n");
2865 i960_rtx_costs (x, code, outer_code, total)
2867 int code, outer_code;
2872 /* Constants that can be (non-ldconst) insn operands are cost 0.
2873 Constants that can be non-ldconst operands in rare cases are cost 1.
2874 Other constants have higher costs.
2876 Must check for OUTER_CODE of SET for power2_operand, because
2877 reload_cse_move2add calls us with OUTER_CODE of PLUS to decide
2878 when to replace set with add. */
2881 if ((INTVAL (x) >= 0 && INTVAL (x) < 32)
2882 || (outer_code == SET && power2_operand (x, VOIDmode)))
2887 else if (INTVAL (x) >= -31 && INTVAL (x) < 0)
2897 *total = (TARGET_C_SERIES ? 6 : 8);
2901 if (x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode)
2902 || x == CONST1_RTX (DFmode) || x == CONST1_RTX (SFmode))