1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 93-97, 1998 Free Software Foundation, Inc.
3 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
28 #include "hard-reg-set.h"
30 #include "insn-config.h"
31 #include "conditions.h"
32 #include "insn-flags.h"
34 #include "insn-attr.h"
/* NOTE(review): this listing is gappy -- the embedded original line
   numbers skip, so e.g. the alpha_cpu_name initializer and the
   preprocessor guard around the two NUM_ARGS definitions are elided.
   Comments below annotate only what is visible.  */
45 extern char *version_string;
46 extern int rtx_equal_function_value_matters;
48 /* Specify which cpu to schedule for. */
50 enum processor_type alpha_cpu;
/* Printable cpu names indexed by alpha_cpu (used in warnings below);
   the array initializer itself is missing from this listing.  */
51 static char* const alpha_cpu_name[] =
56 /* Specify how accurate floating-point traps need to be. */
58 enum alpha_trap_precision alpha_tp;
60 /* Specify the floating-point rounding mode. */
62 enum alpha_fp_rounding_mode alpha_fprm;
64 /* Specify which things cause traps. */
66 enum alpha_fp_trap_mode alpha_fptm;
68 /* Strings decoded into the above options. */
70 char *alpha_cpu_string; /* -mcpu= */
71 char *alpha_tp_string; /* -mtrap-precision=[p|s|i] */
72 char *alpha_fprm_string; /* -mfp-rounding-mode=[n|m|c|d] */
73 char *alpha_fptm_string; /* -mfp-trap-mode=[n|u|su|sui] */
74 char *alpha_mlat_string; /* -mmemory-latency= */
76 /* Save information from a "cmpxx" operation until the branch or scc is
79 rtx alpha_compare_op0, alpha_compare_op1;
80 int alpha_compare_fp_p;
82 /* Save the name of the current function as used by the assembler. This
83 is used by the epilogue. */
85 char *alpha_function_name;
87 /* Non-zero if inside of a function, because the Alpha asm can't
88 handle .files inside of functions. */
90 static int inside_function = FALSE;
92 /* Nonzero if the current function needs gp. */
94 int alpha_function_needs_gp;
96 /* If non-null, this rtx holds the return address for the function. */
98 static rtx alpha_return_addr_rtx;
100 /* The number of cycles of latency we should assume on memory reads. */
102 int alpha_memory_latency = 3;
104 /* Declarations of static functions. */
105 static void alpha_set_memflags_1 PROTO((rtx, int, int, int));
106 static rtx alpha_emit_set_const_1 PROTO((rtx, enum machine_mode,
107 HOST_WIDE_INT, int));
108 static void add_long_const PROTO((FILE *, HOST_WIDE_INT, int, int, int));
110 /* Compute the size of the save area in the stack. */
112 static void alpha_sa_mask PROTO((unsigned long *imaskP,
113 unsigned long *fmaskP));
115 /* Get the number of args of a function in one of two ways. */
/* NOTE(review): the two NUM_ARGS definitions below were presumably
   selected by an #if/#else on the target ABI; the guard lines are
   missing from this listing -- verify against the full file.  */
117 #define NUM_ARGS current_function_args_info.num_args
119 #define NUM_ARGS current_function_args_info
/* NOTE(review): body of override_options().  Decodes the -mcpu,
   -mtrap-precision, -mfp-rounding-mode, -mfp-trap-mode and
   -mmemory-latency option strings into the global option variables
   declared above, then sanity-checks the combinations.  The function
   header, braces and several statements are elided from this gappy
   listing; only visible lines are annotated.  */
129 /* Parse target option strings. */
134 /* 971208 -- EV6 scheduling parameters are still secret, so don't even
135 pretend and just schedule for an EV5 for now. -- r~ */
137 = TARGET_CPU_DEFAULT & MASK_CPU_EV6 ? PROCESSOR_EV6
138 : (TARGET_CPU_DEFAULT & MASK_CPU_EV5 ? PROCESSOR_EV5 : PROCESSOR_EV4);
140 if (alpha_cpu_string)
142 if (! strcmp (alpha_cpu_string, "ev4")
143 || ! strcmp (alpha_cpu_string, "21064"))
145 alpha_cpu = PROCESSOR_EV4;
146 target_flags &= ~ (MASK_BWX | MASK_CIX | MASK_MAX);
148 else if (! strcmp (alpha_cpu_string, "ev5")
149 || ! strcmp (alpha_cpu_string, "21164"))
151 alpha_cpu = PROCESSOR_EV5;
152 target_flags &= ~ (MASK_BWX | MASK_CIX | MASK_MAX);
154 else if (! strcmp (alpha_cpu_string, "ev56")
155 || ! strcmp (alpha_cpu_string, "21164a"))
157 alpha_cpu = PROCESSOR_EV5;
158 target_flags |= MASK_BWX;
159 target_flags &= ~ (MASK_CIX | MASK_MAX);
161 else if (! strcmp (alpha_cpu_string, "pca56")
162 || ! strcmp (alpha_cpu_string, "21164PC")
163 || ! strcmp (alpha_cpu_string, "21164pc"))
165 alpha_cpu = PROCESSOR_EV5;
166 target_flags |= MASK_BWX | MASK_MAX;
167 target_flags &= ~ MASK_CIX;
169 else if (! strcmp (alpha_cpu_string, "ev6")
170 || ! strcmp (alpha_cpu_string, "21264"))
172 alpha_cpu = PROCESSOR_EV6;
173 target_flags |= MASK_BWX | MASK_CIX | MASK_MAX;
176 error ("bad value `%s' for -mcpu switch", alpha_cpu_string);
/* Defaults, later overridden by -mieee / explicit option strings.  */
179 alpha_tp = ALPHA_TP_PROG;
180 alpha_fprm = ALPHA_FPRM_NORM;
181 alpha_fptm = ALPHA_FPTM_N;
185 alpha_tp = ALPHA_TP_INSN;
186 alpha_fptm = ALPHA_FPTM_SU;
189 if (TARGET_IEEE_WITH_INEXACT)
191 alpha_tp = ALPHA_TP_INSN;
192 alpha_fptm = ALPHA_FPTM_SUI;
197 if (! strcmp (alpha_tp_string, "p"))
198 alpha_tp = ALPHA_TP_PROG;
199 else if (! strcmp (alpha_tp_string, "f"))
200 alpha_tp = ALPHA_TP_FUNC;
201 else if (! strcmp (alpha_tp_string, "i"))
202 alpha_tp = ALPHA_TP_INSN;
204 error ("bad value `%s' for -mtrap-precision switch", alpha_tp_string);
207 if (alpha_fprm_string)
209 if (! strcmp (alpha_fprm_string, "n"))
210 alpha_fprm = ALPHA_FPRM_NORM;
211 else if (! strcmp (alpha_fprm_string, "m"))
212 alpha_fprm = ALPHA_FPRM_MINF;
213 else if (! strcmp (alpha_fprm_string, "c"))
214 alpha_fprm = ALPHA_FPRM_CHOP;
215 else if (! strcmp (alpha_fprm_string,"d"))
216 alpha_fprm = ALPHA_FPRM_DYN;
218 error ("bad value `%s' for -mfp-rounding-mode switch",
222 if (alpha_fptm_string)
224 if (strcmp (alpha_fptm_string, "n") == 0)
225 alpha_fptm = ALPHA_FPTM_N;
226 else if (strcmp (alpha_fptm_string, "u") == 0)
227 alpha_fptm = ALPHA_FPTM_U;
228 else if (strcmp (alpha_fptm_string, "su") == 0)
229 alpha_fptm = ALPHA_FPTM_SU;
230 else if (strcmp (alpha_fptm_string, "sui") == 0)
231 alpha_fptm = ALPHA_FPTM_SUI;
233 error ("bad value `%s' for -mfp-trap-mode switch", alpha_fptm_string);
236 /* Do some sanity checks on the above option. */
238 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
239 && alpha_tp != ALPHA_TP_INSN)
241 warning ("fp software completion requires -mtrap-precision=i");
242 alpha_tp = ALPHA_TP_INSN;
245 if (TARGET_FLOAT_VAX)
247 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
249 warning ("rounding mode not supported for VAX floats");
250 alpha_fprm = ALPHA_FPRM_NORM;
252 if (alpha_fptm == ALPHA_FPTM_SUI)
254 warning ("trap mode not supported for VAX floats");
255 alpha_fptm = ALPHA_FPTM_SU;
/* -mmemory-latency: a number, "L1".."L3" (cache level), or "main".  */
263 if (!alpha_mlat_string)
264 alpha_mlat_string = "L1";
266 if (isdigit (alpha_mlat_string[0])
267 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
269 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
270 && isdigit (alpha_mlat_string[1])
271 && alpha_mlat_string[2] == '\0')
273 static int const cache_latency[][4] =
275 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
276 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
277 { 3, 13, -1 }, /* ev6 -- Ho hum, doesn't exist yet */
280 lat = alpha_mlat_string[1] - '0';
281 if (lat < 0 || lat > 3 || cache_latency[alpha_cpu][lat-1] == -1)
283 warning ("L%d cache latency unknown for %s",
284 lat, alpha_cpu_name[alpha_cpu]);
288 lat = cache_latency[alpha_cpu][lat-1];
290 else if (! strcmp (alpha_mlat_string, "main"))
292 /* Most current memories have about 370ns latency. This is
293 a reasonable guess for a fast cpu. */
298 warning ("bad value `%s' for -mmemory-latency", alpha_mlat_string);
302 alpha_memory_latency = lat;
305 /* Default the definition of "small data" to 8 bytes. */
/* NOTE(review): zap_mask -- returns 1 iff every byte of VALUE is
   either 0x00 or 0xff (usable as a ZAP/ZAPNOT byte mask).  Function
   header, the per-byte shift of `value', and the return statements
   are elided from this listing.  */
310 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
318 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
320 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
326 /* Returns 1 if OP is either the constant zero or a register. If a
327 register, it must be in the proper mode unless MODE is VOIDmode. */
330 reg_or_0_operand (op, mode)
332 enum machine_mode mode;
334 return op == const0_rtx || register_operand (op, mode);
337 /* Return 1 if OP is a constant in the range of 0-63 (for a shift) or
341 reg_or_6bit_operand (op, mode)
343 enum machine_mode mode;
345 return ((GET_CODE (op) == CONST_INT
346 && (unsigned HOST_WIDE_INT) INTVAL (op) < 64)
347 || register_operand (op, mode));
351 /* Return 1 if OP is an 8-bit constant or any register. */
354 reg_or_8bit_operand (op, mode)
356 enum machine_mode mode;
358 return ((GET_CODE (op) == CONST_INT
359 && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100)
360 || register_operand (op, mode));
363 /* Return 1 if OP is an 8-bit constant. */
366 cint8_operand (op, mode)
368 enum machine_mode mode;
370 return (GET_CODE (op) == CONST_INT
371 && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100);
374 /* Return 1 if the operand is a valid second operand to an add insn. */
377 add_operand (op, mode)
379 enum machine_mode mode;
381 if (GET_CODE (op) == CONST_INT)
382 return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'K')
383 || CONST_OK_FOR_LETTER_P (INTVAL (op), 'L')
384 || CONST_OK_FOR_LETTER_P (INTVAL (op), 'O'));
386 return register_operand (op, mode);
389 /* Return 1 if the operand is a valid second operand to a sign-extending
393 sext_add_operand (op, mode)
395 enum machine_mode mode;
397 if (GET_CODE (op) == CONST_INT)
398 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 255
399 || (unsigned HOST_WIDE_INT) (- INTVAL (op)) < 255);
401 return register_operand (op, mode);
404 /* Return 1 if OP is the constant 4 or 8. */
407 const48_operand (op, mode)
409 enum machine_mode mode;
411 return (GET_CODE (op) == CONST_INT
412 && (INTVAL (op) == 4 || INTVAL (op) == 8));
415 /* Return 1 if OP is a valid first operand to an AND insn. */
418 and_operand (op, mode)
420 enum machine_mode mode;
422 if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)
423 return (zap_mask (CONST_DOUBLE_LOW (op))
424 && zap_mask (CONST_DOUBLE_HIGH (op)));
426 if (GET_CODE (op) == CONST_INT)
427 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
428 || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100
429 || zap_mask (INTVAL (op)));
431 return register_operand (op, mode);
434 /* Return 1 if OP is a valid first operand to an IOR or XOR insn. */
437 or_operand (op, mode)
439 enum machine_mode mode;
441 if (GET_CODE (op) == CONST_INT)
442 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
443 || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100);
445 return register_operand (op, mode);
448 /* Return 1 if OP is a constant that is the width, in bits, of an integral
449 mode smaller than DImode. */
452 mode_width_operand (op, mode)
454 enum machine_mode mode;
456 return (GET_CODE (op) == CONST_INT
457 && (INTVAL (op) == 8 || INTVAL (op) == 16
458 || INTVAL (op) == 32 || INTVAL (op) == 64));
/* NOTE(review): mode_mask_operand -- recognizes the low-part masks
   0xff, 0xffff, 0xffffffff (and, on 64-bit hosts, the all-ones
   DImode mask, possibly represented as a CONST_DOUBLE on 32-bit
   hosts).  The function header, the #else/#endif lines pairing with
   the two #if HOST_BITS_PER_WIDE_INT tests, and the closing of the
   return expression are elided from this listing -- do not edit the
   preprocessor structure without the full file.  */
461 /* Return 1 if OP is a constant that is the width of an integral machine mode
462 smaller than an integer. */
465 mode_mask_operand (op, mode)
467 enum machine_mode mode;
469 #if HOST_BITS_PER_WIDE_INT == 32
470 if (GET_CODE (op) == CONST_DOUBLE)
471 return (CONST_DOUBLE_LOW (op) == -1
472 && (CONST_DOUBLE_HIGH (op) == -1
473 || CONST_DOUBLE_HIGH (op) == 0));
475 if (GET_CODE (op) == CONST_DOUBLE)
476 return (CONST_DOUBLE_LOW (op) == -1 && CONST_DOUBLE_HIGH (op) == 0);
479 return (GET_CODE (op) == CONST_INT
480 && (INTVAL (op) == 0xff
481 || INTVAL (op) == 0xffff
482 || INTVAL (op) == 0xffffffff
483 #if HOST_BITS_PER_WIDE_INT == 64
484 || INTVAL (op) == 0xffffffffffffffff
489 /* Return 1 if OP is a multiple of 8 less than 64. */
492 mul8_operand (op, mode)
494 enum machine_mode mode;
496 return (GET_CODE (op) == CONST_INT
497 && (unsigned HOST_WIDE_INT) INTVAL (op) < 64
498 && (INTVAL (op) & 7) == 0);
501 /* Return 1 if OP is the constant zero in floating-point. */
504 fp0_operand (op, mode)
506 enum machine_mode mode;
508 return (GET_MODE (op) == mode
509 && GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode));
512 /* Return 1 if OP is the floating-point constant zero or a register. */
515 reg_or_fp0_operand (op, mode)
517 enum machine_mode mode;
519 return fp0_operand (op, mode) || register_operand (op, mode);
522 /* Return 1 if OP is a register or a constant integer. */
526 reg_or_cint_operand (op, mode)
528 enum machine_mode mode;
530 return GET_CODE (op) == CONST_INT || register_operand (op, mode);
/* NOTE(review): some_operand -- anything reloadable into a register;
   a MEM need not have a valid address.  The function header, the
   `return 1' for the listed cases, the SUBREG case label, the switch
   default and the final `return 0' are elided from this listing.  */
533 /* Return 1 if OP is something that can be reloaded into a register;
534 if it is a MEM, it need not be valid. */
537 some_operand (op, mode)
539 enum machine_mode mode;
541 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
544 switch (GET_CODE (op))
546 case REG: case MEM: case CONST_DOUBLE:
547 case CONST_INT: case LABEL_REF: case SYMBOL_REF: case CONST:
/* Recurses on the inner expression of a SUBREG.  */
551 return some_operand (SUBREG_REG (op), VOIDmode);
560 /* Return 1 if OP is a valid operand for the source of a move insn. */
563 input_operand (op, mode)
565 enum machine_mode mode;
567 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
570 if (GET_MODE_CLASS (mode) == MODE_FLOAT && GET_MODE (op) != mode)
573 switch (GET_CODE (op))
578 /* This handles both the Windows/NT and OSF cases. */
579 return mode == ptr_mode || mode == DImode;
585 if (register_operand (op, mode))
587 /* ... fall through ... */
589 return ((TARGET_BWX || (mode != HImode && mode != QImode))
590 && general_operand (op, mode));
593 return GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode);
596 return mode == QImode || mode == HImode || add_operand (op, mode);
605 /* Return 1 if OP is a SYMBOL_REF for a function known to be in this
609 current_file_function_operand (op, mode)
611 enum machine_mode mode;
613 return (GET_CODE (op) == SYMBOL_REF
614 && ! profile_flag && ! profile_block_flag
615 && (SYMBOL_REF_FLAG (op)
616 || op == XEXP (DECL_RTL (current_function_decl), 0)));
/* NOTE(review): call_operand -- valid MEM address of a CALL insn: a
   symbol, or a register (restricted to r27, the standard procedure
   value register, except on VMS/NT).  The function header and an
   elided guard between original lines 624 and 629 (presumably a mode
   check) are missing from this listing -- confirm against the full
   file before editing.  */
619 /* Return 1 if OP is a valid operand for the MEM of a CALL insn. */
622 call_operand (op, mode)
624 enum machine_mode mode;
629 return (GET_CODE (op) == SYMBOL_REF
630 || (GET_CODE (op) == REG
631 && (TARGET_OPEN_VMS || TARGET_WINDOWS_NT || REGNO (op) == 27)));
634 /* Return 1 if OP is a valid Alpha comparison operator. Here we know which
635 comparisons are valid in which insn. */
638 alpha_comparison_operator (op, mode)
640 enum machine_mode mode;
642 enum rtx_code code = GET_CODE (op);
644 if (mode != GET_MODE (op) || GET_RTX_CLASS (code) != '<')
647 return (code == EQ || code == LE || code == LT
648 || (mode == DImode && (code == LEU || code == LTU)));
651 /* Return 1 if OP is a valid Alpha swapped comparison operator. */
654 alpha_swapped_comparison_operator (op, mode)
656 enum machine_mode mode;
658 enum rtx_code code = GET_CODE (op);
660 if (mode != GET_MODE (op) || GET_RTX_CLASS (code) != '<')
663 code = swap_condition (code);
664 return (code == EQ || code == LE || code == LT
665 || (mode == DImode && (code == LEU || code == LTU)));
/* NOTE(review): signed_comparison_operator -- 1 for the six signed
   comparison codes.  The function header, the `return 1', the switch
   default and the trailing `return 0' are elided from this listing.  */
668 /* Return 1 if OP is a signed comparison operation. */
671 signed_comparison_operator (op, mode)
673 enum machine_mode mode;
675 switch (GET_CODE (op))
677 case EQ: case NE: case LE: case LT: case GE: case GT:
/* NOTE(review): divmod_operator -- 1 for the four division/modulus
   rtx codes.  The function header, the `return 1', the switch
   default and the trailing `return 0' are elided from this listing.  */
687 /* Return 1 if this is a divide or modulus operator. */
690 divmod_operator (op, mode)
692 enum machine_mode mode;
694 switch (GET_CODE (op))
696 case DIV: case MOD: case UDIV: case UMOD:
/* NOTE(review): aligned_memory_operand -- 1 if OP is a valid MEM
   whose address is a known-aligned register (plus constant).  The
   function header, several `return 0' statements, and the step that
   strips the PLUS down to its base register (between original lines
   739 and 742) are elided from this listing.  */
706 /* Return 1 if this memory address is a known aligned register plus
707 a constant. It must be a valid address. This means that we can do
708 this as an aligned reference plus some offset.
710 Take into account what reload will do.
712 We could say that out-of-range stack slots are alignable, but that would
713 complicate get_aligned_mem and it isn't worth the trouble since few
714 functions have large stack space. */
717 aligned_memory_operand (op, mode)
719 enum machine_mode mode;
721 if (GET_CODE (op) == SUBREG)
723 if (GET_MODE (op) != mode)
725 op = SUBREG_REG (op);
726 mode = GET_MODE (op);
/* During reload a pseudo stands for its memory equivalent.  */
729 if (reload_in_progress && GET_CODE (op) == REG
730 && REGNO (op) >= FIRST_PSEUDO_REGISTER)
731 op = reg_equiv_mem[REGNO (op)];
733 if (GET_CODE (op) != MEM || GET_MODE (op) != mode
734 || ! memory_address_p (mode, XEXP (op, 0)))
739 if (GET_CODE (op) == PLUS)
742 return (GET_CODE (op) == REG
743 && REGNO_POINTER_ALIGN (REGNO (op)) >= 4);
/* NOTE(review): unaligned_memory_operand -- the complement of the
   predicate above: a MEM that is NOT known to be alignable.  The
   function header, intermediate `return' statements, and the PLUS
   base-stripping step are elided from this listing.  */
746 /* Similar, but return 1 if OP is a MEM which is not alignable. */
749 unaligned_memory_operand (op, mode)
751 enum machine_mode mode;
753 if (GET_CODE (op) == SUBREG)
755 if (GET_MODE (op) != mode)
757 op = SUBREG_REG (op);
758 mode = GET_MODE (op);
/* During reload a pseudo stands for its memory equivalent.  */
761 if (reload_in_progress && GET_CODE (op) == REG
762 && REGNO (op) >= FIRST_PSEUDO_REGISTER)
763 op = reg_equiv_mem[REGNO (op)];
765 if (GET_CODE (op) != MEM || GET_MODE (op) != mode)
770 if (! memory_address_p (mode, op))
773 if (GET_CODE (op) == PLUS)
776 return (GET_CODE (op) != REG
777 || REGNO_POINTER_ALIGN (REGNO (op)) < 4);
780 /* Return 1 if OP is either a register or an unaligned memory location. */
783 reg_or_unaligned_mem_operand (op, mode)
785 enum machine_mode mode;
787 return register_operand (op, mode) || unaligned_memory_operand (op, mode);
790 /* Return 1 if OP is any memory location. During reload a pseudo matches. */
793 any_memory_operand (op, mode)
795 enum machine_mode mode;
797 return (GET_CODE (op) == MEM
798 || (GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == REG)
799 || (reload_in_progress && GET_CODE (op) == REG
800 && REGNO (op) >= FIRST_PSEUDO_REGISTER)
801 || (reload_in_progress && GET_CODE (op) == SUBREG
802 && GET_CODE (SUBREG_REG (op)) == REG
803 && REGNO (SUBREG_REG (op)) >= FIRST_PSEUDO_REGISTER));
/* NOTE(review): get_aligned_mem -- splits an alignable reference REF
   into an aligned SImode MEM (*PALIGNED_MEM, address rounded down to
   a multiple of 4) and the bit offset within that word (*PBITNUM).
   The function header, the `rtx base;' declaration, the reload guard
   around the reg_equiv_mem lookup, and the else of the
   reload_in_progress test are elided from this listing.  */
806 /* REF is an alignable memory location. Place an aligned SImode
807 reference into *PALIGNED_MEM and the number of bits to shift into
811 get_aligned_mem (ref, paligned_mem, pbitnum)
813 rtx *paligned_mem, *pbitnum;
816 HOST_WIDE_INT offset = 0;
818 if (GET_CODE (ref) == SUBREG)
820 offset = SUBREG_WORD (ref) * UNITS_PER_WORD;
821 if (BYTES_BIG_ENDIAN)
822 offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (ref)))
823 - MIN (UNITS_PER_WORD,
824 GET_MODE_SIZE (GET_MODE (SUBREG_REG (ref)))));
825 ref = SUBREG_REG (ref);
828 if (GET_CODE (ref) == REG)
829 ref = reg_equiv_mem[REGNO (ref)];
831 if (reload_in_progress)
832 base = find_replacement (&XEXP (ref, 0));
834 base = XEXP (ref, 0);
836 if (GET_CODE (base) == PLUS)
837 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
/* Round the address down to a 4-byte boundary...  */
839 *paligned_mem = gen_rtx_MEM (SImode,
840 plus_constant (base, offset & ~3));
841 MEM_IN_STRUCT_P (*paligned_mem) = MEM_IN_STRUCT_P (ref);
842 MEM_VOLATILE_P (*paligned_mem) = MEM_VOLATILE_P (ref);
843 RTX_UNCHANGING_P (*paligned_mem) = RTX_UNCHANGING_P (ref);
/* ...and return the byte-within-word position as a bit count.  */
845 *pbitnum = GEN_INT ((offset & 3) * 8);
/* NOTE(review): get_unaligned_address -- like get_aligned_mem but
   only returns base + offset + EXTRA_OFFSET as an address rtx.  The
   function header, parameter declarations, `rtx base;', and the
   reload guards are elided from this listing, exactly as in
   get_aligned_mem above.  */
848 /* Similar, but just get the address. Handle the two reload cases.
849 Add EXTRA_OFFSET to the address we return. */
852 get_unaligned_address (ref, extra_offset)
857 HOST_WIDE_INT offset = 0;
859 if (GET_CODE (ref) == SUBREG)
861 offset = SUBREG_WORD (ref) * UNITS_PER_WORD;
862 if (BYTES_BIG_ENDIAN)
863 offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (ref)))
864 - MIN (UNITS_PER_WORD,
865 GET_MODE_SIZE (GET_MODE (SUBREG_REG (ref)))));
866 ref = SUBREG_REG (ref);
869 if (GET_CODE (ref) == REG)
870 ref = reg_equiv_mem[REGNO (ref)];
872 if (reload_in_progress)
873 base = find_replacement (&XEXP (ref, 0));
875 base = XEXP (ref, 0);
877 if (GET_CODE (base) == PLUS)
878 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
880 return plus_constant (base, offset + extra_offset);
/* NOTE(review): alpha_set_memflags_1 -- recursively walks X and
   stamps every MEM with the given in-struct/volatile/unchanging
   flags.  The function header, `rtx x;` and `int i;` declarations,
   the case labels (the visible bodies clearly belong to SEQUENCE,
   INSN, SET and MEM arms) and the switch default are elided from
   this listing.  */
883 /* Subfunction of the following function. Update the flags of any MEM
884 found in part of X. */
887 alpha_set_memflags_1 (x, in_struct_p, volatile_p, unchanging_p)
889 int in_struct_p, volatile_p, unchanging_p;
893 switch (GET_CODE (x))
897 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
898 alpha_set_memflags_1 (XVECEXP (x, 0, i), in_struct_p, volatile_p,
903 alpha_set_memflags_1 (PATTERN (x), in_struct_p, volatile_p,
908 alpha_set_memflags_1 (SET_DEST (x), in_struct_p, volatile_p,
910 alpha_set_memflags_1 (SET_SRC (x), in_struct_p, volatile_p,
915 MEM_IN_STRUCT_P (x) = in_struct_p;
916 MEM_VOLATILE_P (x) = volatile_p;
917 RTX_UNCHANGING_P (x) = unchanging_p;
/* NOTE(review): alpha_set_memflags -- copies REF's memory flags onto
   every MEM inside INSN via the recursive helper above; bails out
   early when REF is not a MEM or all three flags are clear.  The
   function header, parameter declarations and the early `return'
   are elided from this listing.  */
925 /* Given INSN, which is either an INSN or a SEQUENCE generated to
926 perform a memory operation, look for any MEMs in either a SET_DEST or
927 a SET_SRC and copy the in-struct, unchanging, and volatile flags from
928 REF into each of the MEMs found. If REF is not a MEM, don't do
932 alpha_set_memflags (insn, ref)
936 /* Note that it is always safe to get these flags, though they won't
937 be what we think if REF is not a MEM. */
938 int in_struct_p = MEM_IN_STRUCT_P (ref);
939 int volatile_p = MEM_VOLATILE_P (ref);
940 int unchanging_p = RTX_UNCHANGING_P (ref);
942 if (GET_CODE (ref) != MEM
943 || (! in_struct_p && ! volatile_p && ! unchanging_p))
946 alpha_set_memflags_1 (insn, in_struct_p, volatile_p, unchanging_p);
/* NOTE(review): alpha_emit_set_const -- driver that tries the
   internal worker with budgets 1..N insns, returning the first
   success or (presumably) 0 on failure; the function header,
   declarations of `pat' and `i', the loop body's `return pat;' and
   the final return are elided from this listing.  (Typo preserved
   from the original comment: "insns and emitted" should read "insns
   are emitted".)  */
949 /* Try to output insns to set TARGET equal to the constant C if it can be
950 done in less than N insns. Do all computations in MODE. Returns the place
951 where the output has been placed if it can be done and the insns have been
952 emitted. If it would take more than N insns, zero is returned and no
953 insns and emitted. */
956 alpha_emit_set_const (target, mode, c, n)
958 enum machine_mode mode;
965 /* Try 1 insn, then 2, then up to N. */
966 for (i = 1; i <= n; i++)
967 if ((pat = alpha_emit_set_const_1 (target, mode, c, i)) != 0)
/* NOTE(review): alpha_emit_set_const_1 -- the constant-building
   worker: first tries lda/ldah decomposition of sign-extended 32-bit
   constants, then ZAPNOT, complement, and shift-based derivations,
   recursing with a smaller insn budget.  Many lines (function
   header, `subtarget'/`i'/`bits'/`temp' declarations, braces, #endif
   lines and several return paths) are elided from this gappy
   listing; only visible lines are annotated.  */
973 /* Internal routine for the above to check for N or below insns. */
976 alpha_emit_set_const_1 (target, mode, c, n)
978 enum machine_mode mode;
982 HOST_WIDE_INT new = c;
984 /* Use a pseudo if highly optimizing and still generating RTL. */
986 = (flag_expensive_optimizations && rtx_equal_function_value_matters
990 #if HOST_BITS_PER_WIDE_INT == 64
991 /* We are only called for SImode and DImode. If this is SImode, ensure that
992 we are sign extended to a full word. This does not make any sense when
993 cross-compiling on a narrow machine. */
/* Portable sign-extension of the low 32 bits of C.  */
996 c = (c & 0xffffffff) - 2 * (c & 0x80000000);
999 /* If this is a sign-extended 32-bit constant, we can do this in at most
1000 three insns, so do it if we have enough insns left. We always have
1001 a sign-extended 32-bit constant when compiling on a narrow machine. */
1003 if (HOST_BITS_PER_WIDE_INT != 64
1004 || c >> 31 == -1 || c >> 31 == 0)
1006 HOST_WIDE_INT low = (c & 0xffff) - 2 * (c & 0x8000);
1007 HOST_WIDE_INT tmp1 = c - low;
1009 = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1010 HOST_WIDE_INT extra = 0;
1012 /* If HIGH will be interpreted as negative but the constant is
1013 positive, we must adjust it to do two ldha insns. */
1015 if ((high & 0x8000) != 0 && c >= 0)
1019 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1022 if (c == low || (low == 0 && extra == 0))
1024 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1025 but that meant that we can't handle INT_MIN on 32-bit machines
1026 (like NT/Alpha), because we recurse indefinitely through
1027 emit_move_insn to gen_movdi. So instead, since we know exactly
1028 what we want, create it explicitly. */
1031 target = gen_reg_rtx (mode);
1032 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1035 else if (n >= 2 + (extra != 0))
1037 temp = copy_to_suggested_reg (GEN_INT (low), subtarget, mode);
1040 temp = expand_binop (mode, add_optab, temp, GEN_INT (extra << 16),
1041 subtarget, 0, OPTAB_WIDEN);
1043 return expand_binop (mode, add_optab, temp, GEN_INT (high << 16),
1044 target, 0, OPTAB_WIDEN);
1048 /* If we couldn't do it that way, try some other methods. But if we have
1049 no instructions left, don't bother. Likewise, if this is SImode and
1050 we can't make pseudos, we can't do anything since the expand_binop
1051 and expand_unop calls will widen and try to make pseudos. */
1054 || (mode == SImode && ! rtx_equal_function_value_matters))
1057 #if HOST_BITS_PER_WIDE_INT == 64
1058 /* First, see if can load a value into the target that is the same as the
1059 constant except that all bytes that are 0 are changed to be 0xff. If we
1060 can, then we can do a ZAPNOT to obtain the desired constant. */
1062 for (i = 0; i < 64; i += 8)
1063 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1064 new |= (HOST_WIDE_INT) 0xff << i;
1066 /* We are only called for SImode and DImode. If this is SImode, ensure that
1067 we are sign extended to a full word. */
1070 new = (new & 0xffffffff) - 2 * (new & 0x80000000);
1073 && (temp = alpha_emit_set_const (subtarget, mode, new, n - 1)) != 0)
1074 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1075 target, 0, OPTAB_WIDEN);
1078 /* Next, see if we can load a related constant and then shift and possibly
1079 negate it to get the constant we want. Try this once each increasing
1080 numbers of insns. */
1082 for (i = 1; i < n; i++)
1084 /* First try complementing. */
1085 if ((temp = alpha_emit_set_const (subtarget, mode, ~ c, i)) != 0)
1086 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1088 /* Next try to form a constant and do a left shift. We can do this
1089 if some low-order bits are zero; the exact_log2 call below tells
1090 us that information. The bits we are shifting out could be any
1091 value, but here we'll just try the 0- and sign-extended forms of
1092 the constant. To try to increase the chance of having the same
1093 constant in more than one insn, start at the highest number of
1094 bits to shift, but try all possibilities in case a ZAPNOT will
1097 if ((bits = exact_log2 (c & - c)) > 0)
1098 for (; bits > 0; bits--)
1099 if ((temp = (alpha_emit_set_const
1101 (unsigned HOST_WIDE_INT) c >> bits, i))) != 0
1102 || ((temp = (alpha_emit_set_const
1104 ((unsigned HOST_WIDE_INT) c) >> bits, i)))
1106 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1107 target, 0, OPTAB_WIDEN);
1109 /* Now try high-order zero bits. Here we try the shifted-in bits as
1110 all zero and all ones. Be careful to avoid shifting outside the
1111 mode and to avoid shifting outside the host wide int size. */
1112 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1113 confuse the recursive call and set all of the high 32 bits. */
1115 if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1116 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64))) > 0)
1117 for (; bits > 0; bits--)
1118 if ((temp = alpha_emit_set_const (subtarget, mode,
1120 || ((temp = (alpha_emit_set_const
1122 ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
1125 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1126 target, 1, OPTAB_WIDEN);
1128 /* Now try high-order 1 bits. We get that with a sign-extension.
1129 But one bit isn't enough here. Be careful to avoid shifting outside
1130 the mode and to avoid shifting outside the host wide int size. */
1132 if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1133 - floor_log2 (~ c) - 2)) > 0)
1134 for (; bits > 0; bits--)
1135 if ((temp = alpha_emit_set_const (subtarget, mode,
1137 || ((temp = (alpha_emit_set_const
1139 ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
1142 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1143 target, 0, OPTAB_WIDEN);
/* NOTE(review): alpha_emit_set_long_const -- straightforward 4-part
   (d1..d4, each a sign-adjusted 16-bit chunk) decomposition of a
   64-bit constant, used as the fallback when the search above would
   be too slow.  The function header, parameter declarations, braces,
   the shifts that advance `c' between the d1..d4 extractions, and
   some guards are elided from this gappy listing.  */
1149 #if HOST_BITS_PER_WIDE_INT == 64
1150 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1151 fall back to a straight forward decomposition. We do this to avoid
1152 exponential run times encountered when looking for longer sequences
1153 with alpha_emit_set_const. */
1156 alpha_emit_set_long_const (target, c)
1160 /* Use a pseudo if highly optimizing and still generating RTL. */
1162 = (flag_expensive_optimizations && rtx_equal_function_value_matters
1164 HOST_WIDE_INT d1, d2, d3, d4;
1167 /* Decompose the entire word */
1168 d1 = ((c & 0xffff) ^ 0x8000) - 0x8000;
1170 d2 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
1172 d3 = ((c & 0xffff) ^ 0x8000) - 0x8000;
1174 d4 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
1179 /* Construct the high word */
1181 r1 = copy_to_suggested_reg (GEN_INT (d4), subtarget, DImode);
1183 r1 = copy_to_suggested_reg (GEN_INT (d3), subtarget, DImode);
1185 r1 = expand_binop (DImode, add_optab, GEN_INT (d3), GEN_INT (d4),
1186 subtarget, 0, OPTAB_WIDEN);
1188 /* Shift it into place */
1189 r2 = expand_binop (DImode, ashl_optab, r1, GEN_INT (32),
1190 subtarget, 0, OPTAB_WIDEN);
1192 if (subtarget == 0 && d1 == d3 && d2 == d4)
1193 r1 = expand_binop (DImode, add_optab, r1, r2, subtarget, 0, OPTAB_WIDEN);
1198 /* Add in the low word */
1200 r1 = expand_binop (DImode, add_optab, r1, GEN_INT (d2),
1201 subtarget, 0, OPTAB_WIDEN);
1203 r1 = expand_binop (DImode, add_optab, r1, GEN_INT (d1),
1204 subtarget, 0, OPTAB_WIDEN);
1208 r1 = copy_to_suggested_reg(r1, target, DImode);
1212 #endif /* HOST_BITS_PER_WIDE_INT == 64 */
/* NOTE(review): alpha_emit_conditional_move -- turns the saved
   cmpxx operands (alpha_compare_op0/op1) plus the comparison CMP
   into an rtx usable as the condition of an if_then_else, emitting a
   separate compare insn when neither operand is zero.  The function
   header, `rtx tem;', a `return 0;' for the fp/int mismatch case,
   the switch statement opening, a NE/abort arm, and a force_reg of
   op0 in the first case are elided from this gappy listing.  */
1214 /* Rewrite a comparison against zero CMP of the form
1215 (CODE (cc0) (const_int 0)) so it can be written validly in
1216 a conditional move (if_then_else CMP ...).
1217 If both of the operands that set cc0 are non-zero we must emit
1218 an insn to perform the compare (it can't be done within
1219 the conditional move). */
1221 alpha_emit_conditional_move (cmp, mode)
1223 enum machine_mode mode;
1225 enum rtx_code code = GET_CODE (cmp);
1226 enum rtx_code cmov_code = NE;
1227 rtx op0 = alpha_compare_op0;
1228 rtx op1 = alpha_compare_op1;
1229 enum machine_mode cmp_mode
1230 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
1231 enum machine_mode cmp_op_mode = alpha_compare_fp_p ? DFmode : DImode;
1234 if (alpha_compare_fp_p != FLOAT_MODE_P (mode))
1237 /* We may be able to use a conditional move directly.
1238 This avoids emitting spurious compares. */
1239 if (signed_comparison_operator (cmp, cmp_op_mode)
1240 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
1241 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
1243 /* We can't put the comparison insides a conditional move;
1244 emit a compare instruction and put that inside the
1245 conditional move. Make sure we emit only comparisons we have;
1246 swap or reverse as necessary. */
1250 case EQ: case LE: case LT: case LEU: case LTU:
1251 /* We have these compares: */
1255 /* This must be reversed. */
1256 code = reverse_condition (code);
1260 case GE: case GT: case GEU: case GTU:
1261 /* These must be swapped. Make sure the new first operand is in
1263 code = swap_condition (code);
1264 tem = op0, op0 = op1, op1 = tem;
1265 op0 = force_reg (cmp_mode, op0);
/* Emit the compare into a scratch; the cmov then tests it vs 0.  */
1272 tem = gen_reg_rtx (cmp_op_mode);
1273 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
1274 return gen_rtx_fmt_ee (cmov_code, VOIDmode, tem, CONST0_RTX (cmp_op_mode));
/* NOTE(review): alpha_expand_unaligned_load -- loads SIZE (2, 4 or
   8) bytes from unaligned MEM into TGT using the two-ldq_u +
   extXl/extXh + or idiom from the Architecture Handbook, with a
   special arithmetic-shift path for sign-extended 16-bit loads.  The
   function header, `int sign;', the AND masks (~7) completing the
   gen_rtx_AND addresses, the switch on `size', `mode = ...'
   assignments, and several braces are elided from this listing.  */
1277 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
1281 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
1282 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
1283 lda r3,X(r11) lda r3,X+2(r11)
1284 extwl r1,r3,r1 extql r1,r3,r1
1285 extwh r2,r3,r2 extqh r2,r3,r2
1286 or r1.r2.r1 or r1,r2,r1
1289 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
1290 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
1291 lda r3,X(r11) lda r3,X(r11)
1292 extll r1,r3,r1 extll r1,r3,r1
1293 extlh r2,r3,r2 extlh r2,r3,r2
1294 or r1.r2.r1 addl r1,r2,r1
1296 quad: ldq_u r1,X(r11)
1305 alpha_expand_unaligned_load (tgt, mem, size, ofs, sign)
1307 HOST_WIDE_INT size, ofs;
1310 rtx meml, memh, addr, extl, exth;
1311 enum machine_mode mode;
1313 meml = gen_reg_rtx (DImode);
1314 memh = gen_reg_rtx (DImode);
1315 addr = gen_reg_rtx (DImode);
1316 extl = gen_reg_rtx (DImode);
1317 exth = gen_reg_rtx (DImode);
/* Load the two quadwords covering the unaligned datum.  */
1319 emit_move_insn (meml,
1320 change_address (mem, DImode,
1321 gen_rtx_AND (DImode,
1322 plus_constant (XEXP (mem, 0),
1326 emit_move_insn (memh,
1327 change_address (mem, DImode,
1328 gen_rtx_AND (DImode,
1329 plus_constant (XEXP (mem, 0),
1333 if (sign && size == 2)
1335 emit_move_insn (addr, plus_constant (XEXP (mem, 0), ofs+2));
1337 emit_insn (gen_extxl (extl, meml, GEN_INT (64), addr));
1338 emit_insn (gen_extqh (exth, memh, addr));
/* extql/extqh place the halfword at the top; shift right 48 to
   sign-extend.  */
1340 addr = expand_binop (DImode, ior_optab, extl, exth, addr, 1, OPTAB_WIDEN);
1341 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
1342 addr, 1, OPTAB_WIDEN);
1346 emit_move_insn (addr, plus_constant (XEXP (mem, 0), ofs));
1347 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
1351 emit_insn (gen_extwh (exth, memh, addr));
1356 emit_insn (gen_extlh (exth, memh, addr));
1361 emit_insn (gen_extqh (exth, memh, addr));
1366 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
1367 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
1372 emit_move_insn (tgt, gen_lowpart(GET_MODE (tgt), addr));
1375 /* Similarly, use ins and msk instructions to perform unaligned stores. */
/* Emit RTL for an unaligned store of SIZE bytes (2, 4, or 8) of SRC into
   DST at byte offset OFS.  Read-modify-write: load the two covering
   quadwords, mask out the bytes being replaced (msk insns), shift the
   source bytes into position (ins insns), IOR, and store back.
   SRC == const0_rtx is special-cased: no ins/ior is needed to store zeros.
   NOTE(review): this listing is an excerpt; some lines are not visible.  */
1378 alpha_expand_unaligned_store (dst, src, size, ofs)
1380 HOST_WIDE_INT size, ofs;
1382 rtx dstl, dsth, addr, insl, insh, meml, memh;
1384 dstl = gen_reg_rtx (DImode);
1385 dsth = gen_reg_rtx (DImode);
1386 insl = gen_reg_rtx (DImode);
1387 insh = gen_reg_rtx (DImode);
/* The two aligned quadwords that cover the destination bytes.  */
1389 meml = change_address (dst, DImode,
1390 gen_rtx_AND (DImode,
1391 plus_constant (XEXP (dst, 0), ofs),
1393 memh = change_address (dst, DImode,
1394 gen_rtx_AND (DImode,
1395 plus_constant (XEXP (dst, 0),
1399 emit_move_insn (dsth, memh);
1400 emit_move_insn (dstl, meml);
1401 addr = copy_addr_to_reg (plus_constant (XEXP (dst, 0), ofs));
/* Position the source bytes: high part via insxh, low part via the
   size-specific inswl/insll/insql.  Skipped entirely when storing zero.  */
1403 if (src != const0_rtx)
1405 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
1406 GEN_INT (size*8), addr));
1411 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
1414 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
1417 emit_insn (gen_insql (insl, src, addr));
/* Clear the destination bytes that will be overwritten.  The low-part
   mask literal depends on SIZE; the 64-bit all-ones mask needs
   immed_double_const when the host wide int is only 32 bits.  */
1422 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
1427 emit_insn (gen_mskxl (dstl, dstl, GEN_INT (0xffff), addr));
1430 emit_insn (gen_mskxl (dstl, dstl, GEN_INT (0xffffffff), addr));
1434 #if HOST_BITS_PER_WIDE_INT == 32
1435 rtx msk = immed_double_const (0xffffffff, 0xffffffff, DImode);
1437 rtx msk = immed_double_const (0xffffffffffffffff, 0, DImode);
1439 emit_insn (gen_mskxl (dstl, dstl, msk, addr));
1444 if (src != const0_rtx)
1446 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
1447 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
1450 /* Must store high before low for degenerate case of aligned. */
1451 emit_move_insn (memh, dsth);
1452 emit_move_insn (meml, dstl);
1455 /* The block move code tries to maximize speed by separating loads and
1456 stores at the expense of register pressure: we load all of the data
1457 before we store it back out. There are two secondary effects worth
1458 mentioning, that this speeds copying to/from aligned and unaligned
1459 buffers, and that it makes the code significantly easier to write. */
1461 #define MAX_MOVE_WORDS 8
1463 /* Load an integral number of consecutive unaligned quadwords. */
/* Load WORDS consecutive unaligned quadwords starting at byte offset OFS
   in SMEM into OUT_REGS[0..WORDS-1].  Loads WORDS+1 covering aligned
   quadwords, then stitches adjacent pairs together with extxl/extqh/ior.
   NOTE(review): this listing is an excerpt; some lines are not visible.  */
1466 alpha_expand_unaligned_load_words (out_regs, smem, words, ofs)
1469 HOST_WIDE_INT words, ofs;
1471 rtx const im8 = GEN_INT (-8);
1472 rtx const i64 = GEN_INT (64);
1473 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
1477 /* Generate all the tmp registers we need. */
1478 for (i = 0; i < words; ++i)
1480 data_regs[i] = out_regs[i];
1481 ext_tmps[i] = gen_reg_rtx (DImode);
/* One extra covering quadword past the last requested word.  */
1483 data_regs[words] = gen_reg_rtx (DImode);
1486 smem = change_address (smem, GET_MODE (smem),
1487 plus_constant (XEXP (smem, 0), ofs));
1489 /* Load up all of the source data. */
1490 for (i = 0; i < words; ++i)
1492 emit_move_insn (data_regs[i],
1493 change_address (smem, DImode,
1494 gen_rtx_AND (DImode,
1495 plus_constant (XEXP(smem,0),
1499 emit_move_insn (data_regs[words],
1500 change_address (smem, DImode,
1501 gen_rtx_AND (DImode,
1502 plus_constant (XEXP(smem,0),
1506 /* Extract the half-word fragments. Unfortunately DEC decided to make
1507 extxh with offset zero a noop instead of zeroing the register, so
1508 we must take care of that edge condition ourselves with cmov. */
1510 sreg = copy_addr_to_reg (XEXP (smem, 0));
/* areg = sreg & 7: the within-quadword misalignment, used to detect
   the aligned (offset-zero) case the extqh no-op would mishandle.  */
1511 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
1513 for (i = 0; i < words; ++i)
1515 emit_insn (gen_extxl (data_regs[i], data_regs[i], i64, sreg));
1517 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
/* Conditional move: if the address was actually aligned, force the
   high fragment to zero instead of the extqh no-op result.  */
1518 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
1519 gen_rtx_IF_THEN_ELSE (DImode,
1520 gen_rtx_EQ (DImode, areg,
1522 const0_rtx, ext_tmps[i])));
1525 /* Merge the half-words into whole words. */
1526 for (i = 0; i < words; ++i)
1528 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
1529 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
1533 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
1534 may be NULL to store zeros. */
/* Store WORDS consecutive unaligned quadwords from DATA_REGS to DMEM at
   byte offset OFS.  DATA_REGS may be NULL to store zeros.  Only the two
   end quadwords need read-modify-write (msk + ior); the interior aligned
   quadwords are written directly.
   NOTE(review): this listing is an excerpt; some lines are not visible.  */
1537 alpha_expand_unaligned_store_words (data_regs, dmem, words, ofs)
1540 HOST_WIDE_INT words, ofs;
1542 rtx const im8 = GEN_INT (-8);
1543 rtx const i64 = GEN_INT (64);
/* All-ones DImode mask; built with immed_double_const when the host
   wide int is narrower than 64 bits.  */
1544 #if HOST_BITS_PER_WIDE_INT == 32
1545 rtx const im1 = immed_double_const (0xffffffff, 0xffffffff, DImode);
1547 rtx const im1 = immed_double_const (0xffffffffffffffff, 0, DImode);
1549 rtx ins_tmps[MAX_MOVE_WORDS];
1550 rtx st_tmp_1, st_tmp_2, dreg;
1551 rtx st_addr_1, st_addr_2;
1554 /* Generate all the tmp registers we need. */
1555 if (data_regs != NULL)
1556 for (i = 0; i < words; ++i)
1557 ins_tmps[i] = gen_reg_rtx(DImode);
1558 st_tmp_1 = gen_reg_rtx(DImode);
1559 st_tmp_2 = gen_reg_rtx(DImode);
1562 dmem = change_address (dmem, GET_MODE (dmem),
1563 plus_constant (XEXP (dmem, 0), ofs));
/* Aligned addresses of the last (st_addr_2) and first (st_addr_1)
   covering quadwords.  */
1566 st_addr_2 = change_address (dmem, DImode,
1567 gen_rtx_AND (DImode,
1568 plus_constant (XEXP(dmem,0),
1571 st_addr_1 = change_address (dmem, DImode,
1572 gen_rtx_AND (DImode,
1576 /* Load up the destination end bits. */
1577 emit_move_insn (st_tmp_2, st_addr_2);
1578 emit_move_insn (st_tmp_1, st_addr_1);
1580 /* Shift the input data into place. */
1581 dreg = copy_addr_to_reg (XEXP (dmem, 0));
1582 if (data_regs != NULL)
1584 for (i = words-1; i >= 0; --i)
1586 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
1587 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
/* Combine each word's low part with the previous word's high part.  */
1589 for (i = words-1; i > 0; --i)
1591 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
1592 ins_tmps[i-1], ins_tmps[i-1], 1,
1597 /* Split and merge the ends with the destination data. */
1598 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
1599 emit_insn (gen_mskxl (st_tmp_1, st_tmp_1, im1, dreg));
1601 if (data_regs != NULL)
1603 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
1604 st_tmp_2, 1, OPTAB_WIDEN);
1605 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
1606 st_tmp_1, 1, OPTAB_WIDEN);
/* Store everything high-to-low so the degenerate aligned case (where
   high and low ends coincide with interior words) comes out right.  */
1610 emit_move_insn (st_addr_2, st_tmp_2);
1611 for (i = words-1; i > 0; --i)
1613 emit_move_insn (change_address (dmem, DImode,
1614 gen_rtx_AND (DImode,
1615 plus_constant(XEXP (dmem,0),
1618 data_regs ? ins_tmps[i-1] : const0_rtx);
1620 emit_move_insn (st_addr_1, st_tmp_1);
1624 /* Expand string/block move operations.
1626 operands[0] is the pointer to the destination.
1627 operands[1] is the pointer to the source.
1628 operands[2] is the number of bytes to move.
1629 operands[3] is the alignment. */
/* Expand a block move (see the operand layout in the comment above).
   Strategy per the header comment: load the ENTIRE block into registers
   first (choosing chunk sizes from the source alignment), then store it
   all back out (choosing chunk sizes from the destination alignment).
   Bails out (on an elided line) when bytes > MAX_MOVE_WORDS*8.
   NOTE(review): this listing is an excerpt; many lines, including the
   return statements and several loop bodies, are not visible.  */
1632 alpha_expand_block_move (operands)
1635 rtx bytes_rtx = operands[2];
1636 rtx align_rtx = operands[3];
1637 HOST_WIDE_INT bytes = INTVAL (bytes_rtx);
1638 HOST_WIDE_INT src_align = INTVAL (align_rtx);
1639 HOST_WIDE_INT dst_align = src_align;
1640 rtx orig_src = operands[1];
1641 rtx orig_dst = operands[0];
1642 rtx data_regs[2*MAX_MOVE_WORDS+16];
1644 int i, words, ofs, nregs = 0;
1648 if (bytes > MAX_MOVE_WORDS*8)
1651 /* Look for additional alignment information from recorded register info. */
1653 tmp = XEXP (orig_src, 0);
1654 if (GET_CODE (tmp) == REG)
1656 if (REGNO_POINTER_ALIGN (REGNO (tmp)) > src_align)
1657 src_align = REGNO_POINTER_ALIGN (REGNO (tmp));
/* (reg + const) address: alignment is limited by both the base
   register's known alignment and the constant offset.  */
1659 else if (GET_CODE (tmp) == PLUS
1660 && GET_CODE (XEXP (tmp, 0)) == REG
1661 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
1663 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
1664 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
1668 if (a >= 8 && c % 8 == 0)
1670 else if (a >= 4 && c % 4 == 0)
1672 else if (a >= 2 && c % 2 == 0)
/* Same analysis for the destination address.  */
1677 tmp = XEXP (orig_dst, 0);
1678 if (GET_CODE (tmp) == REG)
1680 if (REGNO_POINTER_ALIGN (REGNO (tmp)) > dst_align)
1681 dst_align = REGNO_POINTER_ALIGN (REGNO (tmp));
1683 else if (GET_CODE (tmp) == PLUS
1684 && GET_CODE (XEXP (tmp, 0)) == REG
1685 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
1687 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
1688 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
1692 if (a >= 8 && c % 8 == 0)
1694 else if (a >= 4 && c % 4 == 0)
1696 else if (a >= 2 && c % 2 == 0)
1702 * Load the entire block into registers.
/* A source living in a pseudo (ADDRESSOF) may be readable without
   touching memory at all.  */
1705 if (GET_CODE (XEXP (orig_src, 0)) == ADDRESSOF)
1707 enum machine_mode mode;
1708 tmp = XEXP (XEXP (orig_src, 0), 0);
1710 mode = mode_for_size (bytes, MODE_INT, 1);
1712 && GET_MODE_SIZE (GET_MODE (tmp)) <= bytes)
1714 /* Whee! Optimize the load to use the existing register. */
1715 data_regs[nregs++] = gen_lowpart (mode, tmp);
1719 /* ??? We could potentially be copying 3 bytes or whatnot from
1720 a wider reg. Probably not worth worrying about. */
1721 /* No appropriate mode; fall back on memory. */
1722 orig_src = change_address (orig_src, GET_MODE (orig_src),
1723 copy_addr_to_reg (XEXP (orig_src, 0)));
/* Aligned source: read whole quadwords...  */
1727 if (src_align >= 8 && bytes >= 8)
1731 for (i = 0; i < words; ++i)
1732 data_regs[nregs+i] = gen_reg_rtx(DImode);
1734 for (i = 0; i < words; ++i)
1736 emit_move_insn (data_regs[nregs+i],
1737 change_address(orig_src, DImode,
1738 plus_constant (XEXP (orig_src, 0),
/* ...or whole longwords at 4-byte alignment.  */
1746 if (src_align >= 4 && bytes >= 4)
1750 for (i = 0; i < words; ++i)
1751 data_regs[nregs+i] = gen_reg_rtx(SImode);
1753 for (i = 0; i < words; ++i)
1755 emit_move_insn (data_regs[nregs+i],
1756 change_address(orig_src, SImode,
1757 plus_constant (XEXP (orig_src, 0),
/* Unaligned bulk: read words+1 covering quadwords via the helper.  */
1769 for (i = 0; i < words+1; ++i)
1770 data_regs[nregs+i] = gen_reg_rtx(DImode);
1772 alpha_expand_unaligned_load_words(data_regs+nregs, orig_src, words, ofs);
/* Unaligned straggler pieces, largest first.  */
1778 if (!TARGET_BWX && bytes >= 8)
1780 data_regs[nregs++] = tmp = gen_reg_rtx (DImode);
1781 alpha_expand_unaligned_load (tmp, orig_src, 8, ofs, 0);
1785 if (!TARGET_BWX && bytes >= 4)
1787 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
1788 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
/* Aligned halfword stragglers (guard condition on an elided line).  */
1797 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
1798 emit_move_insn (tmp,
1799 change_address (orig_src, HImode,
1800 plus_constant (XEXP (orig_src, 0),
1804 } while (bytes >= 2);
1806 else if (!TARGET_BWX)
1808 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
1809 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
/* Remaining single bytes.  */
1816 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
1817 emit_move_insn (tmp,
1818 change_address (orig_src, QImode,
1819 plus_constant (XEXP (orig_src, 0),
/* Sanity check: we sized data_regs[] for the worst case above.  */
1826 if (nregs > sizeof(data_regs)/sizeof(*data_regs))
1830 * Now save it back out again.
/* Destination in a pseudo: a single matching register can be written
   directly.  */
1835 if (GET_CODE (XEXP (orig_dst, 0)) == ADDRESSOF)
1837 enum machine_mode mode;
1838 tmp = XEXP (XEXP (orig_dst, 0), 0);
1840 mode = mode_for_size (bytes, MODE_INT, 1);
1841 if (GET_MODE (tmp) == mode && nregs == 1)
1843 emit_move_insn (tmp, data_regs[0]);
1848 /* ??? If nregs > 1, consider reconstructing the word in regs. */
1849 /* ??? Optimize mode < dst_mode with strict_low_part. */
1850 /* No appropriate mode; fall back on memory. */
1851 orig_dst = change_address (orig_dst, GET_MODE (orig_dst),
1852 copy_addr_to_reg (XEXP (orig_dst, 0)));
1855 /* Write out the data in whatever chunks reading the source allowed. */
/* Aligned destination: whole quadwords first...  */
1858 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
1860 emit_move_insn (change_address(orig_dst, DImode,
1861 plus_constant (XEXP (orig_dst, 0),
1870 /* If the source has remaining DImode regs, write them out in
/* ...split each DImode reg into two SImode stores, high half obtained
   by a logical shift right of 32.  */
1872 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
1874 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
1875 NULL_RTX, 1, OPTAB_WIDEN);
1877 emit_move_insn (change_address(orig_dst, SImode,
1878 plus_constant (XEXP (orig_dst, 0),
1880 gen_lowpart (SImode, data_regs[i]));
1881 emit_move_insn (change_address(orig_dst, SImode,
1882 plus_constant (XEXP (orig_dst, 0),
1884 gen_lowpart (SImode, tmp));
1889 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
1891 emit_move_insn (change_address(orig_dst, SImode,
1892 plus_constant (XEXP (orig_dst, 0),
/* Unaligned destination: batch consecutive DImode regs through
   the store-words helper, or a single reg through the scalar helper.  */
1899 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
1901 /* Write out a remaining block of words using unaligned methods. */
1903 for (words = 1; i+words < nregs ; ++words)
1904 if (GET_MODE (data_regs[i+words]) != DImode)
1908 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
1910 alpha_expand_unaligned_store_words (data_regs+i, orig_dst, words, ofs);
1916 /* Due to the above, this won't be aligned. */
1917 /* ??? If we have more than one of these, consider constructing full
1918 words in registers and using alpha_expand_unaligned_store_words. */
1919 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
1921 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
/* Halfword stragglers: direct moves when aligned...  */
1927 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
1929 emit_move_insn (change_address (orig_dst, HImode,
1930 plus_constant (XEXP (orig_dst, 0),
/* ...or the unaligned helper otherwise.  */
1937 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
1939 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
/* Trailing single bytes.  */
1943 while (i < nregs && GET_MODE (data_regs[i]) == QImode)
1945 emit_move_insn (change_address (orig_dst, QImode,
1946 plus_constant (XEXP (orig_dst, 0),
/* Expand a block clear: operands[0] = destination MEM, operands[1] =
   byte count, operands[2] = alignment.  Stores zeros using the widest
   aligned chunks available, falling back to the unaligned-store helpers
   (with src == const0_rtx / data_regs == NULL) for the rest.
   Bails out (on an elided line) when bytes > MAX_MOVE_WORDS*8.
   NOTE(review): this listing is an excerpt; some lines are not visible.  */
1961 alpha_expand_block_clear (operands)
1964 rtx bytes_rtx = operands[1];
1965 rtx align_rtx = operands[2];
1966 HOST_WIDE_INT bytes = INTVAL (bytes_rtx);
1967 HOST_WIDE_INT align = INTVAL (align_rtx);
1968 rtx orig_dst = operands[0];
1970 HOST_WIDE_INT i, words, ofs = 0;
1974 if (bytes > MAX_MOVE_WORDS*8)
1977 /* Look for stricter alignment. */
1979 tmp = XEXP (orig_dst, 0);
1980 if (GET_CODE (tmp) == REG)
1982 if (REGNO_POINTER_ALIGN (REGNO (tmp)) > align)
1983 align = REGNO_POINTER_ALIGN (REGNO (tmp));
/* (reg + const): alignment limited by both base and offset.  */
1985 else if (GET_CODE (tmp) == PLUS
1986 && GET_CODE (XEXP (tmp, 0)) == REG
1987 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
1989 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
1990 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
1994 if (a >= 8 && c % 8 == 0)
1996 else if (a >= 4 && c % 4 == 0)
1998 else if (a >= 2 && c % 2 == 0)
2003 /* Handle a block of contiguous words first. */
2005 if (align >= 8 && bytes >= 8)
2009 for (i = 0; i < words; ++i)
2011 emit_move_insn (change_address(orig_dst, DImode,
2012 plus_constant (XEXP (orig_dst, 0),
2020 else if (align >= 4 && bytes >= 4)
2024 for (i = 0; i < words; ++i)
2026 emit_move_insn (change_address(orig_dst, SImode,
2027 plus_constant (XEXP (orig_dst, 0),
2035 else if (bytes >= 16)
/* NOTE(review): alpha_expand_unaligned_store_words is defined above
   with four parameters (data_regs, dmem, words, ofs); this call passes
   only three — presumably the trailing `, ofs` was lost; confirm
   against the full source.  */
2039 alpha_expand_unaligned_store_words (NULL, orig_dst, words);
2045 /* Next clean up any trailing pieces. We know from the contiguous
2046 block move that there are no aligned SImode or DImode hunks left. */
2048 if (!TARGET_BWX && bytes >= 8)
2050 alpha_expand_unaligned_store (orig_dst, const0_rtx, 8, ofs);
2054 if (!TARGET_BWX && bytes >= 4)
2056 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
/* Aligned halfword zeros (guard condition on an elided line).  */
2065 emit_move_insn (change_address (orig_dst, HImode,
2066 plus_constant (XEXP (orig_dst, 0),
2071 } while (bytes >= 2);
2073 else if (!TARGET_BWX)
2075 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
/* Trailing single bytes.  */
2082 emit_move_insn (change_address (orig_dst, QImode,
2083 plus_constant (XEXP (orig_dst, 0),
2094 /* Adjust the cost of a scheduling dependency. Return the new cost of
2095 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
/* Scheduler hook: adjust COST of the dependency LINK between INSN and
   DEP_INSN, modelling Alpha pipeline bypasses and latencies.  The EV5
   vs EV4 split is handled by surrounding code on elided lines.
   NOTE(review): this listing is an excerpt; the early returns and some
   case labels are not visible here.  */
2098 alpha_adjust_cost (insn, link, dep_insn, cost)
2105 enum attr_type insn_type, dep_insn_type;
2107 /* If the dependence is an anti-dependence, there is no cost. For an
2108 output dependence, there is sometimes a cost, but it doesn't seem
2109 worth handling those few cases. */
2111 if (REG_NOTE_KIND (link) != 0)
2114 /* If we can't recognize the insns, we can't really do anything. */
2115 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
2118 insn_type = get_attr_type (insn);
2119 dep_insn_type = get_attr_type (dep_insn);
2121 /* Bring in the user-defined memory latency. */
2122 if (dep_insn_type == TYPE_ILD
2123 || dep_insn_type == TYPE_FLD
2124 || dep_insn_type == TYPE_LDSYM)
2125 cost += alpha_memory_latency-1;
2130 /* On EV4, if INSN is a store insn and DEP_INSN is setting the data
2131 being stored, we can sometimes lower the cost. */
2133 if ((insn_type == TYPE_IST || insn_type == TYPE_FST)
2134 && (set = single_set (dep_insn)) != 0
2135 && GET_CODE (PATTERN (insn)) == SET
2136 && rtx_equal_p (SET_DEST (set), SET_SRC (PATTERN (insn))))
2138 switch (dep_insn_type)
2142 /* No savings here. */
2146 /* In these cases, we save one cycle. */
2150 /* In all other cases, we save two cycles. */
2151 return MAX (0, cost - 2);
2155 /* Another case that needs adjustment is an arithmetic or logical
2156 operation. Its cost is usually one cycle, but we default it to
2157 two in the MD file. The only case that it is actually two is
2158 for the address in loads, stores, and jumps. */
2160 if (dep_insn_type == TYPE_IADD || dep_insn_type == TYPE_ILOG)
2175 /* The final case is when a compare feeds into an integer branch;
2176 the cost is only one cycle in that case. */
2178 if (dep_insn_type == TYPE_ICMP && insn_type == TYPE_IBR)
2183 /* And the lord DEC saith: "A special bypass provides an effective
2184 latency of 0 cycles for an ICMP or ILOG insn producing the test
2185 operand of an IBR or ICMOV insn." */
2187 if ((dep_insn_type == TYPE_ICMP || dep_insn_type == TYPE_ILOG)
2188 && (set = single_set (dep_insn)) != 0)
2190 /* A branch only has one input. This must be it. */
2191 if (insn_type == TYPE_IBR)
2193 /* A conditional move has three, make sure it is the test. */
2194 if (insn_type == TYPE_ICMOV
2195 && GET_CODE (set_src = PATTERN (insn)) == SET
2196 && GET_CODE (set_src = SET_SRC (set_src)) == IF_THEN_ELSE
2197 && rtx_equal_p (SET_DEST (set), XEXP (set_src, 0)))
2201 /* "The multiplier is unable to receive data from IEU bypass paths.
2202 The instruction issues at the expected time, but its latency is
2203 increased by the time it takes for the input data to become
2204 available to the multiplier" -- which happens in pipeline stage
2205 six, when results are committed to the register file. */
2207 if (insn_type == TYPE_IMUL)
2209 switch (dep_insn_type)
2211 /* These insns produce their results in pipeline stage five. */
2218 /* Other integer insns produce results in pipeline stage four. */
2226 /* There is additional latency to move the result of (most) FP
2227 operations anywhere but the FP register file. */
2229 if ((insn_type == TYPE_FST || insn_type == TYPE_FTOI)
2230 && (dep_insn_type == TYPE_FADD ||
2231 dep_insn_type == TYPE_FMUL ||
2232 dep_insn_type == TYPE_FCMOV))
2238 /* Otherwise, return the default cost. */
2242 /* Functions to save and restore alpha_return_addr_rtx. */
2244 struct machine_function
/* Hook run when compilation of a nested function begins: allocate a
   machine_function record on P and stash alpha_return_addr_rtx in it so
   the outer function's value survives.  Paired with
   alpha_restore_machine_status below.  */
2250 alpha_save_machine_status (p)
2253 struct machine_function *machine =
2254 (struct machine_function *) xmalloc (sizeof (struct machine_function));
2256 p->machine = machine;
2257 machine->ra_rtx = alpha_return_addr_rtx;
/* Hook run when returning to an outer function: restore
   alpha_return_addr_rtx from P's machine_function record and drop the
   record (the xmalloc'd memory is freed on an elided line, presumably).  */
2261 alpha_restore_machine_status (p)
2264 struct machine_function *machine = p->machine;
2266 alpha_return_addr_rtx = machine->ra_rtx;
2269 p->machine = (struct machine_function *)0;
2272 /* Do anything needed before RTL is emitted for each function. */
/* Per-function initialization before any RTL is emitted: reset the
   cached return-address rtx and install the save/restore hooks used for
   nested functions.  */
2275 alpha_init_expanders ()
2277 alpha_return_addr_rtx = NULL_RTX;
2279 /* Arrange to save and restore machine status around nested functions. */
2280 save_machine_status = alpha_save_machine_status;
2281 restore_machine_status = alpha_restore_machine_status;
2284 /* Start the ball rolling with RETURN_ADDR_RTX. */
/* Implement RETURN_ADDR_RTX.  Returns (and lazily creates) a pseudo
   holding the current function's return address, copied from $26
   (REG_RA) at the top of the prologue.  COUNT/FRAME handling for
   non-zero counts is on elided lines.  */
2287 alpha_return_addr (count, frame)
/* Already materialized once for this function -- reuse it.  */
2296 if (alpha_return_addr_rtx)
2297 return alpha_return_addr_rtx;
2299 /* No rtx yet. Invent one, and initialize it from $26 in the prologue. */
2300 alpha_return_addr_rtx = gen_reg_rtx (Pmode);
2301 init = gen_rtx_SET (Pmode, alpha_return_addr_rtx,
2302 gen_rtx_REG (Pmode, REG_RA));
2304 /* Emit the insn to the prologue with the other argument copies. */
2305 push_topmost_sequence ();
2306 emit_insn_after (init, get_insns ());
2307 pop_topmost_sequence ();
2309 return alpha_return_addr_rtx;
/* Return nonzero if $26 (REG_RA) is ever clobbered in the current
   function.  When alpha_return_addr created a copy insn, regs_ever_live
   is not reliable, so scan the insn stream instead.  */
2313 alpha_ra_ever_killed ()
2315 if (!alpha_return_addr_rtx)
2316 return regs_ever_live[REG_RA];
2318 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA),
2319 get_insns(), NULL_RTX);
2323 /* Print an operand. Recognize special options, documented below. */
/* Implement PRINT_OPERAND: write operand X to FILE for the assembler,
   interpreted per the punctuation/letter CODE.  The dispatching switch
   and many case labels sit on elided lines; the comments below document
   each visible case.
   NOTE(review): this listing is an excerpt of the original function.  */
2326 print_operand (file, x, code)
2336 /* Generates fp-rounding mode suffix: nothing for normal, 'c' for
2337 chopped, 'm' for minus-infinity, and 'd' for dynamic rounding
2338 mode. alpha_fprm controls which suffix is generated. */
2341 case ALPHA_FPRM_NORM:
2343 case ALPHA_FPRM_MINF:
2346 case ALPHA_FPRM_CHOP:
2349 case ALPHA_FPRM_DYN:
2356 /* Generates trap-mode suffix for instructions that accept the su
2357 suffix only (cmpt et al). */
2358 if (alpha_tp == ALPHA_TP_INSN)
2363 /* Generates trap-mode suffix for instructions that accept the u, su,
2364 and sui suffix. This is the bulk of the IEEE floating point
2365 instructions (addt et al). */
2376 case ALPHA_FPTM_SUI:
2377 fputs ("sui", file);
2383 /* Generates trap-mode suffix for instructions that accept the sui
2384 suffix (cvtqt and cvtqs). */
2387 case ALPHA_FPTM_N: case ALPHA_FPTM_U:
2388 case ALPHA_FPTM_SU: /* cvtqt/cvtqs can't cause underflow */
2390 case ALPHA_FPTM_SUI:
2391 fputs ("sui", file);
2397 /* Generates single precision instruction suffix. */
2398 fprintf (file, "%c", (TARGET_FLOAT_VAX ? 'f' : 's'));
2402 /* Generates double precision instruction suffix. */
2403 fprintf (file, "%c", (TARGET_FLOAT_VAX ? 'g' : 't'));
2407 /* If this operand is the constant zero, write it as "$31". */
2408 if (GET_CODE (x) == REG)
2409 fprintf (file, "%s", reg_names[REGNO (x)]);
2410 else if (x == CONST0_RTX (GET_MODE (x)))
2411 fprintf (file, "$31");
2413 output_operand_lossage ("invalid %%r value");
2418 /* Similar, but for floating-point. */
2419 if (GET_CODE (x) == REG)
2420 fprintf (file, "%s", reg_names[REGNO (x)]);
2421 else if (x == CONST0_RTX (GET_MODE (x)))
2422 fprintf (file, "$f31");
2424 output_operand_lossage ("invalid %%R value");
2429 /* Write the 1's complement of a constant. */
2430 if (GET_CODE (x) != CONST_INT)
2431 output_operand_lossage ("invalid %%N value");
2433 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
2437 /* Write 1 << C, for a constant C. */
2438 if (GET_CODE (x) != CONST_INT)
2439 output_operand_lossage ("invalid %%P value");
2441 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
2445 /* Write the high-order 16 bits of a constant, sign-extended. */
2446 if (GET_CODE (x) != CONST_INT)
2447 output_operand_lossage ("invalid %%h value");
2449 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
2453 /* Write the low-order 16 bits of a constant, sign-extended. */
2454 if (GET_CODE (x) != CONST_INT)
2455 output_operand_lossage ("invalid %%L value");
/* (v & 0xffff) - 2*(v & 0x8000) sign-extends the low 16 bits.  */
2457 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
2458 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000);
2462 /* Write mask for ZAP insn. */
/* Each nonzero byte of the constant contributes one bit to the
   8-bit zap byte-mask; CONST_DOUBLE covers 64-bit constants on
   32-bit hosts.  */
2463 if (GET_CODE (x) == CONST_DOUBLE)
2465 HOST_WIDE_INT mask = 0;
2466 HOST_WIDE_INT value;
2468 value = CONST_DOUBLE_LOW (x);
2469 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
2474 value = CONST_DOUBLE_HIGH (x);
2475 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
2478 mask |= (1 << (i + sizeof (int)));
2480 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
2483 else if (GET_CODE (x) == CONST_INT)
2485 HOST_WIDE_INT mask = 0, value = INTVAL (x);
2487 for (i = 0; i < 8; i++, value >>= 8)
2491 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
2494 output_operand_lossage ("invalid %%m value");
2498 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
2499 if (GET_CODE (x) != CONST_INT
2500 || (INTVAL (x) != 8 && INTVAL (x) != 16
2501 && INTVAL (x) != 32 && INTVAL (x) != 64))
2502 output_operand_lossage ("invalid %%M value");
2504 fprintf (file, "%s",
2505 (INTVAL (x) == 8 ? "b"
2506 : INTVAL (x) == 16 ? "w"
2507 : INTVAL (x) == 32 ? "l"
2512 /* Similar, except do it from the mask. */
2513 if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xff)
2514 fprintf (file, "b");
2515 else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffff)
2516 fprintf (file, "w");
2517 else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffffffff)
2518 fprintf (file, "l");
/* On 32-bit hosts 64-bit masks arrive as CONST_DOUBLE.  */
2519 #if HOST_BITS_PER_WIDE_INT == 32
2520 else if (GET_CODE (x) == CONST_DOUBLE
2521 && CONST_DOUBLE_HIGH (x) == 0
2522 && CONST_DOUBLE_LOW (x) == -1)
2523 fprintf (file, "l");
2524 else if (GET_CODE (x) == CONST_DOUBLE
2525 && CONST_DOUBLE_HIGH (x) == -1
2526 && CONST_DOUBLE_LOW (x) == -1)
2527 fprintf (file, "q");
2529 else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffffffffffffffff)
2530 fprintf (file, "q");
2531 else if (GET_CODE (x) == CONST_DOUBLE
2532 && CONST_DOUBLE_HIGH (x) == 0
2533 && CONST_DOUBLE_LOW (x) == -1)
2534 fprintf (file, "q");
2537 output_operand_lossage ("invalid %%U value");
2541 /* Write the constant value divided by 8. */
/* NOTE(review): this validity check looks wrong -- with && the whole
   condition is false for any CONST_INT, and (v & 7) can never be 8.
   Presumably || and "!= 0" were intended; confirm against a later
   revision of this file.  */
2542 if (GET_CODE (x) != CONST_INT
2543 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
2544 && (INTVAL (x) & 7) != 8)
2545 output_operand_lossage ("invalid %%s value");
2547 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
2551 /* Same, except compute (64 - c) / 8 */
/* NOTE(review): same suspicious &&/!= 8 check as the %s case above;
   the lossage message also says %%s rather than %%S.  */
2553 if (GET_CODE (x) != CONST_INT
2554 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
2555 && (INTVAL (x) & 7) != 8)
2556 output_operand_lossage ("invalid %%s value");
2558 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
2561 case 'C': case 'D': case 'c': case 'd':
2562 /* Write out comparison name. */
2564 enum rtx_code c = GET_CODE (x);
2566 if (GET_RTX_CLASS (c) != '<')
2567 output_operand_lossage ("invalid %%C value");
/* 'D' reverses, 'c' swaps, 'd' does both; plain 'C' prints as-is.  */
2570 c = reverse_condition (c);
2571 else if (code == 'c')
2572 c = swap_condition (c);
2573 else if (code == 'd')
2574 c = swap_condition (reverse_condition (c));
2577 fprintf (file, "ule");
2579 fprintf (file, "ult");
2581 fprintf (file, "%s", GET_RTX_NAME (c));
2586 /* Write the divide or modulus operator. */
2587 switch (GET_CODE (x))
2590 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
2593 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
2596 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
2599 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
2602 output_operand_lossage ("invalid %%E value");
2608 /* Write "_u" for unaligned access. */
/* An (and addr -8) address is the marker used by the unaligned
   expanders above, hence the ldq_u/stq_u "_u" suffix.  */
2609 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
2610 fprintf (file, "_u");
/* Default: register name, memory address, or constant expression.  */
2614 if (GET_CODE (x) == REG)
2615 fprintf (file, "%s", reg_names[REGNO (x)]);
2616 else if (GET_CODE (x) == MEM)
2617 output_address (XEXP (x, 0));
2619 output_addr_const (file, x);
2623 output_operand_lossage ("invalid %%xn code");
2627 /* Do what is necessary for `va_start'. The argument is ignored;
2628 We look at the current function to determine if stdarg or varargs
2629 is used and fill in an initial va_list. A pointer to this constructor
/* Implement __builtin_saveregs for va_start: build the two-word va_list
   { __base, __va_offset } in a stack slot and return its address.
   ARGLIST is ignored; stdarg vs varargs is deduced from the current
   function's type.  The VMS early-return path is on elided lines.
   NOTE(review): this listing is an excerpt; some lines are not visible.  */
2633 alpha_builtin_saveregs (arglist)
2636 rtx block, addr, dest, argsize;
2637 tree fntype = TREE_TYPE (current_function_decl);
/* stdarg: the last declared parameter type is not void.  */
2638 int stdarg = (TYPE_ARG_TYPES (fntype) != 0
2639 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
2640 != void_type_node));
2642 /* Compute the current position into the args, taking into account
2643 both registers and memory. Both of these are already included in
2646 argsize = GEN_INT (NUM_ARGS * UNITS_PER_WORD);
2648 /* For Unix, SETUP_INCOMING_VARARGS moves the starting address base up by 48,
2649 storing fp arg registers in the first 48 bytes, and the integer arg
2650 registers in the next 48 bytes. This is only done, however, if any
2651 integer registers need to be stored.
2653 If no integer registers need be stored, then we must subtract 48 in
2654 order to account for the integer arg registers which are counted in
2655 argsize above, but which are not actually stored on the stack. */
2657 if (TARGET_OPEN_VMS)
2658 addr = plus_constant (virtual_incoming_args_rtx,
2659 NUM_ARGS <= 5 + stdarg
2660 ? UNITS_PER_WORD : - 6 * UNITS_PER_WORD);
2662 addr = (NUM_ARGS <= 5 + stdarg
2663 ? plus_constant (virtual_incoming_args_rtx,
2665 : plus_constant (virtual_incoming_args_rtx,
2666 - (6 * UNITS_PER_WORD)));
2668 /* For VMS, we include the argsize, while on Unix, it's handled as
2669 a separate field. */
2670 if (TARGET_OPEN_VMS)
2671 addr = plus_constant (addr, INTVAL (argsize));
2673 addr = force_operand (addr, NULL_RTX);
2675 #ifdef POINTERS_EXTEND_UNSIGNED
2676 addr = convert_memory_address (ptr_mode, addr);
2679 if (TARGET_OPEN_VMS)
2683 /* Allocate the va_list constructor */
2684 block = assign_stack_local (BLKmode, 2 * UNITS_PER_WORD, BITS_PER_WORD);
2685 RTX_UNCHANGING_P (block) = 1;
2686 RTX_UNCHANGING_P (XEXP (block, 0)) = 1;
2688 /* Store the address of the first integer register in the __base
2691 dest = change_address (block, ptr_mode, XEXP (block, 0));
2692 emit_move_insn (dest, addr);
/* -fcheck-memory-usage instrumentation: mark the slot read/write.  */
2694 if (flag_check_memory_usage)
2695 emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
2697 GEN_INT (GET_MODE_SIZE (ptr_mode)),
2698 TYPE_MODE (sizetype),
2699 GEN_INT (MEMORY_USE_RW),
2700 TYPE_MODE (integer_type_node));
2702 /* Store the argsize as the __va_offset member. */
2703 dest = change_address (block, TYPE_MODE (integer_type_node),
2704 plus_constant (XEXP (block, 0),
2705 POINTER_SIZE/BITS_PER_UNIT));
2706 emit_move_insn (dest, argsize);
2708 if (flag_check_memory_usage)
2709 emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
2711 GEN_INT (GET_MODE_SIZE
2712 (TYPE_MODE (integer_type_node))),
2713 TYPE_MODE (sizetype),
2714 GEN_INT (MEMORY_USE_RW),
2715 TYPE_MODE (integer_type_node));
2717 /* Return the address of the va_list constructor, but don't put it in a
2718 register. Doing so would fail when not optimizing and produce worse
2719 code when optimizing. */
2720 return XEXP (block, 0);
2724 /* This page contains routines that are used to determine what the function
2725 prologue and epilogue code will do and write them out. */
2727 /* Compute the size of the save area in the stack. */
2731 /* These variables are used for communication between the following functions.
2732 They indicate various things about the current function being compiled
2733 that are used to tell what kind of prologue, epilogue and procedure
2734 descriptor to generate. */
2736 /* Nonzero if we need a stack procedure. */
2737 static int is_stack_procedure;
2739 /* Register number (either FP or SP) that is used to unwind the frame. */
2740 static int unwind_regno;
2742 /* Register number used to save FP. We need not have one for RA since
2743 we don't modify it for register procedures. This is only defined
2744 for register frame procedures. */
2745 static int save_fp_regno;
2747 /* Register number used to reference objects off our PV. */
2748 static int base_regno;
2750 /* Compute register masks for saved registers. */
/* Compute the masks of integer (*IMASKP) and floating (*FMASKP)
   registers that the current function must save: every non-fixed,
   call-saved register that is live, plus FP for stack procedures.
   Registers 32..63 go into the float mask (the imask update for
   registers below 32 is on an elided line).  */
2753 alpha_sa_mask (imaskP, fmaskP)
2754 unsigned long *imaskP;
2755 unsigned long *fmaskP;
2757 unsigned long imask = 0;
2758 unsigned long fmask = 0;
2761 if (is_stack_procedure)
2762 imask |= (1L << HARD_FRAME_POINTER_REGNUM);
2764 /* One for every register we have to save. */
2766 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2767 if (! fixed_regs[i] && ! call_used_regs[i]
2768 && regs_ever_live[i] && i != REG_RA)
2773 fmask |= (1L << (i - 32));
2786 HOST_WIDE_INT stack_needed;
2789 /* One for every register we have to save. */
2791 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2792 if (! fixed_regs[i] && ! call_used_regs[i]
2793 && regs_ever_live[i] && i != REG_RA)
2796 /* Start by assuming we can use a register procedure if we don't make any
2797 calls (REG_RA not used) or need to save any registers and a stack
2798 procedure if we do. */
2799 is_stack_procedure = sa_size != 0 || alpha_ra_ever_killed ();
2801 /* Decide whether to refer to objects off our PV via FP or PV.
2802 If we need FP for something else or if we receive a nonlocal
2803 goto (which expects PV to contain the value), we must use PV.
2804 Otherwise, start by assuming we can use FP. */
2805 base_regno = (frame_pointer_needed || current_function_has_nonlocal_label
2806 || is_stack_procedure
2807 || current_function_outgoing_args_size
2808 ? REG_PV : HARD_FRAME_POINTER_REGNUM);
2810 /* If we want to copy PV into FP, we need to find some register in which to
2815 if (base_regno == HARD_FRAME_POINTER_REGNUM)
2816 for (i = 0; i < 32; i++)
2817 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
2820 if (save_fp_regno == -1)
2821 base_regno = REG_PV, is_stack_procedure = 1;
2823 /* Stack unwinding should be done via FP unless we use it for PV. */
2825 = base_regno == REG_PV ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM;
2827 /* If this is a stack procedure, allow space for saving FP and RA. */
2828 if (is_stack_procedure)
2835 alpha_pv_save_size ()
2838 return is_stack_procedure ? 8 : 0;
2845 return unwind_regno == HARD_FRAME_POINTER_REGNUM;
2848 #else /* ! OPEN_VMS */
2856 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2857 if (! fixed_regs[i] && ! call_used_regs[i]
2858 && regs_ever_live[i] && i != REG_RA)
2861 /* If some registers were saved but not reg 26, reg 26 must also
2862 be saved, so leave space for it. */
2863 if (size != 0 || alpha_ra_ever_killed ())
2866 /* Our size must be even (multiple of 16 bytes). */
2873 #endif /* ! OPEN_VMS */
2875 /* Return 1 if this function can directly return via $26. */
2880 return (! TARGET_OPEN_VMS && reload_completed && alpha_sa_size () == 0
2881 && get_frame_size () == 0
2882 && current_function_outgoing_args_size == 0
2883 && current_function_pretend_args_size == 0);
2886 /* Write a version stamp. Don't write anything if we are running as a
2887 cross-compiler. Otherwise, use the versions in /usr/include/stamp.h. */
2889 #if !defined(CROSS_COMPILE) && !defined(_WIN32) && !defined(__linux__) && !defined(VMS)
2894 alpha_write_verstamp (file)
2898 fprintf (file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
2902 /* Write code to add constant C to register number IN_REG (possibly 31)
2903 and put the result into OUT_REG. Use TEMP_REG as a scratch register;
2904 usually this will be OUT_REG, but should not be if OUT_REG is
2905 STACK_POINTER_REGNUM, since it must be updated in a single instruction.
2906 Write the code to FILE. */
2909 add_long_const (file, c, in_reg, out_reg, temp_reg)
2912 int in_reg, out_reg, temp_reg;
2914 HOST_WIDE_INT low = (c & 0xffff) - 2 * (c & 0x8000); /* sign-extend low 16 bits */
2915 HOST_WIDE_INT tmp1 = c - low;
2916 HOST_WIDE_INT high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
2917 HOST_WIDE_INT extra = 0;
2919 /* We don't have code to write out constants larger than 32 bits. */
2920 #if HOST_BITS_PER_LONG_INT == 64
2921 if ((unsigned HOST_WIDE_INT) c >> 32 != 0)
2925 /* If HIGH will be interpreted as negative, we must adjust it to do two
2926 ldah insns. Note that we will never be building a negative constant
2933 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
2938 int result_reg = (extra == 0 && high == 0) ? out_reg : temp_reg; /* write OUT_REG directly only if this is the last insn */
2940 if (low >= 0 && low < 255) /* NOTE(review): the addq literal field covers 0..255, so low == 255 could also use addq; the lda fallback below is still correct. */
2941 fprintf (file, "\taddq $%d,%d,$%d\n", in_reg, low, result_reg);
2943 fprintf (file, "\tlda $%d,%d($%d)\n", result_reg, low, in_reg); /* NOTE(review): low is HOST_WIDE_INT printed with %d -- fine here since |low| <= 32768, but fragile */
2945 in_reg = result_reg;
2950 int result_reg = (high == 0) ? out_reg : temp_reg;
2952 fprintf (file, "\tldah $%d,%d($%d)\n", result_reg, extra, in_reg);
2953 in_reg = result_reg;
2957 fprintf (file, "\tldah $%d,%d($%d)\n", out_reg, high, in_reg);
2960 /* Write function prologue. */
2964 /* On vms we have two kinds of functions:
2966 - stack frame (PROC_STACK)
2967 these are 'normal' functions with local vars and which are
2968 calling other functions
2969 - register frame (PROC_REGISTER)
2970 keeps all data in registers, needs no stack
2972 We must pass this to the assembler so it can generate the
2973 proper pdsc (procedure descriptor)
2974 This is done with the '.pdesc' command.
2976 size is the stack size needed for local variables. */
2979 output_prolog (file, size)
2983 unsigned long imask = 0;
2984 unsigned long fmask = 0;
2985 /* Stack space needed for pushing registers clobbered by us. */
2986 HOST_WIDE_INT sa_size;
2987 /* Complete stack size needed. */
2988 HOST_WIDE_INT frame_size;
2989 /* Offset from base reg to register save area. */
2991 /* Offset during register save. */
2993 /* Label for the procedure entry. */
2994 char *entry_label = (char *) alloca (strlen (alpha_function_name) + 6);
2997 sa_size = alpha_sa_size ();
2999 = ALPHA_ROUND (sa_size
3000 + (is_stack_procedure ? 8 : 0)
3001 + size + current_function_pretend_args_size);
3003 /* Issue function start and label. */
3004 fprintf (file, "\t.ent ");
3005 assemble_name (file, alpha_function_name);
3006 fprintf (file, "\n");
3007 sprintf (entry_label, "%s..en", alpha_function_name);
3008 ASM_OUTPUT_LABEL (file, entry_label);
3009 inside_function = TRUE;
3011 fprintf (file, "\t.base $%d\n", base_regno);
3013 /* Calculate register masks for clobbered registers. */
3015 if (is_stack_procedure)
3016 alpha_sa_mask (&imask, &fmask);
3018 /* Adjust the stack by the frame size. If the frame size is > 4096
3019 bytes, we need to be sure we probe somewhere in the first and last
3020 4096 bytes (we can probably get away without the latter test) and
3021 every 8192 bytes in between. If the frame size is > 32768, we
3022 do this in a loop. Otherwise, we generate the explicit probe
3025 Note that we are only allowed to adjust sp once in the prologue. */
3027 if (frame_size < 32768)
3029 if (frame_size > 4096)
3033 fprintf (file, "\tstq $31,-%d($30)\n", probed);
3035 while (probed + 8192 < frame_size)
3036 fprintf (file, "\tstq $31,-%d($30)\n", probed += 8192);
3038 /* We only have to do this probe if we aren't saving registers. */
3039 if (sa_size == 0 && probed + 4096 < frame_size)
3040 fprintf (file, "\tstq $31,-%d($30)\n", frame_size);
3043 if (frame_size != 0)
3044 fprintf (file, "\tlda $30,-%d($30)\n", frame_size);
3048 /* Here we generate code to set R4 to SP + 4096 and set R23 to the
3049 number of 8192 byte blocks to probe. We then probe each block
3050 in the loop and then set SP to the proper location. If the
3051 amount remaining is > 4096, we have to do one more probe if we
3052 are not saving any registers. */
3054 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
3055 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
3057 add_long_const (file, blocks, 31, 23, 23);
3059 fprintf (file, "\tlda $22,4096($30)\n");
3062 assemble_name (file, alpha_function_name);
3063 fprintf (file, "..sc:\n");
3065 fprintf (file, "\tstq $31,-8192($22)\n");
3066 fprintf (file, "\tsubq $23,1,$23\n");
3067 fprintf (file, "\tlda $22,-8192($22)\n");
3069 fprintf (file, "\tbne $23,$");
3070 assemble_name (file, alpha_function_name);
3071 fprintf (file, "..sc\n");
3073 if (leftover > 4096 && sa_size == 0)
3074 fprintf (file, "\tstq $31,-%d($22)\n", leftover);
3076 fprintf (file, "\tlda $30,-%d($22)\n", leftover);
3079 if (is_stack_procedure)
3081 int reg_offset = rsa_offset;
3083 /* Store R26 (RA) first. */
3084 fprintf (file, "\tstq $26,%d($30)\n", reg_offset);
3087 /* Store integer regs. according to mask. */
3088 for (i = 0; i < 32; i++)
3089 if (imask & (1L<<i))
3091 fprintf (file, "\tstq $%d,%d($30)\n", i, reg_offset);
3095 /* Print the register mask and do floating-point saves. */
3098 fprintf (file, "\t.mask 0x%x,0\n", imask); /* NOTE(review): imask is unsigned long but printed with %x -- mismatched on 64-bit hosts; confirm */
3100 for (i = 0; i < 32; i++)
3102 if (fmask & (1L << i))
3104 fprintf (file, "\tstt $f%d,%d($30)\n", i, reg_offset);
3109 /* Print the floating-point mask, if we've saved any fp register. */
3111 fprintf (file, "\t.fmask 0x%x,0\n", fmask); /* NOTE(review): same %x-vs-unsigned-long concern as .mask above */
3113 fprintf (file, "\tstq $27,0($30)\n");
3117 fprintf (file, "\t.fp_save $%d\n", save_fp_regno);
3118 fprintf (file, "\tbis $%d,$%d,$%d\n", HARD_FRAME_POINTER_REGNUM,
3119 HARD_FRAME_POINTER_REGNUM, save_fp_regno);
3122 if (base_regno != REG_PV)
3123 fprintf (file, "\tbis $%d,$%d,$%d\n", REG_PV, REG_PV, base_regno);
3125 if (unwind_regno == HARD_FRAME_POINTER_REGNUM)
3126 fprintf (file, "\tbis $%d,$%d,$%d\n", STACK_POINTER_REGNUM,
3127 STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM);
3129 /* Describe our frame. */
3130 fprintf (file, "\t.frame $%d,%d,$26,%d\n",
3131 unwind_regno, frame_size, rsa_offset);
3133 /* If we have to allocate space for outgoing args, do it now. */
3134 if (current_function_outgoing_args_size != 0)
3135 fprintf (file, "\tlda $%d,%d($%d)\n", STACK_POINTER_REGNUM,
3136 - ALPHA_ROUND (current_function_outgoing_args_size),
3137 HARD_FRAME_POINTER_REGNUM);
3139 fprintf (file, "\t.prologue\n");
3141 readonly_section ();
3142 fprintf (file, "\t.align 3\n");
3143 assemble_name (file, alpha_function_name); fputs ("..na:\n", file);
3144 fputs ("\t.ascii \"", file);
3145 assemble_name (file, alpha_function_name);
3146 fputs ("\\0\"\n", file);
3149 fprintf (file, "\t.align 3\n");
3150 fputs ("\t.name ", file);
3151 assemble_name (file, alpha_function_name);
3152 fputs ("..na\n", file);
3153 ASM_OUTPUT_LABEL (file, alpha_function_name);
3154 fprintf (file, "\t.pdesc ");
3155 assemble_name (file, alpha_function_name);
3156 fprintf (file, "..en,%s\n", is_stack_procedure ? "stack" : "reg");
3157 alpha_need_linkage (alpha_function_name, 1);
3163 /* Write function epilogue. */
3166 output_epilog (file, size)
3170 unsigned long imask = 0;
3171 unsigned long fmask = 0;
3172 /* Stack space needed for pushing registers clobbered by us. */
3173 HOST_WIDE_INT sa_size = alpha_sa_size ();
3174 /* Complete stack size needed. */
3175 HOST_WIDE_INT frame_size
3176 = ALPHA_ROUND (sa_size
3177 + (is_stack_procedure ? 8 : 0)
3178 + size + current_function_pretend_args_size);
3180 rtx insn = get_last_insn ();
3182 /* If the last insn was a BARRIER, we don't have to write anything except
3183 the .end pseudo-op. */
3185 if (GET_CODE (insn) == NOTE)
3186 insn = prev_nonnote_insn (insn);
3188 if (insn == 0 || GET_CODE (insn) != BARRIER)
3190 /* Restore clobbered registers, load FP last. */
3192 if (is_stack_procedure)
3198 if (unwind_regno == HARD_FRAME_POINTER_REGNUM)
3199 fprintf (file, "\tbis $%d,$%d,$%d\n", HARD_FRAME_POINTER_REGNUM,
3200 HARD_FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM);
3202 alpha_sa_mask (&imask, &fmask);
3204 /* Start reloading registers after RA. */
3205 reg_offset = rsa_offset + 8;
3207 for (i = 0; i < 32; i++)
3208 if (imask & (1L<<i))
3210 if (i == HARD_FRAME_POINTER_REGNUM)
3211 fp_offset = reg_offset;
3213 fprintf (file, "\tldq $%d,%d($30)\n",
3218 for (i = 0; i < 32; i++)
3219 if (fmask & (1L << i))
3221 fprintf (file, "\tldt $f%d,%d($30)\n", i, reg_offset);
3225 /* Restore R26 (RA). */
3226 fprintf (file, "\tldq $26,%d($30)\n", rsa_offset);
3228 /* Restore R29 (FP). */
3229 fprintf (file, "\tldq $29,%d($30)\n", fp_offset);
3232 fprintf (file, "\tbis $%d,$%d,$%d\n", save_fp_regno, save_fp_regno,
3233 HARD_FRAME_POINTER_REGNUM);
3235 if (frame_size != 0)
3237 if (frame_size < 32768)
3238 fprintf (file, "\tlda $30,%d($30)\n", frame_size);
3241 long high = frame_size >> 16;
3242 long low = frame_size & 0xffff;
3246 low = -32768 + (low & 0x7fff);
3248 fprintf (file, "\tldah $2,%ld($31)\n", high);
3249 fprintf (file, "\tlda $2,%ld($2)\n", low);
3250 fprintf (file, "\taddq $30,$2,$30\n");
3254 /* Finally return to the caller. */
3255 fprintf (file, "\tret $31,($26),1\n");
3258 /* End the function. */
3259 fprintf (file, "\t.end ");
3260 assemble_name (file, alpha_function_name);
3261 fprintf (file, "\n");
3262 inside_function = FALSE;
3264 /* Show that we know this function if it is called again. */
3265 SYMBOL_REF_FLAG (XEXP (DECL_RTL (current_function_decl), 0)) = 1;
3269 vms_valid_decl_attribute_p (decl, attributes, identifier, args)
3275 if (is_attribute_p ("overlaid", identifier))
3276 return (args == NULL_TREE);
3280 #else /* !OPEN_VMS */
3283 alpha_does_function_need_gp ()
3287 /* We never need a GP for Windows/NT. */
3288 if (TARGET_WINDOWS_NT)
3291 #ifdef TARGET_PROFILING_NEEDS_GP
3296 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
3297 Even if we are a static function, we still need to do this in case
3298 our address is taken and passed to something like qsort. */
3300 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
3301 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3302 && GET_CODE (PATTERN (insn)) != USE
3303 && GET_CODE (PATTERN (insn)) != CLOBBER)
3305 enum attr_type type = get_attr_type (insn);
3306 if (type == TYPE_LDSYM || type == TYPE_JSR)
3314 output_prolog (file, size)
3318 HOST_WIDE_INT out_args_size
3319 = ALPHA_ROUND (current_function_outgoing_args_size);
3320 HOST_WIDE_INT sa_size = alpha_sa_size ();
3321 HOST_WIDE_INT frame_size
3322 = (out_args_size + sa_size
3323 + ALPHA_ROUND (size + current_function_pretend_args_size));
3324 HOST_WIDE_INT reg_offset = out_args_size;
3325 HOST_WIDE_INT start_reg_offset = reg_offset;
3326 HOST_WIDE_INT actual_start_reg_offset = start_reg_offset;
3327 int int_reg_save_area_size = 0;
3328 unsigned reg_mask = 0;
3331 /* Ecoff can handle multiple .file directives, so put out file and lineno.
3332 We have to do that before the .ent directive as we cannot switch
3333 files within procedures with native ecoff because line numbers are
3334 linked to procedure descriptors.
3335 Outputting the lineno helps debugging of one line functions as they
3336 would otherwise get no line number at all. Please note that we would
3337 like to put out last_linenum from final.c, but it is not accessible. */
3339 if (write_symbols == SDB_DEBUG)
3341 ASM_OUTPUT_SOURCE_FILENAME (file,
3342 DECL_SOURCE_FILE (current_function_decl));
3343 if (debug_info_level != DINFO_LEVEL_TERSE)
3344 ASM_OUTPUT_SOURCE_LINE (file,
3345 DECL_SOURCE_LINE (current_function_decl));
3348 /* The assembly language programmer's guide states that the second argument
3349 to the .ent directive, the lex_level, is ignored by the assembler,
3350 so we might as well omit it. */
3352 if (!flag_inhibit_size_directive)
3354 fprintf (file, "\t.ent ");
3355 assemble_name (file, alpha_function_name);
3356 fprintf (file, "\n");
3358 ASM_OUTPUT_LABEL (file, alpha_function_name);
3359 inside_function = TRUE;
3361 if (TARGET_IEEE_CONFORMANT && !flag_inhibit_size_directive)
3362 /* Set flags in procedure descriptor to request IEEE-conformant
3363 math-library routines. The value we set it to is PDSC_EXC_IEEE
3364 (/usr/include/pdsc.h). */
3365 fprintf (file, "\t.eflag 48\n");
3367 /* Set up offsets to alpha virtual arg/local debugging pointer. */
3369 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
3370 alpha_arg_offset = -frame_size + 48;
3372 alpha_function_needs_gp = alpha_does_function_need_gp ();
3374 if (TARGET_WINDOWS_NT == 0)
3376 if (alpha_function_needs_gp)
3377 fprintf (file, "\tldgp $29,0($27)\n");
3379 /* Put a label after the GP load so we can enter the function at it. */
3381 assemble_name (file, alpha_function_name);
3382 fprintf (file, "..ng:\n");
3385 /* Adjust the stack by the frame size. If the frame size is > 4096
3386 bytes, we need to be sure we probe somewhere in the first and last
3387 4096 bytes (we can probably get away without the latter test) and
3388 every 8192 bytes in between. If the frame size is > 32768, we
3389 do this in a loop. Otherwise, we generate the explicit probe
3392 Note that we are only allowed to adjust sp once in the prologue. */
3394 if (frame_size < 32768)
3396 if (frame_size > 4096)
3400 fprintf (file, "\tstq $31,-%d($30)\n", probed);
3402 while (probed + 8192 < frame_size)
3403 fprintf (file, "\tstq $31,-%d($30)\n", probed += 8192);
3405 /* We only have to do this probe if we aren't saving registers. */
3406 if (sa_size == 0 && probed + 4096 < frame_size)
3407 fprintf (file, "\tstq $31,-%d($30)\n", frame_size);
3410 if (frame_size != 0)
3411 fprintf (file, "\tlda $30,-%d($30)\n", frame_size);
3415 /* Here we generate code to set R4 to SP + 4096 and set R5 to the
3416 number of 8192 byte blocks to probe. We then probe each block
3417 in the loop and then set SP to the proper location. If the
3418 amount remaining is > 4096, we have to do one more probe if we
3419 are not saving any registers. */
3421 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
3422 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
3424 add_long_const (file, blocks, 31, 5, 5);
3426 fprintf (file, "\tlda $4,4096($30)\n");
3429 assemble_name (file, alpha_function_name);
3430 fprintf (file, "..sc:\n");
3432 fprintf (file, "\tstq $31,-8192($4)\n");
3433 fprintf (file, "\tsubq $5,1,$5\n");
3434 fprintf (file, "\tlda $4,-8192($4)\n");
3436 fprintf (file, "\tbne $5,$");
3437 assemble_name (file, alpha_function_name);
3438 fprintf (file, "..sc\n");
3440 if (leftover > 4096 && sa_size == 0)
3441 fprintf (file, "\tstq $31,-%d($4)\n", leftover);
3443 fprintf (file, "\tlda $30,-%d($4)\n", leftover);
3446 /* Describe our frame. */
3447 if (!flag_inhibit_size_directive)
3449 fprintf (file, "\t.frame $%d,%d,$26,%d\n",
3450 (frame_pointer_needed
3451 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
3452 frame_size, current_function_pretend_args_size);
3455 /* Cope with very large offsets to the register save area. */
3457 if (reg_offset + sa_size > 0x8000)
3459 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
3460 if (low + sa_size <= 0x8000)
3462 add_long_const (file, reg_offset - low, 30, 24, 24);
3467 add_long_const (file, reg_offset, 30, 24, 24);
3473 /* Save register RA if any other register needs to be saved. */
3476 reg_mask |= 1 << REG_RA;
3477 fprintf (file, "\tstq $26,%d($%d)\n", reg_offset, sa_reg);
3479 int_reg_save_area_size += 8;
3482 /* Now save any other used integer registers required to be saved. */
3483 for (i = 0; i < 32; i++)
3484 if (! fixed_regs[i] && ! call_used_regs[i]
3485 && regs_ever_live[i] && i != REG_RA)
3488 fprintf (file, "\tstq $%d,%d($%d)\n", i, reg_offset, sa_reg);
3490 int_reg_save_area_size += 8;
3493 /* Print the register mask and do floating-point saves. */
3494 if (reg_mask && !flag_inhibit_size_directive)
3495 fprintf (file, "\t.mask 0x%x,%d\n", reg_mask,
3496 actual_start_reg_offset - frame_size);
3498 start_reg_offset = reg_offset;
3501 for (i = 0; i < 32; i++)
3502 if (! fixed_regs[i + 32] && ! call_used_regs[i + 32]
3503 && regs_ever_live[i + 32])
3506 fprintf (file, "\tstt $f%d,%d($%d)\n", i, reg_offset, sa_reg);
3510 /* Print the floating-point mask, if we've saved any fp register. */
3511 if (reg_mask && !flag_inhibit_size_directive)
3512 fprintf (file, "\t.fmask 0x%x,%d\n", reg_mask,
3513 actual_start_reg_offset - frame_size + int_reg_save_area_size);
3515 /* If we need a frame pointer, set it from the stack pointer. Note that
3516 this must always be the last instruction in the prologue. */
3517 if (frame_pointer_needed)
3518 fprintf (file, "\tbis $30,$30,$15\n");
3520 /* End the prologue and say if we used gp. */
3521 if (!flag_inhibit_size_directive)
3522 fprintf (file, "\t.prologue %d\n", alpha_function_needs_gp);
3525 /* Write function epilogue. */
3528 output_epilog (file, size)
3532 rtx insn = get_last_insn ();
3533 HOST_WIDE_INT out_args_size
3534 = ALPHA_ROUND (current_function_outgoing_args_size);
3535 HOST_WIDE_INT sa_size = alpha_sa_size ();
3536 HOST_WIDE_INT frame_size
3537 = (out_args_size + sa_size
3538 + ALPHA_ROUND (size + current_function_pretend_args_size));
3539 HOST_WIDE_INT reg_offset = out_args_size;
3541 = frame_pointer_needed && regs_ever_live[HARD_FRAME_POINTER_REGNUM];
3544 /* If the last insn was a BARRIER, we don't have to write anything except
3545 the .end pseudo-op. */
3546 if (GET_CODE (insn) == NOTE)
3547 insn = prev_nonnote_insn (insn);
3548 if (insn == 0 || GET_CODE (insn) != BARRIER)
3553 /* If we have a frame pointer, restore SP from it. */
3554 if (frame_pointer_needed)
3555 fprintf (file, "\tbis $15,$15,$30\n");
3557 /* Cope with large offsets to the register save area. */
3559 if (reg_offset + sa_size > 0x8000)
3561 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
3562 if (low + sa_size <= 0x8000)
3564 add_long_const (file, reg_offset - low, 30, 24, 24);
3569 add_long_const (file, reg_offset, 30, 24, 24);
3575 /* Restore all the registers, starting with the return address
3579 fprintf (file, "\tldq $26,%d($%d)\n", reg_offset, sa_reg);
3583 /* Now restore any other used integer registers that we saved,
3584 except for FP if it is being used as FP, since it must be
3587 for (i = 0; i < 32; i++)
3588 if (! fixed_regs[i] && ! call_used_regs[i] && regs_ever_live[i]
3591 if (i == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
3592 fp_offset = reg_offset; /* remember where FP lives; it is reloaded last, just before the SP update */
3594 fprintf (file, "\tldq $%d,%d($%d)\n", i, reg_offset, sa_reg);
3598 for (i = 0; i < 32; i++)
3599 if (! fixed_regs[i + 32] && ! call_used_regs[i + 32]
3600 && regs_ever_live[i + 32])
3602 fprintf (file, "\tldt $f%d,%d($%d)\n", i, reg_offset, sa_reg);
3606 /* If the stack size is large and we have a frame pointer, compute the
3607 size of the stack into a register because the old FP restore, stack
3608 pointer adjust, and return are required to be consecutive
3610 if (frame_size > 32767 && restore_fp)
3611 add_long_const (file, frame_size, 31, 1, 1); /* precompute frame size into $1 */
3613 /* If we needed a frame pointer and we have to restore it, do it
3614 now. This must be done in one instruction immediately
3615 before the SP update. */
3616 if (restore_fp && fp_offset)
3617 fprintf (file, "\tldq $15,%d($%d)\n", fp_offset, sa_reg);
3619 /* Now update the stack pointer, if needed. Only one instruction must
3620 modify the stack pointer. It must be the last instruction in the
3621 sequence and must be an ADDQ or LDA instruction. If the frame
3622 pointer was loaded above, we may only put one instruction here. */
3624 if (frame_size > 32768 && restore_fp) /* NOTE(review): boundary mismatch with the "> 32767" test above -- at frame_size == 32768 the value loaded into $1 goes unused; confirm intended */
3625 fprintf (file, "\taddq $1,$30,$30\n");
3627 add_long_const (file, frame_size, 30, 30, 1);
3629 /* Finally return to the caller. */
3630 fprintf (file, "\tret $31,($26),1\n");
3633 /* End the function. */
3634 if (!flag_inhibit_size_directive)
3636 fprintf (file, "\t.end ");
3637 assemble_name (file, alpha_function_name);
3638 fprintf (file, "\n");
3640 inside_function = FALSE;
3642 /* Show that we know this function if it is called again.
3644 Don't do this for global functions in object files destined for a
3645 shared library because the function may be overridden by the application
3647 ??? Is this just ELF? */
3649 if (!flag_pic || !TREE_PUBLIC (current_function_decl))
3650 SYMBOL_REF_FLAG (XEXP (DECL_RTL (current_function_decl), 0)) = 1;
3652 #endif /* !OPEN_VMS */
3654 /* Debugging support. */
3658 /* Count the number of sdb-related labels that are generated (to find block
3659 start and end boundaries). */
3661 int sdb_label_count = 0;
3663 /* Next label # for each statement. */
3665 static int sym_lineno = 0;
3667 /* Count the number of .file directives, so that .loc is up to date. */
3669 static int num_source_filenames = 0;
3671 /* Name of the file containing the current function. */
3673 static char *current_function_file = "";
3675 /* Offsets to alpha virtual arg/local debugging pointers. */
3677 long alpha_arg_offset;
3678 long alpha_auto_offset;
3680 /* Emit a new filename to a stream. */
3683 alpha_output_filename (stream, name)
3687 static int first_time = TRUE;
3688 char ltext_label_name[100];
3693 ++num_source_filenames;
3694 current_function_file = name;
3695 fprintf (stream, "\t.file\t%d ", num_source_filenames);
3696 output_quoted_string (stream, name);
3697 fprintf (stream, "\n");
3698 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
3699 fprintf (stream, "\t#@stabs\n");
3702 else if (write_symbols == DBX_DEBUG)
3704 ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext", 0);
3705 fprintf (stream, "%s ", ASM_STABS_OP);
3706 output_quoted_string (stream, name);
3707 fprintf (stream, ",%d,0,0,%s\n", N_SOL, &ltext_label_name[1]);
3710 else if (name != current_function_file
3711 && strcmp (name, current_function_file) != 0)
3713 if (inside_function && ! TARGET_GAS)
3714 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
3717 ++num_source_filenames;
3718 current_function_file = name;
3719 fprintf (stream, "\t.file\t%d ", num_source_filenames);
3722 output_quoted_string (stream, name);
3723 fprintf (stream, "\n");
3727 /* Emit a linenumber to a stream. */
3730 alpha_output_lineno (stream, line)
3734 if (write_symbols == DBX_DEBUG)
3736 /* mips-tfile doesn't understand .stabd directives. */
3738 fprintf (stream, "$LM%d:\n\t%s %d,0,%d,$LM%d\n",
3739 sym_lineno, ASM_STABN_OP, N_SLINE, line, sym_lineno);
3742 fprintf (stream, "\n\t.loc\t%d %d\n", num_source_filenames, line);
3745 /* Structure to show the current status of registers and memory. */
3747 struct shadow_summary
3750 unsigned long i : 31; /* Mask of int regs; 31 bits suffice since $31 is always zero and is never tracked */
3751 unsigned long fp : 31; /* Mask of fp regs; likewise $f31 (hard reg 63) is skipped */
3752 unsigned long mem : 1; /* mem == imem | fpmem */
3756 /* Summary the effects of expression X on the machine. Update SUM, a pointer
3757 to the summary structure. SET is nonzero if the insn is setting the
3758 object, otherwise zero. */
3761 summarize_insn (x, sum, set)
3763 struct shadow_summary *sum;
3772 switch (GET_CODE (x))
3774 /* ??? Note that this case would be incorrect if the Alpha had a
3775 ZERO_EXTRACT in SET_DEST. */
3777 summarize_insn (SET_SRC (x), sum, 0);
3778 summarize_insn (SET_DEST (x), sum, 1);
3782 summarize_insn (XEXP (x, 0), sum, 1);
3786 summarize_insn (XEXP (x, 0), sum, 0);
3790 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
3791 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
3795 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3796 summarize_insn (XVECEXP (x, 0, i), sum, 0);
3805 int regno = REGNO (x);
3806 unsigned long mask = 1UL << (regno % 32);
3808 if (regno == 31 || regno == 63)
3814 sum->defd.i |= mask;
3816 sum->defd.fp |= mask;
3821 sum->used.i |= mask;
3823 sum->used.fp |= mask;
3834 /* Find the regs used in memory address computation: */
3835 summarize_insn (XEXP (x, 0), sum, 0);
3838 case CONST_INT: case CONST_DOUBLE:
3839 case SYMBOL_REF: case LABEL_REF: case CONST:
3842 /* Handle common unary and binary ops for efficiency. */
3843 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
3844 case MOD: case UDIV: case UMOD: case AND: case IOR:
3845 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
3846 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
3847 case NE: case EQ: case GE: case GT: case LE:
3848 case LT: case GEU: case GTU: case LEU: case LTU:
3849 summarize_insn (XEXP (x, 0), sum, 0);
3850 summarize_insn (XEXP (x, 1), sum, 0);
3853 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
3854 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
3855 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
3856 case SQRT: case FFS:
3857 summarize_insn (XEXP (x, 0), sum, 0);
3861 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
3862 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3863 switch (format_ptr[i])
3866 summarize_insn (XEXP (x, i), sum, 0);
3870 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3871 summarize_insn (XVECEXP (x, i, j), sum, 0);
3880 /* Ensure a sufficient number of `trapb' insns are in the code when the user
3881 requests code with a trap precision of functions or instructions.
3883 In naive mode, when the user requests a trap-precision of "instruction", a
3884 trapb is needed after every instruction that may generate a trap (and after
3885 jsr/bsr instructions, because called functions may import a trap from the
3886 caller). This ensures that the code is resumption safe but it is also slow.
3888 When optimizations are turned on, we delay issuing a trapb as long as
3889 possible. In this context, a trap shadow is the sequence of instructions
3890 that starts with a (potentially) trap generating instruction and extends to
3891 the next trapb or call_pal instruction (but GCC never generates call_pal by
3892 itself). We can delay (and therefore sometimes omit) a trapb subject to the
3893 following conditions:
3895 (a) On entry to the trap shadow, if any Alpha register or memory location
3896 contains a value that is used as an operand value by some instruction in
3897 the trap shadow (live on entry), then no instruction in the trap shadow
3898 may modify the register or memory location.
3900 (b) Within the trap shadow, the computation of the base register for a
3901 memory load or store instruction may not involve using the result
3902 of an instruction that might generate an UNPREDICTABLE result.
3904 (c) Within the trap shadow, no register may be used more than once as a
3905 destination register. (This is to make life easier for the trap-handler.)
3907 (d) The trap shadow may not include any branch instructions. */
3910 alpha_handle_trap_shadows (insns)
3913 struct shadow_summary shadow;
3914 int trap_pending, exception_nesting;
3917 if (alpha_tp == ALPHA_TP_PROG && !flag_exceptions)
3921 exception_nesting = 0;
3924 shadow.used.mem = 0;
3925 shadow.defd = shadow.used;
3927 for (i = insns; i ; i = NEXT_INSN (i))
3929 if (GET_CODE (i) == NOTE)
3931 switch (NOTE_LINE_NUMBER (i))
3933 case NOTE_INSN_EH_REGION_BEG:
3934 exception_nesting++;
3939 case NOTE_INSN_EH_REGION_END:
3940 exception_nesting--;
3945 case NOTE_INSN_EPILOGUE_BEG:
3946 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
3951 else if (trap_pending)
3953 if (alpha_tp == ALPHA_TP_FUNC)
3955 if (GET_CODE (i) == JUMP_INSN
3956 && GET_CODE (PATTERN (i)) == RETURN)
3959 else if (alpha_tp == ALPHA_TP_INSN)
3963 struct shadow_summary sum;
3968 sum.defd = sum.used;
3970 switch (GET_CODE (i))
3973 /* Annoyingly, get_attr_trap will abort on these. */
3974 if (GET_CODE (PATTERN (i)) == USE
3975 || GET_CODE (PATTERN (i)) == CLOBBER)
3978 summarize_insn (PATTERN (i), &sum, 0);
3980 if ((sum.defd.i & shadow.defd.i)
3981 || (sum.defd.fp & shadow.defd.fp))
3983 /* (c) would be violated */
3987 /* Combine shadow with summary of current insn: */
3988 shadow.used.i |= sum.used.i;
3989 shadow.used.fp |= sum.used.fp;
3990 shadow.used.mem |= sum.used.mem;
3991 shadow.defd.i |= sum.defd.i;
3992 shadow.defd.fp |= sum.defd.fp;
3993 shadow.defd.mem |= sum.defd.mem;
3995 if ((sum.defd.i & shadow.used.i)
3996 || (sum.defd.fp & shadow.used.fp)
3997 || (sum.defd.mem & shadow.used.mem))
3999 /* (a) would be violated (also takes care of (b)) */
4000 if (get_attr_trap (i) == TRAP_YES
4001 && ((sum.defd.i & sum.used.i)
4002 || (sum.defd.fp & sum.used.fp)))
4021 emit_insn_before (gen_trapb (), i);
4025 shadow.used.mem = 0;
4026 shadow.defd = shadow.used;
4031 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
4032 && GET_CODE (i) == INSN
4033 && GET_CODE (PATTERN (i)) != USE
4034 && GET_CODE (PATTERN (i)) != CLOBBER
4035 && get_attr_trap (i) == TRAP_YES)
4037 if (optimize && !trap_pending)
4038 summarize_insn (PATTERN (i), &shadow, 0);
4044 /* Machine dependant reorg pass. */
4050 alpha_handle_trap_shadows (insns);
4054 /* Check a floating-point value for validity for a particular machine mode. */
4056 static char *float_strings[] =
4058 /* These are for FLOAT_VAX. */
4059 "1.70141173319264430e+38", /* 2^127 (2^24 - 1) / 2^24 */
4060 "-1.70141173319264430e+38",
4061 "2.93873587705571877e-39", /* 2^-128 */
4062 "-2.93873587705571877e-39",
4063 /* These are for the default broken IEEE mode, which traps
4064 on infinity or denormal numbers. */
4065 "3.402823466385288598117e+38", /* 2^128 (1 - 2^-24) */
4066 "-3.402823466385288598117e+38",
4067 "1.1754943508222875079687e-38", /* 2^-126 */
4068 "-1.1754943508222875079687e-38",
4071 static REAL_VALUE_TYPE float_values[8];
4072 static int inited_float_values = 0;
/* Clamp the REAL_VALUE_TYPE pointed to by D to the range representable in
   MODE's float format: out-of-range magnitudes are saturated to the format's
   extreme, and values too small to be a normal number are flushed to zero
   (this target's default IEEE mode traps on infinities and denormals, so
   they must never be emitted as constants).  NOTE(review): this excerpt is
   fragmented -- the OVERFLOW parameter declaration, local declarations of
   I and R, braces, and the trailing `return 0' (original lines 4077-4080,
   4082-4083, 4085-4086, etc.) are not visible; comments below are hedged
   accordingly.  */
4075 check_float_value (mode, d, overflow)
4076 enum machine_mode mode;
/* Only the strict IEEE-conformance targets need the clamping; presumably
   a plain VAX target returns unchanged -- confirm in full file.  */
4081 if (TARGET_IEEE || TARGET_IEEE_CONFORMANT || TARGET_IEEE_WITH_INEXACT)
/* One-time lazy parse of the limit strings into FLOAT_VALUES.  */
4084 if (inited_float_values == 0)
4087 for (i = 0; i < 8; i++)
4088 float_values[i] = REAL_VALUE_ATOF (float_strings[i], DFmode);
4090 inited_float_values = 1;
4096 REAL_VALUE_TYPE *fvptr;
/* Select the VAX limit quadruple or the IEEE one (see FLOAT_STRINGS).  */
4098 if (TARGET_FLOAT_VAX)
4099 fvptr = &float_values[0];
4101 fvptr = &float_values[4];
/* Copy *D into a local so comparisons don't depend on D's alignment;
   bcopy is this file's era-appropriate memmove.  */
4103 bcopy ((char *) d, (char *) &r, sizeof (REAL_VALUE_TYPE));
/* Positive overflow: saturate to the largest positive value.  */
4104 if (REAL_VALUES_LESS (fvptr[0], r))
4106 bcopy ((char *) &fvptr[0], (char *) d,
4107 sizeof (REAL_VALUE_TYPE));
/* Negative overflow: saturate to the largest-magnitude negative value.  */
4110 else if (REAL_VALUES_LESS (r, fvptr[1]))
4112 bcopy ((char *) &fvptr[1], (char *) d,
4113 sizeof (REAL_VALUE_TYPE));
/* Positive underflow (0 < r < smallest normal): flush to zero.  */
4116 else if (REAL_VALUES_LESS (dconst0, r)
4117 && REAL_VALUES_LESS (r, fvptr[2]))
4119 bcopy ((char *) &dconst0, (char *) d, sizeof (REAL_VALUE_TYPE));
/* Negative underflow (smallest negative normal < r < 0): flush to zero.  */
4122 else if (REAL_VALUES_LESS (r, dconst0)
4123 && REAL_VALUES_LESS (fvptr[3], r))
4125 bcopy ((char *) &dconst0, (char *) d, sizeof (REAL_VALUE_TYPE));
4135 /* Return the VMS argument type corresponding to MODE. */
/* NOTE(review): fragmented excerpt -- the return-type line, opening brace,
   and the switch on MODE (original lines 4136-4143, 4145, 4147-4149) are
   missing; presumably the two returns below are the SFmode and DFmode
   cases, with a default returning the integer argument type.  FF/FS and
   FD/FT look like the VMS AI-register float-type codes (VAX F vs. IEEE S,
   VAX D vs. IEEE T) -- confirm against the enum's declaration.  */
4138 alpha_arg_type (mode)
4139 enum machine_mode mode;
4144 return TARGET_FLOAT_VAX ? FF : FS;
4146 return TARGET_FLOAT_VAX ? FD : FT;
4152 /* Return an rtx for an integer representing the VMS Argument Information
   register value: the argument count in the low byte, then a 3-bit type
   code per argument for the first six arguments, packed starting at bit 8.
   NOTE(review): the return-type line, braces, and the declaration of I
   (original lines 4153-4155, 4158, 4160-4161, 4164) are missing from this
   excerpt.  */
4156 alpha_arg_info_reg_val (cum)
4157 CUMULATIVE_ARGS cum;
4159 unsigned HOST_WIDE_INT regval = cum.num_args;
/* Pack the per-argument type codes: 3 bits each, offset 8.  */
4162 for (i = 0; i < 6; i++)
4163 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
4165 return GEN_INT (regval);
4168 /* Structure to collect function names for final output
   of the VMS .linkage directives (see alpha_write_linkage).  */
/* KIND_UNUSED: defined locally, never referenced; KIND_LOCAL: defined and
   used here; KIND_EXTERN: only called, assumed defined elsewhere.  */
4171 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
/* Singly-linked list node, one per function name seen.  NOTE(review): the
   NAME member's declaration (original line 4176) is missing from this
   excerpt; usage below shows it is a char * holding an xstrdup'd copy.  */
4174 struct alpha_links {
4175 struct alpha_links *next;
4177 enum links_kind kind;
/* Head of the list, in reverse order of registration.  */
4180 static struct alpha_links *alpha_links_base = 0;
4182 /* Make (or fake) .linkage entry for function call.
4184 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition. */
/* Records NAME in the alpha_links list, upgrading its kind when a name is
   seen both defined and called.  NOTE(review): fragmented excerpt -- the
   parameter declarations, braces, and early returns inside the search loop
   (original lines 4188-4191, 4193-4196, etc.) are not visible.  */
4187 alpha_need_linkage (name, is_local)
4192 struct alpha_links *lptr, *nptr;
4197 /* Is this name already defined?  */
4199 for (lptr = alpha_links_base; lptr; lptr = lptr->next)
4200 if (strcmp (lptr->name, name) == 0)
4204 /* Defined here but external previously assumed: upgrade to local.  */
4205 if (lptr->kind == KIND_EXTERN)
4206 lptr->kind = KIND_LOCAL;
4210 /* Used here but previously assumed unused: upgrade to local.  */
4211 if (lptr->kind == KIND_UNUSED)
4212 lptr->kind = KIND_LOCAL;
/* First sighting: prepend a new node.  */
4217 nptr = (struct alpha_links *) xmalloc (sizeof (struct alpha_links));
4218 nptr->next = alpha_links_base;
4219 nptr->name = xstrdup (name);
4221 /* Assume external if no definition.  */
4222 nptr->kind = (is_local ? KIND_UNUSED : KIND_EXTERN);
4224 /* Ensure we have an IDENTIFIER so assemble_name can mark it as used.  */
4225 get_identifier (name);
4227 alpha_links_base = nptr;
/* Emit the collected linkage entries to STREAM at end of compilation:
   for each referenced name, a `$name..lk' label followed by either a
   local linkage pair (.quad name..en / .quad name) or a `.linkage'
   request for external names.  NOTE(review): fragmented excerpt -- the
   opening brace, the assignment of NPTR = LPTR->NEXT inside the loop,
   and the `continue' for unreferenced entries (original lines 4235-4236,
   4238, 4244-4246, 4249-4250, etc.) are not visible here.  */
4234 alpha_write_linkage (stream)
4237 struct alpha_links *lptr, *nptr;
/* Linkage pairs live in the read-only data section, quadword aligned.  */
4239 readonly_section ();
4241 fprintf (stream, "\t.align 3\n");
4243 for (lptr = alpha_links_base; lptr; lptr = nptr)
/* Skip names that were never actually referenced in the output.  */
4247 if (lptr->kind == KIND_UNUSED
4248 || ! TREE_SYMBOL_REFERENCED (get_identifier (lptr->name)))
4251 fprintf (stream, "$%s..lk:\n", lptr->name);
4252 if (lptr->kind == KIND_LOCAL)
4254 /* Local and used, build linkage pair.  */
4255 fprintf (stream, "\t.quad %s..en\n", lptr->name);
4256 fprintf (stream, "\t.quad %s\n", lptr->name);
4259 /* External and used, request linkage pair.  */
4260 fprintf (stream, "\t.linkage %s\n", lptr->name);
/* NOTE(review): presumably the non-OPEN_VMS no-op stub of
   alpha_need_linkage; its body (original lines 4268-4272) is missing
   from this excerpt -- confirm against the full file.  */
4267 alpha_need_linkage (name, is_local)
4273 #endif /* OPEN_VMS */