1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 93-97, 1998 Free Software Foundation, Inc.
3 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
28 #include "hard-reg-set.h"
30 #include "insn-config.h"
31 #include "conditions.h"
32 #include "insn-flags.h"
34 #include "insn-attr.h"
45 extern char *version_string;
46 extern int rtx_equal_function_value_matters;
48 /* Specify which cpu to schedule for. */
50 enum processor_type alpha_cpu;
/* Printable CPU names, indexed by enum processor_type; used in warnings
   (see the cache-latency diagnostics in the option-parsing code).  */
51 static char* const alpha_cpu_name[] =
56 /* Specify how accurate floating-point traps need to be. */
58 enum alpha_trap_precision alpha_tp;
60 /* Specify the floating-point rounding mode. */
62 enum alpha_fp_rounding_mode alpha_fprm;
64 /* Specify which things cause traps. */
66 enum alpha_fp_trap_mode alpha_fptm;
68 /* Strings decoded into the above options. */
70 char *alpha_cpu_string; /* -mcpu= */
71 char *alpha_tp_string; /* -mtrap-precision=[p|s|i] */
72 char *alpha_fprm_string; /* -mfp-rounding-mode=[n|m|c|d] */
73 char *alpha_fptm_string; /* -mfp-trap-mode=[n|u|su|sui] */
74 char *alpha_mlat_string; /* -mmemory-latency= */
76 /* Save information from a "cmpxx" operation until the branch or scc is
79 rtx alpha_compare_op0, alpha_compare_op1;
/* Nonzero when the saved comparison above is floating point.  */
80 int alpha_compare_fp_p;
82 /* Save the name of the current function as used by the assembler. This
83 is used by the epilogue. */
85 char *alpha_function_name;
87 /* Non-zero if inside of a function, because the Alpha asm can't
88 handle .files inside of functions. */
90 static int inside_function = FALSE;
92 /* Nonzero if the current function needs gp. */
94 int alpha_function_needs_gp;
96 /* If non-null, this rtx holds the return address for the function. */
98 static rtx alpha_return_addr_rtx;
100 /* The number of cycles of latency we should assume on memory reads. */
102 int alpha_memory_latency = 3;
104 /* Declarations of static functions. */
105 static void alpha_set_memflags_1 PROTO((rtx, int, int, int));
106 static rtx alpha_emit_set_const_1 PROTO((rtx, enum machine_mode,
107 HOST_WIDE_INT, int));
108 static void add_long_const PROTO((FILE *, HOST_WIDE_INT, int, int, int));
110 /* Compute the size of the save area in the stack. */
112 static void alpha_sa_mask PROTO((unsigned long *imaskP,
113 unsigned long *fmaskP));
115 /* Get the number of args of a function in one of two ways. */
/* NOTE(review): two alternative NUM_ARGS definitions appear below;
   presumably selected by an #if on the target ABI in lines omitted from
   this listing -- verify against the full source.  */
117 #define NUM_ARGS current_function_args_info.num_args
119 #define NUM_ARGS current_function_args_info
129 /* Parse target option strings. */
134 /* 971208 -- EV6 scheduling parameters are still secret, so don't even
135 pretend and just schedule for an EV5 for now. -- r~ */
/* Default alpha_cpu from the configure-time TARGET_CPU_DEFAULT mask.  */
137 = TARGET_CPU_DEFAULT & MASK_CPU_EV6 ? PROCESSOR_EV6
138 : (TARGET_CPU_DEFAULT & MASK_CPU_EV5 ? PROCESSOR_EV5 : PROCESSOR_EV4);
/* -mcpu= overrides the default and fixes up the instruction-subset
   flags (BWX/CIX/MAX) to match what the named chip implements.  */
140 if (alpha_cpu_string)
142 if (! strcmp (alpha_cpu_string, "ev4")
143 || ! strcmp (alpha_cpu_string, "21064"))
145 alpha_cpu = PROCESSOR_EV4;
146 target_flags &= ~ (MASK_BWX | MASK_CIX | MASK_MAX);
148 else if (! strcmp (alpha_cpu_string, "ev5")
149 || ! strcmp (alpha_cpu_string, "21164"))
151 alpha_cpu = PROCESSOR_EV5;
152 target_flags &= ~ (MASK_BWX | MASK_CIX | MASK_MAX);
154 else if (! strcmp (alpha_cpu_string, "ev56")
155 || ! strcmp (alpha_cpu_string, "21164a"))
157 alpha_cpu = PROCESSOR_EV5;
158 target_flags |= MASK_BWX;
159 target_flags &= ~ (MASK_CIX | MASK_MAX);
161 else if (! strcmp (alpha_cpu_string, "pca56")
162 || ! strcmp (alpha_cpu_string, "21164PC")
163 || ! strcmp (alpha_cpu_string, "21164pc"))
165 alpha_cpu = PROCESSOR_EV5;
166 target_flags |= MASK_BWX | MASK_MAX;
167 target_flags &= ~ MASK_CIX;
169 else if (! strcmp (alpha_cpu_string, "ev6")
170 || ! strcmp (alpha_cpu_string, "21264"))
172 alpha_cpu = PROCESSOR_EV6;
173 target_flags |= MASK_BWX | MASK_CIX | MASK_MAX;
176 error ("bad value `%s' for -mcpu switch", alpha_cpu_string);
/* Baseline trap/rounding defaults, possibly tightened below by the
   IEEE options and then overridden by the explicit string options.  */
179 alpha_tp = ALPHA_TP_PROG;
180 alpha_fprm = ALPHA_FPRM_NORM;
181 alpha_fptm = ALPHA_FPTM_N;
185 alpha_tp = ALPHA_TP_INSN;
186 alpha_fptm = ALPHA_FPTM_SU;
189 if (TARGET_IEEE_WITH_INEXACT)
191 alpha_tp = ALPHA_TP_INSN;
192 alpha_fptm = ALPHA_FPTM_SUI;
197 if (! strcmp (alpha_tp_string, "p"))
198 alpha_tp = ALPHA_TP_PROG;
199 else if (! strcmp (alpha_tp_string, "f"))
200 alpha_tp = ALPHA_TP_FUNC;
201 else if (! strcmp (alpha_tp_string, "i"))
202 alpha_tp = ALPHA_TP_INSN;
204 error ("bad value `%s' for -mtrap-precision switch", alpha_tp_string);
207 if (alpha_fprm_string)
209 if (! strcmp (alpha_fprm_string, "n"))
210 alpha_fprm = ALPHA_FPRM_NORM;
211 else if (! strcmp (alpha_fprm_string, "m"))
212 alpha_fprm = ALPHA_FPRM_MINF;
213 else if (! strcmp (alpha_fprm_string, "c"))
214 alpha_fprm = ALPHA_FPRM_CHOP;
215 else if (! strcmp (alpha_fprm_string,"d"))
216 alpha_fprm = ALPHA_FPRM_DYN;
218 error ("bad value `%s' for -mfp-rounding-mode switch",
222 if (alpha_fptm_string)
224 if (strcmp (alpha_fptm_string, "n") == 0)
225 alpha_fptm = ALPHA_FPTM_N;
226 else if (strcmp (alpha_fptm_string, "u") == 0)
227 alpha_fptm = ALPHA_FPTM_U;
228 else if (strcmp (alpha_fptm_string, "su") == 0)
229 alpha_fptm = ALPHA_FPTM_SU;
230 else if (strcmp (alpha_fptm_string, "sui") == 0)
231 alpha_fptm = ALPHA_FPTM_SUI;
233 error ("bad value `%s' for -mfp-trap-mode switch", alpha_fptm_string);
236 /* Do some sanity checks on the above option. */
238 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
239 && alpha_tp != ALPHA_TP_INSN)
241 warning ("fp software completion requires -mtrap-precision=i");
242 alpha_tp = ALPHA_TP_INSN;
245 if (TARGET_FLOAT_VAX)
247 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
249 warning ("rounding mode not supported for VAX floats");
250 alpha_fprm = ALPHA_FPRM_NORM;
252 if (alpha_fptm == ALPHA_FPTM_SUI)
254 warning ("trap mode not supported for VAX floats")
255 alpha_fptm = ALPHA_FPTM_SU;
/* Decode -mmemory-latency: a bare number, "L1"/"L2"/"L3" (per-CPU
   cache latencies from the table below), or "main".  */
263 if (!alpha_mlat_string)
264 alpha_mlat_string = "L1";
/* NOTE(review): isdigit() on a plain char is implementation-defined for
   negative values; a cast to unsigned char would be safer -- confirm
   against the project's ctype conventions.  */
266 if (isdigit (alpha_mlat_string[0])
267 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
269 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
270 && isdigit (alpha_mlat_string[1])
271 && alpha_mlat_string[2] == '\0')
273 static int const cache_latency[][4] =
275 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
276 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
277 { 3, 13, -1 }, /* ev6 -- Ho hum, doesn't exist yet */
280 lat = alpha_mlat_string[1] - '0';
/* NOTE(review): "-mmemory-latency=L0" yields lat == 0, which passes
   the `lat < 0` range check and then reads cache_latency[alpha_cpu][-1]
   (out of bounds) before the warning can fire; the check should be
   `lat <= 0`.  Not changed here because the surrounding control flow
   is only partially visible in this listing.  */
281 if (lat < 0 || lat > 3 || cache_latency[alpha_cpu][lat-1] == -1)
283 warning ("L%d cache latency unknown for %s",
284 lat, alpha_cpu_name[alpha_cpu]);
288 lat = cache_latency[alpha_cpu][lat-1];
290 else if (! strcmp (alpha_mlat_string, "main"))
292 /* Most current memories have about 370ns latency. This is
293 a reasonable guess for a fast cpu. */
298 warning ("bad value `%s' for -mmemory-latency", alpha_mlat_string);
302 alpha_memory_latency = lat;
306 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
/* Walk VALUE one byte at a time; any byte that is neither 0x00 nor 0xff
   means VALUE cannot be produced by a ZAP/ZAPNOT byte mask.  */
314 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
316 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
322 /* Returns 1 if OP is either the constant zero or a register. If a
323 register, it must be in the proper mode unless MODE is VOIDmode. */
326 reg_or_0_operand (op, mode)
328 enum machine_mode mode;
/* const0_rtx is the unique shared zero rtx, so pointer equality is the
   correct test for integer zero here.  */
330 return op == const0_rtx || register_operand (op, mode);
333 /* Return 1 if OP is a constant in the range of 0-63 (for a shift) or
337 reg_or_6bit_operand (op, mode)
339 enum machine_mode mode;
/* The unsigned cast folds the "0 <= x && x < 64" range test into a
   single comparison (negative INTVALs become huge unsigned values).  */
341 return ((GET_CODE (op) == CONST_INT
342 && (unsigned HOST_WIDE_INT) INTVAL (op) < 64)
343 || register_operand (op, mode));
347 /* Return 1 if OP is an 8-bit constant or any register. */
350 reg_or_8bit_operand (op, mode)
352 enum machine_mode mode;
/* 0..255 literals fit in the 8-bit immediate field of Alpha operate
   instructions; anything else must come from a register.  */
354 return ((GET_CODE (op) == CONST_INT
355 && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100)
356 || register_operand (op, mode));
359 /* Return 1 if OP is an 8-bit constant. */
362 cint8_operand (op, mode)
364 enum machine_mode mode;
/* Same range test as reg_or_8bit_operand, but constants only.  */
366 return (GET_CODE (op) == CONST_INT
367 && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100);
370 /* Return 1 if the operand is a valid second operand to an add insn. */
373 add_operand (op, mode)
375 enum machine_mode mode;
377 if (GET_CODE (op) == CONST_INT)
/* 'K', 'L', 'O' are the machine-specific constant letters from
   CONST_OK_FOR_LETTER_P (defined in the target header), covering the
   immediate forms that add/lda/ldah (or a negated subtract) accept.  */
378 return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'K')
379 || CONST_OK_FOR_LETTER_P (INTVAL (op), 'L')
380 || CONST_OK_FOR_LETTER_P (INTVAL (op), 'O'));
382 return register_operand (op, mode);
385 /* Return 1 if the operand is a valid second operand to a sign-extending
389 sext_add_operand (op, mode)
391 enum machine_mode mode;
393 if (GET_CODE (op) == CONST_INT)
/* Accept small positive constants or constants whose negation is small
   (the negated form can use a sign-extending subtract instead).
   NOTE(review): the bound is `< 255`, i.e. 0..254 -- confirm whether 255
   itself was deliberately excluded or this is an off-by-one.  */
394 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 255
395 || (unsigned HOST_WIDE_INT) (- INTVAL (op)) < 255);
397 return register_operand (op, mode);
400 /* Return 1 if OP is the constant 4 or 8. */
403 const48_operand (op, mode)
405 enum machine_mode mode;
/* 4 and 8 are the scale factors of the s4add/s8add family.  */
407 return (GET_CODE (op) == CONST_INT
408 && (INTVAL (op) == 4 || INTVAL (op) == 8));
411 /* Return 1 if OP is a valid first operand to an AND insn. */
414 and_operand (op, mode)
416 enum machine_mode mode;
/* A VOIDmode CONST_DOUBLE is a wide integer constant (on narrow hosts);
   both halves must be zap masks for a ZAPNOT encoding.  */
418 if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)
419 return (zap_mask (CONST_DOUBLE_LOW (op))
420 && zap_mask (CONST_DOUBLE_HIGH (op)));
/* Accept 8-bit immediates, complements of 8-bit immediates (for BIC),
   or zap masks.  */
422 if (GET_CODE (op) == CONST_INT)
423 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
424 || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100
425 || zap_mask (INTVAL (op)));
427 return register_operand (op, mode);
430 /* Return 1 if OP is a valid first operand to an IOR or XOR insn. */
433 or_operand (op, mode)
435 enum machine_mode mode;
/* 8-bit immediates, or their complements (for ORNOT/EQV forms).  */
437 if (GET_CODE (op) == CONST_INT)
438 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
439 || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100);
441 return register_operand (op, mode);
444 /* Return 1 if OP is a constant that is the width, in bits, of an integral
445 mode smaller than DImode. */
448 mode_width_operand (op, mode)
450 enum machine_mode mode;
/* 8/16/32/64 correspond to QImode/HImode/SImode/DImode widths.  */
452 return (GET_CODE (op) == CONST_INT
453 && (INTVAL (op) == 8 || INTVAL (op) == 16
454 || INTVAL (op) == 32 || INTVAL (op) == 64));
457 /* Return 1 if OP is a constant that is the width of an integral machine mode
458 smaller than an integer. */
461 mode_mask_operand (op, mode)
463 enum machine_mode mode;
/* On a 32-bit host a 64-bit all-ones mask arrives as a CONST_DOUBLE:
   low == -1 with high either -1 (full DImode mask) or 0 (SImode mask
   zero-extended into the double).  */
465 #if HOST_BITS_PER_WIDE_INT == 32
466 if (GET_CODE (op) == CONST_DOUBLE)
467 return (CONST_DOUBLE_LOW (op) == -1
468 && (CONST_DOUBLE_HIGH (op) == -1
469 || CONST_DOUBLE_HIGH (op) == 0));
471 if (GET_CODE (op) == CONST_DOUBLE)
472 return (CONST_DOUBLE_LOW (op) == -1 && CONST_DOUBLE_HIGH (op) == 0);
475 return (GET_CODE (op) == CONST_INT
476 && (INTVAL (op) == 0xff
477 || INTVAL (op) == 0xffff
478 || INTVAL (op) == 0xffffffff
/* NOTE(review): 0xffffffffffffffff is an unsigned literal compared
   against the signed INTVAL; it works because the comparison converts
   to unsigned, but an explicit cast would be clearer -- confirm the
   project's convention before changing.  */
479 #if HOST_BITS_PER_WIDE_INT == 64
480 || INTVAL (op) == 0xffffffffffffffff
485 /* Return 1 if OP is a multiple of 8 less than 64. */
488 mul8_operand (op, mode)
490 enum machine_mode mode;
/* I.e. one of 0, 8, 16, ..., 56: a whole-byte bit offset.  */
492 return (GET_CODE (op) == CONST_INT
493 && (unsigned HOST_WIDE_INT) INTVAL (op) < 64
494 && (INTVAL (op) & 7) == 0);
497 /* Return 1 if OP is the constant zero in floating-point. */
500 fp0_operand (op, mode)
502 enum machine_mode mode;
/* CONST0_RTX (mode) is shared, so pointer comparison suffices.  */
504 return (GET_MODE (op) == mode
505 && GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode));
508 /* Return 1 if OP is the floating-point constant zero or a register. */
511 reg_or_fp0_operand (op, mode)
513 enum machine_mode mode;
/* FP zero can be encoded as $f31, so patterns accept it like a reg.  */
515 return fp0_operand (op, mode) || register_operand (op, mode);
518 /* Return 1 if OP is a register or a constant integer. */
522 reg_or_cint_operand (op, mode)
524 enum machine_mode mode;
/* Any CONST_INT is allowed; splitting oversized ones is done later.  */
526 return GET_CODE (op) == CONST_INT || register_operand (op, mode);
529 /* Return 1 if OP is something that can be reloaded into a register;
530 if it is a MEM, it need not be valid. */
533 some_operand (op, mode)
535 enum machine_mode mode;
/* Reject only a hard mode mismatch; VOIDmode on either side matches.  */
537 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
540 switch (GET_CODE (op))
542 case REG: case MEM: case CONST_DOUBLE:
543 case CONST_INT: case LABEL_REF: case SYMBOL_REF: case CONST:
/* A SUBREG qualifies if its inner expression does, in any mode.  */
547 return some_operand (SUBREG_REG (op), VOIDmode);
556 /* Return 1 if OP is a valid operand for the source of a move insn. */
559 input_operand (op, mode)
561 enum machine_mode mode;
563 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
/* Float loads must already be in the right mode; no implicit widening.  */
566 if (GET_MODE_CLASS (mode) == MODE_FLOAT && GET_MODE (op) != mode)
569 switch (GET_CODE (op))
/* Symbolic addresses are legal move sources in pointer-sized modes.  */
574 /* This handles both the Windows/NT and OSF cases. */
575 return mode == ptr_mode || mode == DImode;
581 if (register_operand (op, mode))
583 /* ... fall through ... */
/* Sub-word memory moves need the byte/word extension (BWX) insns.  */
585 return ((TARGET_BWX || (mode != HImode && mode != QImode))
586 && general_operand (op, mode));
589 return GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode);
592 return mode == QImode || mode == HImode || add_operand (op, mode);
601 /* Return 1 if OP is a SYMBOL_REF for a function known to be in this
605 current_file_function_operand (op, mode)
607 enum machine_mode mode;
/* Profiling forces the out-of-file calling convention, so never treat a
   call as local when either profiling flag is on.  SYMBOL_REF_FLAG marks
   symbols already known local; the current function itself always is.  */
609 return (GET_CODE (op) == SYMBOL_REF
610 && ! profile_flag && ! profile_block_flag
611 && (SYMBOL_REF_FLAG (op)
612 || op == XEXP (DECL_RTL (current_function_decl), 0)));
615 /* Return 1 if OP is a valid operand for the MEM of a CALL insn. */
618 call_operand (op, mode)
620 enum machine_mode mode;
/* On OSF, indirect calls must go through $27 (the standard procedure
   value register); VMS and NT allow any register.  */
625 return (GET_CODE (op) == SYMBOL_REF
626 || (GET_CODE (op) == REG
627 && (TARGET_OPEN_VMS || TARGET_WINDOWS_NT || REGNO (op) == 27)));
630 /* Return 1 if OP is a valid Alpha comparison operator. Here we know which
631 comparisons are valid in which insn. */
634 alpha_comparison_operator (op, mode)
636 enum machine_mode mode;
638 enum rtx_code code = GET_CODE (op);
640 if (mode != GET_MODE (op) || GET_RTX_CLASS (code) != '<')
/* The hardware has cmpeq/cmple/cmplt; the unsigned forms (cmpule,
   cmpult) exist only as 64-bit compares.  */
643 return (code == EQ || code == LE || code == LT
644 || (mode == DImode && (code == LEU || code == LTU)));
647 /* Return 1 if OP is a valid Alpha swapped comparison operator. */
650 alpha_swapped_comparison_operator (op, mode)
652 enum machine_mode mode;
654 enum rtx_code code = GET_CODE (op);
656 if (mode != GET_MODE (op) || GET_RTX_CLASS (code) != '<')
/* Accept codes that become hardware-supported after swapping the
   operands (GE -> LE, GT -> LT, etc.).  */
659 code = swap_condition (code);
660 return (code == EQ || code == LE || code == LT
661 || (mode == DImode && (code == LEU || code == LTU)));
664 /* Return 1 if OP is a signed comparison operation. */
667 signed_comparison_operator (op, mode)
669 enum machine_mode mode;
671 switch (GET_CODE (op))
/* All six signed comparison codes qualify; unsigned codes do not.  */
673 case EQ: case NE: case LE: case LT: case GE: case GT:
683 /* Return 1 if this is a divide or modulus operator. */
686 divmod_operator (op, mode)
688 enum machine_mode mode;
690 switch (GET_CODE (op))
/* Both signed and unsigned division/remainder codes qualify.  */
692 case DIV: case MOD: case UDIV: case UMOD:
702 /* Return 1 if this memory address is a known aligned register plus
703 a constant. It must be a valid address. This means that we can do
704 this as an aligned reference plus some offset.
706 Take into account what reload will do.
708 We could say that out-of-range stack slots are alignable, but that would
709 complicate get_aligned_mem and it isn't worth the trouble since few
710 functions have large stack space. */
713 aligned_memory_operand (op, mode)
715 enum machine_mode mode;
/* Strip a paradoxical-free SUBREG and continue with the inner rtx.  */
717 if (GET_CODE (op) == SUBREG)
719 if (GET_MODE (op) != mode)
721 op = SUBREG_REG (op);
722 mode = GET_MODE (op);
/* During reload a pseudo may stand for its stack slot; substitute it.  */
725 if (reload_in_progress && GET_CODE (op) == REG
726 && REGNO (op) >= FIRST_PSEUDO_REGISTER
727 op = reg_equiv_mem[REGNO (op)];
729 if (GET_CODE (op) != MEM || GET_MODE (op) != mode
730 || ! memory_address_p (mode, XEXP (op, 0)))
/* Peel off a constant offset, then require a base register whose
   pointer alignment is at least 4 bytes (2^2 recorded as 4 here).  */
735 if (GET_CODE (op) == PLUS)
738 return (GET_CODE (op) == REG
739 && REGNO_POINTER_ALIGN (REGNO (op)) >= 4);
742 /* Similar, but return 1 if OP is a MEM which is not alignable. */
745 unaligned_memory_operand (op, mode)
747 enum machine_mode mode;
/* Same SUBREG/reload preprocessing as aligned_memory_operand.  */
749 if (GET_CODE (op) == SUBREG)
751 if (GET_MODE (op) != mode)
753 op = SUBREG_REG (op);
754 mode = GET_MODE (op);
757 if (reload_in_progress && GET_CODE (op) == REG
758 && REGNO (op) >= FIRST_PSEUDO_REGISTER)
759 op = reg_equiv_mem[REGNO (op)];
761 if (GET_CODE (op) != MEM || GET_MODE (op) != mode)
766 if (! memory_address_p (mode, op))
769 if (GET_CODE (op) == PLUS)
/* The logical complement of the aligned test: not a register base, or
   a base register with less than 4-byte known alignment.  */
772 return (GET_CODE (op) != REG
773 || REGNO_POINTER_ALIGN (REGNO (op)) < 4);
776 /* Return 1 if OP is either a register or an unaligned memory location. */
779 reg_or_unaligned_mem_operand (op, mode)
781 enum machine_mode mode;
/* Simple disjunction of the two predicates above.  */
783 return register_operand (op, mode) || unaligned_memory_operand (op, mode);
786 /* Return 1 if OP is any memory location. During reload a pseudo matches. */
789 any_memory_operand (op, mode)
791 enum machine_mode mode;
/* During reload, a pseudo (possibly under a SUBREG) may still stand for
   a stack slot, so accept it as memory too.  */
793 return (GET_CODE (op) == MEM
794 || (GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == REG)
795 || (reload_in_progress && GET_CODE (op) == REG
796 && REGNO (op) >= FIRST_PSEUDO_REGISTER)
797 || (reload_in_progress && GET_CODE (op) == SUBREG
798 && GET_CODE (SUBREG_REG (op)) == REG
799 && REGNO (SUBREG_REG (op)) >= FIRST_PSEUDO_REGISTER));
802 /* REF is an alignable memory location. Place an aligned SImode
803 reference into *PALIGNED_MEM and the number of bits to shift into
807 get_aligned_mem (ref, paligned_mem, pbitnum)
809 rtx *paligned_mem, *pbitnum;
812 HOST_WIDE_INT offset = 0;
/* A SUBREG shifts the byte offset; on big-endian targets adjust for the
   word-within-register placement difference.  */
814 if (GET_CODE (ref) == SUBREG)
816 offset = SUBREG_WORD (ref) * UNITS_PER_WORD;
817 if (BYTES_BIG_ENDIAN)
818 offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (ref)))
819 - MIN (UNITS_PER_WORD,
820 GET_MODE_SIZE (GET_MODE (SUBREG_REG (ref)))));
821 ref = SUBREG_REG (ref);
/* A pseudo seen here stands for its reload stack slot.  */
824 if (GET_CODE (ref) == REG)
825 ref = reg_equiv_mem[REGNO (ref)];
827 if (reload_in_progress)
828 base = find_replacement (&XEXP (ref, 0));
830 base = XEXP (ref, 0);
/* Fold a constant displacement into OFFSET so we can round it.  */
832 if (GET_CODE (base) == PLUS)
833 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
/* Round the address down to a 4-byte boundary; the discarded low two
   bits become the bit shift returned in *PBITNUM.  */
835 *paligned_mem = gen_rtx_MEM (SImode,
836 plus_constant (base, offset & ~3));
837 MEM_IN_STRUCT_P (*paligned_mem) = MEM_IN_STRUCT_P (ref);
838 MEM_VOLATILE_P (*paligned_mem) = MEM_VOLATILE_P (ref);
839 RTX_UNCHANGING_P (*paligned_mem) = RTX_UNCHANGING_P (ref);
841 *pbitnum = GEN_INT ((offset & 3) * 8);
844 /* Similar, but just get the address. Handle the two reload cases.
845 Add EXTRA_OFFSET to the address we return. */
848 get_unaligned_address (ref, extra_offset)
853 HOST_WIDE_INT offset = 0;
/* Same SUBREG offset bookkeeping as get_aligned_mem.  */
855 if (GET_CODE (ref) == SUBREG)
857 offset = SUBREG_WORD (ref) * UNITS_PER_WORD;
858 if (BYTES_BIG_ENDIAN)
859 offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (ref)))
860 - MIN (UNITS_PER_WORD,
861 GET_MODE_SIZE (GET_MODE (SUBREG_REG (ref)))));
862 ref = SUBREG_REG (ref);
865 if (GET_CODE (ref) == REG)
866 ref = reg_equiv_mem[REGNO (ref)];
868 if (reload_in_progress)
869 base = find_replacement (&XEXP (ref, 0));
871 base = XEXP (ref, 0);
873 if (GET_CODE (base) == PLUS)
874 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
/* Unlike get_aligned_mem, no rounding: return the exact address plus
   the caller's extra displacement.  */
876 return plus_constant (base, offset + extra_offset);
879 /* Subfunction of the following function. Update the flags of any MEM
880 found in part of X. */
883 alpha_set_memflags_1 (x, in_struct_p, volatile_p, unchanging_p)
885 int in_struct_p, volatile_p, unchanging_p;
/* Recursive walk: descend through SEQUENCE vectors, INSN patterns and
   SET source/destination until MEMs are reached, then stamp the flags.  */
889 switch (GET_CODE (x))
893 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
894 alpha_set_memflags_1 (XVECEXP (x, 0, i), in_struct_p, volatile_p,
899 alpha_set_memflags_1 (PATTERN (x), in_struct_p, volatile_p,
904 alpha_set_memflags_1 (SET_DEST (x), in_struct_p, volatile_p,
906 alpha_set_memflags_1 (SET_SRC (x), in_struct_p, volatile_p,
/* MEM case: copy the three flags onto this reference.  */
911 MEM_IN_STRUCT_P (x) = in_struct_p;
912 MEM_VOLATILE_P (x) = volatile_p;
913 RTX_UNCHANGING_P (x) = unchanging_p;
921 /* Given INSN, which is either an INSN or a SEQUENCE generated to
922 perform a memory operation, look for any MEMs in either a SET_DEST or
923 a SET_SRC and copy the in-struct, unchanging, and volatile flags from
924 REF into each of the MEMs found. If REF is not a MEM, don't do
928 alpha_set_memflags (insn, ref)
932 /* Note that it is always safe to get these flags, though they won't
933 be what we think if REF is not a MEM. */
934 int in_struct_p = MEM_IN_STRUCT_P (ref);
935 int volatile_p = MEM_VOLATILE_P (ref);
936 int unchanging_p = RTX_UNCHANGING_P (ref);
/* Nothing to propagate if REF isn't a MEM or all flags are clear.  */
938 if (GET_CODE (ref) != MEM
939 || (! in_struct_p && ! volatile_p && ! unchanging_p))
942 alpha_set_memflags_1 (insn, in_struct_p, volatile_p, unchanging_p);
945 /* Try to output insns to set TARGET equal to the constant C if it can be
946 done in less than N insns. Do all computations in MODE. Returns the place
947 where the output has been placed if it can be done and the insns have been
948 emitted. If it would take more than N insns, zero is returned and no
949 insns are emitted. */
952 alpha_emit_set_const (target, mode, c, n)
954 enum machine_mode mode;
/* Probe for the shortest sequence first so we never emit a longer one
   than necessary.  */
961 /* Try 1 insn, then 2, then up to N. */
962 for (i = 1; i <= n; i++)
963 if ((pat = alpha_emit_set_const_1 (target, mode, c, i)) != 0)
969 /* Internal routine for the above to check for N or below insns. */
972 alpha_emit_set_const_1 (target, mode, c, n)
974 enum machine_mode mode;
/* NOTE(review): `new` is a C++ keyword; this identifier blocks compiling
   the file with a C++ compiler (later GCC renamed it).  Left unchanged
   here since this is a C-only tree.  */
978 HOST_WIDE_INT new = c;
980 /* Use a pseudo if highly optimizing and still generating RTL. */
982 = (flag_expensive_optimizations && rtx_equal_function_value_matters
986 #if HOST_BITS_PER_WIDE_INT == 64
987 /* We are only called for SImode and DImode. If this is SImode, ensure that
988 we are sign extended to a full word. This does not make any sense when
989 cross-compiling on a narrow machine. */
992 c = (c & 0xffffffff) - 2 * (c & 0x80000000);
995 /* If this is a sign-extended 32-bit constant, we can do this in at most
996 three insns, so do it if we have enough insns left. We always have
997 a sign-extended 32-bit constant when compiling on a narrow machine. */
999 if (HOST_BITS_PER_WIDE_INT != 64
1000 || c >> 31 == -1 || c >> 31 == 0)
/* Split C into LOW (sign-extended 16-bit) + HIGH*2^16, the lda/ldah
   decomposition.  */
1002 HOST_WIDE_INT low = (c & 0xffff) - 2 * (c & 0x8000);
1003 HOST_WIDE_INT tmp1 = c - low;
1005 = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1006 HOST_WIDE_INT extra = 0;
1008 /* If HIGH will be interpreted as negative but the constant is
1009 positive, we must adjust it to do two ldha insns. */
1011 if ((high & 0x8000) != 0 && c >= 0)
1015 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1018 if (c == low || (low == 0 && extra == 0))
1020 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1021 but that meant that we can't handle INT_MIN on 32-bit machines
1022 (like NT/Alpha), because we recurse indefinitely through
1023 emit_move_insn to gen_movdi. So instead, since we know exactly
1024 what we want, create it explicitly. */
1027 target = gen_reg_rtx (mode);
1028 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1031 else if (n >= 2 + (extra != 0))
1033 temp = copy_to_suggested_reg (GEN_INT (low), subtarget, mode);
/* When EXTRA is set, a second ldah-style add is required.  */
1036 temp = expand_binop (mode, add_optab, temp, GEN_INT (extra << 16),
1037 subtarget, 0, OPTAB_WIDEN);
1039 return expand_binop (mode, add_optab, temp, GEN_INT (high << 16),
1040 target, 0, OPTAB_WIDEN);
1044 /* If we couldn't do it that way, try some other methods. But if we have
1045 no instructions left, don't bother. Likewise, if this is SImode and
1046 we can't make pseudos, we can't do anything since the expand_binop
1047 and expand_unop calls will widen and try to make pseudos. */
1050 || (mode == SImode && ! rtx_equal_function_value_matters))
1053 #if HOST_BITS_PER_WIDE_INT == 64
1054 /* First, see if can load a value into the target that is the same as the
1055 constant except that all bytes that are 0 are changed to be 0xff. If we
1056 can, then we can do a ZAPNOT to obtain the desired constant. */
1058 for (i = 0; i < 64; i += 8)
1059 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1060 new |= (HOST_WIDE_INT) 0xff << i;
1062 /* We are only called for SImode and DImode. If this is SImode, ensure that
1063 we are sign extended to a full word. */
1066 new = (new & 0xffffffff) - 2 * (new & 0x80000000);
/* Only worth a recursive attempt if the fill actually changed bits.  */
1069 && (temp = alpha_emit_set_const (subtarget, mode, new, n - 1)) != 0)
1070 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1071 target, 0, OPTAB_WIDEN);
1074 /* Next, see if we can load a related constant and then shift and possibly
1075 negate it to get the constant we want. Try this once each increasing
1076 numbers of insns. */
1078 for (i = 1; i < n; i++)
1080 /* First try complementing. */
1081 if ((temp = alpha_emit_set_const (subtarget, mode, ~ c, i)) != 0)
1082 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1084 /* Next try to form a constant and do a left shift. We can do this
1085 if some low-order bits are zero; the exact_log2 call below tells
1086 us that information. The bits we are shifting out could be any
1087 value, but here we'll just try the 0- and sign-extended forms of
1088 the constant. To try to increase the chance of having the same
1089 constant in more than one insn, start at the highest number of
1090 bits to shift, but try all possibilities in case a ZAPNOT will
/* c & -c isolates the lowest set bit; exact_log2 of it is the count of
   trailing zeros available for a left shift.  */
1093 if ((bits = exact_log2 (c & - c)) > 0)
1094 for (; bits > 0; bits--)
1095 if ((temp = (alpha_emit_set_const
1097 (unsigned HOST_WIDE_INT) c >> bits, i))) != 0
1098 || ((temp = (alpha_emit_set_const
1100 ((unsigned HOST_WIDE_INT) c) >> bits, i)))
1102 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1103 target, 0, OPTAB_WIDEN);
1105 /* Now try high-order zero bits. Here we try the shifted-in bits as
1106 all zero and all ones. Be careful to avoid shifting outside the
1107 mode and to avoid shifting outside the host wide int size. */
1108 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1109 confuse the recursive call and set all of the high 32 bits. */
1111 if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1112 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64))) > 0)
1113 for (; bits > 0; bits--)
1114 if ((temp = alpha_emit_set_const (subtarget, mode,
1116 || ((temp = (alpha_emit_set_const
1118 ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
1121 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1122 target, 1, OPTAB_WIDEN);
1124 /* Now try high-order 1 bits. We get that with a sign-extension.
1125 But one bit isn't enough here. Be careful to avoid shifting outside
1126 the mode and to avoid shifting outside the host wide int size. */
1128 if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1129 - floor_log2 (~ c) - 2)) > 0)
1130 for (; bits > 0; bits--)
1131 if ((temp = alpha_emit_set_const (subtarget, mode,
1133 || ((temp = (alpha_emit_set_const
1135 ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
1138 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1139 target, 0, OPTAB_WIDEN);
1145 #if HOST_BITS_PER_WIDE_INT == 64
1146 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1147 fall back to a straight forward decomposition. We do this to avoid
1148 exponential run times encountered when looking for longer sequences
1149 with alpha_emit_set_const. */
1152 alpha_emit_set_long_const (target, c)
1156 /* Use a pseudo if highly optimizing and still generating RTL. */
1158 = (flag_expensive_optimizations && rtx_equal_function_value_matters
1160 HOST_WIDE_INT d1, d2, d3, d4;
/* Decompose C into four sign-extended 16-bit chunks (d1..d4), peeling
   each off C in turn; d1/d2 form the low word, d3/d4 the high word.  */
1163 /* Decompose the entire word */
1164 d1 = ((c & 0xffff) ^ 0x8000) - 0x8000;
1166 d2 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
1168 d3 = ((c & 0xffff) ^ 0x8000) - 0x8000;
1170 d4 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
1175 /* Construct the high word */
1177 r1 = copy_to_suggested_reg (GEN_INT (d4), subtarget, DImode);
1179 r1 = copy_to_suggested_reg (GEN_INT (d3), subtarget, DImode);
1181 r1 = expand_binop (DImode, add_optab, GEN_INT (d3), GEN_INT (d4),
1182 subtarget, 0, OPTAB_WIDEN);
1184 /* Shift it into place */
1185 r2 = expand_binop (DImode, ashl_optab, r1, GEN_INT (32),
1186 subtarget, 0, OPTAB_WIDEN);
/* If the low word repeats the high word's chunks, reuse R1 + R2.  */
1188 if (subtarget == 0 && d1 == d3 && d2 == d4)
1189 r1 = expand_binop (DImode, add_optab, r1, r2, subtarget, 0, OPTAB_WIDEN);
1194 /* Add in the low word */
1196 r1 = expand_binop (DImode, add_optab, r1, GEN_INT (d2),
1197 subtarget, 0, OPTAB_WIDEN);
1199 r1 = expand_binop (DImode, add_optab, r1, GEN_INT (d1),
1200 subtarget, 0, OPTAB_WIDEN);
/* Finally move the assembled value into the requested TARGET.  */
1204 r1 = copy_to_suggested_reg(r1, target, DImode);
1208 #endif /* HOST_BITS_PER_WIDE_INT == 64 */
1210 /* Rewrite a comparison against zero CMP of the form
1211 (CODE (cc0) (const_int 0)) so it can be written validly in
1212 a conditional move (if_then_else CMP ...).
1213 If both of the operands that set cc0 are non-zero we must emit
1214 an insn to perform the compare (it can't be done within
1215 the conditional move). */
1217 alpha_emit_conditional_move (cmp, mode)
1219 enum machine_mode mode;
1221 enum rtx_code code = GET_CODE (cmp);
1222 enum rtx_code cmov_code = NE;
/* Operands were stashed by the cmpxx expander in these globals.  */
1223 rtx op0 = alpha_compare_op0;
1224 rtx op1 = alpha_compare_op1;
1225 enum machine_mode cmp_mode
1226 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
1227 enum machine_mode cmp_op_mode = alpha_compare_fp_p ? DFmode : DImode;
/* Mixing an FP compare with an integer move (or vice versa) is not
   handled by this routine.  */
1230 if (alpha_compare_fp_p != FLOAT_MODE_P (mode))
1233 /* We may be able to use a conditional move directly.
1234 This avoids emitting spurious compares. */
1235 if (signed_comparison_operator (cmp, cmp_op_mode)
1236 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
1237 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
1239 /* We can't put the comparison insides a conditional move;
1240 emit a compare instruction and put that inside the
1241 conditional move. Make sure we emit only comparisons we have;
1242 swap or reverse as necessary. */
1246 case EQ: case LE: case LT: case LEU: case LTU:
1247 /* We have these compares: */
1251 /* This must be reversed. */
1252 code = reverse_condition (code);
/* Reversing the compare flips the cmov sense from NE to EQ (set in the
   omitted line following the reversal).  */
1256 case GE: case GT: case GEU: case GTU:
1257 /* These must be swapped. Make sure the new first operand is in
1259 code = swap_condition (code);
1260 tem = op0, op0 = op1, op1 = tem;
1261 op0 = force_reg (cmp_mode, op0);
/* Emit the explicit compare; the cmov then tests its result against
   zero with CMOV_CODE.  */
1268 tem = gen_reg_rtx (cmp_op_mode);
1269 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
1270 return gen_rtx_fmt_ee (cmov_code, VOIDmode, tem, CONST0_RTX (cmp_op_mode));
1273 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
1277 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
1278 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
1279 lda r3,X(r11) lda r3,X+2(r11)
1280 extwl r1,r3,r1 extql r1,r3,r1
1281 extwh r2,r3,r2 extqh r2,r3,r2
1282 or r1,r2,r1 or r1,r2,r1
1285 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
1286 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
1287 lda r3,X(r11) lda r3,X(r11)
1288 extll r1,r3,r1 extll r1,r3,r1
1289 extlh r2,r3,r2 extlh r2,r3,r2
1290 or r1,r2,r1 addl r1,r2,r1
1292 quad: ldq_u r1,X(r11)
1301 alpha_expand_unaligned_load (tgt, mem, size, ofs, sign)
1303 HOST_WIDE_INT size, ofs;
1306 rtx meml, memh, addr, extl, exth;
1307 enum machine_mode mode;
1309 meml = gen_reg_rtx (DImode);
1310 memh = gen_reg_rtx (DImode);
1311 addr = gen_reg_rtx (DImode);
1312 extl = gen_reg_rtx (DImode);
1313 exth = gen_reg_rtx (DImode);
/* Two ldq_u loads: the AND with ~7 (in the omitted operand lines) gives
   the aligned quadwords containing the first and last bytes.  */
1315 emit_move_insn (meml,
1316 change_address (mem, DImode,
1317 gen_rtx_AND (DImode,
1318 plus_constant (XEXP (mem, 0),
1322 emit_move_insn (memh,
1323 change_address (mem, DImode,
1324 gen_rtx_AND (DImode,
1325 plus_constant (XEXP (mem, 0),
/* Signed 16-bit load: extract via extql/extqh at offset+2, then
   arithmetic-shift right 48 to sign-extend the halfword.  */
1329 if (sign && size == 2)
1331 emit_move_insn (addr, plus_constant (XEXP (mem, 0), ofs+2));
1333 emit_insn (gen_extxl (extl, meml, GEN_INT (64), addr));
1334 emit_insn (gen_extqh (exth, memh, addr));
1336 addr = expand_binop (DImode, ior_optab, extl, exth, addr, 1, OPTAB_WIDEN);
1337 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
1338 addr, 1, OPTAB_WIDEN);
/* Unsigned (or wider) case: extxl/ext?h keyed by SIZE, then OR the two
   halves together.  */
1342 emit_move_insn (addr, plus_constant (XEXP (mem, 0), ofs));
1343 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
1347 emit_insn (gen_extwh (exth, memh, addr));
1352 emit_insn (gen_extlh (exth, memh, addr));
1357 emit_insn (gen_extqh (exth, memh, addr));
1362 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
1363 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
1368 emit_move_insn (tgt, gen_lowpart(GET_MODE (tgt), addr));
1371 /* Similarly, use ins and msk instructions to perform unaligned stores. */
/* Expand an unaligned store of SIZE bytes of SRC to DST at byte
   offset OFS, using the ldq_u / ins* / msk* / stq_u idiom.  SRC may
   be const0_rtx, in which case only the masking of the destination
   words is emitted (storing zeros).
   NOTE(review): this excerpt elides interior lines of the function.  */
1374 alpha_expand_unaligned_store (dst, src, size, ofs)
1376 HOST_WIDE_INT size, ofs;
1378 rtx dstl, dsth, addr, insl, insh, meml, memh;
1380 dstl = gen_reg_rtx (DImode);
1381 dsth = gen_reg_rtx (DImode);
1382 insl = gen_reg_rtx (DImode);
1383 insh = gen_reg_rtx (DImode);
/* MEMs for the two aligned quadwords covering the destination.  */
1385 meml = change_address (dst, DImode,
1386 gen_rtx_AND (DImode,
1387 plus_constant (XEXP (dst, 0), ofs),
1389 memh = change_address (dst, DImode,
1390 gen_rtx_AND (DImode,
1391 plus_constant (XEXP (dst, 0),
/* Read the existing destination bits we must preserve.  */
1395 emit_move_insn (dsth, memh);
1396 emit_move_insn (dstl, meml);
1397 addr = copy_addr_to_reg (plus_constant (XEXP (dst, 0), ofs));
/* Position the source bytes (skipped entirely when storing zeros).  */
1399 if (src != const0_rtx)
1401 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
1402 GEN_INT (size*8), addr));
1407 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
1410 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
1413 emit_insn (gen_insql (insl, src, addr));
/* Clear out the bytes about to be overwritten: high quadword by
   size, low quadword by a literal mask chosen per SIZE.  */
1418 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
1423 emit_insn (gen_mskxl (dstl, dstl, GEN_INT (0xffff), addr));
1426 emit_insn (gen_mskxl (dstl, dstl, GEN_INT (0xffffffff), addr));
/* All-ones DImode mask; built differently when HOST_WIDE_INT is
   only 32 bits wide.  */
1430 #if HOST_BITS_PER_WIDE_INT == 32
1431 rtx msk = immed_double_const (0xffffffff, 0xffffffff, DImode);
1433 rtx msk = immed_double_const (0xffffffffffffffff, 0, DImode);
1435 emit_insn (gen_mskxl (dstl, dstl, msk, addr));
/* Merge the inserted source bytes with the preserved bytes.  */
1440 if (src != const0_rtx)
1442 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
1443 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
1446 /* Must store high before low for degenerate case of aligned. */
1447 emit_move_insn (memh, dsth);
1448 emit_move_insn (meml, dstl);
1451 /* The block move code tries to maximize speed by separating loads and
1452 stores at the expense of register pressure: we load all of the data
1453 before we store it back out. There are two secondary effects worth
1454 mentioning, that this speeds copying to/from aligned and unaligned
1455 buffers, and that it makes the code significantly easier to write. */
1457 #define MAX_MOVE_WORDS 8
1459 /* Load an integral number of consecutive unaligned quadwords. */
/* Load WORDS consecutive unaligned quadwords starting at SMEM+OFS
   into OUT_REGS[0..WORDS-1].  Uses WORDS+1 aligned loads so adjacent
   quadwords share a covering load.
   NOTE(review): this excerpt elides interior lines of the function.  */
1462 alpha_expand_unaligned_load_words (out_regs, smem, words, ofs)
1465 HOST_WIDE_INT words, ofs;
1467 rtx const im8 = GEN_INT (-8);
1468 rtx const i64 = GEN_INT (64);
1469 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
1473 /* Generate all the tmp registers we need. */
1474 for (i = 0; i < words; ++i)
1476 data_regs[i] = out_regs[i];
1477 ext_tmps[i] = gen_reg_rtx (DImode);
/* One extra data register for the final covering quadword.  */
1479 data_regs[words] = gen_reg_rtx (DImode);
/* Fold OFS into the source address up front.  */
1482 smem = change_address (smem, GET_MODE (smem),
1483 plus_constant (XEXP (smem, 0), ofs));
1485 /* Load up all of the source data. */
1486 for (i = 0; i < words; ++i)
1488 emit_move_insn (data_regs[i],
1489 change_address (smem, DImode,
1490 gen_rtx_AND (DImode,
1491 plus_constant (XEXP(smem,0),
1495 emit_move_insn (data_regs[words],
1496 change_address (smem, DImode,
1497 gen_rtx_AND (DImode,
1498 plus_constant (XEXP(smem,0),
1502 /* Extract the half-word fragments.  Unfortunately DEC decided to make
1503 extxh with offset zero a noop instead of zeroing the register, so
1504 we must take care of that edge condition ourselves with cmov. */
1506 sreg = copy_addr_to_reg (XEXP (smem, 0));
/* areg = low three address bits; zero means the source was in fact
   aligned, and the extqh result must be forced to zero via cmov.  */
1507 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
1509 for (i = 0; i < words; ++i)
1511 emit_insn (gen_extxl (data_regs[i], data_regs[i], i64, sreg));
1513 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
1514 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
1515 gen_rtx_IF_THEN_ELSE (DImode,
1516 gen_rtx_EQ (DImode, areg,
1518 const0_rtx, ext_tmps[i])));
1521 /* Merge the half-words into whole words. */
1522 for (i = 0; i < words; ++i)
1524 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
1525 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
1529 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
1530 may be NULL to store zeros. */
/* Store WORDS consecutive unaligned quadwords from DATA_REGS to
   DMEM+OFS.  DATA_REGS may be NULL to store zeros (see header
   comment above).  Only the first and last covering quadwords need
   read-modify-write; the interior quadwords are stored whole.
   NOTE(review): this excerpt elides interior lines of the function.  */
1533 alpha_expand_unaligned_store_words (data_regs, dmem, words, ofs)
1536 HOST_WIDE_INT words, ofs;
1538 rtx const im8 = GEN_INT (-8);
1539 rtx const i64 = GEN_INT (64);
/* All-ones DImode constant; built differently on 32-bit hosts.  */
1540 #if HOST_BITS_PER_WIDE_INT == 32
1541 rtx const im1 = immed_double_const (0xffffffff, 0xffffffff, DImode);
1543 rtx const im1 = immed_double_const (0xffffffffffffffff, 0, DImode);
1545 rtx ins_tmps[MAX_MOVE_WORDS];
1546 rtx st_tmp_1, st_tmp_2, dreg;
1547 rtx st_addr_1, st_addr_2;
1550 /* Generate all the tmp registers we need. */
1551 if (data_regs != NULL)
1552 for (i = 0; i < words; ++i)
1553 ins_tmps[i] = gen_reg_rtx(DImode);
1554 st_tmp_1 = gen_reg_rtx(DImode);
1555 st_tmp_2 = gen_reg_rtx(DImode);
/* Fold OFS into the destination address up front.  */
1558 dmem = change_address (dmem, GET_MODE (dmem),
1559 plus_constant (XEXP (dmem, 0), ofs));
/* MEMs for the last (st_addr_2) and first (st_addr_1) covering
   aligned quadwords.  */
1562 st_addr_2 = change_address (dmem, DImode,
1563 gen_rtx_AND (DImode,
1564 plus_constant (XEXP(dmem,0),
1567 st_addr_1 = change_address (dmem, DImode,
1568 gen_rtx_AND (DImode,
1572 /* Load up the destination end bits. */
1573 emit_move_insn (st_tmp_2, st_addr_2);
1574 emit_move_insn (st_tmp_1, st_addr_1);
1576 /* Shift the input data into place. */
1577 dreg = copy_addr_to_reg (XEXP (dmem, 0));
1578 if (data_regs != NULL)
1580 for (i = words-1; i >= 0; --i)
1582 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
1583 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
/* Combine each word's high spill-over with the next word's low
   part, forming the interior quadwords to store.  */
1585 for (i = words-1; i > 0; --i)
1587 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
1588 ins_tmps[i-1], ins_tmps[i-1], 1,
1593 /* Split and merge the ends with the destination data. */
1594 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
1595 emit_insn (gen_mskxl (st_tmp_1, st_tmp_1, im1, dreg));
1597 if (data_regs != NULL)
1599 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
1600 st_tmp_2, 1, OPTAB_WIDEN);
1601 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
1602 st_tmp_1, 1, OPTAB_WIDEN);
/* Store everything, last quadword first and first quadword last,
   so a fully aligned block still ends up correct.  */
1606 emit_move_insn (st_addr_2, st_tmp_2);
1607 for (i = words-1; i > 0; --i)
1609 emit_move_insn (change_address (dmem, DImode,
1610 gen_rtx_AND (DImode,
1611 plus_constant(XEXP (dmem,0),
1614 data_regs ? ins_tmps[i-1] : const0_rtx);
1616 emit_move_insn (st_addr_1, st_tmp_1);
1620 /* Expand string/block move operations.
1622 operands[0] is the pointer to the destination.
1623 operands[1] is the pointer to the source.
1624 operands[2] is the number of bytes to move.
1625 operands[3] is the alignment. */
/* Expand a constant-length block move (movstr pattern): operands[0]
   is the destination MEM, operands[1] the source MEM, operands[2]
   the byte count (CONST_INT), operands[3] the known alignment.
   Strategy (see comment above): load the whole block into registers
   first, then store it all back out, picking chunk sizes from the
   best provable alignment of each side.
   NOTE(review): this excerpt elides interior lines of the function;
   comments below describe only the visible code.  */
1628 alpha_expand_block_move (operands)
1631 rtx bytes_rtx = operands[2];
1632 rtx align_rtx = operands[3];
1633 HOST_WIDE_INT bytes = INTVAL (bytes_rtx);
1634 HOST_WIDE_INT src_align = INTVAL (align_rtx);
1635 HOST_WIDE_INT dst_align = src_align;
1636 rtx orig_src = operands[1];
1637 rtx orig_dst = operands[0];
1638 rtx data_regs[2*MAX_MOVE_WORDS+16];
1640 int i, words, ofs, nregs = 0;
/* Give up (let the caller use a library call) past the size cap.  */
1644 if (bytes > MAX_MOVE_WORDS*8)
1647 /* Look for additional alignment information from recorded register info. */
1649 tmp = XEXP (orig_src, 0);
1650 if (GET_CODE (tmp) == REG)
1652 if (REGNO_POINTER_ALIGN (REGNO (tmp)) > src_align)
1653 src_align = REGNO_POINTER_ALIGN (REGNO (tmp));
1655 else if (GET_CODE (tmp) == PLUS
1656 && GET_CODE (XEXP (tmp, 0)) == REG
1657 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
/* reg+const address: alignment is limited by both the register's
   recorded alignment and the constant displacement.  */
1659 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
1660 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
1664 if (a >= 8 && c % 8 == 0)
1666 else if (a >= 4 && c % 4 == 0)
1668 else if (a >= 2 && c % 2 == 0)
/* Same analysis for the destination address.  */
1673 tmp = XEXP (orig_dst, 0);
1674 if (GET_CODE (tmp) == REG)
1676 if (REGNO_POINTER_ALIGN (REGNO (tmp)) > dst_align)
1677 dst_align = REGNO_POINTER_ALIGN (REGNO (tmp));
1679 else if (GET_CODE (tmp) == PLUS
1680 && GET_CODE (XEXP (tmp, 0)) == REG
1681 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
1683 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
1684 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
1688 if (a >= 8 && c % 8 == 0)
1690 else if (a >= 4 && c % 4 == 0)
1692 else if (a >= 2 && c % 2 == 0)
1698 * Load the entire block into registers.
/* Source is an (addressof reg): try to read straight from the
   pseudo instead of going through memory.  */
1701 if (GET_CODE (XEXP (orig_src, 0)) == ADDRESSOF)
1703 enum machine_mode mode;
1704 tmp = XEXP (XEXP (orig_src, 0), 0);
1706 mode = mode_for_size (bytes, MODE_INT, 1);
1708 && GET_MODE_SIZE (GET_MODE (tmp)) <= bytes)
1710 /* Whee!  Optimize the load to use the existing register. */
1711 data_regs[nregs++] = gen_lowpart (mode, tmp);
1715 /* ??? We could potentially be copying 3 bytes or whatnot from
1716 a wider reg.  Probably not worth worrying about. */
1717 /* No appropriate mode; fall back on memory. */
1718 orig_src = change_address (orig_src, GET_MODE (orig_src),
1719 copy_addr_to_reg (XEXP (orig_src, 0)));
/* Aligned quadword loads.  */
1723 if (src_align >= 8 && bytes >= 8)
1727 for (i = 0; i < words; ++i)
1728 data_regs[nregs+i] = gen_reg_rtx(DImode);
1730 for (i = 0; i < words; ++i)
1732 emit_move_insn (data_regs[nregs+i],
1733 change_address(orig_src, DImode,
1734 plus_constant (XEXP (orig_src, 0),
/* Aligned longword loads.  */
1742 if (src_align >= 4 && bytes >= 4)
1746 for (i = 0; i < words; ++i)
1747 data_regs[nregs+i] = gen_reg_rtx(SImode);
1749 for (i = 0; i < words; ++i)
1751 emit_move_insn (data_regs[nregs+i],
1752 change_address(orig_src, SImode,
1753 plus_constant (XEXP (orig_src, 0),
/* Unaligned multi-quadword loads (note the extra register the
   helper requires, hence words+1).  */
1765 for (i = 0; i < words+1; ++i)
1766 data_regs[nregs+i] = gen_reg_rtx(DImode);
1768 alpha_expand_unaligned_load_words(data_regs+nregs, orig_src, words, ofs);
/* Remaining unaligned pieces, largest first.  */
1774 if (!TARGET_BWX && bytes >= 8)
1776 data_regs[nregs++] = tmp = gen_reg_rtx (DImode);
1777 alpha_expand_unaligned_load (tmp, orig_src, 8, ofs, 0);
1781 if (!TARGET_BWX && bytes >= 4)
1783 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
1784 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
/* Aligned halfword loads.  */
1793 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
1794 emit_move_insn (tmp,
1795 change_address (orig_src, HImode,
1796 plus_constant (XEXP (orig_src, 0),
1800 } while (bytes >= 2);
1802 else if (!TARGET_BWX)
1804 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
1805 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
/* Trailing bytes.  */
1812 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
1813 emit_move_insn (tmp,
1814 change_address (orig_src, QImode,
1815 plus_constant (XEXP (orig_src, 0),
/* Sanity check against overrunning the data_regs array.  */
1822 if (nregs > sizeof(data_regs)/sizeof(*data_regs))
1826 * Now save it back out again.
/* Destination is an (addressof reg): write the pseudo directly if
   exactly one register of the right mode was produced.  */
1831 if (GET_CODE (XEXP (orig_dst, 0)) == ADDRESSOF)
1833 enum machine_mode mode;
1834 tmp = XEXP (XEXP (orig_dst, 0), 0);
1836 mode = mode_for_size (bytes, MODE_INT, 1);
1837 if (GET_MODE (tmp) == mode && nregs == 1)
1839 emit_move_insn (tmp, data_regs[0]);
1844 /* ??? If nregs > 1, consider reconstructing the word in regs. */
1845 /* ??? Optimize mode < dst_mode with strict_low_part. */
1846 /* No appropriate mode; fall back on memory. */
1847 orig_dst = change_address (orig_dst, GET_MODE (orig_dst),
1848 copy_addr_to_reg (XEXP (orig_dst, 0)));
1851 /* Write out the data in whatever chunks reading the source allowed. */
/* Aligned quadword stores.  */
1854 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
1856 emit_move_insn (change_address(orig_dst, DImode,
1857 plus_constant (XEXP (orig_dst, 0),
1866 /* If the source has remaining DImode regs, write them out in
/* ... two SImode halves each: low word, then the high word shifted
   down by 32.  */
1868 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
1870 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
1871 NULL_RTX, 1, OPTAB_WIDEN);
1873 emit_move_insn (change_address(orig_dst, SImode,
1874 plus_constant (XEXP (orig_dst, 0),
1876 gen_lowpart (SImode, data_regs[i]));
1877 emit_move_insn (change_address(orig_dst, SImode,
1878 plus_constant (XEXP (orig_dst, 0),
1880 gen_lowpart (SImode, tmp));
/* Aligned longword stores.  */
1885 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
1887 emit_move_insn (change_address(orig_dst, SImode,
1888 plus_constant (XEXP (orig_dst, 0),
1895 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
1897 /* Write out a remaining block of words using unaligned methods. */
1899 for (words = 1; i+words < nregs ; ++words)
1900 if (GET_MODE (data_regs[i+words]) != DImode)
1904 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
1906 alpha_expand_unaligned_store_words (data_regs+i, orig_dst, words, ofs);
1912 /* Due to the above, this won't be aligned. */
1913 /* ??? If we have more than one of these, consider constructing full
1914 words in registers and using alpha_expand_unaligned_store_words. */
1915 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
1917 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
/* Aligned halfword stores.  */
1923 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
1925 emit_move_insn (change_address (orig_dst, HImode,
1926 plus_constant (XEXP (orig_dst, 0),
/* Unaligned halfword stores.  */
1933 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
1935 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
/* Trailing byte stores.  */
1939 while (i < nregs && GET_MODE (data_regs[i]) == QImode)
1941 emit_move_insn (change_address (orig_dst, QImode,
1942 plus_constant (XEXP (orig_dst, 0),
/* Expand a constant-length block clear: operands[0] is the
   destination MEM, operands[1] the byte count (CONST_INT),
   operands[2] the known alignment.  Mirrors the block-move store
   logic, with const0_rtx as the data.
   NOTE(review): this excerpt elides interior lines of the function.  */
1957 alpha_expand_block_clear (operands)
1960 rtx bytes_rtx = operands[1];
1961 rtx align_rtx = operands[2];
1962 HOST_WIDE_INT bytes = INTVAL (bytes_rtx);
1963 HOST_WIDE_INT align = INTVAL (align_rtx);
1964 rtx orig_dst = operands[0];
1966 HOST_WIDE_INT i, words, ofs = 0;
/* Give up past the size cap; let the caller use a library call.  */
1970 if (bytes > MAX_MOVE_WORDS*8)
1973 /* Look for stricter alignment. */
1975 tmp = XEXP (orig_dst, 0);
1976 if (GET_CODE (tmp) == REG)
1978 if (REGNO_POINTER_ALIGN (REGNO (tmp)) > align)
1979 align = REGNO_POINTER_ALIGN (REGNO (tmp));
1981 else if (GET_CODE (tmp) == PLUS
1982 && GET_CODE (XEXP (tmp, 0)) == REG
1983 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
/* reg+const address: alignment limited by both register alignment
   and the constant displacement.  */
1985 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
1986 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
1990 if (a >= 8 && c % 8 == 0)
1992 else if (a >= 4 && c % 4 == 0)
1994 else if (a >= 2 && c % 2 == 0)
1999 /* Handle a block of contiguous words first. */
2001 if (align >= 8 && bytes >= 8)
2005 for (i = 0; i < words; ++i)
2007 emit_move_insn (change_address(orig_dst, DImode,
2008 plus_constant (XEXP (orig_dst, 0),
2016 else if (align >= 4 && bytes >= 4)
2020 for (i = 0; i < words; ++i)
2022 emit_move_insn (change_address(orig_dst, SImode,
2023 plus_constant (XEXP (orig_dst, 0),
2031 else if (bytes >= 16)
/* NOTE(review): alpha_expand_unaligned_store_words takes an OFS
   argument everywhere else it is called; it appears to be missing
   here — verify against the full source.  */
2035 alpha_expand_unaligned_store_words (NULL, orig_dst, words);
2041 /* Next clean up any trailing pieces.  We know from the contiguous
2042 block move that there are no aligned SImode or DImode hunks left. */
2044 if (!TARGET_BWX && bytes >= 8)
2046 alpha_expand_unaligned_store (orig_dst, const0_rtx, 8, ofs);
2050 if (!TARGET_BWX && bytes >= 4)
2052 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
/* Aligned halfword clears.  */
2061 emit_move_insn (change_address (orig_dst, HImode,
2062 plus_constant (XEXP (orig_dst, 0),
2067 } while (bytes >= 2);
2069 else if (!TARGET_BWX)
2071 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
/* Trailing byte clears.  */
2078 emit_move_insn (change_address (orig_dst, QImode,
2079 plus_constant (XEXP (orig_dst, 0),
2090 /* Adjust the cost of a scheduling dependency. Return the new cost of
2091 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
/* Scheduler hook: adjust COST of the dependency LINK between INSN
   and DEP_INSN it depends on.  Applies the user memory latency and
   several EV4/EV5-specific bypass and latency rules documented
   inline below.
   NOTE(review): this excerpt elides interior lines (including the
   switch-case labels and some returns).  */
2094 alpha_adjust_cost (insn, link, dep_insn, cost)
2101 enum attr_type insn_type, dep_insn_type;
2103 /* If the dependence is an anti-dependence, there is no cost.  For an
2104 output dependence, there is sometimes a cost, but it doesn't seem
2105 worth handling those few cases. */
2107 if (REG_NOTE_KIND (link) != 0)
2110 /* If we can't recognize the insns, we can't really do anything. */
2111 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
2114 insn_type = get_attr_type (insn);
2115 dep_insn_type = get_attr_type (dep_insn);
2117 /* Bring in the user-defined memory latency. */
2118 if (dep_insn_type == TYPE_ILD
2119 || dep_insn_type == TYPE_FLD
2120 || dep_insn_type == TYPE_LDSYM)
2121 cost += alpha_memory_latency-1;
2126 /* On EV4, if INSN is a store insn and DEP_INSN is setting the data
2127 being stored, we can sometimes lower the cost. */
2129 if ((insn_type == TYPE_IST || insn_type == TYPE_FST)
2130 && (set = single_set (dep_insn)) != 0
2131 && GET_CODE (PATTERN (insn)) == SET
2132 && rtx_equal_p (SET_DEST (set), SET_SRC (PATTERN (insn))))
2134 switch (dep_insn_type)
2138 /* No savings here. */
2142 /* In these cases, we save one cycle. */
2146 /* In all other cases, we save two cycles. */
2147 return MAX (0, cost - 2);
2151 /* Another case that needs adjustment is an arithmetic or logical
2152 operation.  Its cost is usually one cycle, but we default it to
2153 two in the MD file.  The only case that it is actually two is
2154 for the address in loads, stores, and jumps. */
2156 if (dep_insn_type == TYPE_IADD || dep_insn_type == TYPE_ILOG)
2171 /* The final case is when a compare feeds into an integer branch;
2172 the cost is only one cycle in that case. */
2174 if (dep_insn_type == TYPE_ICMP && insn_type == TYPE_IBR)
2179 /* And the lord DEC saith: "A special bypass provides an effective
2180 latency of 0 cycles for an ICMP or ILOG insn producing the test
2181 operand of an IBR or ICMOV insn." */
2183 if ((dep_insn_type == TYPE_ICMP || dep_insn_type == TYPE_ILOG)
2184 && (set = single_set (dep_insn)) != 0)
2186 /* A branch only has one input.  This must be it. */
2187 if (insn_type == TYPE_IBR)
2189 /* A conditional move has three, make sure it is the test. */
2190 if (insn_type == TYPE_ICMOV
2191 && GET_CODE (set_src = PATTERN (insn)) == SET
2192 && GET_CODE (set_src = SET_SRC (set_src)) == IF_THEN_ELSE
2193 && rtx_equal_p (SET_DEST (set), XEXP (set_src, 0)))
2197 /* "The multiplier is unable to receive data from IEU bypass paths.
2198 The instruction issues at the expected time, but its latency is
2199 increased by the time it takes for the input data to become
2200 available to the multiplier" -- which happens in pipeline stage
2201 six, when results are committed to the register file. */
2203 if (insn_type == TYPE_IMUL)
2205 switch (dep_insn_type)
2207 /* These insns produce their results in pipeline stage five. */
2214 /* Other integer insns produce results in pipeline stage four. */
2222 /* There is additional latency to move the result of (most) FP
2223 operations anywhere but the FP register file. */
2225 if ((insn_type == TYPE_FST || insn_type == TYPE_FTOI)
2226 && (dep_insn_type == TYPE_FADD ||
2227 dep_insn_type == TYPE_FMUL ||
2228 dep_insn_type == TYPE_FCMOV))
2234 /* Otherwise, return the default cost. */
2238 /* Functions to save and restore alpha_return_addr_rtx. */
2240 struct machine_function
/* Hook for save_machine_status: allocate a machine_function record
   on function context P and stash alpha_return_addr_rtx in it so it
   can be restored when compilation of a nested function finishes.  */
2246 alpha_save_machine_status (p)
2249 struct machine_function *machine =
2250 (struct machine_function *) xmalloc (sizeof (struct machine_function));
2252 p->machine = machine;
2253 machine->ra_rtx = alpha_return_addr_rtx;
/* Hook for restore_machine_status: recover alpha_return_addr_rtx
   from the machine_function record on P and drop the record.
   NOTE(review): the line freeing the record is not visible in this
   excerpt — confirm it is not leaked.  */
2257 alpha_restore_machine_status (p)
2260 struct machine_function *machine = p->machine;
2262 alpha_return_addr_rtx = machine->ra_rtx;
2265 p->machine = (struct machine_function *)0;
2268 /* Do anything needed before RTL is emitted for each function. */
/* Per-function initialization before any RTL is emitted: reset the
   cached return-address rtx and install the save/restore hooks used
   for nested-function contexts.  */
2271 alpha_init_expanders ()
2273 alpha_return_addr_rtx = NULL_RTX;
2275 /* Arrange to save and restore machine status around nested functions. */
2276 save_machine_status = alpha_save_machine_status;
2277 restore_machine_status = alpha_restore_machine_status;
2280 /* Start the ball rolling with RETURN_ADDR_RTX. */
/* Implement RETURN_ADDR_RTX.  Lazily creates a pseudo that is
   initialized from $26 (REG_RA) at the top of the prologue, and
   caches it in alpha_return_addr_rtx for subsequent calls.
   NOTE(review): the handling of COUNT/FRAME is elided in this
   excerpt.  */
2283 alpha_return_addr (count, frame)
2292 if (alpha_return_addr_rtx)
2293 return alpha_return_addr_rtx;
2295 /* No rtx yet.  Invent one, and initialize it from $26 in the prologue. */
2296 alpha_return_addr_rtx = gen_reg_rtx (Pmode);
2297 init = gen_rtx_SET (Pmode, alpha_return_addr_rtx,
2298 gen_rtx_REG (Pmode, REG_RA));
2300 /* Emit the insn to the prologue with the other argument copies. */
2301 push_topmost_sequence ();
2302 emit_insn_after (init, get_insns ());
2303 pop_topmost_sequence ();
2305 return alpha_return_addr_rtx;
/* Return nonzero if $26 (REG_RA) is ever clobbered in the current
   function.  When alpha_return_addr was used, regs_ever_live is not
   reliable (the copy insn mentions RA), so scan the insns instead.  */
2309 alpha_ra_ever_killed ()
2313 if (!alpha_return_addr_rtx)
2314 return regs_ever_live[REG_RA];
2316 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA),
2317 get_insns(), NULL_RTX);
2321 /* Print an operand. Recognize special options, documented below. */
/* Implement PRINT_OPERAND: write operand X to FILE, modified by the
   punctuation/letter CODE.  Each case below documents its own code
   letter.  NOTE(review): this excerpt elides the case labels and
   several lines; comments describe only the visible code.  */
2324 print_operand (file, x, code)
2334 /* Generates fp-rounding mode suffix: nothing for normal, 'c' for
2335 chopped, 'm' for minus-infinity, and 'd' for dynamic rounding
2336 mode.  alpha_fprm controls which suffix is generated. */
2339 case ALPHA_FPRM_NORM:
2341 case ALPHA_FPRM_MINF:
2344 case ALPHA_FPRM_CHOP:
2347 case ALPHA_FPRM_DYN:
2354 /* Generates trap-mode suffix for instructions that accept the su
2355 suffix only (cmpt et al). */
2356 if (alpha_tp == ALPHA_TP_INSN)
2361 /* Generates trap-mode suffix for instructions that accept the u, su,
2362 and sui suffix.  This is the bulk of the IEEE floating point
2363 instructions (addt et al). */
2374 case ALPHA_FPTM_SUI:
2375 fputs ("sui", file);
2381 /* Generates trap-mode suffix for instructions that accept the sui
2382 suffix (cvtqt and cvtqs). */
2385 case ALPHA_FPTM_N: case ALPHA_FPTM_U:
2386 case ALPHA_FPTM_SU: /* cvtqt/cvtqs can't cause underflow */
2388 case ALPHA_FPTM_SUI:
2389 fputs ("sui", file);
2395 /* Generates single precision instruction suffix. */
2396 fprintf (file, "%c", (TARGET_FLOAT_VAX ? 'f' : 's'));
2400 /* Generates double precision instruction suffix. */
2401 fprintf (file, "%c", (TARGET_FLOAT_VAX ? 'g' : 't'));
2405 /* If this operand is the constant zero, write it as "$31". */
2406 if (GET_CODE (x) == REG)
2407 fprintf (file, "%s", reg_names[REGNO (x)]);
2408 else if (x == CONST0_RTX (GET_MODE (x)))
2409 fprintf (file, "$31");
2411 output_operand_lossage ("invalid %%r value");
2416 /* Similar, but for floating-point. */
2417 if (GET_CODE (x) == REG)
2418 fprintf (file, "%s", reg_names[REGNO (x)]);
2419 else if (x == CONST0_RTX (GET_MODE (x)))
2420 fprintf (file, "$f31");
2422 output_operand_lossage ("invalid %%R value");
2427 /* Write the 1's complement of a constant. */
2428 if (GET_CODE (x) != CONST_INT)
2429 output_operand_lossage ("invalid %%N value");
2431 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
2435 /* Write 1 << C, for a constant C. */
2436 if (GET_CODE (x) != CONST_INT)
2437 output_operand_lossage ("invalid %%P value");
2439 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
2443 /* Write the high-order 16 bits of a constant, sign-extended. */
2444 if (GET_CODE (x) != CONST_INT)
2445 output_operand_lossage ("invalid %%h value");
2447 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
2451 /* Write the low-order 16 bits of a constant, sign-extended. */
2452 if (GET_CODE (x) != CONST_INT)
2453 output_operand_lossage ("invalid %%L value");
/* The subtraction sign-extends bit 15 of the low halfword.  */
2455 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
2456 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
2460 /* Write mask for ZAP insn. */
2461 if (GET_CODE (x) == CONST_DOUBLE)
2463 HOST_WIDE_INT mask = 0;
2464 HOST_WIDE_INT value;
/* Build the 8-bit byte mask one byte at a time from the low and
   high halves of the CONST_DOUBLE.  */
2466 value = CONST_DOUBLE_LOW (x);
2467 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
2472 value = CONST_DOUBLE_HIGH (x);
2473 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
2476 mask |= (1 << (i + sizeof (int)));
2478 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
2481 else if (GET_CODE (x) == CONST_INT)
2483 HOST_WIDE_INT mask = 0, value = INTVAL (x);
2485 for (i = 0; i < 8; i++, value >>= 8)
2489 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask)
2492 output_operand_lossage ("invalid %%m value");
2496 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
2497 if (GET_CODE (x) != CONST_INT
2498 || (INTVAL (x) != 8 && INTVAL (x) != 16
2499 && INTVAL (x) != 32 && INTVAL (x) != 64))
2500 output_operand_lossage ("invalid %%M value");
2502 fprintf (file, "%s",
2503 (INTVAL (x) == 8 ? "b"
2504 : INTVAL (x) == 16 ? "w"
2505 : INTVAL (x) == 32 ? "l"
2510 /* Similar, except do it from the mask. */
2511 if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xff)
2512 fprintf (file, "b");
2513 else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffff)
2514 fprintf (file, "w");
2515 else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffffffff)
2516 fprintf (file, "l");
/* On 32-bit hosts, 32- and 64-bit all-ones masks arrive as
   CONST_DOUBLEs instead of CONST_INTs.  */
2517 #if HOST_BITS_PER_WIDE_INT == 32
2518 else if (GET_CODE (x) == CONST_DOUBLE
2519 && CONST_DOUBLE_HIGH (x) == 0
2520 && CONST_DOUBLE_LOW (x) == -1)
2521 fprintf (file, "l");
2522 else if (GET_CODE (x) == CONST_DOUBLE
2523 && CONST_DOUBLE_HIGH (x) == -1
2524 && CONST_DOUBLE_LOW (x) == -1)
2525 fprintf (file, "q");
2527 else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffffffffffffffff)
2528 fprintf (file, "q");
2529 else if (GET_CODE (x) == CONST_DOUBLE
2530 && CONST_DOUBLE_HIGH (x) == 0
2531 && CONST_DOUBLE_LOW (x) == -1)
2532 fprintf (file, "q");
2535 output_operand_lossage ("invalid %%U value");
2539 /* Write the constant value divided by 8. */
/* NOTE(review): this validation looks wrong — with &&, the lossage
   only fires when x is simultaneously a non-CONST_INT and out of
   range, so most invalid operands slip through; || was probably
   intended, and "(INTVAL (x) & 7) != 8" was probably meant to be
   "!= 0".  The same pattern appears in the next case.  */
2540 if (GET_CODE (x) != CONST_INT
2541 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
2542 && (INTVAL (x) & 7) != 8)
2543 output_operand_lossage ("invalid %%s value");
2545 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
2549 /* Same, except compute (64 - c) / 8 */
2551 if (GET_CODE (x) != CONST_INT
2552 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
2553 && (INTVAL (x) & 7) != 8)
2554 output_operand_lossage ("invalid %%s value");
2556 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
2559 case 'C': case 'D': case 'c': case 'd':
2560 /* Write out comparison name. */
2562 enum rtx_code c = GET_CODE (x);
2564 if (GET_RTX_CLASS (c) != '<')
2565 output_operand_lossage ("invalid %%C value");
/* 'D' reverses, 'c' swaps operands, 'd' does both.  */
2568 c = reverse_condition (c);
2569 else if (code == 'c')
2570 c = swap_condition (c);
2571 else if (code == 'd')
2572 c = swap_condition (reverse_condition (c));
2575 fprintf (file, "ule");
2577 fprintf (file, "ult");
2579 fprintf (file, "%s", GET_RTX_NAME (c));
2584 /* Write the divide or modulus operator. */
2585 switch (GET_CODE (x))
2588 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
2591 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
2594 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
2597 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
2600 output_operand_lossage ("invalid %%E value");
2606 /* Write "_u" for unaligned access. */
2607 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
2608 fprintf (file, "_u");
/* Default: print the operand itself (register, memory address, or
   constant).  */
2612 if (GET_CODE (x) == REG)
2613 fprintf (file, "%s", reg_names[REGNO (x)]);
2614 else if (GET_CODE (x) == MEM)
2615 output_address (XEXP (x, 0));
2617 output_addr_const (file, x);
2621 output_operand_lossage ("invalid %%xn code");
2625 /* Do what is necessary for `va_start'. The argument is ignored;
2626 We look at the current function to determine if stdarg or varargs
2627 is used and fill in an initial va_list. A pointer to this constructor
/* Implement __builtin_saveregs for va_start: build the initial
   va_list (a { __base, __va_offset } pair on the stack for Unix;
   VMS differs) and return its address.  ARGLIST is ignored.
   NOTE(review): this excerpt elides interior lines of the function.  */
2631 alpha_builtin_saveregs (arglist)
2634 rtx block, addr, dest, argsize;
2635 tree fntype = TREE_TYPE (current_function_decl);
/* Nonzero for a prototyped stdarg function (last declared parameter
   is not void), as opposed to old-style varargs.  */
2636 int stdarg = (TYPE_ARG_TYPES (fntype) != 0
2637 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
2638 != void_type_node));
2640 /* Compute the current position into the args, taking into account
2641 both registers and memory.  Both of these are already included in
2644 argsize = GEN_INT (NUM_ARGS * UNITS_PER_WORD);
2646 /* For Unix, SETUP_INCOMING_VARARGS moves the starting address base up by 48,
2647 storing fp arg registers in the first 48 bytes, and the integer arg
2648 registers in the next 48 bytes.  This is only done, however, if any
2649 integer registers need to be stored.
2651 If no integer registers need be stored, then we must subtract 48 in
2652 order to account for the integer arg registers which are counted in
2653 argsize above, but which are not actually stored on the stack. */
2655 if (TARGET_OPEN_VMS)
2656 addr = plus_constant (virtual_incoming_args_rtx,
2657 NUM_ARGS <= 5 + stdarg
2658 ? UNITS_PER_WORD : - 6 * UNITS_PER_WORD)
2660 addr = (NUM_ARGS <= 5 + stdarg
2661 ? plus_constant (virtual_incoming_args_rtx,
2663 : plus_constant (virtual_incoming_args_rtx,
2664 - (6 * UNITS_PER_WORD)));
2666 /* For VMS, we include the argsize, while on Unix, it's handled as
2667 a separate field. */
2668 if (TARGET_OPEN_VMS)
2669 addr = plus_constant (addr, INTVAL (argsize));
2671 addr = force_operand (addr, NULL_RTX);
2673 #ifdef POINTERS_EXTEND_UNSIGNED
2674 addr = convert_memory_address (ptr_mode, addr);
/* VMS returns the base address directly (remainder elided).  */
2677 if (TARGET_OPEN_VMS)
2681 /* Allocate the va_list constructor */
2682 block = assign_stack_local (BLKmode, 2 * UNITS_PER_WORD, BITS_PER_WORD);
2683 RTX_UNCHANGING_P (block) = 1;
2684 RTX_UNCHANGING_P (XEXP (block, 0)) = 1;
2686 /* Store the address of the first integer register in the __base
2689 dest = change_address (block, ptr_mode, XEXP (block, 0));
2690 emit_move_insn (dest, addr);
/* -fcheck-memory-usage instrumentation for the store above.  */
2692 if (flag_check_memory_usage)
2693 emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
2695 GEN_INT (GET_MODE_SIZE (ptr_mode)),
2696 TYPE_MODE (sizetype),
2697 GEN_INT (MEMORY_USE_RW),
2698 TYPE_MODE (integer_type_node));
2700 /* Store the argsize as the __va_offset member. */
2701 dest = change_address (block, TYPE_MODE (integer_type_node),
2702 plus_constant (XEXP (block, 0),
2703 POINTER_SIZE/BITS_PER_UNIT));
2704 emit_move_insn (dest, argsize);
2706 if (flag_check_memory_usage)
2707 emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
2709 GEN_INT (GET_MODE_SIZE
2710 (TYPE_MODE (integer_type_node))),
2711 TYPE_MODE (sizetype),
2712 GEN_INT (MEMORY_USE_RW),
2713 TYPE_MODE (integer_type_node));
2715 /* Return the address of the va_list constructor, but don't put it in a
2716 register.  Doing so would fail when not optimizing and produce worse
2717 code when optimizing. */
2718 return XEXP (block, 0);
2722 /* This page contains routines that are used to determine what the function
2723 prologue and epilogue code will do and write them out. */
2725 /* Compute the size of the save area in the stack. */
2729 /* These variables are used for communication between the following functions.
2730 They indicate various things about the current function being compiled
2731 that are used to tell what kind of prologue, epilogue and procedure
2732 descriptor to generate. */
2734 /* Nonzero if we need a stack procedure. */
2735 static int is_stack_procedure;
2737 /* Register number (either FP or SP) that is used to unwind the frame. */
2738 static int unwind_regno;
2740 /* Register number used to save FP. We need not have one for RA since
2741 we don't modify it for register procedures. This is only defined
2742 for register frame procedures. */
2743 static int save_fp_regno;
2745 /* Register number used to reference objects off our PV. */
2746 static int base_regno;
2748 /* Compute register masks for saved registers. */
/* (VMS) Compute the bit masks of saved registers: integer registers
   into *IMASKP, floating registers (numbers 32..63, stored as bits
   0..31) into *FMASKP.  A register is saved if it is call-saved,
   not fixed, and live; $26 (RA) is excluded here.
   NOTE(review): interior lines (including the stores through the
   out-parameters) are elided in this excerpt.  */
2751 alpha_sa_mask (imaskP, fmaskP)
2752 unsigned long *imaskP;
2753 unsigned long *fmaskP;
2755 unsigned long imask = 0;
2756 unsigned long fmask = 0;
/* A stack procedure always saves the frame pointer.  */
2759 if (is_stack_procedure)
2760 imask |= (1L << HARD_FRAME_POINTER_REGNUM);
2762 /* One for every register we have to save. */
2764 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2765 if (! fixed_regs[i] && ! call_used_regs[i]
2766 && regs_ever_live[i] && i != REG_RA)
2771 fmask |= (1L << (i - 32));
2784 HOST_WIDE_INT stack_needed;
2787 /* One for every register we have to save. */
2789 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2790 if (! fixed_regs[i] && ! call_used_regs[i]
2791 && regs_ever_live[i] && i != REG_RA)
2794 /* Start by assuming we can use a register procedure if we don't make any
2795 calls (REG_RA not used) or need to save any registers and a stack
2796 procedure if we do. */
2797 is_stack_procedure = sa_size != 0 || alpha_ra_ever_killed ();
2799 /* Decide whether to refer to objects off our PV via FP or PV.
2800 If we need FP for something else or if we receive a nonlocal
2801 goto (which expects PV to contain the value), we must use PV.
2802 Otherwise, start by assuming we can use FP. */
2803 base_regno = (frame_pointer_needed || current_function_has_nonlocal_label
2804 || is_stack_procedure
2805 || current_function_outgoing_args_size
2806 ? REG_PV : HARD_FRAME_POINTER_REGNUM);
2808 /* If we want to copy PV into FP, we need to find some register in which to
2813 if (base_regno == HARD_FRAME_POINTER_REGNUM)
2814 for (i = 0; i < 32; i++)
2815 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
2818 if (save_fp_regno == -1)
2819 base_regno = REG_PV, is_stack_procedure = 1;
2821 /* Stack unwinding should be done via FP unless we use it for PV. */
2823 = base_regno == REG_PV ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM;
2825 /* If this is a stack procedure, allow space for saving FP and RA. */
2826 if (is_stack_procedure)
2833 alpha_pv_save_size ()
2836 return is_stack_procedure ? 8 : 0;
2843 return unwind_regno == HARD_FRAME_POINTER_REGNUM;
2846 #else /* ! OPEN_VMS */
/* NOTE(review): sampled excerpt — this is the non-VMS alpha_sa_size:
   8 bytes per live call-saved register, plus a slot for RA when anything
   is saved or RA is clobbered, rounded so the area stays 16-byte aligned. */
2854 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2855 if (! fixed_regs[i] && ! call_used_regs[i]
2856 && regs_ever_live[i] && i != REG_RA)
2859 /* If some registers were saved but not reg 26, reg 26 must also
2860 be saved, so leave space for it. */
2861 if (size != 0 || alpha_ra_ever_killed ())
2864 /* Our size must be even (multiple of 16 bytes). */
2871 #endif /* ! OPEN_VMS */
2873 /* Return 1 if this function can directly return via $26. */
/* direct_return: only possible after reload, on non-VMS, when there is no
   frame at all (no saves, no locals, no arg area). */
2878 return (! TARGET_OPEN_VMS && reload_completed && alpha_sa_size () == 0
2879 && get_frame_size () == 0
2880 && current_function_outgoing_args_size == 0
2881 && current_function_pretend_args_size == 0);
2884 /* Write a version stamp. Don't write anything if we are running as a
2885 cross-compiler. Otherwise, use the versions in /usr/include/stamp.h. */
2887 #if !defined(CROSS_COMPILE) && !defined(_WIN32) && !defined(__linux__) && !defined(VMS)
2892 alpha_write_verstamp (file)
2896 fprintf (file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
2900 /* Write code to add constant C to register number IN_REG (possibly 31)
2901 and put the result into OUT_REG. Use TEMP_REG as a scratch register;
2902 usually this will be OUT_REG, but should not be if OUT_REG is
2903 STACK_POINTER_REGNUM, since it must be updated in a single instruction.
2904 Write the code to FILE. */
/* NOTE(review): sampled excerpt — some interior lines of this function are
   not shown.  The constant is split into a sign-extended low 16 bits and a
   high part for lda/ldah, with EXTRA holding a second ldah adjustment when
   the high part would be interpreted as negative. */
2907 add_long_const (file, c, in_reg, out_reg, temp_reg)
2910 int in_reg, out_reg, temp_reg;
2912 HOST_WIDE_INT low = (c & 0xffff) - 2 * (c & 0x8000);
2913 HOST_WIDE_INT tmp1 = c - low;
2914 HOST_WIDE_INT high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
2915 HOST_WIDE_INT extra = 0;
2917 /* We don't have code to write out constants larger than 32 bits. */
2918 #if HOST_BITS_PER_LONG_INT == 64
2919 if ((unsigned HOST_WIDE_INT) c >> 32 != 0)
2923 /* If HIGH will be interpreted as negative, we must adjust it to do two
2924 ldha insns. Note that we will never be building a negative constant
2931 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
2936 int result_reg = (extra == 0 && high == 0) ? out_reg : temp_reg;
/* NOTE(review): the bound `low < 255` looks like it was meant to be 256
   (addq literal operands are 0..255) — confirm against the Alpha
   instruction reference before changing; cannot verify from fragment. */
2938 if (low >= 0 && low < 255)
2939 fprintf (file, "\taddq $%d,%d,$%d\n", in_reg, low, result_reg);
2941 fprintf (file, "\tlda $%d,%d($%d)\n", result_reg, low, in_reg);
2943 in_reg = result_reg;
2948 int result_reg = (high == 0) ? out_reg : temp_reg;
2950 fprintf (file, "\tldah $%d,%d($%d)\n", result_reg, extra, in_reg);
2951 in_reg = result_reg;
2955 fprintf (file, "\tldah $%d,%d($%d)\n", out_reg, high, in_reg);
2958 /* Write function prologue. */
2962 /* On vms we have two kinds of functions:
2964 - stack frame (PROC_STACK)
2965 these are 'normal' functions with local vars and which are
2966 calling other functions
2967 - register frame (PROC_REGISTER)
2968 keeps all data in registers, needs no stack
2970 We must pass this to the assembler so it can generate the
2971 proper pdsc (procedure descriptor)
2972 This is done with the '.pdesc' command.
2974 size is the stack size needed for local variables. */
/* NOTE(review): sampled excerpt — braces and several interior lines of this
   VMS prologue emitter are missing; comments below describe only the
   visible assembly-emission sequence. */
2977 output_prolog (file, size)
2981 unsigned long imask = 0;
2982 unsigned long fmask = 0;
2983 /* Stack space needed for pushing registers clobbered by us. */
2984 HOST_WIDE_INT sa_size;
2985 /* Complete stack size needed. */
2986 HOST_WIDE_INT frame_size;
2987 /* Offset from base reg to register save area. */
2989 /* Offset during register save. */
2991 /* Label for the procedure entry. */
2992 char *entry_label = (char *) alloca (strlen (alpha_function_name) + 6);
2995 sa_size = alpha_sa_size ();
/* Total frame = save area + 8-byte PV slot (stack procedures only)
   + locals + pretend args, rounded to the stack alignment. */
2997 = ALPHA_ROUND (sa_size
2998 + (is_stack_procedure ? 8 : 0)
2999 + size + current_function_pretend_args_size);
3001 /* Issue function start and label. */
3002 fprintf (file, "\t.ent ");
3003 assemble_name (file, alpha_function_name);
3004 fprintf (file, "\n");
3005 sprintf (entry_label, "%s..en", alpha_function_name);
3006 ASM_OUTPUT_LABEL (file, entry_label);
3007 inside_function = TRUE;
3009 fprintf (file, "\t.base $%d\n", base_regno);
3011 /* Calculate register masks for clobbered registers. */
3013 if (is_stack_procedure)
3014 alpha_sa_mask (&imask, &fmask);
3016 /* Adjust the stack by the frame size. If the frame size is > 4096
3017 bytes, we need to be sure we probe somewhere in the first and last
3018 4096 bytes (we can probably get away without the latter test) and
3019 every 8192 bytes in between. If the frame size is > 32768, we
3020 do this in a loop. Otherwise, we generate the explicit probe
3023 Note that we are only allowed to adjust sp once in the prologue. */
3025 if (frame_size < 32768)
3027 if (frame_size > 4096)
3031 fprintf (file, "\tstq $31,-%d($30)\n", probed);
3033 while (probed + 8192 < frame_size)
3034 fprintf (file, "\tstq $31,-%d($30)\n", probed += 8192);
3036 /* We only have to do this probe if we aren't saving registers. */
3037 if (sa_size == 0 && probed + 4096 < frame_size)
3038 fprintf (file, "\tstq $31,-%d($30)\n", frame_size);
3041 if (frame_size != 0)
3042 fprintf (file, "\tlda $30,-%d($30)\n", frame_size);
3046 /* Here we generate code to set R4 to SP + 4096 and set R23 to the
3047 number of 8192 byte blocks to probe. We then probe each block
3048 in the loop and then set SP to the proper location. If the
3049 amount remaining is > 4096, we have to do one more probe if we
3050 are not saving any registers. */
3052 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
3053 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
3055 add_long_const (file, blocks, 31, 23, 23);
3057 fprintf (file, "\tlda $22,4096($30)\n");
/* Probe loop label: <function>..sc */
3060 assemble_name (file, alpha_function_name);
3061 fprintf (file, "..sc:\n");
3063 fprintf (file, "\tstq $31,-8192($22)\n");
3064 fprintf (file, "\tsubq $23,1,$23\n");
3065 fprintf (file, "\tlda $22,-8192($22)\n");
3067 fprintf (file, "\tbne $23,$");
3068 assemble_name (file, alpha_function_name);
3069 fprintf (file, "..sc\n");
3071 if (leftover > 4096 && sa_size == 0)
3072 fprintf (file, "\tstq $31,-%d($22)\n", leftover);
3074 fprintf (file, "\tlda $30,-%d($22)\n", leftover);
3077 if (is_stack_procedure)
3079 int reg_offset = rsa_offset;
3081 /* Store R26 (RA) first. */
3082 fprintf (file, "\tstq $26,%d($30)\n", reg_offset);
3085 /* Store integer regs. according to mask. */
3086 for (i = 0; i < 32; i++)
3087 if (imask & (1L<<i))
3089 fprintf (file, "\tstq $%d,%d($30)\n", i, reg_offset);
3093 /* Print the register mask and do floating-point saves. */
3096 fprintf (file, "\t.mask 0x%x,0\n", imask);
3098 for (i = 0; i < 32; i++)
3100 if (fmask & (1L << i))
3102 fprintf (file, "\tstt $f%d,%d($30)\n", i, reg_offset);
3107 /* Print the floating-point mask, if we've saved any fp register. */
3109 fprintf (file, "\t.fmask 0x%x,0\n", fmask);
/* Save PV ($27) at the base of the frame. */
3111 fprintf (file, "\tstq $27,0($30)\n");
/* Register procedure: record and perform the FP save via a register copy. */
3115 fprintf (file, "\t.fp_save $%d\n", save_fp_regno);
3116 fprintf (file, "\tbis $%d,$%d,$%d\n", HARD_FRAME_POINTER_REGNUM,
3117 HARD_FRAME_POINTER_REGNUM, save_fp_regno);
3120 if (base_regno != REG_PV)
3121 fprintf (file, "\tbis $%d,$%d,$%d\n", REG_PV, REG_PV, base_regno);
3123 if (unwind_regno == HARD_FRAME_POINTER_REGNUM)
3124 fprintf (file, "\tbis $%d,$%d,$%d\n", STACK_POINTER_REGNUM,
3125 STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM);
3127 /* Describe our frame. */
3128 fprintf (file, "\t.frame $%d,%d,$26,%d\n",
3129 unwind_regno, frame_size, rsa_offset);
3131 /* If we have to allocate space for outgoing args, do it now. */
3132 if (current_function_outgoing_args_size != 0)
3133 fprintf (file, "\tlda $%d,%d($%d)\n", STACK_POINTER_REGNUM,
3134 - ALPHA_ROUND (current_function_outgoing_args_size),
3135 HARD_FRAME_POINTER_REGNUM);
3137 fprintf (file, "\t.prologue\n");
/* Emit the procedure-name string and .pdesc in the read-only section so
   the assembler can build the VMS procedure descriptor. */
3139 readonly_section ();
3140 fprintf (file, "\t.align 3\n");
3141 assemble_name (file, alpha_function_name); fputs ("..na:\n", file);
3142 fputs ("\t.ascii \"", file);
3143 assemble_name (file, alpha_function_name);
3144 fputs ("\\0\"\n", file);
3147 fprintf (file, "\t.align 3\n");
3148 fputs ("\t.name ", file);
3149 assemble_name (file, alpha_function_name);
3150 fputs ("..na\n", file);
3151 ASM_OUTPUT_LABEL (file, alpha_function_name);
3152 fprintf (file, "\t.pdesc ");
3153 assemble_name (file, alpha_function_name);
3154 fprintf (file, "..en,%s\n", is_stack_procedure ? "stack" : "reg");
3155 alpha_need_linkage (alpha_function_name, 1);
3161 /* Write function epilogue. */
/* NOTE(review): sampled excerpt — braces and some interior lines of this
   VMS epilogue emitter are missing from view. */
3164 output_epilog (file, size)
3168 unsigned long imask = 0;
3169 unsigned long fmask = 0;
3170 /* Stack space needed for pushing registers clobbered by us. */
3171 HOST_WIDE_INT sa_size = alpha_sa_size ();
3172 /* Complete stack size needed. */
3173 HOST_WIDE_INT frame_size
3174 = ALPHA_ROUND (sa_size
3175 + (is_stack_procedure ? 8 : 0)
3176 + size + current_function_pretend_args_size);
3178 rtx insn = get_last_insn ();
3180 /* If the last insn was a BARRIER, we don't have to write anything except
3181 the .end pseudo-op. */
3183 if (GET_CODE (insn) == NOTE)
3184 insn = prev_nonnote_insn (insn);
3186 if (insn == 0 || GET_CODE (insn) != BARRIER)
3188 /* Restore clobbered registers, load FP last. */
3190 if (is_stack_procedure)
/* If we unwound via FP, put SP back from FP before reloading registers. */
3196 if (unwind_regno == HARD_FRAME_POINTER_REGNUM)
3197 fprintf (file, "\tbis $%d,$%d,$%d\n", HARD_FRAME_POINTER_REGNUM,
3198 HARD_FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM);
3200 alpha_sa_mask (&imask, &fmask);
3202 /* Start reloading registers after RA. */
3203 reg_offset = rsa_offset + 8;
3205 for (i = 0; i < 32; i++)
3206 if (imask & (1L<<i))
/* Defer FP itself; remember where it lives and reload it last. */
3208 if (i == HARD_FRAME_POINTER_REGNUM)
3209 fp_offset = reg_offset;
3211 fprintf (file, "\tldq $%d,%d($30)\n",
3216 for (i = 0; i < 32; i++)
3217 if (fmask & (1L << i))
3219 fprintf (file, "\tldt $f%d,%d($30)\n", i, reg_offset);
3223 /* Restore R26 (RA). */
3224 fprintf (file, "\tldq $26,%d($30)\n", rsa_offset);
3226 /* Restore R29 (FP). */
3227 fprintf (file, "\tldq $29,%d($30)\n", fp_offset);
/* Register procedure: FP was kept in save_fp_regno; copy it back. */
3230 fprintf (file, "\tbis $%d,$%d,$%d\n", save_fp_regno, save_fp_regno,
3231 HARD_FRAME_POINTER_REGNUM);
3233 if (frame_size != 0)
3235 if (frame_size < 32768)
3236 fprintf (file, "\tlda $30,%d($30)\n", frame_size);
/* Large frame: build the adjustment in $2 with ldah/lda, then add. */
3239 long high = frame_size >> 16;
3240 long low = frame_size & 0xffff;
3244 low = -32768 + (low & 0x7fff);
3246 fprintf (file, "\tldah $2,%ld($31)\n", high);
3247 fprintf (file, "\tlda $2,%ld($2)\n", low);
3248 fprintf (file, "\taddq $30,$2,$30\n");
3252 /* Finally return to the caller. */
3253 fprintf (file, "\tret $31,($26),1\n");
3256 /* End the function. */
3257 fprintf (file, "\t.end ");
3258 assemble_name (file, alpha_function_name);
3259 fprintf (file, "\n");
3260 inside_function = FALSE;
3262 /* Show that we know this function if it is called again. */
3263 SYMBOL_REF_FLAG (XEXP (DECL_RTL (current_function_decl), 0)) = 1;
/* vms_valid_decl_attribute_p: accept the VMS-specific "overlaid" attribute
   (no arguments allowed). */
3267 vms_valid_decl_attribute_p (decl, attributes, identifier, args)
3273 if (is_attribute_p ("overlaid", identifier))
3274 return (args == NULL_TREE);
3278 #else /* !OPEN_VMS */
/* NOTE(review): sampled excerpt — interior lines missing.  Returns nonzero
   when the function must load the GP register: scans all insns for LDSYM
   or JSR types. */
3281 alpha_does_function_need_gp ()
3285 /* We never need a GP for Windows/NT. */
3286 if (TARGET_WINDOWS_NT)
3289 #ifdef TARGET_PROFILING_NEEDS_GP
3294 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
3295 Even if we are a static function, we still need to do this in case
3296 our address is taken and passed to something like qsort. */
3298 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
3299 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3300 && GET_CODE (PATTERN (insn)) != USE
3301 && GET_CODE (PATTERN (insn)) != CLOBBER)
3303 enum attr_type type = get_attr_type (insn);
3304 if (type == TYPE_LDSYM || type == TYPE_JSR)
/* NOTE(review): sampled excerpt — this is the OSF/Unix (non-VMS) prologue
   emitter; braces and several interior lines are missing from view. */
3312 output_prolog (file, size)
3316 HOST_WIDE_INT out_args_size
3317 = ALPHA_ROUND (current_function_outgoing_args_size);
3318 HOST_WIDE_INT sa_size = alpha_sa_size ();
3319 HOST_WIDE_INT frame_size
3320 = (out_args_size + sa_size
3321 + ALPHA_ROUND (size + current_function_pretend_args_size));
3322 HOST_WIDE_INT reg_offset = out_args_size;
3323 HOST_WIDE_INT start_reg_offset = reg_offset;
3324 HOST_WIDE_INT actual_start_reg_offset = start_reg_offset;
3325 int int_reg_save_area_size = 0;
3326 unsigned reg_mask = 0;
3329 /* Ecoff can handle multiple .file directives, so put out file and lineno.
3330 We have to do that before the .ent directive as we cannot switch
3331 files within procedures with native ecoff because line numbers are
3332 linked to procedure descriptors.
3333 Outputting the lineno helps debugging of one line functions as they
3334 would otherwise get no line number at all. Please note that we would
3335 like to put out last_linenum from final.c, but it is not accessible. */
3337 if (write_symbols == SDB_DEBUG)
3339 ASM_OUTPUT_SOURCE_FILENAME (file,
3340 DECL_SOURCE_FILE (current_function_decl));
3341 if (debug_info_level != DINFO_LEVEL_TERSE)
3342 ASM_OUTPUT_SOURCE_LINE (file,
3343 DECL_SOURCE_LINE (current_function_decl));
3346 /* The assembly language programmer's guide states that the second argument
3347 to the .ent directive, the lex_level, is ignored by the assembler,
3348 so we might as well omit it. */
3350 if (!flag_inhibit_size_directive)
3352 fprintf (file, "\t.ent ");
3353 assemble_name (file, alpha_function_name);
3354 fprintf (file, "\n");
3356 ASM_OUTPUT_LABEL (file, alpha_function_name);
3357 inside_function = TRUE;
3359 if (TARGET_IEEE_CONFORMANT && !flag_inhibit_size_directive)
3360 /* Set flags in procedure descriptor to request IEEE-conformant
3361 math-library routines. The value we set it to is PDSC_EXC_IEEE
3362 (/usr/include/pdsc.h). */
3363 fprintf (file, "\t.eflag 48\n");
3365 /* Set up offsets to alpha virtual arg/local debugging pointer. */
3367 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
3368 alpha_arg_offset = -frame_size + 48;
3370 alpha_function_needs_gp = alpha_does_function_need_gp ();
3372 if (TARGET_WINDOWS_NT == 0)
3374 if (alpha_function_needs_gp)
3375 fprintf (file, "\tldgp $29,0($27)\n");
3377 /* Put a label after the GP load so we can enter the function at it. */
3379 assemble_name (file, alpha_function_name);
3380 fprintf (file, "..ng:\n");
3383 /* Adjust the stack by the frame size. If the frame size is > 4096
3384 bytes, we need to be sure we probe somewhere in the first and last
3385 4096 bytes (we can probably get away without the latter test) and
3386 every 8192 bytes in between. If the frame size is > 32768, we
3387 do this in a loop. Otherwise, we generate the explicit probe
3390 Note that we are only allowed to adjust sp once in the prologue. */
3392 if (frame_size < 32768)
3394 if (frame_size > 4096)
3398 fprintf (file, "\tstq $31,-%d($30)\n", probed);
3400 while (probed + 8192 < frame_size)
3401 fprintf (file, "\tstq $31,-%d($30)\n", probed += 8192);
3403 /* We only have to do this probe if we aren't saving registers. */
3404 if (sa_size == 0 && probed + 4096 < frame_size)
3405 fprintf (file, "\tstq $31,-%d($30)\n", frame_size);
3408 if (frame_size != 0)
3409 fprintf (file, "\tlda $30,-%d($30)\n", frame_size);
3413 /* Here we generate code to set R4 to SP + 4096 and set R5 to the
3414 number of 8192 byte blocks to probe. We then probe each block
3415 in the loop and then set SP to the proper location. If the
3416 amount remaining is > 4096, we have to do one more probe if we
3417 are not saving any registers. */
3419 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
3420 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
3422 add_long_const (file, blocks, 31, 5, 5);
3424 fprintf (file, "\tlda $4,4096($30)\n");
/* Probe loop label: <function>..sc */
3427 assemble_name (file, alpha_function_name);
3428 fprintf (file, "..sc:\n");
3430 fprintf (file, "\tstq $31,-8192($4)\n");
3431 fprintf (file, "\tsubq $5,1,$5\n");
3432 fprintf (file, "\tlda $4,-8192($4)\n");
3434 fprintf (file, "\tbne $5,$");
3435 assemble_name (file, alpha_function_name);
3436 fprintf (file, "..sc\n");
3438 if (leftover > 4096 && sa_size == 0)
3439 fprintf (file, "\tstq $31,-%d($4)\n", leftover);
3441 fprintf (file, "\tlda $30,-%d($4)\n", leftover);
3444 /* Describe our frame. */
3445 if (!flag_inhibit_size_directive)
3447 fprintf (file, "\t.frame $%d,%d,$26,%d\n",
3448 (frame_pointer_needed
3449 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
3450 frame_size, current_function_pretend_args_size);
3453 /* Cope with very large offsets to the register save area. */
/* When the save-area offset won't fit in a 16-bit displacement, compute a
   base in $24 (sa_reg, assigned in lines not shown) for the stores below. */
3455 if (reg_offset + sa_size > 0x8000)
3457 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
3458 if (low + sa_size <= 0x8000)
3460 add_long_const (file, reg_offset - low, 30, 24, 24);
3465 add_long_const (file, reg_offset, 30, 24, 24);
3471 /* Save register RA if any other register needs to be saved. */
3474 reg_mask |= 1 << REG_RA;
3475 fprintf (file, "\tstq $26,%d($%d)\n", reg_offset, sa_reg);
3477 int_reg_save_area_size += 8;
3480 /* Now save any other used integer registers required to be saved. */
3481 for (i = 0; i < 32; i++)
3482 if (! fixed_regs[i] && ! call_used_regs[i]
3483 && regs_ever_live[i] && i != REG_RA)
3486 fprintf (file, "\tstq $%d,%d($%d)\n", i, reg_offset, sa_reg);
3488 int_reg_save_area_size += 8;
3491 /* Print the register mask and do floating-point saves. */
3492 if (reg_mask && !flag_inhibit_size_directive)
3493 fprintf (file, "\t.mask 0x%x,%d\n", reg_mask,
3494 actual_start_reg_offset - frame_size);
3496 start_reg_offset = reg_offset;
3499 for (i = 0; i < 32; i++)
3500 if (! fixed_regs[i + 32] && ! call_used_regs[i + 32]
3501 && regs_ever_live[i + 32])
3504 fprintf (file, "\tstt $f%d,%d($%d)\n", i, reg_offset, sa_reg);
3508 /* Print the floating-point mask, if we've saved any fp register. */
3509 if (reg_mask && !flag_inhibit_size_directive)
3510 fprintf (file, "\t.fmask 0x%x,%d\n", reg_mask,
3511 actual_start_reg_offset - frame_size + int_reg_save_area_size);
3513 /* If we need a frame pointer, set it from the stack pointer. Note that
3514 this must always be the last instruction in the prologue. */
3515 if (frame_pointer_needed)
3516 fprintf (file, "\tbis $30,$30,$15\n");
3518 /* End the prologue and say if we used gp. */
3519 if (!flag_inhibit_size_directive)
3520 fprintf (file, "\t.prologue %d\n", alpha_function_needs_gp);
3523 /* Write function epilogue. */
/* NOTE(review): sampled excerpt — this is the OSF/Unix (non-VMS) epilogue
   emitter; braces and several interior lines are missing from view. */
3526 output_epilog (file, size)
3530 rtx insn = get_last_insn ();
3531 HOST_WIDE_INT out_args_size
3532 = ALPHA_ROUND (current_function_outgoing_args_size);
3533 HOST_WIDE_INT sa_size = alpha_sa_size ();
3534 HOST_WIDE_INT frame_size
3535 = (out_args_size + sa_size
3536 + ALPHA_ROUND (size + current_function_pretend_args_size));
3537 HOST_WIDE_INT reg_offset = out_args_size;
3538 HOST_WIDE_INT frame_size_from_reg_save = frame_size - reg_offset;
/* restore_fp (declared on a line not shown): FP must be reloaded. */
3540 = frame_pointer_needed && regs_ever_live[HARD_FRAME_POINTER_REGNUM];
3543 /* If the last insn was a BARRIER, we don't have to write anything except
3544 the .end pseudo-op. */
3545 if (GET_CODE (insn) == NOTE)
3546 insn = prev_nonnote_insn (insn);
3547 if (insn == 0 || GET_CODE (insn) != BARRIER)
3552 /* If we have a frame pointer, restore SP from it. */
3553 if (frame_pointer_needed)
3554 fprintf (file, "\tbis $15,$15,$30\n");
3556 /* Cope with large offsets to the register save area. */
3558 if (reg_offset + sa_size > 0x8000)
3560 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
3561 if (low + sa_size <= 0x8000)
3563 add_long_const (file, reg_offset - low, 30, 24, 24);
3568 add_long_const (file, reg_offset, 30, 24, 24);
3574 /* Restore all the registers, starting with the return address
3578 fprintf (file, "\tldq $26,%d($%d)\n", reg_offset, sa_reg);
3582 /* Now restore any other used integer registers that that we saved,
3583 except for FP if it is being used as FP, since it must be
3586 for (i = 0; i < 32; i++)
3587 if (! fixed_regs[i] && ! call_used_regs[i] && regs_ever_live[i]
3590 if (i == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
3591 fp_offset = reg_offset;
3593 fprintf (file, "\tldq $%d,%d($%d)\n", i, reg_offset, sa_reg);
3597 for (i = 0; i < 32; i++)
3598 if (! fixed_regs[i + 32] && ! call_used_regs[i + 32]
3599 && regs_ever_live[i + 32])
3601 fprintf (file, "\tldt $f%d,%d($%d)\n", i, reg_offset, sa_reg);
3605 /* If the stack size is large and we have a frame pointer, compute the
3606 size of the stack into a register because the old FP restore, stack
3607 pointer adjust, and return are required to be consecutive
3609 if (frame_size > 32767 && restore_fp)
3610 add_long_const (file, frame_size, 31, 1, 1);
3612 /* If we needed a frame pointer and we have to restore it, do it
3613 now. This must be done in one instruction immediately
3614 before the SP update. */
3615 if (restore_fp && fp_offset)
3616 fprintf (file, "\tldq $15,%d($%d)\n", fp_offset, sa_reg);
3618 /* Now update the stack pointer, if needed. Only one instruction must
3619 modify the stack pointer. It must be the last instruction in the
3620 sequence and must be an ADDQ or LDA instruction. If the frame
3621 pointer was loaded above, we may only put one instruction here. */
3623 if (frame_size > 32768 && restore_fp)
3624 fprintf (file, "\taddq $1,$30,$30\n");
3626 add_long_const (file, frame_size, 30, 30, 1);
3628 /* Finally return to the caller. */
3629 fprintf (file, "\tret $31,($26),1\n");
3632 /* End the function. */
3633 if (!flag_inhibit_size_directive)
3635 fprintf (file, "\t.end ");
3636 assemble_name (file, alpha_function_name);
3637 fprintf (file, "\n");
3639 inside_function = FALSE;
3641 /* Show that we know this function if it is called again.
3643 Don't do this for global functions in object files destined for a
3644 shared library because the function may be overridden by the application
3646 ??? Is this just ELF? */
3648 if (!flag_pic || !TREE_PUBLIC (current_function_decl))
3649 SYMBOL_REF_FLAG (XEXP (DECL_RTL (current_function_decl), 0)) = 1;
3651 #endif /* !OPEN_VMS */
3653 /* Debugging support. */
3657 /* Count the number of sdb related labels are generated (to find block
3658 start and end boundaries). */
3660 int sdb_label_count = 0;
3662 /* Next label # for each statement. */
3664 static int sym_lineno = 0;
3666 /* Count the number of .file directives, so that .loc is up to date. */
3668 static int num_source_filenames = 0;
3670 /* Name of the file containing the current function. */
3672 static char *current_function_file = "";
3674 /* Offsets to alpha virtual arg/local debugging pointers. */
3676 long alpha_arg_offset;
3677 long alpha_auto_offset;
3679 /* Emit a new filename to a stream. */
/* NOTE(review): sampled excerpt — interior lines missing.  Emits a .file
   (ECOFF) or .stabs (DBX) directive when the source file changes; the
   first call is handled specially. */
3682 alpha_output_filename (stream, name)
3686 static int first_time = TRUE;
3687 char ltext_label_name[100];
3692 ++num_source_filenames;
3693 current_function_file = name;
3694 fprintf (stream, "\t.file\t%d ", num_source_filenames);
3695 output_quoted_string (stream, name);
3696 fprintf (stream, "\n");
3697 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
3698 fprintf (stream, "\t#@stabs\n");
3701 else if (write_symbols == DBX_DEBUG)
3703 ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext", 0);
3704 fprintf (stream, "%s ", ASM_STABS_OP);
3705 output_quoted_string (stream, name);
3706 fprintf (stream, ",%d,0,0,%s\n", N_SOL, &ltext_label_name[1]);
3709 else if (name != current_function_file
3710 && strcmp (name, current_function_file) != 0)
3712 if (inside_function && ! TARGET_GAS)
3713 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
3716 ++num_source_filenames;
3717 current_function_file = name;
3718 fprintf (stream, "\t.file\t%d ", num_source_filenames);
3721 output_quoted_string (stream, name);
3722 fprintf (stream, "\n");
3726 /* Emit a linenumber to a stream. */
3729 alpha_output_lineno (stream, line)
3733 if (write_symbols == DBX_DEBUG)
3735 /* mips-tfile doesn't understand .stabd directives. */
3737 fprintf (stream, "$LM%d:\n\t%s %d,0,%d,$LM%d\n",
3738 sym_lineno, ASM_STABN_OP, N_SLINE, line, sym_lineno);
3741 fprintf (stream, "\n\t.loc\t%d %d\n", num_source_filenames, line);
3744 /* Structure to show the current status of registers and memory. */
3746 struct shadow_summary
/* Bit i of `i` / `fp` tracks integer register i / FP register i+32;
   `mem` is a single bit covering all of memory. */
3749 unsigned long i : 31; /* Mask of int regs */
3750 unsigned long fp : 31; /* Mask of fp regs */
3751 unsigned long mem : 1; /* mem == imem | fpmem */
3755 /* Summary the effects of expression X on the machine. Update SUM, a pointer
3756 to the summary structure. SET is nonzero if the insn is setting the
3757 object, otherwise zero. */
/* NOTE(review): sampled excerpt — several case labels and braces of this
   recursive RTL walker are not visible here. */
3760 summarize_insn (x, sum, set)
3762 struct shadow_summary *sum;
3771 switch (GET_CODE (x))
3773 /* ??? Note that this case would be incorrect if the Alpha had a
3774 ZERO_EXTRACT in SET_DEST. */
3776 summarize_insn (SET_SRC (x), sum, 0);
3777 summarize_insn (SET_DEST (x), sum, 1);
3781 summarize_insn (XEXP (x, 0), sum, 1);
3785 summarize_insn (XEXP (x, 0), sum, 0);
3789 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
3790 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
3794 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3795 summarize_insn (XVECEXP (x, 0, i), sum, 0);
/* REG case: record the register in the used or defd mask; registers 31
   and 63 are the hardwired zero registers and are skipped. */
3804 int regno = REGNO (x);
3805 unsigned long mask = 1UL << (regno % 32);
3807 if (regno == 31 || regno == 63)
3813 sum->defd.i |= mask;
3815 sum->defd.fp |= mask;
3820 sum->used.i |= mask;
3822 sum->used.fp |= mask;
3833 /* Find the regs used in memory address computation: */
3834 summarize_insn (XEXP (x, 0), sum, 0);
3837 case CONST_INT: case CONST_DOUBLE:
3838 case SYMBOL_REF: case LABEL_REF: case CONST:
3841 /* Handle common unary and binary ops for efficiency. */
3842 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
3843 case MOD: case UDIV: case UMOD: case AND: case IOR:
3844 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
3845 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
3846 case NE: case EQ: case GE: case GT: case LE:
3847 case LT: case GEU: case GTU: case LEU: case LTU:
3848 summarize_insn (XEXP (x, 0), sum, 0);
3849 summarize_insn (XEXP (x, 1), sum, 0);
3852 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
3853 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
3854 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
3855 case SQRT: case FFS:
3856 summarize_insn (XEXP (x, 0), sum, 0);
/* Default: walk the generic RTX format string, recursing on 'e' (expr)
   and 'E' (vector) operands. */
3860 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
3861 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3862 switch (format_ptr[i])
3865 summarize_insn (XEXP (x, i), sum, 0);
3869 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3870 summarize_insn (XVECEXP (x, i, j), sum, 0);
3879 /* Ensure a sufficient number of `trapb' insns are in the code when the user
3880 requests code with a trap precision of functions or instructions.
3882 In naive mode, when the user requests a trap-precision of "instruction", a
3883 trapb is needed after every instruction that may generate a trap (and after
3884 jsr/bsr instructions, because called functions may import a trap from the
3885 caller). This ensures that the code is resumption safe but it is also slow.
3887 When optimizations are turned on, we delay issuing a trapb as long as
3888 possible. In this context, a trap shadow is the sequence of instructions
3889 that starts with a (potentially) trap generating instruction and extends to
3890 the next trapb or call_pal instruction (but GCC never generates call_pal by
3891 itself). We can delay (and therefore sometimes omit) a trapb subject to the
3892 following conditions:
3894 (a) On entry to the trap shadow, if any Alpha register or memory location
3895 contains a value that is used as an operand value by some instruction in
3896 the trap shadow (live on entry), then no instruction in the trap shadow
3897 may modify the register or memory location.
3899 (b) Within the trap shadow, the computation of the base register for a
3900 memory load or store instruction may not involve using the result
3901 of an instruction that might generate an UNPREDICTABLE result.
3903 (c) Within the trap shadow, no register may be used more than once as a
3904 destination register. (This is to make life easier for the trap-handler.)
3906 (d) The trap shadow may not include any branch instructions. */
/* NOTE(review): sampled excerpt — braces and several interior lines of
   this pass are missing; the comments below describe only the visible
   control flow. */
3909 alpha_handle_trap_shadows (insns)
3912 struct shadow_summary shadow;
3913 int trap_pending, exception_nesting;
/* Nothing to do when traps only need program-level precision and
   exceptions are off. */
3916 if (alpha_tp == ALPHA_TP_PROG && !flag_exceptions)
3920 exception_nesting = 0;
3923 shadow.used.mem = 0;
3924 shadow.defd = shadow.used;
3926 for (i = insns; i ; i = NEXT_INSN (i))
3928 if (GET_CODE (i) == NOTE)
3930 switch (NOTE_LINE_NUMBER (i))
3932 case NOTE_INSN_EH_REGION_BEG:
3933 exception_nesting++;
3938 case NOTE_INSN_EH_REGION_END:
3939 exception_nesting--;
3944 case NOTE_INSN_EPILOGUE_BEG:
3945 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
3950 else if (trap_pending)
/* Function-level precision: close the shadow at a RETURN jump. */
3952 if (alpha_tp == ALPHA_TP_FUNC)
3954 if (GET_CODE (i) == JUMP_INSN
3955 && GET_CODE (PATTERN (i)) == RETURN)
3958 else if (alpha_tp == ALPHA_TP_INSN)
3962 struct shadow_summary sum;
3967 sum.defd = sum.used;
3969 switch (GET_CODE (i))
3972 /* Annoyingly, get_attr_trap will abort on these. */
3973 if (GET_CODE (PATTERN (i)) == USE
3974 || GET_CODE (PATTERN (i)) == CLOBBER)
3977 summarize_insn (PATTERN (i), &sum, 0);
3979 if ((sum.defd.i & shadow.defd.i)
3980 || (sum.defd.fp & shadow.defd.fp))
3982 /* (c) would be violated */
3986 /* Combine shadow with summary of current insn: */
3987 shadow.used.i |= sum.used.i;
3988 shadow.used.fp |= sum.used.fp;
3989 shadow.used.mem |= sum.used.mem;
3990 shadow.defd.i |= sum.defd.i;
3991 shadow.defd.fp |= sum.defd.fp;
3992 shadow.defd.mem |= sum.defd.mem;
3994 if ((sum.defd.i & shadow.used.i)
3995 || (sum.defd.fp & shadow.used.fp)
3996 || (sum.defd.mem & shadow.used.mem))
3998 /* (a) would be violated (also takes care of (b)) */
3999 if (get_attr_trap (i) == TRAP_YES
4000 && ((sum.defd.i & sum.used.i)
4001 || (sum.defd.fp & sum.used.fp)))
/* Close the shadow: emit a trapb before this insn and reset state. */
4020 emit_insn_before (gen_trapb (), i);
4024 shadow.used.mem = 0;
4025 shadow.defd = shadow.used;
/* A trapping insn opens (or extends) a shadow. */
4030 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
4031 && GET_CODE (i) == INSN
4032 && GET_CODE (PATTERN (i)) != USE
4033 && GET_CODE (PATTERN (i)) != CLOBBER
4034 && get_attr_trap (i) == TRAP_YES
4036 if (optimize && !trap_pending)
4037 summarize_insn (PATTERN (i), &shadow, 0);
4043 /* Machine dependant reorg pass. */
4049 alpha_handle_trap_shadows (insns);
4053 /* Check a floating-point value for validity for a particular machine mode. */
4055 static char *float_strings[] =
4057 /* These are for FLOAT_VAX. */
4058 "1.70141173319264430e+38", /* 2^127 (2^24 - 1) / 2^24 */
4059 "-1.70141173319264430e+38",
4060 "2.93873587705571877e-39", /* 2^-128 */
4061 "-2.93873587705571877e-39",
4062 /* These are for the default broken IEEE mode, which traps
4063 on infinity or denormal numbers. */
4064 "3.402823466385288598117e+38", /* 2^128 (1 - 2^-24) */
4065 "-3.402823466385288598117e+38",
4066 "1.1754943508222875079687e-38", /* 2^-126 */
4067 "-1.1754943508222875079687e-38",
4070 static REAL_VALUE_TYPE float_values[8];
4071 static int inited_float_values = 0;
/* Force the value at D into the range representable under the current
   float format: magnitudes above the format's maximum saturate to that
   maximum (keeping sign), and nonzero magnitudes below the format's
   minimum flush to zero.  NOTE(review): the return type, the
   declarations of D, OVERFLOW, R and I, the mode test (presumably
   restricting this to single-precision, given the limits above --
   confirm), and several braces appear elided from this view; all code
   lines are left untouched.  */
4074 check_float_value (mode, d, overflow)
4075 enum machine_mode mode;
/* Full IEEE modes accept infinities/denormals, so presumably no
   clamping is done -- the elided line after this looks like an early
   return; confirm against the full source.  */
4080 if (TARGET_IEEE || TARGET_IEEE_CONFORMANT || TARGET_IEEE_WITH_INEXACT)
/* One-time conversion of the textual limits into REAL_VALUE_TYPEs.  */
4083 if (inited_float_values == 0)
4086 for (i = 0; i < 8; i++)
4087 float_values[i] = REAL_VALUE_ATOF (float_strings[i], DFmode);
4089 inited_float_values = 1;
4095 REAL_VALUE_TYPE *fvptr;
/* Select the VAX or IEEE group of four limits.  */
4097 if (TARGET_FLOAT_VAX)
4098 fvptr = &float_values[0];
4100 fvptr = &float_values[4];
/* Copy *D into R for comparison (bcopy avoids aliasing the macros'
   operand).  */
4102 bcopy ((char *) d, (char *) &r, sizeof (REAL_VALUE_TYPE));
/* r > +max: saturate to +max.  */
4103 if (REAL_VALUES_LESS (fvptr[0], r))
4105 bcopy ((char *) &fvptr[0], (char *) d,
4106 sizeof (REAL_VALUE_TYPE));
/* r < -max: saturate to -max.  */
4109 else if (REAL_VALUES_LESS (r, fvptr[1]))
4111 bcopy ((char *) &fvptr[1], (char *) d,
4112 sizeof (REAL_VALUE_TYPE));
/* 0 < r < +min: flush to zero.  */
4115 else if (REAL_VALUES_LESS (dconst0, r)
4116 && REAL_VALUES_LESS (r, fvptr[2]))
4118 bcopy ((char *) &dconst0, (char *) d, sizeof (REAL_VALUE_TYPE));
/* -min < r < 0: flush to zero.  */
4121 else if (REAL_VALUES_LESS (r, dconst0)
4122 && REAL_VALUES_LESS (fvptr[3], r))
4124 bcopy ((char *) &dconst0, (char *) d, sizeof (REAL_VALUE_TYPE));
4134 /* Return the VMS argument type corresponding to MODE. */
/* NOTE(review): the return type and the switch over MODE (with its case
   labels and default) appear elided from this view; only the two float
   return statements survive.  FF/FS and FD/FT presumably name the VMS
   single- and double-precision argument-type codes for the VAX
   vs. IEEE float formats -- confirm against the enum's declaration.  */
4137 alpha_arg_type (mode)
4138 enum machine_mode mode;
4143 return TARGET_FLOAT_VAX ? FF : FS;
4145 return TARGET_FLOAT_VAX ? FD : FT;
4151 /* Return an rtx for an integer representing the VMS Argument Information
/* NOTE(review): the tail of the comment above and this function's return
   type, opening brace, and the declaration of I appear elided from this
   view; code lines are left untouched.  */
4155 alpha_arg_info_reg_val (cum)
4156 CUMULATIVE_ARGS cum;
/* The low bits carry the argument count.  */
4158 unsigned HOST_WIDE_INT regval = cum.num_args;
/* Pack a 3-bit type code for each of the six register argument slots,
   starting at bit 8.  */
4161 for (i = 0; i < 6; i++)
4162 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
4164 return GEN_INT (regval);
4167 /* Structure to collect function names for final output
/* NOTE(review): the tail of the comment above is elided from this view.  */

/* How a name seen in this translation unit relates to its definition:
   not yet referenced, defined locally, or assumed external.  */
4170 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};

/* Node in the singly-linked list of names needing .linkage handling.
   NOTE(review): the declaration of the NAME field (used as lptr->name
   below) appears elided from this view.  */
4173 struct alpha_links {
4174 struct alpha_links *next;
4176 enum links_kind kind;

/* Head of the list of names recorded so far.  */
4179 static struct alpha_links *alpha_links_base = 0;
4181 /* Make (or fake) .linkage entry for function call.
4183 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition. */
/* Records NAME in the alpha_links list, upgrading an existing entry's
   kind when a previously external/unused name turns out to be defined
   or used locally.  NOTE(review): the return type, parameter
   declarations, braces, and the return statements after the kind
   upgrades appear elided from this view; code lines left untouched.  */
4186 alpha_need_linkage (name, is_local)
4191 struct alpha_links *lptr, *nptr;
4196 /* Is this name already defined ? */
4198 for (lptr = alpha_links_base; lptr; lptr = lptr->next)
4199 if (strcmp (lptr->name, name) == 0)
4203 /* Defined here but external assumed. */
4204 if (lptr->kind == KIND_EXTERN)
4205 lptr->kind = KIND_LOCAL;
4209 /* Used here but unused assumed. */
4210 if (lptr->kind == KIND_UNUSED)
4211 lptr->kind = KIND_LOCAL;
/* First sighting: add a fresh node at the head of the list, copying
   the name since the caller's string may not outlive us.  */
4216 nptr = (struct alpha_links *) xmalloc (sizeof (struct alpha_links));
4217 nptr->next = alpha_links_base;
4218 nptr->name = xstrdup (name);
4220 /* Assume external if no definition. */
4221 nptr->kind = (is_local ? KIND_UNUSED : KIND_EXTERN);
4223 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
4224 get_identifier (name);
4226 alpha_links_base = nptr;
/* Emit the linkage entries for every name recorded by
   alpha_need_linkage into STREAM, skipping names that were never
   actually referenced.  NOTE(review): the return type, the parameter
   declaration of STREAM, the advance of NPTR in the loop, the `continue'
   for skipped names, the `else' before the external case, and several
   braces appear elided from this view; code lines left untouched.  */
4233 alpha_write_linkage (stream)
4236 struct alpha_links *lptr, *nptr;
4238 readonly_section ();
4240 fprintf (stream, "\t.align 3\n");
4242 for (lptr = alpha_links_base; lptr; lptr = nptr)
/* Skip entries never marked used via their IDENTIFIER node.  */
4246 if (lptr->kind == KIND_UNUSED
4247 || ! TREE_SYMBOL_REFERENCED (get_identifier (lptr->name)))
/* Label the linkage slot for this name.  */
4250 fprintf (stream, "$%s..lk:\n", lptr->name);
4251 if (lptr->kind == KIND_LOCAL)
4253 /* Local and used, build linkage pair. */
4254 fprintf (stream, "\t.quad %s..en\n", lptr->name);
4255 fprintf (stream, "\t.quad %s\n", lptr->name);
4258 /* External and used, request linkage pair. */
4259 fprintf (stream, "\t.linkage %s\n", lptr->name);
/* Non-VMS stub of alpha_need_linkage: no .linkage bookkeeping is
   required on other targets.  NOTE(review): the surrounding #else, the
   parameter declarations, and the (presumably empty) body appear elided
   from this view.  */
4266 alpha_need_linkage (name, is_local)
4272 #endif /* OPEN_VMS */