1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 93, 94, 95, 96, 97, 1998 Free Software Foundation, Inc.
3 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
28 #include "hard-reg-set.h"
30 #include "insn-config.h"
31 #include "conditions.h"
32 #include "insn-flags.h"
34 #include "insn-attr.h"
45 extern char *version_string;
46 extern int rtx_equal_function_value_matters;
/* NOTE(review): this region is the file-scope option state for the Alpha
   backend.  Several lines (initializer lists, parts of comments) were lost
   in this copy; restore against the original revision.  */

/* Specify which cpu to schedule for. */
enum processor_type alpha_cpu;
/* NOTE(review): initializer list for this name table was dropped here;
   it is indexed by alpha_cpu (see the -mmemory-latency warning below).  */
static char* const alpha_cpu_name[] =
/* Specify how accurate floating-point traps need to be. */
enum alpha_trap_precision alpha_tp;
/* Specify the floating-point rounding mode. */
enum alpha_fp_rounding_mode alpha_fprm;
/* Specify which things cause traps. */
enum alpha_fp_trap_mode alpha_fptm;
/* Strings decoded into the above options. */
char *alpha_cpu_string;	/* -mcpu= */
char *alpha_tp_string;	/* -mtrap-precision=[p|s|i] */
char *alpha_fprm_string; /* -mfp-rounding-mode=[n|m|c|d] */
char *alpha_fptm_string; /* -mfp-trap-mode=[n|u|su|sui] */
char *alpha_mlat_string; /* -mmemory-latency= */
/* Save information from a "cmpxx" operation until the branch or scc is
   emitted.  (NOTE(review): tail of this comment was truncated.)  */
rtx alpha_compare_op0, alpha_compare_op1;
int alpha_compare_fp_p;
/* Save the name of the current function as used by the assembler. This
   is used by the epilogue. */
char *alpha_function_name;
/* Non-zero if inside of a function, because the Alpha asm can't
   handle .files inside of functions. */
static int inside_function = FALSE;
/* Nonzero if the current function needs gp. */
int alpha_function_needs_gp;
/* If non-null, this rtx holds the return address for the function. */
static rtx alpha_return_addr_rtx;
/* The number of cycles of latency we should assume on memory reads. */
int alpha_memory_latency = 3;
/* Declarations of static functions. */
static void alpha_set_memflags_1 PROTO((rtx, int, int, int));
static rtx alpha_emit_set_const_1 PROTO((rtx, enum machine_mode,
HOST_WIDE_INT, int));
static void add_long_const PROTO((FILE *, HOST_WIDE_INT, int, int, int));
/* Compute the size of the save area in the stack. */
static void alpha_sa_mask PROTO((unsigned long *imaskP,
unsigned long *fmaskP));
/* Get the number of args of a function in one of two ways. */
/* NOTE(review): the #ifdef/#else/#endif around these two alternative
   definitions (OPEN_VMS vs. not) was lost in this copy.  */
#define NUM_ARGS current_function_args_info.num_args
#define NUM_ARGS current_function_args_info
/* Parse target option strings. */
/* NOTE(review): this is the interior of override_options (void).  The
   function header, local declarations (int lat; char *end;), braces and
   several else-branches were lost in this copy.  Only comments added.  */
/* 971208 -- EV6 scheduling parameters are still secret, so don't even
   pretend and just schedule for an EV5 for now. -- r~ */
= TARGET_CPU_DEFAULT & MASK_CPU_EV6 ? PROCESSOR_EV6
: (TARGET_CPU_DEFAULT & MASK_CPU_EV5 ? PROCESSOR_EV5 : PROCESSOR_EV4);
/* Decode -mcpu=: each CPU choice also fixes the BWX/CIX/MAX ISA
   extension flags appropriate for that implementation.  */
if (alpha_cpu_string)
if (! strcmp (alpha_cpu_string, "ev4")
|| ! strcmp (alpha_cpu_string, "21064"))
alpha_cpu = PROCESSOR_EV4;
target_flags &= ~ (MASK_BWX | MASK_CIX | MASK_MAX);
else if (! strcmp (alpha_cpu_string, "ev5")
|| ! strcmp (alpha_cpu_string, "21164"))
alpha_cpu = PROCESSOR_EV5;
target_flags &= ~ (MASK_BWX | MASK_CIX | MASK_MAX);
else if (! strcmp (alpha_cpu_string, "ev56")
|| ! strcmp (alpha_cpu_string, "21164a"))
alpha_cpu = PROCESSOR_EV5;
target_flags |= MASK_BWX;
target_flags &= ~ (MASK_CIX | MASK_MAX);
else if (! strcmp (alpha_cpu_string, "pca56")
|| ! strcmp (alpha_cpu_string, "21164PC")
|| ! strcmp (alpha_cpu_string, "21164pc"))
alpha_cpu = PROCESSOR_EV5;
target_flags |= MASK_BWX | MASK_MAX;
target_flags &= ~ MASK_CIX;
else if (! strcmp (alpha_cpu_string, "ev6")
|| ! strcmp (alpha_cpu_string, "21264"))
alpha_cpu = PROCESSOR_EV6;
target_flags |= MASK_BWX | MASK_CIX | MASK_MAX;
error ("bad value `%s' for -mcpu switch", alpha_cpu_string);
/* Defaults; -mieee / -mieee-with-inexact tighten them below.  */
alpha_tp = ALPHA_TP_PROG;
alpha_fprm = ALPHA_FPRM_NORM;
alpha_fptm = ALPHA_FPTM_N;
alpha_tp = ALPHA_TP_INSN;
alpha_fptm = ALPHA_FPTM_SU;
if (TARGET_IEEE_WITH_INEXACT)
alpha_tp = ALPHA_TP_INSN;
alpha_fptm = ALPHA_FPTM_SUI;
/* Decode -mtrap-precision=.  */
if (! strcmp (alpha_tp_string, "p"))
alpha_tp = ALPHA_TP_PROG;
else if (! strcmp (alpha_tp_string, "f"))
alpha_tp = ALPHA_TP_FUNC;
else if (! strcmp (alpha_tp_string, "i"))
alpha_tp = ALPHA_TP_INSN;
error ("bad value `%s' for -mtrap-precision switch", alpha_tp_string);
/* Decode -mfp-rounding-mode=.  */
if (alpha_fprm_string)
if (! strcmp (alpha_fprm_string, "n"))
alpha_fprm = ALPHA_FPRM_NORM;
else if (! strcmp (alpha_fprm_string, "m"))
alpha_fprm = ALPHA_FPRM_MINF;
else if (! strcmp (alpha_fprm_string, "c"))
alpha_fprm = ALPHA_FPRM_CHOP;
else if (! strcmp (alpha_fprm_string,"d"))
alpha_fprm = ALPHA_FPRM_DYN;
error ("bad value `%s' for -mfp-rounding-mode switch",
/* Decode -mfp-trap-mode=.  */
if (alpha_fptm_string)
if (strcmp (alpha_fptm_string, "n") == 0)
alpha_fptm = ALPHA_FPTM_N;
else if (strcmp (alpha_fptm_string, "u") == 0)
alpha_fptm = ALPHA_FPTM_U;
else if (strcmp (alpha_fptm_string, "su") == 0)
alpha_fptm = ALPHA_FPTM_SU;
else if (strcmp (alpha_fptm_string, "sui") == 0)
alpha_fptm = ALPHA_FPTM_SUI;
error ("bad value `%s' for -mfp-trap-mode switch", alpha_fptm_string);
/* Do some sanity checks on the above option. */
if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
&& alpha_tp != ALPHA_TP_INSN)
warning ("fp software completion requires -mtrap-precision=i");
alpha_tp = ALPHA_TP_INSN;
if (TARGET_FLOAT_VAX)
if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
warning ("rounding mode not supported for VAX floats");
alpha_fprm = ALPHA_FPRM_NORM;
if (alpha_fptm == ALPHA_FPTM_SUI)
warning ("trap mode not supported for VAX floats");
alpha_fptm = ALPHA_FPTM_SU;
/* Decode -mmemory-latency=: a raw cycle count, "Ln" for a cache level,
   or "main" for main-memory latency.  */
if (!alpha_mlat_string)
alpha_mlat_string = "L1";
if (isdigit (alpha_mlat_string[0])
&& (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
&& isdigit (alpha_mlat_string[1])
&& alpha_mlat_string[2] == '\0')
static int const cache_latency[][4] =
{ 3, 30, -1 },	/* ev4 -- Bcache is a guess */
{ 2, 12, 38 },	/* ev5 -- Bcache from PC164 LMbench numbers */
{ 3, 13, -1 },	/* ev6 -- Ho hum, doesn't exist yet */
lat = alpha_mlat_string[1] - '0';
if (lat < 0 || lat > 3 || cache_latency[alpha_cpu][lat-1] == -1)
warning ("L%d cache latency unknown for %s",
lat, alpha_cpu_name[alpha_cpu]);
lat = cache_latency[alpha_cpu][lat-1];
else if (! strcmp (alpha_mlat_string, "main"))
/* Most current memories have about 370ns latency. This is
   a reasonable guess for a fast cpu. */
warning ("bad value `%s' for -mmemory-latency", alpha_mlat_string);
alpha_memory_latency = lat;
/* Default the definition of "small data" to 8 bytes. */
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
/* NOTE(review): this is presumably zap_mask (it is called from and_operand
   below); the function header, loop increment/shift, and return statements
   were lost in this copy -- restore from the original revision.  */
for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
326 /* Returns 1 if OP is either the constant zero or a register. If a
327 register, it must be in the proper mode unless MODE is VOIDmode. */
330 reg_or_0_operand (op, mode)
332 enum machine_mode mode;
334 return op == const0_rtx || register_operand (op, mode);
337 /* Return 1 if OP is a constant in the range of 0-63 (for a shift) or
341 reg_or_6bit_operand (op, mode)
343 enum machine_mode mode;
345 return ((GET_CODE (op) == CONST_INT
346 && (unsigned HOST_WIDE_INT) INTVAL (op) < 64)
347 || register_operand (op, mode));
351 /* Return 1 if OP is an 8-bit constant or any register. */
354 reg_or_8bit_operand (op, mode)
356 enum machine_mode mode;
358 return ((GET_CODE (op) == CONST_INT
359 && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100)
360 || register_operand (op, mode));
363 /* Return 1 if OP is an 8-bit constant. */
366 cint8_operand (op, mode)
368 enum machine_mode mode;
370 return (GET_CODE (op) == CONST_INT
371 && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100);
374 /* Return 1 if the operand is a valid second operand to an add insn. */
377 add_operand (op, mode)
379 enum machine_mode mode;
381 if (GET_CODE (op) == CONST_INT)
382 return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'K')
383 || CONST_OK_FOR_LETTER_P (INTVAL (op), 'L')
384 || CONST_OK_FOR_LETTER_P (INTVAL (op), 'O'));
386 return register_operand (op, mode);
389 /* Return 1 if the operand is a valid second operand to a sign-extending
393 sext_add_operand (op, mode)
395 enum machine_mode mode;
397 if (GET_CODE (op) == CONST_INT)
398 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 255
399 || (unsigned HOST_WIDE_INT) (- INTVAL (op)) < 255);
401 return register_operand (op, mode);
404 /* Return 1 if OP is the constant 4 or 8. */
407 const48_operand (op, mode)
409 enum machine_mode mode;
411 return (GET_CODE (op) == CONST_INT
412 && (INTVAL (op) == 4 || INTVAL (op) == 8));
415 /* Return 1 if OP is a valid first operand to an AND insn. */
418 and_operand (op, mode)
420 enum machine_mode mode;
422 if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)
423 return (zap_mask (CONST_DOUBLE_LOW (op))
424 && zap_mask (CONST_DOUBLE_HIGH (op)));
426 if (GET_CODE (op) == CONST_INT)
427 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
428 || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100
429 || zap_mask (INTVAL (op)));
431 return register_operand (op, mode);
434 /* Return 1 if OP is a valid first operand to an IOR or XOR insn. */
437 or_operand (op, mode)
439 enum machine_mode mode;
441 if (GET_CODE (op) == CONST_INT)
442 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
443 || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100);
445 return register_operand (op, mode);
448 /* Return 1 if OP is a constant that is the width, in bits, of an integral
449 mode smaller than DImode. */
452 mode_width_operand (op, mode)
454 enum machine_mode mode;
456 return (GET_CODE (op) == CONST_INT
457 && (INTVAL (op) == 8 || INTVAL (op) == 16
458 || INTVAL (op) == 32 || INTVAL (op) == 64));
461 /* Return 1 if OP is a constant that is the width of an integral machine mode
462 smaller than an integer. */
465 mode_mask_operand (op, mode)
467 enum machine_mode mode;
469 #if HOST_BITS_PER_WIDE_INT == 32
470 if (GET_CODE (op) == CONST_DOUBLE)
471 return (CONST_DOUBLE_LOW (op) == -1
472 && (CONST_DOUBLE_HIGH (op) == -1
473 || CONST_DOUBLE_HIGH (op) == 0));
475 if (GET_CODE (op) == CONST_DOUBLE)
476 return (CONST_DOUBLE_LOW (op) == -1 && CONST_DOUBLE_HIGH (op) == 0);
479 return (GET_CODE (op) == CONST_INT
480 && (INTVAL (op) == 0xff
481 || INTVAL (op) == 0xffff
482 || INTVAL (op) == 0xffffffff
483 #if HOST_BITS_PER_WIDE_INT == 64
484 || INTVAL (op) == 0xffffffffffffffff
489 /* Return 1 if OP is a multiple of 8 less than 64. */
492 mul8_operand (op, mode)
494 enum machine_mode mode;
496 return (GET_CODE (op) == CONST_INT
497 && (unsigned HOST_WIDE_INT) INTVAL (op) < 64
498 && (INTVAL (op) & 7) == 0);
501 /* Return 1 if OP is the constant zero in floating-point. */
504 fp0_operand (op, mode)
506 enum machine_mode mode;
508 return (GET_MODE (op) == mode
509 && GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode));
512 /* Return 1 if OP is the floating-point constant zero or a register. */
515 reg_or_fp0_operand (op, mode)
517 enum machine_mode mode;
519 return fp0_operand (op, mode) || register_operand (op, mode);
522 /* Return 1 if OP is a hard floating-point register. */
525 hard_fp_register_operand (op, mode)
527 enum machine_mode mode;
529 return ((GET_CODE (op) == REG && REGNO_REG_CLASS (REGNO (op)) == FLOAT_REGS)
530 || (GET_CODE (op) == SUBREG
531 && hard_fp_register_operand (SUBREG_REG (op), mode)));
534 /* Return 1 if OP is a register or a constant integer. */
538 reg_or_cint_operand (op, mode)
540 enum machine_mode mode;
542 return GET_CODE (op) == CONST_INT || register_operand (op, mode);
545 /* Return 1 if OP is something that can be reloaded into a register;
546 if it is a MEM, it need not be valid. */
549 some_operand (op, mode)
551 enum machine_mode mode;
553 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
556 switch (GET_CODE (op))
558 case REG: case MEM: case CONST_DOUBLE:
559 case CONST_INT: case LABEL_REF: case SYMBOL_REF: case CONST:
563 return some_operand (SUBREG_REG (op), VOIDmode);
/* Return 1 if OP is a valid operand for the source of a move insn. */
/* NOTE(review): the function header (int, rtx op decl), braces, the switch
   case labels (SYMBOL_REF/LABEL_REF, SUBREG, REG, MEM, CONST_DOUBLE,
   CONST_INT, and the default return) were lost in this copy.  Only the
   surviving decision lines remain below.  */
input_operand (op, mode)
enum machine_mode mode;
if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
if (GET_MODE_CLASS (mode) == MODE_FLOAT && GET_MODE (op) != mode)
switch (GET_CODE (op))
/* This handles both the Windows/NT and OSF cases. */
return mode == ptr_mode || mode == DImode;
if (register_operand (op, mode))
/* ... fall through ... */
/* Byte/word memory moves need BWX; otherwise only wider modes.  */
return ((TARGET_BWX || (mode != HImode && mode != QImode))
&& general_operand (op, mode));
return GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode);
return mode == QImode || mode == HImode || add_operand (op, mode);
617 /* Return 1 if OP is a SYMBOL_REF for a function known to be in this
621 current_file_function_operand (op, mode)
623 enum machine_mode mode;
625 return (GET_CODE (op) == SYMBOL_REF
626 && ! profile_flag && ! profile_block_flag
627 && (SYMBOL_REF_FLAG (op)
628 || op == XEXP (DECL_RTL (current_function_decl), 0)));
631 /* Return 1 if OP is a valid operand for the MEM of a CALL insn. */
634 call_operand (op, mode)
636 enum machine_mode mode;
641 return (GET_CODE (op) == SYMBOL_REF
642 || (GET_CODE (op) == REG
643 && (TARGET_OPEN_VMS || TARGET_WINDOWS_NT || REGNO (op) == 27)));
646 /* Return 1 if OP is a valid Alpha comparison operator. Here we know which
647 comparisons are valid in which insn. */
650 alpha_comparison_operator (op, mode)
652 enum machine_mode mode;
654 enum rtx_code code = GET_CODE (op);
656 if (mode != GET_MODE (op) || GET_RTX_CLASS (code) != '<')
659 return (code == EQ || code == LE || code == LT
660 || (mode == DImode && (code == LEU || code == LTU)));
663 /* Return 1 if OP is a valid Alpha swapped comparison operator. */
666 alpha_swapped_comparison_operator (op, mode)
668 enum machine_mode mode;
670 enum rtx_code code = GET_CODE (op);
672 if (mode != GET_MODE (op) || GET_RTX_CLASS (code) != '<')
675 code = swap_condition (code);
676 return (code == EQ || code == LE || code == LT
677 || (mode == DImode && (code == LEU || code == LTU)));
680 /* Return 1 if OP is a signed comparison operation. */
683 signed_comparison_operator (op, mode)
685 enum machine_mode mode;
687 switch (GET_CODE (op))
689 case EQ: case NE: case LE: case LT: case GE: case GT:
699 /* Return 1 if this is a divide or modulus operator. */
702 divmod_operator (op, mode)
704 enum machine_mode mode;
706 switch (GET_CODE (op))
708 case DIV: case MOD: case UDIV: case UMOD:
/* Return 1 if this memory address is a known aligned register plus
   a constant.  It must be a valid address.  This means that we can do
   this as an aligned reference plus some offset.
   Take into account what reload will do.
   We could say that out-of-range stack slots are alignable, but that would
   complicate get_aligned_mem and it isn't worth the trouble since few
   functions have large stack space. */
/* NOTE(review): header line (int), rtx op decl, braces, `return 0` after the
   failing checks and the `op = XEXP (op, 0)` step before the PLUS test were
   lost in this copy.  Only comments added below.  */
aligned_memory_operand (op, mode)
enum machine_mode mode;
if (GET_CODE (op) == SUBREG)
if (GET_MODE (op) != mode)
op = SUBREG_REG (op);
mode = GET_MODE (op);
/* During reload a pseudo stands for its eventual stack slot.  */
if (reload_in_progress && GET_CODE (op) == REG
&& REGNO (op) >= FIRST_PSEUDO_REGISTER)
op = reg_equiv_mem[REGNO (op)];
if (GET_CODE (op) != MEM || GET_MODE (op) != mode
|| ! memory_address_p (mode, XEXP (op, 0)))
if (GET_CODE (op) == PLUS)
/* Aligned means the base register is known 8-byte aligned (2^4... the
   macro counts half-bytes; >= 4 means 16-bit alignment units -- see
   REGNO_POINTER_ALIGN; TODO confirm units).  */
return (GET_CODE (op) == REG
&& REGNO_POINTER_ALIGN (REGNO (op)) >= 4);
/* Similar, but return 1 if OP is a MEM which is not alignable. */
/* NOTE(review): function header, braces, early `return 0`s and the
   `op = XEXP (op, 0)` step were lost in this copy.  Only comments added.  */
unaligned_memory_operand (op, mode)
enum machine_mode mode;
if (GET_CODE (op) == SUBREG)
if (GET_MODE (op) != mode)
op = SUBREG_REG (op);
mode = GET_MODE (op);
if (reload_in_progress && GET_CODE (op) == REG
&& REGNO (op) >= FIRST_PSEUDO_REGISTER)
op = reg_equiv_mem[REGNO (op)];
if (GET_CODE (op) != MEM || GET_MODE (op) != mode)
if (! memory_address_p (mode, op))
if (GET_CODE (op) == PLUS)
/* Inverse of aligned_memory_operand's final test.  */
return (GET_CODE (op) != REG
|| REGNO_POINTER_ALIGN (REGNO (op)) < 4);
792 /* Return 1 if OP is either a register or an unaligned memory location. */
795 reg_or_unaligned_mem_operand (op, mode)
797 enum machine_mode mode;
799 return register_operand (op, mode) || unaligned_memory_operand (op, mode);
802 /* Return 1 if OP is any memory location. During reload a pseudo matches. */
805 any_memory_operand (op, mode)
807 enum machine_mode mode;
809 return (GET_CODE (op) == MEM
810 || (GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == REG)
811 || (reload_in_progress && GET_CODE (op) == REG
812 && REGNO (op) >= FIRST_PSEUDO_REGISTER)
813 || (reload_in_progress && GET_CODE (op) == SUBREG
814 && GET_CODE (SUBREG_REG (op)) == REG
815 && REGNO (SUBREG_REG (op)) >= FIRST_PSEUDO_REGISTER));
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM (NOTE(review): tail of comment and the function header,
   rtx ref decl, braces and the reload_in_progress guard around the
   reg_equiv_mem lookup were lost in this copy).  */
get_aligned_mem (ref, paligned_mem, pbitnum)
rtx *paligned_mem, *pbitnum;
HOST_WIDE_INT offset = 0;
if (GET_CODE (ref) == SUBREG)
offset = SUBREG_WORD (ref) * UNITS_PER_WORD;
if (BYTES_BIG_ENDIAN)
offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (ref)))
- MIN (UNITS_PER_WORD,
GET_MODE_SIZE (GET_MODE (SUBREG_REG (ref)))));
ref = SUBREG_REG (ref);
if (GET_CODE (ref) == REG)
ref = reg_equiv_mem[REGNO (ref)];
if (reload_in_progress)
base = find_replacement (&XEXP (ref, 0));
base = XEXP (ref, 0);
if (GET_CODE (base) == PLUS)
offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
/* Round the address down to the containing aligned longword...  */
*paligned_mem = gen_rtx_MEM (SImode,
plus_constant (base, offset & ~3));
MEM_IN_STRUCT_P (*paligned_mem) = MEM_IN_STRUCT_P (ref);
MEM_VOLATILE_P (*paligned_mem) = MEM_VOLATILE_P (ref);
RTX_UNCHANGING_P (*paligned_mem) = RTX_UNCHANGING_P (ref);
/* ...and report the byte-within-longword position as a bit count.  */
*pbitnum = GEN_INT ((offset & 3) * 8);
/* Similar, but just get the address.  Handle the two reload cases.
   Add EXTRA_OFFSET to the address we return. */
/* NOTE(review): function header, parameter declarations, braces and the
   else keyword before `base = XEXP (ref, 0);` were lost in this copy.  */
get_unaligned_address (ref, extra_offset)
HOST_WIDE_INT offset = 0;
if (GET_CODE (ref) == SUBREG)
offset = SUBREG_WORD (ref) * UNITS_PER_WORD;
if (BYTES_BIG_ENDIAN)
offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (ref)))
- MIN (UNITS_PER_WORD,
GET_MODE_SIZE (GET_MODE (SUBREG_REG (ref)))));
ref = SUBREG_REG (ref);
if (GET_CODE (ref) == REG)
ref = reg_equiv_mem[REGNO (ref)];
if (reload_in_progress)
base = find_replacement (&XEXP (ref, 0));
base = XEXP (ref, 0);
if (GET_CODE (base) == PLUS)
offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
return plus_constant (base, offset + extra_offset);
895 /* Subfunction of the following function. Update the flags of any MEM
896 found in part of X. */
899 alpha_set_memflags_1 (x, in_struct_p, volatile_p, unchanging_p)
901 int in_struct_p, volatile_p, unchanging_p;
905 switch (GET_CODE (x))
909 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
910 alpha_set_memflags_1 (XVECEXP (x, 0, i), in_struct_p, volatile_p,
915 alpha_set_memflags_1 (PATTERN (x), in_struct_p, volatile_p,
920 alpha_set_memflags_1 (SET_DEST (x), in_struct_p, volatile_p,
922 alpha_set_memflags_1 (SET_SRC (x), in_struct_p, volatile_p,
927 MEM_IN_STRUCT_P (x) = in_struct_p;
928 MEM_VOLATILE_P (x) = volatile_p;
929 RTX_UNCHANGING_P (x) = unchanging_p;
937 /* Given INSN, which is either an INSN or a SEQUENCE generated to
938 perform a memory operation, look for any MEMs in either a SET_DEST or
939 a SET_SRC and copy the in-struct, unchanging, and volatile flags from
940 REF into each of the MEMs found. If REF is not a MEM, don't do
944 alpha_set_memflags (insn, ref)
948 /* Note that it is always safe to get these flags, though they won't
949 be what we think if REF is not a MEM. */
950 int in_struct_p = MEM_IN_STRUCT_P (ref);
951 int volatile_p = MEM_VOLATILE_P (ref);
952 int unchanging_p = RTX_UNCHANGING_P (ref);
954 if (GET_CODE (ref) != MEM
955 || (! in_struct_p && ! volatile_p && ! unchanging_p))
958 alpha_set_memflags_1 (insn, in_struct_p, volatile_p, unchanging_p);
961 /* Try to output insns to set TARGET equal to the constant C if it can be
962 done in less than N insns. Do all computations in MODE. Returns the place
963 where the output has been placed if it can be done and the insns have been
964 emitted. If it would take more than N insns, zero is returned and no
965 insns and emitted. */
968 alpha_emit_set_const (target, mode, c, n)
970 enum machine_mode mode;
977 /* Try 1 insn, then 2, then up to N. */
978 for (i = 1; i <= n; i++)
979 if ((pat = alpha_emit_set_const_1 (target, mode, c, i)) != 0)
/* Internal routine for the above to check for N or below insns. */
/* NOTE(review): heavily truncated in this copy -- the `static rtx` header,
   parameter/local declarations (rtx target, HOST_WIDE_INT c, int n; rtx
   subtarget/temp; int i, bits), braces, #endif lines and several condition
   lines are missing.  Only comments added; do not compile as-is.  */
alpha_emit_set_const_1 (target, mode, c, n)
enum machine_mode mode;
HOST_WIDE_INT new = c;
/* Use a pseudo if highly optimizing and still generating RTL. */
= (flag_expensive_optimizations && rtx_equal_function_value_matters
#if HOST_BITS_PER_WIDE_INT == 64
/* We are only called for SImode and DImode. If this is SImode, ensure that
   we are sign extended to a full word. This does not make any sense when
   cross-compiling on a narrow machine. */
c = (c & 0xffffffff) - 2 * (c & 0x80000000);
/* If this is a sign-extended 32-bit constant, we can do this in at most
   three insns, so do it if we have enough insns left. We always have
   a sign-extended 32-bit constant when compiling on a narrow machine. */
if (HOST_BITS_PER_WIDE_INT != 64
|| c >> 31 == -1 || c >> 31 == 0)
/* Split C as low 16 bits + high 16 bits, each sign-extended.  */
HOST_WIDE_INT low = (c & 0xffff) - 2 * (c & 0x8000);
HOST_WIDE_INT tmp1 = c - low;
= ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
HOST_WIDE_INT extra = 0;
/* If HIGH will be interpreted as negative but the constant is
   positive, we must adjust it to do two ldha insns. */
if ((high & 0x8000) != 0 && c >= 0)
high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
if (c == low || (low == 0 && extra == 0))
/* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
   but that meant that we can't handle INT_MIN on 32-bit machines
   (like NT/Alpha), because we recurse indefinitely through
   emit_move_insn to gen_movdi. So instead, since we know exactly
   what we want, create it explicitly. */
target = gen_reg_rtx (mode);
emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
else if (n >= 2 + (extra != 0))
temp = copy_to_suggested_reg (GEN_INT (low), subtarget, mode);
temp = expand_binop (mode, add_optab, temp, GEN_INT (extra << 16),
subtarget, 0, OPTAB_WIDEN);
return expand_binop (mode, add_optab, temp, GEN_INT (high << 16),
target, 0, OPTAB_WIDEN);
/* If we couldn't do it that way, try some other methods. But if we have
   no instructions left, don't bother. Likewise, if this is SImode and
   we can't make pseudos, we can't do anything since the expand_binop
   and expand_unop calls will widen and try to make pseudos. */
|| (mode == SImode && ! rtx_equal_function_value_matters))
#if HOST_BITS_PER_WIDE_INT == 64
/* First, see if can load a value into the target that is the same as the
   constant except that all bytes that are 0 are changed to be 0xff. If we
   can, then we can do a ZAPNOT to obtain the desired constant. */
for (i = 0; i < 64; i += 8)
if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
new |= (HOST_WIDE_INT) 0xff << i;
/* We are only called for SImode and DImode. If this is SImode, ensure that
   we are sign extended to a full word. */
new = (new & 0xffffffff) - 2 * (new & 0x80000000);
&& (temp = alpha_emit_set_const (subtarget, mode, new, n - 1)) != 0)
return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
target, 0, OPTAB_WIDEN);
/* Next, see if we can load a related constant and then shift and possibly
   negate it to get the constant we want. Try this once each increasing
   numbers of insns. */
for (i = 1; i < n; i++)
/* First try complementing. */
if ((temp = alpha_emit_set_const (subtarget, mode, ~ c, i)) != 0)
return expand_unop (mode, one_cmpl_optab, temp, target, 0);
/* Next try to form a constant and do a left shift. We can do this
   if some low-order bits are zero; the exact_log2 call below tells
   us that information. The bits we are shifting out could be any
   value, but here we'll just try the 0- and sign-extended forms of
   the constant. To try to increase the chance of having the same
   constant in more than one insn, start at the highest number of
   bits to shift, but try all possibilities in case a ZAPNOT will
   be useful. */
if ((bits = exact_log2 (c & - c)) > 0)
for (; bits > 0; bits--)
if ((temp = (alpha_emit_set_const
(unsigned HOST_WIDE_INT) c >> bits, i))) != 0
|| ((temp = (alpha_emit_set_const
((unsigned HOST_WIDE_INT) c) >> bits, i)))
return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
target, 0, OPTAB_WIDEN);
/* Now try high-order zero bits. Here we try the shifted-in bits as
   all zero and all ones. Be careful to avoid shifting outside the
   mode and to avoid shifting outside the host wide int size. */
/* On narrow hosts, don't shift a 1 into the high bit, since we'll
   confuse the recursive call and set all of the high 32 bits. */
if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
- floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64))) > 0)
for (; bits > 0; bits--)
if ((temp = alpha_emit_set_const (subtarget, mode,
|| ((temp = (alpha_emit_set_const
((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
target, 1, OPTAB_WIDEN);
/* Now try high-order 1 bits. We get that with a sign-extension.
   But one bit isn't enough here. Be careful to avoid shifting outside
   the mode and to avoid shifting outside the host wide int size. */
if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
- floor_log2 (~ c) - 2)) > 0)
for (; bits > 0; bits--)
if ((temp = alpha_emit_set_const (subtarget, mode,
|| ((temp = (alpha_emit_set_const
((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
target, 0, OPTAB_WIDEN);
#if HOST_BITS_PER_WIDE_INT == 64
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straight forward decomposition. We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const. */
/* NOTE(review): the rtx header, parameter declarations, braces, the
   intermediate shifts between d1..d4 computations, and several control
   lines were lost in this copy.  Only comments added.  */
alpha_emit_set_long_const (target, c)
/* Use a pseudo if highly optimizing and still generating RTL. */
= (flag_expensive_optimizations && rtx_equal_function_value_matters
HOST_WIDE_INT d1, d2, d3, d4;
/* Decompose the entire word into four sign-extended pieces
   (low 16, low 32, and the same for the high half).  */
d1 = ((c & 0xffff) ^ 0x8000) - 0x8000;
d2 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
d3 = ((c & 0xffff) ^ 0x8000) - 0x8000;
d4 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
/* Construct the high word */
r1 = copy_to_suggested_reg (GEN_INT (d4), subtarget, DImode);
r1 = copy_to_suggested_reg (GEN_INT (d3), subtarget, DImode);
r1 = expand_binop (DImode, add_optab, GEN_INT (d3), GEN_INT (d4),
subtarget, 0, OPTAB_WIDEN);
/* Shift it into place */
r2 = expand_binop (DImode, ashl_optab, r1, GEN_INT (32),
subtarget, 0, OPTAB_WIDEN);
if (subtarget == 0 && d1 == d3 && d2 == d4)
r1 = expand_binop (DImode, add_optab, r1, r2, subtarget, 0, OPTAB_WIDEN);
/* Add in the low word */
r1 = expand_binop (DImode, add_optab, r1, GEN_INT (d2),
subtarget, 0, OPTAB_WIDEN);
r1 = expand_binop (DImode, add_optab, r1, GEN_INT (d1),
subtarget, 0, OPTAB_WIDEN);
r1 = copy_to_suggested_reg(r1, target, DImode);
#endif /* HOST_BITS_PER_WIDE_INT == 64 */
/* Rewrite a comparison against zero CMP of the form
   (CODE (cc0) (const_int 0)) so it can be written validly in
   a conditional move (if_then_else CMP ...).
   If both of the operands that set cc0 are non-zero we must emit
   an insn to perform the compare (it can't be done within
   the conditional move). */
/* NOTE(review): the rtx header, `rtx tem;` declaration, the `return 0;`
   after the fp-consistency check, the switch statement head, `break`s and
   the default/abort arm were lost in this copy.  Only comments added.  */
alpha_emit_conditional_move (cmp, mode)
enum machine_mode mode;
enum rtx_code code = GET_CODE (cmp);
enum rtx_code cmov_code = NE;
/* Operands were stashed by the cmpxx expander (see globals above).  */
rtx op0 = alpha_compare_op0;
rtx op1 = alpha_compare_op1;
enum machine_mode cmp_mode
= (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
enum machine_mode cmp_op_mode = alpha_compare_fp_p ? DFmode : DImode;
if (alpha_compare_fp_p != FLOAT_MODE_P (mode))
/* We may be able to use a conditional move directly.
   This avoids emitting spurious compares. */
if (signed_comparison_operator (cmp, cmp_op_mode)
&& (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
/* We can't put the comparison insides a conditional move;
   emit a compare instruction and put that inside the
   conditional move. Make sure we emit only comparisons we have;
   swap or reverse as necessary. */
case EQ: case LE: case LT: case LEU: case LTU:
/* We have these compares: */
/* This must be reversed. */
code = reverse_condition (code);
case GE: case GT: case GEU: case GTU:
/* These must be swapped. Make sure the new first operand is in
   a register (tail of comment truncated in this copy).  */
code = swap_condition (code);
tem = op0, op0 = op1, op1 = tem;
op0 = force_reg (cmp_mode, op0);
/* Emit the compare into a scratch and cmov on (scratch != 0).  */
tem = gen_reg_rtx (cmp_op_mode);
emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
return gen_rtx_fmt_ee (cmov_code, VOIDmode, tem, CONST0_RTX (cmp_op_mode));
/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
   unaligned data (parts of this example comment were truncated):

   word: ldq_u r1,X(r11)	ldq_u r1,X(r11)
   ldq_u r2,X+1(r11)		ldq_u r2,X+1(r11)
   lda r3,X(r11)		lda r3,X+2(r11)
   extwl r1,r3,r1		extql r1,r3,r1
   extwh r2,r3,r2		extqh r2,r3,r2
   or r1.r2.r1			or r1,r2,r1

   long: ldq_u r1,X(r11)	ldq_u r1,X(r11)
   ldq_u r2,X+3(r11)		ldq_u r2,X+3(r11)
   lda r3,X(r11)		lda r3,X(r11)
   extll r1,r3,r1		extll r1,r3,r1
   extlh r2,r3,r2		extlh r2,r3,r2
   or r1.r2.r1			addl r1,r2,r1

   quad: ldq_u r1,X(r11)  */
/* NOTE(review): the void header, parameter declarations, braces, the
   GEN_INT(-8) mask arguments of the AND addresses, the else branch
   structure and the switch on SIZE were lost in this copy.  */
alpha_expand_unaligned_load (tgt, mem, size, ofs, sign)
HOST_WIDE_INT size, ofs;
rtx meml, memh, addr, extl, exth;
enum machine_mode mode;
meml = gen_reg_rtx (DImode);
memh = gen_reg_rtx (DImode);
addr = gen_reg_rtx (DImode);
extl = gen_reg_rtx (DImode);
exth = gen_reg_rtx (DImode);
/* Load the two aligned quadwords covering the unaligned datum.  */
emit_move_insn (meml,
change_address (mem, DImode,
gen_rtx_AND (DImode,
plus_constant (XEXP (mem, 0),
emit_move_insn (memh,
change_address (mem, DImode,
gen_rtx_AND (DImode,
plus_constant (XEXP (mem, 0),
/* Signed halfword load: extract as quad, then arithmetic-shift down.  */
if (sign && size == 2)
emit_move_insn (addr, plus_constant (XEXP (mem, 0), ofs+2));
emit_insn (gen_extxl (extl, meml, GEN_INT (64), addr));
emit_insn (gen_extqh (exth, memh, addr));
/* We must use tgt here for the target. Alpha-vms port fails if we use
   addr for the target, because addr is marked as a pointer and combine
   knows that pointers are always sign-extended 32 bit values. */
addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
addr, 1, OPTAB_WIDEN);
emit_move_insn (addr, plus_constant (XEXP (mem, 0), ofs));
emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
/* High-part extract depends on SIZE: 2, 4, or 8 bytes.  */
emit_insn (gen_extwh (exth, memh, addr));
emit_insn (gen_extlh (exth, memh, addr));
emit_insn (gen_extqh (exth, memh, addr));
addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
emit_move_insn (tgt, gen_lowpart(GET_MODE (tgt), addr));
1390 /* Similarly, use ins and msk instructions to perform unaligned stores. */
/* Store SIZE bytes of SRC to DST at byte offset OFS using the
   ldq_u / ins* / msk* / stq_u read-modify-write sequence.  SRC may be
   const0_rtx, in which case the insertion step is skipped and only the
   masking (which zeroes the field) is performed.
   NOTE(review): interior lines (braces, else-arms, size dispatch) are
   elided in this listing.  */
1393 alpha_expand_unaligned_store (dst, src, size, ofs)
1395 HOST_WIDE_INT size, ofs;
1397 rtx dstl, dsth, addr, insl, insh, meml, memh;
1399 dstl = gen_reg_rtx (DImode);
1400 dsth = gen_reg_rtx (DImode);
1401 insl = gen_reg_rtx (DImode);
1402 insh = gen_reg_rtx (DImode);
/* The two aligned quadwords covering the destination field.  */
1404 meml = change_address (dst, DImode,
1405 gen_rtx_AND (DImode,
1406 plus_constant (XEXP (dst, 0), ofs),
1408 memh = change_address (dst, DImode,
1409 gen_rtx_AND (DImode,
1410 plus_constant (XEXP (dst, 0),
/* Read both quadwords before modifying either.  */
1414 emit_move_insn (dsth, memh);
1415 emit_move_insn (dstl, meml);
1416 addr = copy_addr_to_reg (plus_constant (XEXP (dst, 0), ofs));
/* Shift SRC into position within the two quadwords (skipped when
   storing zeros).  */
1418 if (src != const0_rtx)
1420 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
1421 GEN_INT (size*8), addr));
1426 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
1429 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
1432 emit_insn (gen_insql (insl, src, addr));
/* Clear the destination field in both quadwords; the mask constant
   depends on SIZE (2, 4, or 8 bytes).  */
1437 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
1442 emit_insn (gen_mskxl (dstl, dstl, GEN_INT (0xffff), addr));
1445 emit_insn (gen_mskxl (dstl, dstl, GEN_INT (0xffffffff), addr));
/* An all-ones DImode mask needs CONST_DOUBLE on 32-bit hosts.  */
1449 #if HOST_BITS_PER_WIDE_INT == 32
1450 rtx msk = immed_double_const (0xffffffff, 0xffffffff, DImode);
1452 rtx msk = immed_double_const (0xffffffffffffffff, 0, DImode);
1454 emit_insn (gen_mskxl (dstl, dstl, msk, addr));
/* Merge the shifted source bits into the cleared fields.  */
1459 if (src != const0_rtx)
1461 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
1462 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
1465 /* Must store high before low for degenerate case of aligned. */
1466 emit_move_insn (memh, dsth);
1467 emit_move_insn (meml, dstl);
1470 /* The block move code tries to maximize speed by separating loads and
1471 stores at the expense of register pressure: we load all of the data
1472 before we store it back out. There are two secondary effects worth
1473 mentioning, that this speeds copying to/from aligned and unaligned
1474 buffers, and that it makes the code significantly easier to write. */
1476 #define MAX_MOVE_WORDS 8
1478 /* Load an integral number of consecutive unaligned quadwords. */
/* Load WORDS consecutive unaligned quadwords from SMEM+OFS into
   OUT_REGS[0..WORDS-1].  Reads WORDS+1 aligned quadwords and stitches
   adjacent pairs together with extql/extqh, using a cmov to handle the
   aligned (offset-zero) corner case described below.
   NOTE(review): some lines (declarations of i/sreg/areg, braces, AND
   masks) are elided in this listing.  */
1481 alpha_expand_unaligned_load_words (out_regs, smem, words, ofs)
1484 HOST_WIDE_INT words, ofs;
1486 rtx const im8 = GEN_INT (-8);
1487 rtx const i64 = GEN_INT (64);
1488 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
1492 /* Generate all the tmp registers we need. */
1493 for (i = 0; i < words; ++i)
1495 data_regs[i] = out_regs[i];
1496 ext_tmps[i] = gen_reg_rtx (DImode);
1498 data_regs[words] = gen_reg_rtx (DImode);
/* Fold OFS into the source address up front.  */
1501 smem = change_address (smem, GET_MODE (smem),
1502 plus_constant (XEXP (smem, 0), ofs));
1504 /* Load up all of the source data. */
1505 for (i = 0; i < words; ++i)
1507 emit_move_insn (data_regs[i],
1508 change_address (smem, DImode,
1509 gen_rtx_AND (DImode,
1510 plus_constant (XEXP(smem,0),
/* One extra quadword covers the tail of the last unaligned word.  */
1514 emit_move_insn (data_regs[words],
1515 change_address (smem, DImode,
1516 gen_rtx_AND (DImode,
1517 plus_constant (XEXP(smem,0),
1521 /* Extract the half-word fragments. Unfortunately DEC decided to make
1522 extxh with offset zero a noop instead of zeroing the register, so
1523 we must take care of that edge condition ourselves with cmov. */
1525 sreg = copy_addr_to_reg (XEXP (smem, 0));
/* areg = sreg & 7: nonzero iff the source is actually misaligned.  */
1526 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
1528 for (i = 0; i < words; ++i)
1530 emit_insn (gen_extxl (data_regs[i], data_regs[i], i64, sreg));
1532 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
/* If the address was aligned (areg == 0), force the high fragment to
   zero instead of trusting the extqh noop result.  */
1533 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
1534 gen_rtx_IF_THEN_ELSE (DImode,
1535 gen_rtx_EQ (DImode, areg,
1537 const0_rtx, ext_tmps[i])));
1540 /* Merge the half-words into whole words. */
1541 for (i = 0; i < words; ++i)
1543 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
1544 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
1548 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
1549 may be NULL to store zeros. */
/* Store WORDS consecutive unaligned quadwords from DATA_REGS to
   DMEM+OFS.  DATA_REGS may be NULL to store zeros.  Only the two end
   quadwords need a read-modify-write (msk + ior); the interior
   quadwords are fully overwritten.
   NOTE(review): interior lines (loop variable declaration, braces, AND
   masks, #else/#endif) are elided in this listing.  */
1552 alpha_expand_unaligned_store_words (data_regs, dmem, words, ofs)
1555 HOST_WIDE_INT words, ofs;
1557 rtx const im8 = GEN_INT (-8);
1558 rtx const i64 = GEN_INT (64);
/* im1 = all-ones DImode constant; built differently on 32-bit hosts. */
1559 #if HOST_BITS_PER_WIDE_INT == 32
1560 rtx const im1 = immed_double_const (0xffffffff, 0xffffffff, DImode);
1562 rtx const im1 = immed_double_const (0xffffffffffffffff, 0, DImode);
1564 rtx ins_tmps[MAX_MOVE_WORDS];
1565 rtx st_tmp_1, st_tmp_2, dreg;
1566 rtx st_addr_1, st_addr_2;
1569 /* Generate all the tmp registers we need. */
1570 if (data_regs != NULL)
1571 for (i = 0; i < words; ++i)
1572 ins_tmps[i] = gen_reg_rtx(DImode);
1573 st_tmp_1 = gen_reg_rtx(DImode);
1574 st_tmp_2 = gen_reg_rtx(DImode);
/* Fold OFS into the destination address up front.  */
1577 dmem = change_address (dmem, GET_MODE (dmem),
1578 plus_constant (XEXP (dmem, 0), ofs));
/* st_addr_2/st_addr_1: the aligned quadwords at the high and low ends
   of the destination range.  */
1581 st_addr_2 = change_address (dmem, DImode,
1582 gen_rtx_AND (DImode,
1583 plus_constant (XEXP(dmem,0),
1586 st_addr_1 = change_address (dmem, DImode,
1587 gen_rtx_AND (DImode,
1591 /* Load up the destination end bits. */
1592 emit_move_insn (st_tmp_2, st_addr_2);
1593 emit_move_insn (st_tmp_1, st_addr_1);
1595 /* Shift the input data into place. */
1596 dreg = copy_addr_to_reg (XEXP (dmem, 0));
1597 if (data_regs != NULL)
1599 for (i = words-1; i >= 0; --i)
1601 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
1602 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
/* Combine the high fragment of word i-1 with the low fragment of
   word i to form the interior quadwords.  */
1604 for (i = words-1; i > 0; --i)
1606 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
1607 ins_tmps[i-1], ins_tmps[i-1], 1,
1612 /* Split and merge the ends with the destination data. */
1613 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
1614 emit_insn (gen_mskxl (st_tmp_1, st_tmp_1, im1, dreg));
1616 if (data_regs != NULL)
1618 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
1619 st_tmp_2, 1, OPTAB_WIDEN);
1620 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
1621 st_tmp_1, 1, OPTAB_WIDEN);
/* Store high end first, then the interior quadwords descending, then
   the low end (matters when the destination is actually aligned).  */
1625 emit_move_insn (st_addr_2, st_tmp_2);
1626 for (i = words-1; i > 0; --i)
1628 emit_move_insn (change_address (dmem, DImode,
1629 gen_rtx_AND (DImode,
1630 plus_constant(XEXP (dmem,0),
1633 data_regs ? ins_tmps[i-1] : const0_rtx);
1635 emit_move_insn (st_addr_1, st_tmp_1);
1639 /* Expand string/block move operations.
1641 operands[0] is the pointer to the destination.
1642 operands[1] is the pointer to the source.
1643 operands[2] is the number of bytes to move.
1644 operands[3] is the alignment. */
/* Expand a block move.  operands[0]/[1] are the destination/source
   MEMs, operands[2] the byte count, operands[3] the alignment.
   Strategy (see the comment above): load the entire block into
   registers first, then store it all back out, choosing access widths
   from the best alignment that can be proven for each side.
   NOTE(review): many interior lines (braces, else-arms, FAIL/DONE
   returns, ofs updates) are elided in this listing.  */
1647 alpha_expand_block_move (operands)
1650 rtx bytes_rtx = operands[2];
1651 rtx align_rtx = operands[3];
1652 HOST_WIDE_INT bytes = INTVAL (bytes_rtx);
1653 HOST_WIDE_INT src_align = INTVAL (align_rtx);
1654 HOST_WIDE_INT dst_align = src_align;
1655 rtx orig_src = operands[1];
1656 rtx orig_dst = operands[0];
1657 rtx data_regs[2*MAX_MOVE_WORDS+16];
1659 int i, words, ofs, nregs = 0;
/* Give up on blocks too large to hold in registers at once.  */
1663 if (bytes > MAX_MOVE_WORDS*8)
1666 /* Look for additional alignment information from recorded register info. */
1668 tmp = XEXP (orig_src, 0);
1669 if (GET_CODE (tmp) == REG)
1671 if (REGNO_POINTER_ALIGN (REGNO (tmp)) > src_align)
1672 src_align = REGNO_POINTER_ALIGN (REGNO (tmp));
1674 else if (GET_CODE (tmp) == PLUS
1675 && GET_CODE (XEXP (tmp, 0)) == REG
1676 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
1678 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
1679 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
/* reg+const keeps the alignment of reg limited by the constant.  */
1683 if (a >= 8 && c % 8 == 0)
1685 else if (a >= 4 && c % 4 == 0)
1687 else if (a >= 2 && c % 2 == 0)
/* Same analysis for the destination pointer.  */
1692 tmp = XEXP (orig_dst, 0);
1693 if (GET_CODE (tmp) == REG)
1695 if (REGNO_POINTER_ALIGN (REGNO (tmp)) > dst_align)
1696 dst_align = REGNO_POINTER_ALIGN (REGNO (tmp));
1698 else if (GET_CODE (tmp) == PLUS
1699 && GET_CODE (XEXP (tmp, 0)) == REG
1700 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
1702 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
1703 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
1707 if (a >= 8 && c % 8 == 0)
1709 else if (a >= 4 && c % 4 == 0)
1711 else if (a >= 2 && c % 2 == 0)
1717 * Load the entire block into registers.
/* A source behind an ADDRESSOF may already live in a pseudo; use it
   directly when a suitable integer mode exists.  */
1720 if (GET_CODE (XEXP (orig_src, 0)) == ADDRESSOF)
1722 enum machine_mode mode;
1723 tmp = XEXP (XEXP (orig_src, 0), 0);
1725 mode = mode_for_size (bytes, MODE_INT, 1);
1727 && GET_MODE_SIZE (GET_MODE (tmp)) <= bytes)
1729 /* Whee! Optimize the load to use the existing register. */
1730 data_regs[nregs++] = gen_lowpart (mode, tmp);
1734 /* ??? We could potentially be copying 3 bytes or whatnot from
1735 a wider reg. Probably not worth worrying about. */
1736 /* No appropriate mode; fall back on memory. */
1737 orig_src = change_address (orig_src, GET_MODE (orig_src),
1738 copy_addr_to_reg (XEXP (orig_src, 0)));
/* Aligned quadword loads.  */
1742 if (src_align >= 8 && bytes >= 8)
1746 for (i = 0; i < words; ++i)
1747 data_regs[nregs+i] = gen_reg_rtx(DImode);
1749 for (i = 0; i < words; ++i)
1751 emit_move_insn (data_regs[nregs+i],
1752 change_address(orig_src, DImode,
1753 plus_constant (XEXP (orig_src, 0),
/* Aligned longword loads.  */
1761 if (src_align >= 4 && bytes >= 4)
1765 for (i = 0; i < words; ++i)
1766 data_regs[nregs+i] = gen_reg_rtx(SImode);
1768 for (i = 0; i < words; ++i)
1770 emit_move_insn (data_regs[nregs+i],
1771 change_address(orig_src, SImode,
1772 plus_constant (XEXP (orig_src, 0),
/* Unaligned multi-quadword loads (needs one extra scratch reg).  */
1784 for (i = 0; i < words+1; ++i)
1785 data_regs[nregs+i] = gen_reg_rtx(DImode);
1787 alpha_expand_unaligned_load_words(data_regs+nregs, orig_src, words, ofs);
/* Remaining unaligned pieces: 8, 4, then 2-byte chunks.  */
1793 if (!TARGET_BWX && bytes >= 8)
1795 data_regs[nregs++] = tmp = gen_reg_rtx (DImode);
1796 alpha_expand_unaligned_load (tmp, orig_src, 8, ofs, 0);
1800 if (!TARGET_BWX && bytes >= 4)
1802 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
1803 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
/* 2-byte-aligned tail: direct HImode loads.  */
1812 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
1813 emit_move_insn (tmp,
1814 change_address (orig_src, HImode,
1815 plus_constant (XEXP (orig_src, 0),
1819 } while (bytes >= 2);
1821 else if (!TARGET_BWX)
1823 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
1824 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
/* Single-byte tail.  */
1831 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
1832 emit_move_insn (tmp,
1833 change_address (orig_src, QImode,
1834 plus_constant (XEXP (orig_src, 0),
/* Sanity check: we must not have overflowed the data_regs array.  */
1841 if (nregs > sizeof(data_regs)/sizeof(*data_regs))
1845 * Now save it back out again.
/* Destination behind ADDRESSOF: store straight into the pseudo when
   modes line up and the whole block is in one register.  */
1850 if (GET_CODE (XEXP (orig_dst, 0)) == ADDRESSOF)
1852 enum machine_mode mode;
1853 tmp = XEXP (XEXP (orig_dst, 0), 0);
1855 mode = mode_for_size (bytes, MODE_INT, 1);
1856 if (GET_MODE (tmp) == mode && nregs == 1)
1858 emit_move_insn (tmp, data_regs[0]);
1863 /* ??? If nregs > 1, consider reconstructing the word in regs. */
1864 /* ??? Optimize mode < dst_mode with strict_low_part. */
1865 /* No appropriate mode; fall back on memory. */
1866 orig_dst = change_address (orig_dst, GET_MODE (orig_dst),
1867 copy_addr_to_reg (XEXP (orig_dst, 0)));
1870 /* Write out the data in whatever chunks reading the source allowed. */
/* Aligned quadword stores.  */
1873 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
1875 emit_move_insn (change_address(orig_dst, DImode,
1876 plus_constant (XEXP (orig_dst, 0),
1885 /* If the source has remaining DImode regs, write them out in
/* ...two SImode halves each: low half directly, high half via a
   logical shift right by 32.  */
1887 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
1889 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
1890 NULL_RTX, 1, OPTAB_WIDEN);
1892 emit_move_insn (change_address(orig_dst, SImode,
1893 plus_constant (XEXP (orig_dst, 0),
1895 gen_lowpart (SImode, data_regs[i]));
1896 emit_move_insn (change_address(orig_dst, SImode,
1897 plus_constant (XEXP (orig_dst, 0),
1899 gen_lowpart (SImode, tmp));
/* Aligned longword stores.  */
1904 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
1906 emit_move_insn (change_address(orig_dst, SImode,
1907 plus_constant (XEXP (orig_dst, 0),
/* Unaligned destination: one quadword via unaligned store, several
   via the multi-word store helper.  */
1914 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
1916 /* Write out a remaining block of words using unaligned methods. */
1918 for (words = 1; i+words < nregs ; ++words)
1919 if (GET_MODE (data_regs[i+words]) != DImode)
1923 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
1925 alpha_expand_unaligned_store_words (data_regs+i, orig_dst, words, ofs);
1931 /* Due to the above, this won't be aligned. */
1932 /* ??? If we have more than one of these, consider constructing full
1933 words in registers and using alpha_expand_unaligned_store_words. */
1934 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
1936 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
/* HImode pieces: aligned stores or unaligned 2-byte stores.  */
1942 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
1944 emit_move_insn (change_address (orig_dst, HImode,
1945 plus_constant (XEXP (orig_dst, 0),
1952 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
1954 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
/* Trailing single bytes.  */
1958 while (i < nregs && GET_MODE (data_regs[i]) == QImode)
1960 emit_move_insn (change_address (orig_dst, QImode,
1961 plus_constant (XEXP (orig_dst, 0),
/* Expand a block clear (store zeros).  operands[0] is the destination
   MEM, operands[1] the byte count, operands[2] the alignment.  Mirrors
   alpha_expand_block_move but stores const0_rtx instead of loaded
   data, so no registers need be read first.
   NOTE(review): interior lines (braces, FAIL/DONE returns, ofs/bytes
   updates) are elided in this listing.  */
1976 alpha_expand_block_clear (operands)
1979 rtx bytes_rtx = operands[1];
1980 rtx align_rtx = operands[2];
1981 HOST_WIDE_INT bytes = INTVAL (bytes_rtx);
1982 HOST_WIDE_INT align = INTVAL (align_rtx);
1983 rtx orig_dst = operands[0];
1985 HOST_WIDE_INT i, words, ofs = 0;
/* Give up on blocks too large for the inline sequence.  */
1989 if (bytes > MAX_MOVE_WORDS*8)
1992 /* Look for stricter alignment. */
1994 tmp = XEXP (orig_dst, 0);
1995 if (GET_CODE (tmp) == REG)
1997 if (REGNO_POINTER_ALIGN (REGNO (tmp)) > align)
1998 align = REGNO_POINTER_ALIGN (REGNO (tmp));
2000 else if (GET_CODE (tmp) == PLUS
2001 && GET_CODE (XEXP (tmp, 0)) == REG
2002 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
2004 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
2005 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
/* reg+const alignment is bounded by the constant's low bits.  */
2009 if (a >= 8 && c % 8 == 0)
2011 else if (a >= 4 && c % 4 == 0)
2013 else if (a >= 2 && c % 2 == 0)
2018 /* Handle a block of contiguous words first. */
/* Aligned quadword stores of zero.  */
2020 if (align >= 8 && bytes >= 8)
2024 for (i = 0; i < words; ++i)
2026 emit_move_insn (change_address(orig_dst, DImode,
2027 plus_constant (XEXP (orig_dst, 0),
/* Aligned longword stores of zero.  */
2035 if (align >= 4 && bytes >= 4)
2039 for (i = 0; i < words; ++i)
2041 emit_move_insn (change_address(orig_dst, SImode,
2042 plus_constant (XEXP (orig_dst, 0),
/* Unaligned: NULL data_regs means "store zeros".  */
2054 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
2060 /* Next clean up any trailing pieces. We know from the contiguous
2061 block move that there are no aligned SImode or DImode hunks left. */
2063 if (!TARGET_BWX && bytes >= 8)
2065 alpha_expand_unaligned_store (orig_dst, const0_rtx, 8, ofs);
2069 if (!TARGET_BWX && bytes >= 4)
2071 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
/* 2-byte-aligned tail: direct HImode stores of zero.  */
2080 emit_move_insn (change_address (orig_dst, HImode,
2081 plus_constant (XEXP (orig_dst, 0),
2086 } while (bytes >= 2);
2088 else if (!TARGET_BWX)
2090 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
/* Trailing single bytes.  */
2097 emit_move_insn (change_address (orig_dst, QImode,
2098 plus_constant (XEXP (orig_dst, 0),
2109 /* Adjust the cost of a scheduling dependency. Return the new cost of
2110 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
/* Adjust the scheduler's cost estimate for the dependence LINK between
   INSN and DEP_INSN; COST is the current estimate, the adjusted value
   is returned.  Encodes EV4/EV5 bypass and latency rules from the
   hardware manuals.
   NOTE(review): CPU-selection tests, case labels and return statements
   between the visible fragments are elided in this listing.  */
2113 alpha_adjust_cost (insn, link, dep_insn, cost)
2120 enum attr_type insn_type, dep_insn_type;
2122 /* If the dependence is an anti-dependence, there is no cost. For an
2123 output dependence, there is sometimes a cost, but it doesn't seem
2124 worth handling those few cases. */
2126 if (REG_NOTE_KIND (link) != 0)
2129 /* If we can't recognize the insns, we can't really do anything. */
2130 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
2133 insn_type = get_attr_type (insn);
2134 dep_insn_type = get_attr_type (dep_insn);
2136 /* Bring in the user-defined memory latency. */
2137 if (dep_insn_type == TYPE_ILD
2138 || dep_insn_type == TYPE_FLD
2139 || dep_insn_type == TYPE_LDSYM)
2140 cost += alpha_memory_latency-1;
2145 /* On EV4, if INSN is a store insn and DEP_INSN is setting the data
2146 being stored, we can sometimes lower the cost. */
2148 if ((insn_type == TYPE_IST || insn_type == TYPE_FST)
2149 && (set = single_set (dep_insn)) != 0
2150 && GET_CODE (PATTERN (insn)) == SET
2151 && rtx_equal_p (SET_DEST (set), SET_SRC (PATTERN (insn))))
2153 switch (dep_insn_type)
2157 /* No savings here. */
2161 /* In these cases, we save one cycle. */
2165 /* In all other cases, we save two cycles. */
2166 return MAX (0, cost - 2);
2170 /* Another case that needs adjustment is an arithmetic or logical
2171 operation. Its cost is usually one cycle, but we default it to
2172 two in the MD file. The only case that it is actually two is
2173 for the address in loads, stores, and jumps. */
2175 if (dep_insn_type == TYPE_IADD || dep_insn_type == TYPE_ILOG)
2190 /* The final case is when a compare feeds into an integer branch;
2191 the cost is only one cycle in that case. */
2193 if (dep_insn_type == TYPE_ICMP && insn_type == TYPE_IBR)
2198 /* And the lord DEC saith: "A special bypass provides an effective
2199 latency of 0 cycles for an ICMP or ILOG insn producing the test
2200 operand of an IBR or ICMOV insn." */
2202 if ((dep_insn_type == TYPE_ICMP || dep_insn_type == TYPE_ILOG)
2203 && (set = single_set (dep_insn)) != 0)
2205 /* A branch only has one input. This must be it. */
2206 if (insn_type == TYPE_IBR)
2208 /* A conditional move has three, make sure it is the test. */
2209 if (insn_type == TYPE_ICMOV
2210 && GET_CODE (set_src = PATTERN (insn)) == SET
2211 && GET_CODE (set_src = SET_SRC (set_src)) == IF_THEN_ELSE
2212 && rtx_equal_p (SET_DEST (set), XEXP (set_src, 0)))
2216 /* "The multiplier is unable to receive data from IEU bypass paths.
2217 The instruction issues at the expected time, but its latency is
2218 increased by the time it takes for the input data to become
2219 available to the multiplier" -- which happens in pipeline stage
2220 six, when results are committed to the register file. */
2222 if (insn_type == TYPE_IMUL)
2224 switch (dep_insn_type)
2226 /* These insns produce their results in pipeline stage five. */
2233 /* Other integer insns produce results in pipeline stage four. */
2241 /* There is additional latency to move the result of (most) FP
2242 operations anywhere but the FP register file. */
2244 if ((insn_type == TYPE_FST || insn_type == TYPE_FTOI)
2245 && (dep_insn_type == TYPE_FADD ||
2246 dep_insn_type == TYPE_FMUL ||
2247 dep_insn_type == TYPE_FCMOV))
2253 /* Otherwise, return the default cost. */
2257 /* Functions to save and restore alpha_return_addr_rtx. */
2259 struct machine_function
/* Save per-function machine state (currently just alpha_return_addr_rtx)
   into P->machine when compilation of a function is suspended for a
   nested function.  The struct is heap-allocated and owned by P.  */
2265 alpha_save_machine_status (p)
2268 struct machine_function *machine =
2269 (struct machine_function *) xmalloc (sizeof (struct machine_function));
2271 p->machine = machine;
2272 machine->ra_rtx = alpha_return_addr_rtx;
/* Restore the machine state saved by alpha_save_machine_status and
   clear P->machine.  NOTE(review): the listing does not show a free()
   of the xmalloc'd struct between these lines -- verify against the
   elided lines whether it is released here.  */
2276 alpha_restore_machine_status (p)
2279 struct machine_function *machine = p->machine;
2281 alpha_return_addr_rtx = machine->ra_rtx;
2284 p->machine = (struct machine_function *)0;
2287 /* Do anything needed before RTL is emitted for each function. */
/* Per-function initialization before RTL generation: reset the cached
   return-address rtx and install the save/restore hooks used around
   nested-function compilation.  */
2290 alpha_init_expanders ()
2292 alpha_return_addr_rtx = NULL_RTX;
2294 /* Arrange to save and restore machine status around nested functions. */
2295 save_machine_status = alpha_save_machine_status;
2296 restore_machine_status = alpha_restore_machine_status;
2299 /* Start the ball rolling with RETURN_ADDR_RTX. */
/* Implement RETURN_ADDR_RTX: return (and lazily create) a pseudo that
   holds this function's return address, initialized from $26 (REG_RA)
   at the top of the prologue.  The visible code handles only the
   cached/create path; the COUNT/FRAME validation lines are elided.  */
2302 alpha_return_addr (count, frame)
2311 if (alpha_return_addr_rtx)
2312 return alpha_return_addr_rtx;
2314 /* No rtx yet. Invent one, and initialize it from $26 in the prologue. */
2315 alpha_return_addr_rtx = gen_reg_rtx (Pmode);
2316 init = gen_rtx_SET (Pmode, alpha_return_addr_rtx,
2317 gen_rtx_REG (Pmode, REG_RA));
2319 /* Emit the insn to the prologue with the other argument copies. */
2320 push_topmost_sequence ();
2321 emit_insn_after (init, get_insns ());
2322 pop_topmost_sequence ();
2324 return alpha_return_addr_rtx;
/* Return nonzero if $26 (REG_RA) is ever clobbered in this function.
   When no return-address pseudo was created, the regs_ever_live flag
   is authoritative; otherwise scan the insn stream, since our prologue
   copy of $26 makes regs_ever_live[REG_RA] unconditionally true.  */
2328 alpha_ra_ever_killed ()
2330 if (!alpha_return_addr_rtx)
2331 return regs_ever_live[REG_RA];
2333 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA),
2334 get_insns(), NULL_RTX);
2338 /* Print an operand. Recognize special options, documented below. */
/* Output operand X to FILE under assembler format code CODE.  Each
   case below documents its own code letter.
   NOTE(review): the dispatch switch, many case labels, break/return
   statements and local declarations are elided in this listing.  */
2341 print_operand (file, x, code)
2351 /* Generates fp-rounding mode suffix: nothing for normal, 'c' for
2352 chopped, 'm' for minus-infinity, and 'd' for dynamic rounding
2353 mode. alpha_fprm controls which suffix is generated. */
2356 case ALPHA_FPRM_NORM:
2358 case ALPHA_FPRM_MINF:
2361 case ALPHA_FPRM_CHOP:
2364 case ALPHA_FPRM_DYN:
2371 /* Generates trap-mode suffix for instructions that accept the su
2372 suffix only (cmpt et al). */
2373 if (alpha_tp == ALPHA_TP_INSN)
2378 /* Generates trap-mode suffix for instructions that accept the
2379 v, sv, and svi suffix. The only instruction that needs this
2390 case ALPHA_FPTM_SUI:
2391 fputs ("svi", file);
2397 /* Generates trap-mode suffix for instructions that accept the u, su,
2398 and sui suffix. This is the bulk of the IEEE floating point
2399 instructions (addt et al). */
2410 case ALPHA_FPTM_SUI:
2411 fputs ("sui", file);
2417 /* Generates trap-mode suffix for instructions that accept the sui
2418 suffix (cvtqt and cvtqs). */
2421 case ALPHA_FPTM_N: case ALPHA_FPTM_U:
2422 case ALPHA_FPTM_SU: /* cvtqt/cvtqs can't cause underflow */
2424 case ALPHA_FPTM_SUI:
2425 fputs ("sui", file);
2431 /* Generates single precision instruction suffix. */
2432 fprintf (file, "%c", (TARGET_FLOAT_VAX ? 'f' : 's'));
2436 /* Generates double precision instruction suffix. */
2437 fprintf (file, "%c", (TARGET_FLOAT_VAX ? 'g' : 't'));
2441 /* If this operand is the constant zero, write it as "$31". */
2442 if (GET_CODE (x) == REG)
2443 fprintf (file, "%s", reg_names[REGNO (x)]);
2444 else if (x == CONST0_RTX (GET_MODE (x)))
2445 fprintf (file, "$31");
2447 output_operand_lossage ("invalid %%r value");
2452 /* Similar, but for floating-point. */
2453 if (GET_CODE (x) == REG)
2454 fprintf (file, "%s", reg_names[REGNO (x)]);
2455 else if (x == CONST0_RTX (GET_MODE (x)))
2456 fprintf (file, "$f31");
2458 output_operand_lossage ("invalid %%R value");
2463 /* Write the 1's complement of a constant. */
2464 if (GET_CODE (x) != CONST_INT)
2465 output_operand_lossage ("invalid %%N value");
2467 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
2471 /* Write 1 << C, for a constant C. */
2472 if (GET_CODE (x) != CONST_INT)
2473 output_operand_lossage ("invalid %%P value");
2475 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
2479 /* Write the high-order 16 bits of a constant, sign-extended. */
2480 if (GET_CODE (x) != CONST_INT)
2481 output_operand_lossage ("invalid %%h value");
2483 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
2487 /* Write the low-order 16 bits of a constant, sign-extended. */
2488 if (GET_CODE (x) != CONST_INT)
2489 output_operand_lossage ("invalid %%L value");
2491 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
2492 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
2496 /* Write mask for ZAP insn. */
2497 if (GET_CODE (x) == CONST_DOUBLE)
2499 HOST_WIDE_INT mask = 0;
2500 HOST_WIDE_INT value;
/* Each nonzero byte of the constant contributes one mask bit.  */
2502 value = CONST_DOUBLE_LOW (x);
2503 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
2508 value = CONST_DOUBLE_HIGH (x);
2509 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
2512 mask |= (1 << (i + sizeof (int)));
2514 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
2517 else if (GET_CODE (x) == CONST_INT)
2519 HOST_WIDE_INT mask = 0, value = INTVAL (x);
2521 for (i = 0; i < 8; i++, value >>= 8)
2525 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask)
2528 output_operand_lossage ("invalid %%m value");
2532 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
2533 if (GET_CODE (x) != CONST_INT
2534 || (INTVAL (x) != 8 && INTVAL (x) != 16
2535 && INTVAL (x) != 32 && INTVAL (x) != 64))
2536 output_operand_lossage ("invalid %%M value");
2538 fprintf (file, "%s",
2539 (INTVAL (x) == 8 ? "b"
2540 : INTVAL (x) == 16 ? "w"
2541 : INTVAL (x) == 32 ? "l"
2546 /* Similar, except do it from the mask. */
2547 if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xff)
2548 fprintf (file, "b");
2549 else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffff)
2550 fprintf (file, "w");
2551 else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffffffff)
2552 fprintf (file, "l");
/* All-ones masks need CONST_DOUBLE handling on 32-bit hosts.  */
2553 #if HOST_BITS_PER_WIDE_INT == 32
2554 else if (GET_CODE (x) == CONST_DOUBLE
2555 && CONST_DOUBLE_HIGH (x) == 0
2556 && CONST_DOUBLE_LOW (x) == -1)
2557 fprintf (file, "l");
2558 else if (GET_CODE (x) == CONST_DOUBLE
2559 && CONST_DOUBLE_HIGH (x) == -1
2560 && CONST_DOUBLE_LOW (x) == -1)
2561 fprintf (file, "q");
2563 else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffffffffffffffff)
2564 fprintf (file, "q");
2565 else if (GET_CODE (x) == CONST_DOUBLE
2566 && CONST_DOUBLE_HIGH (x) == 0
2567 && CONST_DOUBLE_LOW (x) == -1)
2568 fprintf (file, "q");
2571 output_operand_lossage ("invalid %%U value");
2575 /* Write the constant value divided by 8. */
/* NOTE(review): this validity test chains with `&&', so a
   non-CONST_INT operand only triggers the lossage when the other
   tests also pass; `||' looks intended -- verify against mainline
   alpha.c.  */
2576 if (GET_CODE (x) != CONST_INT
2577 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
2578 && (INTVAL (x) & 7) != 8)
2579 output_operand_lossage ("invalid %%s value");
2581 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
2585 /* Same, except compute (64 - c) / 8 */
/* NOTE(review): same `&&'-vs-`||' concern as the 's' case above; the
   lossage message also says %%s where %%S seems meant.  */
2587 if (GET_CODE (x) != CONST_INT
2588 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
2589 && (INTVAL (x) & 7) != 8)
2590 output_operand_lossage ("invalid %%s value");
2592 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
2595 case 'C': case 'D': case 'c': case 'd':
2596 /* Write out comparison name. */
2598 enum rtx_code c = GET_CODE (x);
2600 if (GET_RTX_CLASS (c) != '<')
2601 output_operand_lossage ("invalid %%C value");
/* 'D' reverses, 'c' swaps, 'd' does both, 'C' uses the code as-is. */
2604 c = reverse_condition (c);
2605 else if (code == 'c')
2606 c = swap_condition (c);
2607 else if (code == 'd')
2608 c = swap_condition (reverse_condition (c));
2611 fprintf (file, "ule");
2613 fprintf (file, "ult");
2615 fprintf (file, "%s", GET_RTX_NAME (c));
2620 /* Write the divide or modulus operator. */
2621 switch (GET_CODE (x))
2624 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
2627 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
2630 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
2633 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
2636 output_operand_lossage ("invalid %%E value");
2642 /* Write "_u" for unaligned access. */
2643 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
2644 fprintf (file, "_u");
/* Default: registers by name, MEMs by address, else a constant.  */
2648 if (GET_CODE (x) == REG)
2649 fprintf (file, "%s", reg_names[REGNO (x)]);
2650 else if (GET_CODE (x) == MEM)
2651 output_address (XEXP (x, 0));
2653 output_addr_const (file, x);
2657 output_operand_lossage ("invalid %%xn code");
2661 /* Do what is necessary for `va_start'. The argument is ignored;
2662 We look at the current function to determine if stdarg or varargs
2663 is used and fill in an initial va_list. A pointer to this constructor
/* Implement __builtin_saveregs for va_start: build the two-word
   va_list { __base, __va_offset } in a stack slot and return its
   address.  ARGLIST is ignored; the current function's type decides
   stdarg vs varargs.
   NOTE(review): the VMS branch after the TARGET_OPEN_VMS test at
   original line 2713 is elided in this listing.  */
2667 alpha_builtin_saveregs (arglist)
2670 rtx block, addr, dest, argsize;
2671 tree fntype = TREE_TYPE (current_function_decl);
/* stdarg: the last declared parameter type is not void.  */
2672 int stdarg = (TYPE_ARG_TYPES (fntype) != 0
2673 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
2674 != void_type_node));
2676 /* Compute the current position into the args, taking into account
2677 both registers and memory. Both of these are already included in
2680 argsize = GEN_INT (NUM_ARGS * UNITS_PER_WORD);
2682 /* For Unix, SETUP_INCOMING_VARARGS moves the starting address base up by 48,
2683 storing fp arg registers in the first 48 bytes, and the integer arg
2684 registers in the next 48 bytes. This is only done, however, if any
2685 integer registers need to be stored.
2687 If no integer registers need be stored, then we must subtract 48 in
2688 order to account for the integer arg registers which are counted in
2689 argsize above, but which are not actually stored on the stack. */
2691 if (TARGET_OPEN_VMS)
2692 addr = plus_constant (virtual_incoming_args_rtx,
2693 NUM_ARGS <= 5 + stdarg
2694 ? UNITS_PER_WORD : - 6 * UNITS_PER_WORD)
2696 addr = (NUM_ARGS <= 5 + stdarg
2697 ? plus_constant (virtual_incoming_args_rtx,
2699 : plus_constant (virtual_incoming_args_rtx,
2700 - (6 * UNITS_PER_WORD)));
2702 /* For VMS, we include the argsize, while on Unix, it's handled as
2703 a separate field. */
2704 if (TARGET_OPEN_VMS)
2705 addr = plus_constant (addr, INTVAL (argsize));
2707 addr = force_operand (addr, NULL_RTX);
2709 #ifdef POINTERS_EXTEND_UNSIGNED
2710 addr = convert_memory_address (ptr_mode, addr);
2713 if (TARGET_OPEN_VMS)
2717 /* Allocate the va_list constructor */
2718 block = assign_stack_local (BLKmode, 2 * UNITS_PER_WORD, BITS_PER_WORD);
2719 RTX_UNCHANGING_P (block) = 1;
2720 RTX_UNCHANGING_P (XEXP (block, 0)) = 1;
2722 /* Store the address of the first integer register in the __base
2725 dest = change_address (block, ptr_mode, XEXP (block, 0));
2726 emit_move_insn (dest, addr);
/* Tell the memory checker (-fcheck-memory-usage) this slot is
   initialized.  */
2728 if (flag_check_memory_usage)
2729 emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
2731 GEN_INT (GET_MODE_SIZE (ptr_mode)),
2732 TYPE_MODE (sizetype),
2733 GEN_INT (MEMORY_USE_RW),
2734 TYPE_MODE (integer_type_node));
2736 /* Store the argsize as the __va_offset member. */
2737 dest = change_address (block, TYPE_MODE (integer_type_node),
2738 plus_constant (XEXP (block, 0),
2739 POINTER_SIZE/BITS_PER_UNIT));
2740 emit_move_insn (dest, argsize);
2742 if (flag_check_memory_usage)
2743 emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
2745 GEN_INT (GET_MODE_SIZE
2746 (TYPE_MODE (integer_type_node))),
2747 TYPE_MODE (sizetype),
2748 GEN_INT (MEMORY_USE_RW),
2749 TYPE_MODE (integer_type_node));
2751 /* Return the address of the va_list constructor, but don't put it in a
2752 register. Doing so would fail when not optimizing and produce worse
2753 code when optimizing. */
2754 return XEXP (block, 0);
2758 /* This page contains routines that are used to determine what the function
2759 prologue and epilogue code will do and write them out. */
2761 /* Compute the size of the save area in the stack. */
2765 /* These variables are used for communication between the following functions.
2766 They indicate various things about the current function being compiled
2767 that are used to tell what kind of prologue, epilogue and procedure
2768 descriptior to generate. */
2770 /* Nonzero if we need a stack procedure. */
2771 static int is_stack_procedure;
2773 /* Register number (either FP or SP) that is used to unwind the frame. */
2774 static int unwind_regno;
2776 /* Register number used to save FP. We need not have one for RA since
2777 we don't modify it for register procedures. This is only defined
2778 for register frame procedures. */
2779 static int save_fp_regno;
2781 /* Register number used to reference objects off our PV. */
2782 static int base_regno;
2784 /* Compute register masks for saved registers. */
/* Compute bit masks of the call-saved registers this function must
   save: integer registers into *IMASKP, FP registers (numbered 32+)
   into *FMASKP.  Stack procedures additionally save the frame pointer.
   NOTE(review): the loop body branches (imask case, final stores to
   *imaskP/*fmaskP) are elided in this listing.  */
2787 alpha_sa_mask (imaskP, fmaskP)
2788 unsigned long *imaskP;
2789 unsigned long *fmaskP;
2791 unsigned long imask = 0;
2792 unsigned long fmask = 0;
2795 if (is_stack_procedure)
2796 imask |= (1L << HARD_FRAME_POINTER_REGNUM);
2798 /* One for every register we have to save. */
2800 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2801 if (! fixed_regs[i] && ! call_used_regs[i]
2802 && regs_ever_live[i] && i != REG_RA)
2807 fmask |= (1L << (i - 32));
2820 HOST_WIDE_INT stack_needed;
2823 /* One for every register we have to save. */
2825 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2826 if (! fixed_regs[i] && ! call_used_regs[i]
2827 && regs_ever_live[i] && i != REG_RA)
2830 /* Start by assuming we can use a register procedure if we don't make any
2831 calls (REG_RA not used) or need to save any registers and a stack
2832 procedure if we do. */
2833 is_stack_procedure = sa_size != 0 || alpha_ra_ever_killed ();
2835 /* Decide whether to refer to objects off our PV via FP or PV.
2836 If we need FP for something else or if we receive a nonlocal
2837 goto (which expects PV to contain the value), we must use PV.
2838 Otherwise, start by assuming we can use FP. */
2839 base_regno = (frame_pointer_needed || current_function_has_nonlocal_label
2840 || is_stack_procedure
2841 || current_function_outgoing_args_size
2842 ? REG_PV : HARD_FRAME_POINTER_REGNUM);
2844 /* If we want to copy PV into FP, we need to find some register in which to
/* Look for a call-clobbered register that is never live to hold the old FP.  */
2849 if (base_regno == HARD_FRAME_POINTER_REGNUM)
2850 for (i = 0; i < 32; i++)
2851 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
/* No scratch register found: fall back to a stack procedure using PV.  */
2854 if (save_fp_regno == -1)
2855 base_regno = REG_PV, is_stack_procedure = 1;
2857 /* Stack unwinding should be done via FP unless we use it for PV. */
2859 = base_regno == REG_PV ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM;
2861 /* If this is a stack procedure, allow space for saving FP and RA. */
2862 if (is_stack_procedure)
/* Size of the PV save slot: stack procedures reserve 8 bytes, register
   procedures none.  */
2869 alpha_pv_save_size ()
2872 return is_stack_procedure ? 8 : 0;
/* Nonzero when the frame is unwound via the frame pointer.  */
2879 return unwind_regno == HARD_FRAME_POINTER_REGNUM;
2882 #else /* ! OPEN_VMS */
/* !OPEN_VMS variant: count the call-saved registers that are live.  */
2890 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2891 if (! fixed_regs[i] && ! call_used_regs[i]
2892 && regs_ever_live[i] && i != REG_RA)
2895 /* If some registers were saved but not reg 26, reg 26 must also
2896 be saved, so leave space for it. */
2897 if (size != 0 || alpha_ra_ever_killed ())
2900 /* Our size must be even (multiple of 16 bytes). */
2907 #endif /* ! OPEN_VMS */
2909 /* Return 1 if this function can directly return via $26. */
/* True only after reload, when there is no save area, no frame, and no
   outgoing or pretend argument space (and not on VMS).  */
2914 return (! TARGET_OPEN_VMS && reload_completed && alpha_sa_size () == 0
2915 && get_frame_size () == 0
2916 && current_function_outgoing_args_size == 0
2917 && current_function_pretend_args_size == 0);
2920 /* Write a version stamp. Don't write anything if we are running as a
2921 cross-compiler. Otherwise, use the versions in /usr/include/stamp.h. */
2923 #if !defined(CROSS_COMPILE) && !defined(_WIN32) && !defined(__linux__) && !defined(VMS)
/* Emit a .verstamp directive using MS_STAMP/LS_STAMP from stamp.h.  */
2928 alpha_write_verstamp (file)
2932 fprintf (file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
2936 /* Write code to add constant C to register number IN_REG (possibly 31)
2937 and put the result into OUT_REG. Use TEMP_REG as a scratch register;
2938 usually this will be OUT_REG, but should not be if OUT_REG is
2939 STACK_POINTER_REGNUM, since it must be updated in a single instruction.
2940 Write the code to FILE. */
2943 add_long_const (file, c, in_reg, out_reg, temp_reg)
2946 int in_reg, out_reg, temp_reg;
/* Split C into a sign-extended 16-bit LOW part (lda range) and a HIGH
   part to be added with ldah.  */
2948 HOST_WIDE_INT low = (c & 0xffff) - 2 * (c & 0x8000);
2949 HOST_WIDE_INT tmp1 = c - low;
2950 HOST_WIDE_INT high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
2951 HOST_WIDE_INT extra = 0;
2953 /* We don't have code to write out constants larger than 32 bits. */
2954 #if HOST_BITS_PER_LONG_INT == 64
2955 if ((unsigned HOST_WIDE_INT) c >> 32 != 0)
2959 /* If HIGH will be interpreted as negative, we must adjust it to do two
2960 ldha insns. Note that we will never be building a negative constant
2967 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
2972 int result_reg = (extra == 0 && high == 0) ? out_reg : temp_reg;
/* NOTE(review): the bound "low < 255" excludes 255 even though the addq
   literal field holds 0..255 -- looks like an off-by-one (harmless, 255
   just falls through to lda); confirm against the Alpha operate-format
   literal range.  */
2974 if (low >= 0 && low < 255)
2975 fprintf (file, "\taddq $%d,%d,$%d\n", in_reg, low, result_reg);
2977 fprintf (file, "\tlda $%d,%d($%d)\n", result_reg, low, in_reg);
2979 in_reg = result_reg;
2984 int result_reg = (high == 0) ? out_reg : temp_reg;
2986 fprintf (file, "\tldah $%d,%d($%d)\n", result_reg, extra, in_reg);
2987 in_reg = result_reg;
2991 fprintf (file, "\tldah $%d,%d($%d)\n", out_reg, high, in_reg);
2994 /* Write function prologue. */
2998 /* On vms we have two kinds of functions:
3000 - stack frame (PROC_STACK)
3001 these are 'normal' functions with local vars and which are
3002 calling other functions
3003 - register frame (PROC_REGISTER)
3004 keeps all data in registers, needs no stack
3006 We must pass this to the assembler so it can generate the
3007 proper pdsc (procedure descriptor)
3008 This is done with the '.pdesc' command.
3010 size is the stack size needed for local variables. */
/* VMS prologue: emit .ent/.base bookkeeping, probe and allocate the
   frame, save registers for stack procedures, and write the .pdesc
   procedure descriptor.  */
3013 output_prolog (file, size)
3017 unsigned long imask = 0;
3018 unsigned long fmask = 0;
3019 /* Stack space needed for pushing registers clobbered by us. */
3020 HOST_WIDE_INT sa_size;
3021 /* Complete stack size needed. */
3022 HOST_WIDE_INT frame_size;
3023 /* Offset from base reg to register save area. */
3025 /* Offset during register save. */
3027 /* Label for the procedure entry. */
3028 char *entry_label = (char *) alloca (strlen (alpha_function_name) + 6);
3031 sa_size = alpha_sa_size ();
3033 = ALPHA_ROUND (sa_size
3034 + (is_stack_procedure ? 8 : 0)
3035 + size + current_function_pretend_args_size);
3037 /* Issue function start and label. */
3038 fprintf (file, "\t.ent ");
3039 assemble_name (file, alpha_function_name);
3040 fprintf (file, "\n");
/* The "..en" label marks the actual entry point referenced by .pdesc.  */
3041 sprintf (entry_label, "%s..en", alpha_function_name);
3042 ASM_OUTPUT_LABEL (file, entry_label);
3043 inside_function = TRUE;
3045 fprintf (file, "\t.base $%d\n", base_regno);
3047 /* Calculate register masks for clobbered registers. */
3049 if (is_stack_procedure)
3050 alpha_sa_mask (&imask, &fmask);
3052 /* Adjust the stack by the frame size. If the frame size is > 4096
3053 bytes, we need to be sure we probe somewhere in the first and last
3054 4096 bytes (we can probably get away without the latter test) and
3055 every 8192 bytes in between. If the frame size is > 32768, we
3056 do this in a loop. Otherwise, we generate the explicit probe
3059 Note that we are only allowed to adjust sp once in the prologue. */
3061 if (frame_size < 32768)
3063 if (frame_size > 4096)
3067 fprintf (file, "\tstq $31,-%d($30)\n", probed);
3069 while (probed + 8192 < frame_size)
3070 fprintf (file, "\tstq $31,-%d($30)\n", probed += 8192);
3072 /* We only have to do this probe if we aren't saving registers. */
3073 if (sa_size == 0 && probed + 4096 < frame_size)
3074 fprintf (file, "\tstq $31,-%d($30)\n", frame_size);
3077 if (frame_size != 0)
3078 fprintf (file, "\tlda $30,-%d($30)\n", frame_size);
3082 /* Here we generate code to set R4 to SP + 4096 and set R23 to the
3083 number of 8192 byte blocks to probe. We then probe each block
3084 in the loop and then set SP to the proper location. If the
3085 amount remaining is > 4096, we have to do one more probe if we
3086 are not saving any registers. */
3088 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
3089 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
3091 add_long_const (file, blocks, 31, 23, 23);
3093 fprintf (file, "\tlda $22,4096($30)\n");
/* "..sc" is the local label for the stack-probe loop.  */
3096 assemble_name (file, alpha_function_name);
3097 fprintf (file, "..sc:\n");
3099 fprintf (file, "\tstq $31,-8192($22)\n");
3100 fprintf (file, "\tsubq $23,1,$23\n");
3101 fprintf (file, "\tlda $22,-8192($22)\n");
3103 fprintf (file, "\tbne $23,$");
3104 assemble_name (file, alpha_function_name);
3105 fprintf (file, "..sc\n");
3107 if (leftover > 4096 && sa_size == 0)
3108 fprintf (file, "\tstq $31,-%d($22)\n", leftover);
3110 fprintf (file, "\tlda $30,-%d($22)\n", leftover);
3113 if (is_stack_procedure)
3115 int reg_offset = rsa_offset;
3117 /* Store R26 (RA) first. */
3118 fprintf (file, "\tstq $26,%d($30)\n", reg_offset);
3121 /* Store integer regs. according to mask. */
3122 for (i = 0; i < 32; i++)
3123 if (imask & (1L<<i))
3125 fprintf (file, "\tstq $%d,%d($30)\n", i, reg_offset);
3129 /* Print the register mask and do floating-point saves. */
3132 fprintf (file, "\t.mask 0x%x,0\n", imask);
3134 for (i = 0; i < 32; i++)
3136 if (fmask & (1L << i))
3138 fprintf (file, "\tstt $f%d,%d($30)\n", i, reg_offset);
3143 /* Print the floating-point mask, if we've saved any fp register. */
3145 fprintf (file, "\t.fmask 0x%x,0\n", fmask);
/* Save the PV (R27) at the base of the frame.  */
3147 fprintf (file, "\tstq $27,0($30)\n");
3151 fprintf (file, "\t.fp_save $%d\n", save_fp_regno);
3152 fprintf (file, "\tbis $%d,$%d,$%d\n", HARD_FRAME_POINTER_REGNUM,
3153 HARD_FRAME_POINTER_REGNUM, save_fp_regno);
/* Copy PV into the chosen base register, if they differ.  */
3156 if (base_regno != REG_PV)
3157 fprintf (file, "\tbis $%d,$%d,$%d\n", REG_PV, REG_PV, base_regno);
3159 if (unwind_regno == HARD_FRAME_POINTER_REGNUM)
3160 fprintf (file, "\tbis $%d,$%d,$%d\n", STACK_POINTER_REGNUM,
3161 STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM);
3163 /* Describe our frame. */
3164 fprintf (file, "\t.frame $%d,", unwind_regno);
3166 /* If the frame size is larger than an integer, print it as zero to
3167 avoid an assembler error. We won't be properly describing such a
3168 frame, but that's the best we can do. */
3169 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
3170 #if HOST_BITS_PER_WIDE_INT == 64
3171 frame_size >= (1l << 31) ? 0:
3175 fprintf (file, ",$26,%d\n", rsa_offset);
3177 /* If we have to allocate space for outgoing args, do it now. */
3178 if (current_function_outgoing_args_size != 0)
3179 fprintf (file, "\tlda $%d,%d($%d)\n", STACK_POINTER_REGNUM,
3180 - ALPHA_ROUND (current_function_outgoing_args_size),
3181 HARD_FRAME_POINTER_REGNUM);
3183 fprintf (file, "\t.prologue\n");
/* Emit the procedure name string and the .pdesc procedure descriptor
   ("stack" or "reg" frame kind) for the VMS linker/debugger.  */
3185 readonly_section ();
3186 fprintf (file, "\t.align 3\n");
3187 assemble_name (file, alpha_function_name); fputs ("..na:\n", file);
3188 fputs ("\t.ascii \"", file);
3189 assemble_name (file, alpha_function_name);
3190 fputs ("\\0\"\n", file);
3193 fprintf (file, "\t.align 3\n");
3194 fputs ("\t.name ", file);
3195 assemble_name (file, alpha_function_name);
3196 fputs ("..na\n", file);
3197 ASM_OUTPUT_LABEL (file, alpha_function_name);
3198 fprintf (file, "\t.pdesc ");
3199 assemble_name (file, alpha_function_name);
3200 fprintf (file, "..en,%s\n", is_stack_procedure ? "stack" : "reg");
3201 alpha_need_linkage (alpha_function_name, 1);
3207 /* Write function epilogue. */
3210 output_epilog (file, size)
3214 unsigned long imask = 0;
3215 unsigned long fmask = 0;
3216 /* Stack space needed for pushing registers clobbered by us. */
3217 HOST_WIDE_INT sa_size = alpha_sa_size ();
3218 /* Complete stack size needed. */
3219 HOST_WIDE_INT frame_size
3220 = ALPHA_ROUND (sa_size
3221 + (is_stack_procedure ? 8 : 0)
3222 + size + current_function_pretend_args_size);
3224 rtx insn = get_last_insn ();
3226 /* If the last insn was a BARRIER, we don't have to write anything except
3227 the .end pseudo-op. */
3229 if (GET_CODE (insn) == NOTE)
3230 insn = prev_nonnote_insn (insn);
3232 if (insn == 0 || GET_CODE (insn) != BARRIER)
3234 /* Restore clobbered registers, load FP last. */
3236 if (is_stack_procedure)
/* Undo the prologue's SP->FP copy before reloading registers.  */
3242 if (unwind_regno == HARD_FRAME_POINTER_REGNUM)
3243 fprintf (file, "\tbis $%d,$%d,$%d\n", HARD_FRAME_POINTER_REGNUM,
3244 HARD_FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM);
3246 alpha_sa_mask (&imask, &fmask);
3248 /* Start reloading registers after RA. */
3249 reg_offset = rsa_offset + 8;
3251 for (i = 0; i < 32; i++)
3252 if (imask & (1L<<i))
/* FP is reloaded last; just remember where it was saved.  */
3254 if (i == HARD_FRAME_POINTER_REGNUM)
3255 fp_offset = reg_offset;
3257 fprintf (file, "\tldq $%d,%d($30)\n",
3262 for (i = 0; i < 32; i++)
3263 if (fmask & (1L << i))
3265 fprintf (file, "\tldt $f%d,%d($30)\n", i, reg_offset);
3269 /* Restore R26 (RA). */
3270 fprintf (file, "\tldq $26,%d($30)\n", rsa_offset);
3272 /* Restore R29 (FP). */
3273 fprintf (file, "\tldq $29,%d($30)\n", fp_offset);
3276 fprintf (file, "\tbis $%d,$%d,$%d\n", save_fp_regno, save_fp_regno,
3277 HARD_FRAME_POINTER_REGNUM);
3279 if (frame_size != 0)
3281 if (frame_size < 32768)
3282 fprintf (file, "\tlda $30,%d($30)\n", frame_size);
/* Frame too big for a single lda: build the size in $2 via ldah+lda.  */
3285 long high = frame_size >> 16;
3286 long low = frame_size & 0xffff;
3290 low = -32768 + (low & 0x7fff);
3292 fprintf (file, "\tldah $2,%ld($31)\n", high);
3293 fprintf (file, "\tlda $2,%ld($2)\n", low);
3294 fprintf (file, "\taddq $30,$2,$30\n");
3298 /* Finally return to the caller. */
3299 fprintf (file, "\tret $31,($26),1\n");
3302 /* End the function. */
3303 fprintf (file, "\t.end ");
3304 assemble_name (file, alpha_function_name);
3305 fprintf (file, "\n");
3306 inside_function = FALSE;
3308 /* Show that we know this function if it is called again. */
3309 SYMBOL_REF_FLAG (XEXP (DECL_RTL (current_function_decl), 0)) = 1;
/* VMS declaration-attribute validator: only "overlaid", with no
   arguments, is accepted.  */
3313 vms_valid_decl_attribute_p (decl, attributes, identifier, args)
3319 if (is_attribute_p ("overlaid", identifier))
3320 return (args == NULL_TREE);
3324 #else /* !OPEN_VMS */
/* Return nonzero if the current function needs the GP register loaded,
   i.e. it contains an insn of type LDSYM or JSR.  */
3327 alpha_does_function_need_gp ()
3331 /* We never need a GP for Windows/NT. */
3332 if (TARGET_WINDOWS_NT)
3335 #ifdef TARGET_PROFILING_NEEDS_GP
3340 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
3341 Even if we are a static function, we still need to do this in case
3342 our address is taken and passed to something like qsort. */
3344 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
3345 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3346 && GET_CODE (PATTERN (insn)) != USE
3347 && GET_CODE (PATTERN (insn)) != CLOBBER)
3349 enum attr_type type = get_attr_type (insn);
3350 if (type == TYPE_LDSYM || type == TYPE_JSR)
/* OSF/Unix prologue: emit debug directives and .ent, load GP, probe
   and allocate the stack frame, save registers, and emit
   .mask/.fmask/.frame/.prologue.  */
3358 output_prolog (file, size)
3362 HOST_WIDE_INT out_args_size
3363 = ALPHA_ROUND (current_function_outgoing_args_size);
3364 HOST_WIDE_INT sa_size = alpha_sa_size ();
3365 HOST_WIDE_INT frame_size
3366 = (out_args_size + sa_size
3367 + ALPHA_ROUND (size + current_function_pretend_args_size));
3368 HOST_WIDE_INT reg_offset = out_args_size;
3369 HOST_WIDE_INT start_reg_offset = reg_offset;
3370 HOST_WIDE_INT actual_start_reg_offset = start_reg_offset;
3371 int int_reg_save_area_size = 0;
3372 unsigned reg_mask = 0;
3375 /* Ecoff can handle multiple .file directives, so put out file and lineno.
3376 We have to do that before the .ent directive as we cannot switch
3377 files within procedures with native ecoff because line numbers are
3378 linked to procedure descriptors.
3379 Outputting the lineno helps debugging of one line functions as they
3380 would otherwise get no line number at all. Please note that we would
3381 like to put out last_linenum from final.c, but it is not accessible. */
3383 if (write_symbols == SDB_DEBUG)
3385 ASM_OUTPUT_SOURCE_FILENAME (file,
3386 DECL_SOURCE_FILE (current_function_decl));
3387 if (debug_info_level != DINFO_LEVEL_TERSE)
3388 ASM_OUTPUT_SOURCE_LINE (file,
3389 DECL_SOURCE_LINE (current_function_decl));
3392 /* The assembly language programmer's guide states that the second argument
3393 to the .ent directive, the lex_level, is ignored by the assembler,
3394 so we might as well omit it. */
3396 if (!flag_inhibit_size_directive)
3398 fprintf (file, "\t.ent ");
3399 assemble_name (file, alpha_function_name);
3400 fprintf (file, "\n");
3402 ASM_OUTPUT_LABEL (file, alpha_function_name);
3403 inside_function = TRUE;
3405 if (TARGET_IEEE_CONFORMANT && !flag_inhibit_size_directive)
3406 /* Set flags in procedure descriptor to request IEEE-conformant
3407 math-library routines. The value we set it to is PDSC_EXC_IEEE
3408 (/usr/include/pdsc.h). */
3409 fprintf (file, "\t.eflag 48\n");
3411 /* Set up offsets to alpha virtual arg/local debugging pointer. */
3413 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
3414 alpha_arg_offset = -frame_size + 48;
3416 alpha_function_needs_gp = alpha_does_function_need_gp ();
3418 if (TARGET_WINDOWS_NT == 0)
3420 if (alpha_function_needs_gp)
3421 fprintf (file, "\tldgp $29,0($27)\n");
3423 /* Put a label after the GP load so we can enter the function at it. */
3425 assemble_name (file, alpha_function_name);
3426 fprintf (file, "..ng:\n");
3429 /* Adjust the stack by the frame size. If the frame size is > 4096
3430 bytes, we need to be sure we probe somewhere in the first and last
3431 4096 bytes (we can probably get away without the latter test) and
3432 every 8192 bytes in between. If the frame size is > 32768, we
3433 do this in a loop. Otherwise, we generate the explicit probe
3436 Note that we are only allowed to adjust sp once in the prologue. */
3438 if (frame_size < 32768)
3440 if (frame_size > 4096)
3444 fprintf (file, "\tstq $31,-%d($30)\n", probed);
3446 while (probed + 8192 < frame_size)
3447 fprintf (file, "\tstq $31,-%d($30)\n", probed += 8192);
3449 /* We only have to do this probe if we aren't saving registers. */
3450 if (sa_size == 0 && probed + 4096 < frame_size)
3451 fprintf (file, "\tstq $31,-%d($30)\n", frame_size);
3454 if (frame_size != 0)
3455 fprintf (file, "\tlda $30,-%d($30)\n", frame_size);
3459 /* Here we generate code to set R4 to SP + 4096 and set R5 to the
3460 number of 8192 byte blocks to probe. We then probe each block
3461 in the loop and then set SP to the proper location. If the
3462 amount remaining is > 4096, we have to do one more probe if we
3463 are not saving any registers. */
3465 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
3466 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
3468 add_long_const (file, blocks, 31, 5, 5);
3470 fprintf (file, "\tlda $4,4096($30)\n");
/* "..sc" is the local label for the stack-probe loop.  */
3473 assemble_name (file, alpha_function_name);
3474 fprintf (file, "..sc:\n");
3476 fprintf (file, "\tstq $31,-8192($4)\n");
3477 fprintf (file, "\tsubq $5,1,$5\n");
3478 fprintf (file, "\tlda $4,-8192($4)\n");
3480 fprintf (file, "\tbne $5,$");
3481 assemble_name (file, alpha_function_name);
3482 fprintf (file, "..sc\n");
3484 if (leftover > 4096 && sa_size == 0)
3485 fprintf (file, "\tstq $31,-%d($4)\n", leftover);
3487 fprintf (file, "\tlda $30,-%d($4)\n", leftover);
3490 /* Describe our frame. */
3491 if (!flag_inhibit_size_directive)
3493 fprintf (file, "\t.frame $%d,",
3494 (frame_pointer_needed
3495 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM));
3497 /* If the frame size is larger than an integer, print it as zero to
3498 avoid an assembler error. We won't be properly describing such a
3499 frame, but that's the best we can do. */
3500 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
3501 #if HOST_BITS_PER_WIDE_INT == 64
3502 frame_size >= (1l << 31) ? 0 :
3506 fprintf (file, ",$26,%d\n", current_function_pretend_args_size);
3509 /* Cope with very large offsets to the register save area. */
/* NOTE(review): when the save area is beyond lda's 16-bit range, a
   nearby base is built in $24 and used as sa_reg; the assignment of
   sa_reg itself is in elided code -- confirm.  */
3511 if (reg_offset + sa_size > 0x8000)
3513 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
3514 if (low + sa_size <= 0x8000)
3516 add_long_const (file, reg_offset - low, 30, 24, 24);
3521 add_long_const (file, reg_offset, 30, 24, 24);
3527 /* Save register RA if any other register needs to be saved. */
3530 reg_mask |= 1 << REG_RA;
3531 fprintf (file, "\tstq $26,%d($%d)\n", reg_offset, sa_reg);
3533 int_reg_save_area_size += 8;
3536 /* Now save any other used integer registers required to be saved. */
3537 for (i = 0; i < 32; i++)
3538 if (! fixed_regs[i] && ! call_used_regs[i]
3539 && regs_ever_live[i] && i != REG_RA)
3542 fprintf (file, "\tstq $%d,%d($%d)\n", i, reg_offset, sa_reg);
3544 int_reg_save_area_size += 8;
3547 /* Print the register mask and do floating-point saves. */
3548 if (reg_mask && !flag_inhibit_size_directive)
3550 fprintf (file, "\t.mask 0x%x,", reg_mask);
3551 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
3552 #if HOST_BITS_PER_WIDE_INT == 64
3553 frame_size >= (1l << 31) ? 0 :
3555 actual_start_reg_offset - frame_size);
3556 fprintf (file, "\n");
3559 start_reg_offset = reg_offset;
3562 for (i = 0; i < 32; i++)
3563 if (! fixed_regs[i + 32] && ! call_used_regs[i + 32]
3564 && regs_ever_live[i + 32])
3567 fprintf (file, "\tstt $f%d,%d($%d)\n", i, reg_offset, sa_reg);
3571 /* Print the floating-point mask, if we've saved any fp register. */
3572 if (reg_mask && !flag_inhibit_size_directive)
3573 fprintf (file, "\t.fmask 0x%x,%d\n", reg_mask,
3574 actual_start_reg_offset - frame_size + int_reg_save_area_size);
3576 /* If we need a frame pointer, set it from the stack pointer. Note that
3577 this must always be the last instruction in the prologue. */
3578 if (frame_pointer_needed)
3579 fprintf (file, "\tbis $30,$30,$15\n");
3581 /* End the prologue and say if we used gp. */
3582 if (!flag_inhibit_size_directive)
3583 fprintf (file, "\t.prologue %d\n", alpha_function_needs_gp);
3586 /* Write function epilogue. */
3589 output_epilog (file, size)
3593 rtx insn = get_last_insn ();
3594 HOST_WIDE_INT out_args_size
3595 = ALPHA_ROUND (current_function_outgoing_args_size);
3596 HOST_WIDE_INT sa_size = alpha_sa_size ();
3597 HOST_WIDE_INT frame_size
3598 = (out_args_size + sa_size
3599 + ALPHA_ROUND (size + current_function_pretend_args_size));
3600 HOST_WIDE_INT reg_offset = out_args_size;
3602 = frame_pointer_needed && regs_ever_live[HARD_FRAME_POINTER_REGNUM];
3605 /* If the last insn was a BARRIER, we don't have to write anything except
3606 the .end pseudo-op. */
3607 if (GET_CODE (insn) == NOTE)
3608 insn = prev_nonnote_insn (insn);
3609 if (insn == 0 || GET_CODE (insn) != BARRIER)
3614 /* If we have a frame pointer, restore SP from it. */
3615 if (frame_pointer_needed)
3616 fprintf (file, "\tbis $15,$15,$30\n");
3618 /* Cope with large offsets to the register save area. */
3620 if (reg_offset + sa_size > 0x8000)
3622 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
3623 if (low + sa_size <= 0x8000)
3625 add_long_const (file, reg_offset - low, 30, 24, 24);
3630 add_long_const (file, reg_offset, 30, 24, 24);
3636 /* Restore all the registers, starting with the return address
3640 fprintf (file, "\tldq $26,%d($%d)\n", reg_offset, sa_reg);
3644 /* Now restore any other used integer registers that we saved,
3645 except for FP if it is being used as FP, since it must be
3648 for (i = 0; i < 32; i++)
3649 if (! fixed_regs[i] && ! call_used_regs[i] && regs_ever_live[i]
3652 if (i == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
3653 fp_offset = reg_offset;
3655 fprintf (file, "\tldq $%d,%d($%d)\n", i, reg_offset, sa_reg);
3659 for (i = 0; i < 32; i++)
3660 if (! fixed_regs[i + 32] && ! call_used_regs[i + 32]
3661 && regs_ever_live[i + 32])
3663 fprintf (file, "\tldt $f%d,%d($%d)\n", i, reg_offset, sa_reg);
3667 /* If the stack size is large and we have a frame pointer, compute the
3668 size of the stack into a register because the old FP restore, stack
3669 pointer adjust, and return are required to be consecutive
3671 if (frame_size > 32767 && restore_fp)
3672 add_long_const (file, frame_size, 31, 1, 1);
3674 /* If we needed a frame pointer and we have to restore it, do it
3675 now. This must be done in one instruction immediately
3676 before the SP update. */
3677 if (restore_fp && fp_offset)
3678 fprintf (file, "\tldq $15,%d($%d)\n", fp_offset, sa_reg);
3680 /* Now update the stack pointer, if needed. Only one instruction must
3681 modify the stack pointer. It must be the last instruction in the
3682 sequence and must be an ADDQ or LDA instruction. If the frame
3683 pointer was loaded above, we may only put one instruction here. */
/* NOTE(review): this threshold is "> 32768" while the matching
   precompute above uses "> 32767"; at frame_size == 32768 the value
   built in $1 goes unused and the fallback emits a multi-insn SP
   adjust -- looks like an off-by-one; confirm.  */
3685 if (frame_size > 32768 && restore_fp)
3686 fprintf (file, "\taddq $1,$30,$30\n");
3688 add_long_const (file, frame_size, 30, 30, 1);
3690 /* Finally return to the caller. */
3691 fprintf (file, "\tret $31,($26),1\n");
3694 /* End the function. */
3695 if (!flag_inhibit_size_directive)
3697 fprintf (file, "\t.end ");
3698 assemble_name (file, alpha_function_name);
3699 fprintf (file, "\n");
3701 inside_function = FALSE;
3703 /* Show that we know this function if it is called again.
3705 Don't do this for global functions in object files destined for a
3706 shared library because the function may be overridden by the application
3708 ??? Is this just ELF? */
3710 if (!flag_pic || !TREE_PUBLIC (current_function_decl))
3711 SYMBOL_REF_FLAG (XEXP (DECL_RTL (current_function_decl), 0)) = 1;
3713 #endif /* !OPEN_VMS */
3715 /* Debugging support. */
3719 /* Count of sdb-related labels generated (used to find block
3720 start and end boundaries). */
3722 int sdb_label_count = 0;
3724 /* Next label # for each statement. */
3726 static int sym_lineno = 0;
3728 /* Count the number of .file directives, so that .loc is up to date. */
3730 static int num_source_filenames = 0;
3732 /* Name of the file containing the current function. */
3734 static char *current_function_file = "";
3736 /* Offsets to alpha virtual arg/local debugging pointers. */
3738 long alpha_arg_offset;
3739 long alpha_auto_offset;
3741 /* Emit a new filename to a stream. */
3744 alpha_output_filename (stream, name)
3748 static int first_time = TRUE;
3749 char ltext_label_name[100];
/* First file seen: emit .file and remember it as the current file.  */
3754 ++num_source_filenames;
3755 current_function_file = name;
3756 fprintf (stream, "\t.file\t%d ", num_source_filenames);
3757 output_quoted_string (stream, name);
3758 fprintf (stream, "\n");
3759 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
3760 fprintf (stream, "\t#@stabs\n");
/* For dbx output, emit an N_SOL stab referencing the Ltext label.  */
3763 else if (write_symbols == DBX_DEBUG)
3765 ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext", 0);
3766 fprintf (stream, "%s ", ASM_STABS_OP);
3767 output_quoted_string (stream, name);
3768 fprintf (stream, ",%d,0,0,%s\n", N_SOL, &ltext_label_name[1]);
/* Otherwise only emit a new .file when the filename really changed.  */
3771 else if (name != current_function_file
3772 && strcmp (name, current_function_file) != 0)
3774 if (inside_function && ! TARGET_GAS)
3775 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
3778 ++num_source_filenames;
3779 current_function_file = name;
3780 fprintf (stream, "\t.file\t%d ", num_source_filenames);
3783 output_quoted_string (stream, name);
3784 fprintf (stream, "\n");
3788 /* Emit a linenumber to a stream. */
3791 alpha_output_lineno (stream, line)
/* dbx output: emit an N_SLINE stab anchored at a fresh $LM label.  */
3795 if (write_symbols == DBX_DEBUG)
3797 /* mips-tfile doesn't understand .stabd directives. */
3799 fprintf (stream, "$LM%d:\n\t%s %d,0,%d,$LM%d\n",
3800 sym_lineno, ASM_STABN_OP, N_SLINE, line, sym_lineno);
/* Otherwise emit an ECOFF .loc directive for the current file.  */
3803 fprintf (stream, "\n\t.loc\t%d %d\n", num_source_filenames, line);
3806 /* Structure to show the current status of registers and memory. */
3808 struct shadow_summary
3811 unsigned long i : 31; /* Mask of int regs */
3812 unsigned long fp : 31; /* Mask of fp regs */
3813 unsigned long mem : 1; /* mem == imem | fpmem */
3817 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
3818 to the summary structure. SET is nonzero if the insn is setting the
3819 object, otherwise zero. */
3822 summarize_insn (x, sum, set)
3824 struct shadow_summary *sum;
3833 switch (GET_CODE (x))
3835 /* ??? Note that this case would be incorrect if the Alpha had a
3836 ZERO_EXTRACT in SET_DEST. */
3838 summarize_insn (SET_SRC (x), sum, 0);
3839 summarize_insn (SET_DEST (x), sum, 1);
3843 summarize_insn (XEXP (x, 0), sum, 1);
3847 summarize_insn (XEXP (x, 0), sum, 0);
3851 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
3852 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
3856 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
3857 summarize_insn (XVECEXP (x, 0, i), sum, 0);
/* Registers 31 and 63 ($31/$f31, the Alpha zero registers) are skipped;
   any other register sets bit (regno mod 32) in the int or fp mask.  */
3866 int regno = REGNO (x);
3867 unsigned long mask = 1UL << (regno % 32);
3869 if (regno == 31 || regno == 63)
3875 sum->defd.i |= mask;
3877 sum->defd.fp |= mask;
3882 sum->used.i |= mask;
3884 sum->used.fp |= mask;
3895 /* Find the regs used in memory address computation: */
3896 summarize_insn (XEXP (x, 0), sum, 0);
3899 case CONST_INT: case CONST_DOUBLE:
3900 case SYMBOL_REF: case LABEL_REF: case CONST:
3903 /* Handle common unary and binary ops for efficiency. */
3904 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
3905 case MOD: case UDIV: case UMOD: case AND: case IOR:
3906 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
3907 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
3908 case NE: case EQ: case GE: case GT: case LE:
3909 case LT: case GEU: case GTU: case LEU: case LTU:
3910 summarize_insn (XEXP (x, 0), sum, 0);
3911 summarize_insn (XEXP (x, 1), sum, 0);
3914 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
3915 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
3916 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
3917 case SQRT: case FFS:
3918 summarize_insn (XEXP (x, 0), sum, 0);
/* Generic fallback: recurse over the rtx format string.  */
3922 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
3923 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3924 switch (format_ptr[i])
3927 summarize_insn (XEXP (x, i), sum, 0);
3931 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3932 summarize_insn (XVECEXP (x, i, j), sum, 0);
3941 /* Ensure a sufficient number of `trapb' insns are in the code when the user
3942 requests code with a trap precision of functions or instructions.
3944 In naive mode, when the user requests a trap-precision of "instruction", a
3945 trapb is needed after every instruction that may generate a trap (and after
3946 jsr/bsr instructions, because called functions may import a trap from the
3947 caller). This ensures that the code is resumption safe but it is also slow.
3949 When optimizations are turned on, we delay issuing a trapb as long as
3950 possible. In this context, a trap shadow is the sequence of instructions
3951 that starts with a (potentially) trap generating instruction and extends to
3952 the next trapb or call_pal instruction (but GCC never generates call_pal by
3953 itself). We can delay (and therefore sometimes omit) a trapb subject to the
3954 following conditions:
3956 (a) On entry to the trap shadow, if any Alpha register or memory location
3957 contains a value that is used as an operand value by some instruction in
3958 the trap shadow (live on entry), then no instruction in the trap shadow
3959 may modify the register or memory location.
3961 (b) Within the trap shadow, the computation of the base register for a
3962 memory load or store instruction may not involve using the result
3963 of an instruction that might generate an UNPREDICTABLE result.
3965 (c) Within the trap shadow, no register may be used more than once as a
3966 destination register. (This is to make life easier for the trap-handler.)
3968 (d) The trap shadow may not include any branch instructions. */
/* Walk INSNS and insert trapb instructions wherever continuing the
   current trap shadow would violate conditions (a)-(d) above.  */
3971 alpha_handle_trap_shadows (insns)
3974 struct shadow_summary shadow;
3975 int trap_pending, exception_nesting;
3978 if (alpha_tp == ALPHA_TP_PROG && !flag_exceptions)
3982 exception_nesting = 0;
3985 shadow.used.mem = 0;
3986 shadow.defd = shadow.used;
3988 for (i = insns; i ; i = NEXT_INSN (i))
3990 if (GET_CODE (i) == NOTE)
3992 switch (NOTE_LINE_NUMBER (i))
3994 case NOTE_INSN_EH_REGION_BEG:
3995 exception_nesting++;
4000 case NOTE_INSN_EH_REGION_END:
4001 exception_nesting--;
4006 case NOTE_INSN_EPILOGUE_BEG:
4007 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
4012 else if (trap_pending)
4014 if (alpha_tp == ALPHA_TP_FUNC)
4016 if (GET_CODE (i) == JUMP_INSN
4017 && GET_CODE (PATTERN (i)) == RETURN)
4020 else if (alpha_tp == ALPHA_TP_INSN)
4024 struct shadow_summary sum;
4029 sum.defd = sum.used;
4031 switch (GET_CODE (i))
4034 /* Annoyingly, get_attr_trap will abort on these. */
4035 if (GET_CODE (PATTERN (i)) == USE
4036 || GET_CODE (PATTERN (i)) == CLOBBER)
4039 summarize_insn (PATTERN (i), &sum, 0);
4041 if ((sum.defd.i & shadow.defd.i)
4042 || (sum.defd.fp & shadow.defd.fp))
4044 /* (c) would be violated */
4048 /* Combine shadow with summary of current insn: */
4049 shadow.used.i |= sum.used.i;
4050 shadow.used.fp |= sum.used.fp;
4051 shadow.used.mem |= sum.used.mem;
4052 shadow.defd.i |= sum.defd.i;
4053 shadow.defd.fp |= sum.defd.fp;
4054 shadow.defd.mem |= sum.defd.mem;
4056 if ((sum.defd.i & shadow.used.i)
4057 || (sum.defd.fp & shadow.used.fp)
4058 || (sum.defd.mem & shadow.used.mem))
4060 /* (a) would be violated (also takes care of (b)) */
4061 if (get_attr_trap (i) == TRAP_YES
4062 && ((sum.defd.i & sum.used.i)
4063 || (sum.defd.fp & sum.used.fp)))
/* Close the shadow: emit a trapb before I and reset the tracking state.  */
4082 emit_insn_before (gen_trapb (), i);
4086 shadow.used.mem = 0;
4087 shadow.defd = shadow.used;
4092 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
4093 && GET_CODE (i) == INSN
4094 && GET_CODE (PATTERN (i)) != USE
4095 && GET_CODE (PATTERN (i)) != CLOBBER
4096 && get_attr_trap (i) == TRAP_YES)
/* A new trap-capable insn opens a shadow; record what it touches.  */
4098 if (optimize && !trap_pending)
4099 summarize_insn (PATTERN (i), &shadow, 0);
4105 /* Machine dependent reorg pass. */
/* NOTE(review): the enclosing function definition (presumably the
   MACHINE_DEPENDENT_REORG entry point, alpha_reorg) is elided from this
   excerpt; only its call to the trap-shadow pass is visible.  */
4111 alpha_handle_trap_shadows (insns);
4115 /* Check a floating-point value for validity for a particular machine mode. */
/* Decimal spellings of the representable-range limits used by
   check_float_value below: entries [0..3] are the VAX F-format limits,
   entries [4..7] are the IEEE single limits for the default mode that
   traps on infinities and denormals.  Layout per group:
   [0] +max, [1] -max, [2] +min normal, [3] -min normal.  */
4117 static char *float_strings[] =
4119 /* These are for FLOAT_VAX. */
4120 "1.70141173319264430e+38", /* 2^127 (2^24 - 1) / 2^24 */
4121 "-1.70141173319264430e+38",
4122 "2.93873587705571877e-39", /* 2^-128 */
4123 "-2.93873587705571877e-39",
4124 /* These are for the default broken IEEE mode, which traps
4125 on infinity or denormal numbers. */
4126 "3.402823466385288598117e+38", /* 2^128 (1 - 2^-24) */
4127 "-3.402823466385288598117e+38",
4128 "1.1754943508222875079687e-38", /* 2^-126 */
4129 "-1.1754943508222875079687e-38",
/* Parsed REAL_VALUE_TYPE forms of the strings above, filled in lazily
   by check_float_value on first use (guarded by inited_float_values).  */
4132 static REAL_VALUE_TYPE float_values[8];
4133 static int inited_float_values = 0;
/* NOTE(review): sampled excerpt -- several lines (the `d' and `overflow'
   parameter declarations, braces, return statements) are elided.  */
/* Clamp the floating constant at D to the range representable in MODE's
   target format.  Values above the format's maximum are replaced by the
   maximum, values below the negative maximum by the negative maximum,
   and nonzero values smaller in magnitude than the smallest normal
   number are flushed to zero.  The limits come from float_values above
   (VAX vs. trapping-IEEE group chosen by TARGET_FLOAT_VAX).  */
4136 check_float_value (mode, d, overflow)
4137 enum machine_mode mode;
/* In full IEEE modes every value is representable; presumably this
   returns without clamping -- TODO confirm against the elided lines.  */
4142 if (TARGET_IEEE || TARGET_IEEE_CONFORMANT || TARGET_IEEE_WITH_INEXACT)
/* Lazily parse the limit strings once.  */
4145 if (inited_float_values == 0)
4148 for (i = 0; i < 8; i++)
4149 float_values[i] = REAL_VALUE_ATOF (float_strings[i], DFmode);
4151 inited_float_values = 1;
4157 REAL_VALUE_TYPE *fvptr;
/* Select the 4-entry limit group for the target float format.  */
4159 if (TARGET_FLOAT_VAX)
4160 fvptr = &float_values[0];
4162 fvptr = &float_values[4];
/* Copy the constant out of D so it can be compared as a REAL_VALUE_TYPE.  */
4164 bcopy ((char *) d, (char *) &r, sizeof (REAL_VALUE_TYPE));
4165 if (REAL_VALUES_LESS (fvptr[0], r))
/* Overflow positive: clamp to +max.  */
4167 bcopy ((char *) &fvptr[0], (char *) d,
4168 sizeof (REAL_VALUE_TYPE));
4171 else if (REAL_VALUES_LESS (r, fvptr[1]))
/* Overflow negative: clamp to -max.  */
4173 bcopy ((char *) &fvptr[1], (char *) d,
4174 sizeof (REAL_VALUE_TYPE));
4177 else if (REAL_VALUES_LESS (dconst0, r)
4178 && REAL_VALUES_LESS (r, fvptr[2]))
/* Positive denormal: flush to zero.  */
4180 bcopy ((char *) &dconst0, (char *) d, sizeof (REAL_VALUE_TYPE));
4183 else if (REAL_VALUES_LESS (r, dconst0)
4184 && REAL_VALUES_LESS (fvptr[3], r))
/* Negative denormal: flush to zero.  */
4186 bcopy ((char *) &dconst0, (char *) d, sizeof (REAL_VALUE_TYPE));
4196 /* Return the VMS argument type corresponding to MODE. */
/* NOTE(review): the switch scaffolding (presumably on MODE, with the two
   visible returns for the single- and double-float cases and a default)
   is elided from this excerpt.  FF/FS and FD/FT are the VMS argument
   type codes for VAX vs. IEEE float formats.  */
4199 alpha_arg_type (mode)
4200 enum machine_mode mode;
4205 return TARGET_FLOAT_VAX ? FF : FS;
4207 return TARGET_FLOAT_VAX ? FD : FT;
4213 /* Return an rtx for an integer representing the VMS Argument Information
/* Pack the argument count into the low bits and each of the six argument
   type codes into a 3-bit field starting at bit 8 (bits 8..10 for arg 0,
   11..13 for arg 1, ...), and wrap the result in a CONST_INT.  */
4217 alpha_arg_info_reg_val (cum)
4218 CUMULATIVE_ARGS cum;
4220 unsigned HOST_WIDE_INT regval = cum.num_args;
4223 for (i = 0; i < 6; i++)
4224 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
4226 return GEN_INT (regval);
4229 /* Structure to collect function names for final output
/* How a name has been seen so far: never referenced, defined in this
   translation unit (LOCAL), or only called (EXTERN).  */
4232 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
/* Singly-linked list node for one linkage name.  NOTE(review): a `name'
   field is referenced by the functions below but its declaration is
   elided from this excerpt.  */
4235 struct alpha_links {
4236 struct alpha_links *next;
4238 enum links_kind kind;
/* Head of the list of names needing .linkage entries.  */
4241 static struct alpha_links *alpha_links_base = 0;
4243 /* Make (or fake) .linkage entry for function call.
4245 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition. */
/* Record NAME in the alpha_links list, upgrading its kind when a
   previously external/unused name turns out to be defined or used
   locally.  NOTE(review): parameter declarations and the return
   statements are elided from this excerpt.  */
4248 alpha_need_linkage (name, is_local)
4253 struct alpha_links *lptr, *nptr;
4258 /* Is this name already defined ? */
4260 for (lptr = alpha_links_base; lptr; lptr = lptr->next)
4261 if (strcmp (lptr->name, name) == 0)
4265 /* Defined here but external assumed. */
4266 if (lptr->kind == KIND_EXTERN)
4267 lptr->kind = KIND_LOCAL;
4271 /* Used here but unused assumed. */
4272 if (lptr->kind == KIND_UNUSED)
4273 lptr->kind = KIND_LOCAL;
/* First sighting: prepend a new node to the list.  */
4278 nptr = (struct alpha_links *) xmalloc (sizeof (struct alpha_links));
4279 nptr->next = alpha_links_base;
4280 nptr->name = xstrdup (name);
4282 /* Assume external if no definition. */
4283 nptr->kind = (is_local ? KIND_UNUSED : KIND_EXTERN);
4285 /* Ensure we have an IDENTIFIER so assemble_name can mark it as used. */
4286 get_identifier (name);
4288 alpha_links_base = nptr;
/* Emit the collected linkage entries to STREAM: for each referenced name
   a `$name..lk' label followed by either an explicit entry/procedure
   quadword pair (locally defined names) or a `.linkage' directive
   (external names).  Unused or never-referenced names are skipped.
   NOTE(review): the STREAM parameter declaration and loop braces are
   elided from this excerpt; `nptr' is presumably set to lptr->next in
   an elided line -- TODO confirm.  */
4295 alpha_write_linkage (stream)
4298 struct alpha_links *lptr, *nptr;
/* Linkage pairs live in the read-only section, quadword aligned.  */
4300 readonly_section ();
4302 fprintf (stream, "\t.align 3\n");
4304 for (lptr = alpha_links_base; lptr; lptr = nptr)
/* Skip names never referenced by the assembler output.  */
4308 if (lptr->kind == KIND_UNUSED
4309 || ! TREE_SYMBOL_REFERENCED (get_identifier (lptr->name)))
4312 fprintf (stream, "$%s..lk:\n", lptr->name);
4313 if (lptr->kind == KIND_LOCAL)
4315 /* Local and used, build linkage pair. */
4316 fprintf (stream, "\t.quad %s..en\n", lptr->name);
4317 fprintf (stream, "\t.quad %s\n", lptr->name);
4320 /* External and used, request linkage pair. */
4321 fprintf (stream, "\t.linkage %s\n", lptr->name);
/* NOTE(review): non-VMS stub of alpha_need_linkage; its (presumably
   empty) body and parameter declarations are elided from this excerpt.  */
4328 alpha_need_linkage (name, is_local)
4334 #endif /* OPEN_VMS */