1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 93-98, 1999 Free Software Foundation, Inc.
3 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
27 #include "hard-reg-set.h"
29 #include "insn-config.h"
30 #include "conditions.h"
31 #include "insn-flags.h"
33 #include "insn-attr.h"
45 extern char *version_string;
46 extern int rtx_equal_function_value_matters;
48 /* Specify which cpu to schedule for. */
50 enum processor_type alpha_cpu;
51 static const char * const alpha_cpu_name[] =
56 /* Specify how accurate floating-point traps need to be. */
58 enum alpha_trap_precision alpha_tp;
60 /* Specify the floating-point rounding mode. */
62 enum alpha_fp_rounding_mode alpha_fprm;
64 /* Specify which things cause traps. */
66 enum alpha_fp_trap_mode alpha_fptm;
68 /* Strings decoded into the above options. */
70 const char *alpha_cpu_string; /* -mcpu= */
71 const char *alpha_tp_string; /* -mtrap-precision=[p|s|i] */
72 const char *alpha_fprm_string; /* -mfp-rounding-mode=[n|m|c|d] */
73 const char *alpha_fptm_string; /* -mfp-trap-mode=[n|u|su|sui] */
74 const char *alpha_mlat_string; /* -mmemory-latency= */
76 /* Save information from a "cmpxx" operation until the branch or scc is
79 rtx alpha_compare_op0, alpha_compare_op1;
80 int alpha_compare_fp_p;
82 /* Define the information needed to modify the epilogue for EH. */
84 rtx alpha_eh_epilogue_sp_ofs;
86 /* Non-zero if inside of a function, because the Alpha asm can't
87 handle .files inside of functions. */
89 static int inside_function = FALSE;
91 /* If non-null, this rtx holds the return address for the function. */
93 static rtx alpha_return_addr_rtx;
95 /* The number of cycles of latency we should assume on memory reads. */
97 int alpha_memory_latency = 3;
99 /* Whether the function needs the GP. */
101 static int alpha_function_needs_gp;
103 /* The alias set for prologue/epilogue register save/restore. */
105 static int alpha_sr_alias_set;
107 /* Declarations of static functions. */
108 static void alpha_set_memflags_1
109 PROTO((rtx, int, int, int));
110 static rtx alpha_emit_set_const_1
111 PROTO((rtx, enum machine_mode, HOST_WIDE_INT, int));
112 static void alpha_expand_unaligned_load_words
113 PROTO((rtx *out_regs, rtx smem, HOST_WIDE_INT words, HOST_WIDE_INT ofs));
114 static void alpha_expand_unaligned_store_words
115 PROTO((rtx *out_regs, rtx smem, HOST_WIDE_INT words, HOST_WIDE_INT ofs));
116 static void alpha_sa_mask
117 PROTO((unsigned long *imaskP, unsigned long *fmaskP));
118 static int alpha_does_function_need_gp
122 /* Get the number of args of a function in one of two ways. */
124 #define NUM_ARGS current_function_args_info.num_args
126 #define NUM_ARGS current_function_args_info
132 /* Parse target option strings. */
137 alpha_tp = ALPHA_TP_PROG;
138 alpha_fprm = ALPHA_FPRM_NORM;
139 alpha_fptm = ALPHA_FPTM_N;
143 alpha_tp = ALPHA_TP_INSN;
144 alpha_fptm = ALPHA_FPTM_SU;
147 if (TARGET_IEEE_WITH_INEXACT)
149 alpha_tp = ALPHA_TP_INSN;
150 alpha_fptm = ALPHA_FPTM_SUI;
155 if (! strcmp (alpha_tp_string, "p"))
156 alpha_tp = ALPHA_TP_PROG;
157 else if (! strcmp (alpha_tp_string, "f"))
158 alpha_tp = ALPHA_TP_FUNC;
159 else if (! strcmp (alpha_tp_string, "i"))
160 alpha_tp = ALPHA_TP_INSN;
162 error ("bad value `%s' for -mtrap-precision switch", alpha_tp_string);
165 if (alpha_fprm_string)
167 if (! strcmp (alpha_fprm_string, "n"))
168 alpha_fprm = ALPHA_FPRM_NORM;
169 else if (! strcmp (alpha_fprm_string, "m"))
170 alpha_fprm = ALPHA_FPRM_MINF;
171 else if (! strcmp (alpha_fprm_string, "c"))
172 alpha_fprm = ALPHA_FPRM_CHOP;
173 else if (! strcmp (alpha_fprm_string,"d"))
174 alpha_fprm = ALPHA_FPRM_DYN;
176 error ("bad value `%s' for -mfp-rounding-mode switch",
180 if (alpha_fptm_string)
182 if (strcmp (alpha_fptm_string, "n") == 0)
183 alpha_fptm = ALPHA_FPTM_N;
184 else if (strcmp (alpha_fptm_string, "u") == 0)
185 alpha_fptm = ALPHA_FPTM_U;
186 else if (strcmp (alpha_fptm_string, "su") == 0)
187 alpha_fptm = ALPHA_FPTM_SU;
188 else if (strcmp (alpha_fptm_string, "sui") == 0)
189 alpha_fptm = ALPHA_FPTM_SUI;
191 error ("bad value `%s' for -mfp-trap-mode switch", alpha_fptm_string);
195 = TARGET_CPU_DEFAULT & MASK_CPU_EV6 ? PROCESSOR_EV6
196 : (TARGET_CPU_DEFAULT & MASK_CPU_EV5 ? PROCESSOR_EV5 : PROCESSOR_EV4);
198 if (alpha_cpu_string)
200 if (! strcmp (alpha_cpu_string, "ev4")
201 || ! strcmp (alpha_cpu_string, "ev45")
202 || ! strcmp (alpha_cpu_string, "21064"))
204 alpha_cpu = PROCESSOR_EV4;
205 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
207 else if (! strcmp (alpha_cpu_string, "ev5")
208 || ! strcmp (alpha_cpu_string, "21164"))
210 alpha_cpu = PROCESSOR_EV5;
211 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
213 else if (! strcmp (alpha_cpu_string, "ev56")
214 || ! strcmp (alpha_cpu_string, "21164a"))
216 alpha_cpu = PROCESSOR_EV5;
217 target_flags |= MASK_BWX;
218 target_flags &= ~ (MASK_MAX | MASK_FIX | MASK_CIX);
220 else if (! strcmp (alpha_cpu_string, "pca56")
221 || ! strcmp (alpha_cpu_string, "21164PC")
222 || ! strcmp (alpha_cpu_string, "21164pc"))
224 alpha_cpu = PROCESSOR_EV5;
225 target_flags |= MASK_BWX | MASK_MAX;
226 target_flags &= ~ (MASK_FIX | MASK_CIX);
228 else if (! strcmp (alpha_cpu_string, "ev6")
229 || ! strcmp (alpha_cpu_string, "21264"))
231 alpha_cpu = PROCESSOR_EV6;
232 target_flags |= MASK_BWX | MASK_MAX | MASK_FIX;
233 target_flags &= ~ (MASK_CIX);
235 /* Except for EV6 pass 1 (not released), we always have
236 precise arithmetic traps. Which means we can do
237 software completion without minding trap shadows. */
238 alpha_tp = ALPHA_TP_PROG;
241 error ("bad value `%s' for -mcpu switch", alpha_cpu_string);
244 /* Do some sanity checks on the above options. */
246 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
247 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
249 warning ("fp software completion requires -mtrap-precision=i");
250 alpha_tp = ALPHA_TP_INSN;
253 if (TARGET_FLOAT_VAX)
255 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
257 warning ("rounding mode not supported for VAX floats");
258 alpha_fprm = ALPHA_FPRM_NORM;
260 if (alpha_fptm == ALPHA_FPTM_SUI)
262 warning ("trap mode not supported for VAX floats");
263 alpha_fptm = ALPHA_FPTM_SU;
271 if (!alpha_mlat_string)
272 alpha_mlat_string = "L1";
274 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
275 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
277 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
278 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
279 && alpha_mlat_string[2] == '\0')
281 static int const cache_latency[][4] =
283 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
284 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
285 { 3, 13, -1 }, /* ev6 -- Ho hum, doesn't exist yet */
288 lat = alpha_mlat_string[1] - '0';
289 if (lat < 0 || lat > 3 || cache_latency[alpha_cpu][lat-1] == -1)
291 warning ("L%d cache latency unknown for %s",
292 lat, alpha_cpu_name[alpha_cpu]);
296 lat = cache_latency[alpha_cpu][lat-1];
298 else if (! strcmp (alpha_mlat_string, "main"))
300 /* Most current memories have about 370ns latency. This is
301 a reasonable guess for a fast cpu. */
306 warning ("bad value `%s' for -mmemory-latency", alpha_mlat_string);
310 alpha_memory_latency = lat;
313 /* Default the definition of "small data" to 8 bytes. */
317 /* Acquire a unique set number for our register saves and restores. */
318 alpha_sr_alias_set = new_alias_set ();
321 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
329 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
331 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
337 /* Returns 1 if OP is either the constant zero or a register. If a
338 register, it must be in the proper mode unless MODE is VOIDmode. */
341 reg_or_0_operand (op, mode)
343 enum machine_mode mode;
345 return op == const0_rtx || register_operand (op, mode);
348 /* Return 1 if OP is a constant in the range of 0-63 (for a shift) or
352 reg_or_6bit_operand (op, mode)
354 enum machine_mode mode;
356 return ((GET_CODE (op) == CONST_INT
357 && (unsigned HOST_WIDE_INT) INTVAL (op) < 64)
358 || register_operand (op, mode));
362 /* Return 1 if OP is an 8-bit constant or any register. */
365 reg_or_8bit_operand (op, mode)
367 enum machine_mode mode;
369 return ((GET_CODE (op) == CONST_INT
370 && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100)
371 || register_operand (op, mode));
374 /* Return 1 if OP is an 8-bit constant. */
377 cint8_operand (op, mode)
379 enum machine_mode mode ATTRIBUTE_UNUSED;
381 return ((GET_CODE (op) == CONST_INT
382 && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100));
385 /* Return 1 if the operand is a valid second operand to an add insn. */
388 add_operand (op, mode)
390 enum machine_mode mode;
392 if (GET_CODE (op) == CONST_INT)
393 /* Constraints I, J, O and P are covered by K. */
394 return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'K')
395 || CONST_OK_FOR_LETTER_P (INTVAL (op), 'L'));
397 return register_operand (op, mode);
400 /* Return 1 if the operand is a valid second operand to a sign-extending
404 sext_add_operand (op, mode)
406 enum machine_mode mode;
408 if (GET_CODE (op) == CONST_INT)
409 return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'I')
410 || CONST_OK_FOR_LETTER_P (INTVAL (op), 'O'));
412 return register_operand (op, mode);
415 /* Return 1 if OP is the constant 4 or 8. */
418 const48_operand (op, mode)
420 enum machine_mode mode ATTRIBUTE_UNUSED;
422 return (GET_CODE (op) == CONST_INT
423 && (INTVAL (op) == 4 || INTVAL (op) == 8));
426 /* Return 1 if OP is a valid first operand to an AND insn. */
429 and_operand (op, mode)
431 enum machine_mode mode;
433 if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)
434 return (zap_mask (CONST_DOUBLE_LOW (op))
435 && zap_mask (CONST_DOUBLE_HIGH (op)));
437 if (GET_CODE (op) == CONST_INT)
438 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
439 || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100
440 || zap_mask (INTVAL (op)));
442 return register_operand (op, mode);
445 /* Return 1 if OP is a valid first operand to an IOR or XOR insn. */
448 or_operand (op, mode)
450 enum machine_mode mode;
452 if (GET_CODE (op) == CONST_INT)
453 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
454 || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100);
456 return register_operand (op, mode);
459 /* Return 1 if OP is a constant that is the width, in bits, of an integral
460 mode smaller than DImode. */
463 mode_width_operand (op, mode)
465 enum machine_mode mode ATTRIBUTE_UNUSED;
467 return (GET_CODE (op) == CONST_INT
468 && (INTVAL (op) == 8 || INTVAL (op) == 16
469 || INTVAL (op) == 32 || INTVAL (op) == 64));
472 /* Return 1 if OP is a constant that is the width of an integral machine mode
473 smaller than an integer. */
476 mode_mask_operand (op, mode)
478 enum machine_mode mode ATTRIBUTE_UNUSED;
480 #if HOST_BITS_PER_WIDE_INT == 32
481 if (GET_CODE (op) == CONST_DOUBLE)
482 return (CONST_DOUBLE_LOW (op) == -1
483 && (CONST_DOUBLE_HIGH (op) == -1
484 || CONST_DOUBLE_HIGH (op) == 0));
486 if (GET_CODE (op) == CONST_DOUBLE)
487 return (CONST_DOUBLE_LOW (op) == -1 && CONST_DOUBLE_HIGH (op) == 0);
490 return (GET_CODE (op) == CONST_INT
491 && (INTVAL (op) == 0xff
492 || INTVAL (op) == 0xffff
493 || INTVAL (op) == (HOST_WIDE_INT)0xffffffff
494 #if HOST_BITS_PER_WIDE_INT == 64
500 /* Return 1 if OP is a multiple of 8 less than 64. */
503 mul8_operand (op, mode)
505 enum machine_mode mode ATTRIBUTE_UNUSED;
507 return (GET_CODE (op) == CONST_INT
508 && (unsigned HOST_WIDE_INT) INTVAL (op) < 64
509 && (INTVAL (op) & 7) == 0);
512 /* Return 1 if OP is the constant zero in floating-point. */
515 fp0_operand (op, mode)
517 enum machine_mode mode;
519 return (GET_MODE (op) == mode
520 && GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode));
523 /* Return 1 if OP is the floating-point constant zero or a register. */
526 reg_or_fp0_operand (op, mode)
528 enum machine_mode mode;
530 return fp0_operand (op, mode) || register_operand (op, mode);
533 /* Return 1 if OP is a hard floating-point register. */
536 hard_fp_register_operand (op, mode)
538 enum machine_mode mode;
540 return ((GET_CODE (op) == REG && REGNO_REG_CLASS (REGNO (op)) == FLOAT_REGS)
541 || (GET_CODE (op) == SUBREG
542 && hard_fp_register_operand (SUBREG_REG (op), mode)));
545 /* Return 1 if OP is a register or a constant integer. */
549 reg_or_cint_operand (op, mode)
551 enum machine_mode mode;
553 return (GET_CODE (op) == CONST_INT
554 || register_operand (op, mode));
557 /* Return 1 if OP is something that can be reloaded into a register;
558 if it is a MEM, it need not be valid. */
561 some_operand (op, mode)
563 enum machine_mode mode;
565 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
568 switch (GET_CODE (op))
570 case REG: case MEM: case CONST_DOUBLE: case CONST_INT: case LABEL_REF:
571 case SYMBOL_REF: case CONST:
575 return some_operand (SUBREG_REG (op), VOIDmode);
584 /* Return 1 if OP is a valid operand for the source of a move insn. */
587 input_operand (op, mode)
589 enum machine_mode mode;
591 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
594 if (GET_MODE_CLASS (mode) == MODE_FLOAT && GET_MODE (op) != mode)
597 switch (GET_CODE (op))
602 /* This handles both the Windows/NT and OSF cases. */
603 return mode == ptr_mode || mode == DImode;
609 if (register_operand (op, mode))
611 /* ... fall through ... */
613 return ((TARGET_BWX || (mode != HImode && mode != QImode))
614 && general_operand (op, mode));
617 return GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode);
620 return mode == QImode || mode == HImode || add_operand (op, mode);
632 /* Return 1 if OP is a SYMBOL_REF for a function known to be in this
636 current_file_function_operand (op, mode)
638 enum machine_mode mode ATTRIBUTE_UNUSED;
640 return (GET_CODE (op) == SYMBOL_REF
641 && ! profile_flag && ! profile_block_flag
642 && (SYMBOL_REF_FLAG (op)
643 || op == XEXP (DECL_RTL (current_function_decl), 0)));
646 /* Return 1 if OP is a valid operand for the MEM of a CALL insn. */
649 call_operand (op, mode)
651 enum machine_mode mode;
656 return (GET_CODE (op) == SYMBOL_REF
657 || (GET_CODE (op) == REG
658 && (TARGET_OPEN_VMS || TARGET_WINDOWS_NT || REGNO (op) == 27)));
661 /* Return 1 if OP is a valid Alpha comparison operator. Here we know which
662 comparisons are valid in which insn. */
665 alpha_comparison_operator (op, mode)
667 enum machine_mode mode;
669 enum rtx_code code = GET_CODE (op);
671 if (mode != GET_MODE (op) || GET_RTX_CLASS (code) != '<')
674 return (code == EQ || code == LE || code == LT
675 || (mode == DImode && (code == LEU || code == LTU)));
678 /* Return 1 if OP is a valid Alpha swapped comparison operator. */
681 alpha_swapped_comparison_operator (op, mode)
683 enum machine_mode mode;
685 enum rtx_code code = GET_CODE (op);
687 if (mode != GET_MODE (op) || GET_RTX_CLASS (code) != '<')
690 code = swap_condition (code);
691 return (code == EQ || code == LE || code == LT
692 || (mode == DImode && (code == LEU || code == LTU)));
695 /* Return 1 if OP is a signed comparison operation. */
698 signed_comparison_operator (op, mode)
700 enum machine_mode mode ATTRIBUTE_UNUSED;
702 switch (GET_CODE (op))
704 case EQ: case NE: case LE: case LT: case GE: case GT:
714 /* Return 1 if this is a divide or modulus operator. */
717 divmod_operator (op, mode)
719 enum machine_mode mode ATTRIBUTE_UNUSED;
721 switch (GET_CODE (op))
723 case DIV: case MOD: case UDIV: case UMOD:
733 /* Return 1 if this memory address is a known aligned register plus
734 a constant. It must be a valid address. This means that we can do
735 this as an aligned reference plus some offset.
737 Take into account what reload will do. */
740 aligned_memory_operand (op, mode)
742 enum machine_mode mode;
746 if (reload_in_progress)
749 if (GET_CODE (tmp) == SUBREG)
750 tmp = SUBREG_REG (tmp);
751 if (GET_CODE (tmp) == REG
752 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
754 op = reg_equiv_memory_loc[REGNO (tmp)];
760 if (GET_CODE (op) != MEM
761 || GET_MODE (op) != mode)
765 /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
766 sorts of constructs. Dig for the real base register. */
767 if (reload_in_progress
768 && GET_CODE (op) == PLUS
769 && GET_CODE (XEXP (op, 0)) == PLUS)
770 base = XEXP (XEXP (op, 0), 0);
773 if (! memory_address_p (mode, op))
775 base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);
778 return (GET_CODE (base) == REG
779 && REGNO_POINTER_ALIGN (REGNO (base)) >= 4);
782 /* Similar, but return 1 if OP is a MEM which is not alignable. */
785 unaligned_memory_operand (op, mode)
787 enum machine_mode mode;
791 if (reload_in_progress)
794 if (GET_CODE (tmp) == SUBREG)
795 tmp = SUBREG_REG (tmp);
796 if (GET_CODE (tmp) == REG
797 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
799 op = reg_equiv_memory_loc[REGNO (tmp)];
805 if (GET_CODE (op) != MEM
806 || GET_MODE (op) != mode)
810 /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
811 sorts of constructs. Dig for the real base register. */
812 if (reload_in_progress
813 && GET_CODE (op) == PLUS
814 && GET_CODE (XEXP (op, 0)) == PLUS)
815 base = XEXP (XEXP (op, 0), 0);
818 if (! memory_address_p (mode, op))
820 base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);
823 return (GET_CODE (base) == REG
824 && REGNO_POINTER_ALIGN (REGNO (base)) < 4);
827 /* Return 1 if OP is either a register or an unaligned memory location. */
830 reg_or_unaligned_mem_operand (op, mode)
832 enum machine_mode mode;
834 return register_operand (op, mode) || unaligned_memory_operand (op, mode);
837 /* Return 1 if OP is any memory location. During reload a pseudo matches. */
840 any_memory_operand (op, mode)
842 enum machine_mode mode ATTRIBUTE_UNUSED;
844 return (GET_CODE (op) == MEM
845 || (GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == REG)
846 || (reload_in_progress && GET_CODE (op) == REG
847 && REGNO (op) >= FIRST_PSEUDO_REGISTER)
848 || (reload_in_progress && GET_CODE (op) == SUBREG
849 && GET_CODE (SUBREG_REG (op)) == REG
850 && REGNO (SUBREG_REG (op)) >= FIRST_PSEUDO_REGISTER));
853 /* Returns 1 if OP is not an eliminable register.
855 This exists to cure a pathological abort in the s8addq (et al) patterns,
857 long foo () { long t; bar(); return (long) &t * 26107; }
859 which run afoul of a hack in reload to cure a (presumably) similar
860 problem with lea-type instructions on other targets. But there is
861 one of us and many of them, so work around the problem by selectively
862 preventing combine from making the optimization. */
865 reg_not_elim_operand (op, mode)
867 enum machine_mode mode;
870 if (GET_CODE (op) == SUBREG)
871 inner = SUBREG_REG (op);
872 if (inner == frame_pointer_rtx || inner == arg_pointer_rtx)
875 return register_operand (op, mode);
878 /* Return 1 is OP is a memory location that is not a reference (using
879 an AND) to an unaligned location. Take into account what reload
883 normal_memory_operand (op, mode)
885 enum machine_mode mode ATTRIBUTE_UNUSED;
887 if (reload_in_progress)
890 if (GET_CODE (tmp) == SUBREG)
891 tmp = SUBREG_REG (tmp);
892 if (GET_CODE (tmp) == REG
893 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
895 op = reg_equiv_memory_loc[REGNO (tmp)];
897 /* This may not have been assigned an equivalent address if it will
898 be eliminated. In that case, it doesn't matter what we do. */
904 return GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) != AND;
907 /* Accept a register, but not a subreg of any kind. This allows us to
908 avoid pathological cases in reload wrt data movement common in
909 int->fp conversion. */
912 reg_no_subreg_operand (op, mode)
914 enum machine_mode mode;
916 if (GET_CODE (op) == SUBREG)
918 return register_operand (op, mode);
921 /* Return 1 if this function can directly return via $26. */
926 return (! TARGET_OPEN_VMS && reload_completed && alpha_sa_size () == 0
927 && get_frame_size () == 0
928 && current_function_outgoing_args_size == 0
929 && current_function_pretend_args_size == 0);
932 /* REF is an alignable memory location. Place an aligned SImode
933 reference into *PALIGNED_MEM and the number of bits to shift into
934 *PBITNUM. SCRATCH is a free register for use in reloading out
935 of range stack slots. */
938 get_aligned_mem (ref, paligned_mem, pbitnum)
940 rtx *paligned_mem, *pbitnum;
943 HOST_WIDE_INT offset = 0;
945 if (GET_CODE (ref) != MEM)
948 if (reload_in_progress
949 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
951 base = find_replacement (&XEXP (ref, 0));
953 if (! memory_address_p (GET_MODE (ref), base))
958 base = XEXP (ref, 0);
961 if (GET_CODE (base) == PLUS)
962 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
964 *paligned_mem = gen_rtx_MEM (SImode, plus_constant (base, offset & ~3));
965 MEM_COPY_ATTRIBUTES (*paligned_mem, ref);
966 RTX_UNCHANGING_P (*paligned_mem) = RTX_UNCHANGING_P (ref);
968 /* Sadly, we cannot use alias sets here because we may overlap other
969 data in a different alias set. */
970 /* MEM_ALIAS_SET (*paligned_mem) = MEM_ALIAS_SET (ref); */
972 *pbitnum = GEN_INT ((offset & 3) * 8);
975 /* Similar, but just get the address. Handle the two reload cases.
976 Add EXTRA_OFFSET to the address we return. */
979 get_unaligned_address (ref, extra_offset)
984 HOST_WIDE_INT offset = 0;
986 if (GET_CODE (ref) != MEM)
989 if (reload_in_progress
990 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
992 base = find_replacement (&XEXP (ref, 0));
994 if (! memory_address_p (GET_MODE (ref), base))
999 base = XEXP (ref, 0);
1002 if (GET_CODE (base) == PLUS)
1003 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1005 return plus_constant (base, offset + extra_offset);
1008 /* Subfunction of the following function. Update the flags of any MEM
1009 found in part of X. */
1012 alpha_set_memflags_1 (x, in_struct_p, volatile_p, unchanging_p)
1014 int in_struct_p, volatile_p, unchanging_p;
1018 switch (GET_CODE (x))
1022 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1023 alpha_set_memflags_1 (XVECEXP (x, 0, i), in_struct_p, volatile_p,
1028 alpha_set_memflags_1 (PATTERN (x), in_struct_p, volatile_p,
1033 alpha_set_memflags_1 (SET_DEST (x), in_struct_p, volatile_p,
1035 alpha_set_memflags_1 (SET_SRC (x), in_struct_p, volatile_p,
1040 MEM_IN_STRUCT_P (x) = in_struct_p;
1041 MEM_VOLATILE_P (x) = volatile_p;
1042 RTX_UNCHANGING_P (x) = unchanging_p;
1043 /* Sadly, we cannot use alias sets because the extra aliasing
1044 produced by the AND interferes. Given that two-byte quantities
1045 are the only thing we would be able to differentiate anyway,
1046 there does not seem to be any point in convoluting the early
1047 out of the alias check. */
1048 /* MEM_ALIAS_SET (x) = alias_set; */
1056 /* Given INSN, which is either an INSN or a SEQUENCE generated to
1057 perform a memory operation, look for any MEMs in either a SET_DEST or
1058 a SET_SRC and copy the in-struct, unchanging, and volatile flags from
1059 REF into each of the MEMs found. If REF is not a MEM, don't do
1063 alpha_set_memflags (insn, ref)
1067 int in_struct_p, volatile_p, unchanging_p;
1069 if (GET_CODE (ref) != MEM)
1072 in_struct_p = MEM_IN_STRUCT_P (ref);
1073 volatile_p = MEM_VOLATILE_P (ref);
1074 unchanging_p = RTX_UNCHANGING_P (ref);
1076 /* This is only called from alpha.md, after having had something
1077 generated from one of the insn patterns. So if everything is
1078 zero, the pattern is already up-to-date. */
1079 if (! in_struct_p && ! volatile_p && ! unchanging_p)
1082 alpha_set_memflags_1 (insn, in_struct_p, volatile_p, unchanging_p);
1085 /* Try to output insns to set TARGET equal to the constant C if it can be
1086 done in less than N insns. Do all computations in MODE. Returns the place
1087 where the output has been placed if it can be done and the insns have been
1088 emitted. If it would take more than N insns, zero is returned and no
1089 insns and emitted. */
1092 alpha_emit_set_const (target, mode, c, n)
1094 enum machine_mode mode;
1101 /* Try 1 insn, then 2, then up to N. */
1102 for (i = 1; i <= n; i++)
1103 if ((pat = alpha_emit_set_const_1 (target, mode, c, i)) != 0)
1109 /* Internal routine for the above to check for N or below insns. */
1112 alpha_emit_set_const_1 (target, mode, c, n)
1114 enum machine_mode mode;
1118 HOST_WIDE_INT new = c;
1120 /* Use a pseudo if highly optimizing and still generating RTL. */
1122 = (flag_expensive_optimizations && rtx_equal_function_value_matters
1126 #if HOST_BITS_PER_WIDE_INT == 64
1127 /* We are only called for SImode and DImode. If this is SImode, ensure that
1128 we are sign extended to a full word. This does not make any sense when
1129 cross-compiling on a narrow machine. */
1132 c = (c & 0xffffffff) - 2 * (c & 0x80000000);
1135 /* If this is a sign-extended 32-bit constant, we can do this in at most
1136 three insns, so do it if we have enough insns left. We always have
1137 a sign-extended 32-bit constant when compiling on a narrow machine. */
1139 if (HOST_BITS_PER_WIDE_INT != 64
1140 || c >> 31 == -1 || c >> 31 == 0)
1142 HOST_WIDE_INT low = (c & 0xffff) - 2 * (c & 0x8000);
1143 HOST_WIDE_INT tmp1 = c - low;
1145 = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1146 HOST_WIDE_INT extra = 0;
1148 /* If HIGH will be interpreted as negative but the constant is
1149 positive, we must adjust it to do two ldha insns. */
1151 if ((high & 0x8000) != 0 && c >= 0)
1155 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1158 if (c == low || (low == 0 && extra == 0))
1160 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1161 but that meant that we can't handle INT_MIN on 32-bit machines
1162 (like NT/Alpha), because we recurse indefinitely through
1163 emit_move_insn to gen_movdi. So instead, since we know exactly
1164 what we want, create it explicitly. */
1167 target = gen_reg_rtx (mode);
1168 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1171 else if (n >= 2 + (extra != 0))
1173 temp = copy_to_suggested_reg (GEN_INT (low), subtarget, mode);
1176 temp = expand_binop (mode, add_optab, temp, GEN_INT (extra << 16),
1177 subtarget, 0, OPTAB_WIDEN);
1179 return expand_binop (mode, add_optab, temp, GEN_INT (high << 16),
1180 target, 0, OPTAB_WIDEN);
1184 /* If we couldn't do it that way, try some other methods. But if we have
1185 no instructions left, don't bother. Likewise, if this is SImode and
1186 we can't make pseudos, we can't do anything since the expand_binop
1187 and expand_unop calls will widen and try to make pseudos. */
1190 || (mode == SImode && ! rtx_equal_function_value_matters))
1193 #if HOST_BITS_PER_WIDE_INT == 64
1194 /* First, see if can load a value into the target that is the same as the
1195 constant except that all bytes that are 0 are changed to be 0xff. If we
1196 can, then we can do a ZAPNOT to obtain the desired constant. */
1198 for (i = 0; i < 64; i += 8)
1199 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1200 new |= (HOST_WIDE_INT) 0xff << i;
1202 /* We are only called for SImode and DImode. If this is SImode, ensure that
1203 we are sign extended to a full word. */
1206 new = (new & 0xffffffff) - 2 * (new & 0x80000000);
1209 && (temp = alpha_emit_set_const (subtarget, mode, new, n - 1)) != 0)
1210 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1211 target, 0, OPTAB_WIDEN);
1214 /* Next, see if we can load a related constant and then shift and possibly
1215 negate it to get the constant we want. Try this once each increasing
1216 numbers of insns. */
1218 for (i = 1; i < n; i++)
1220 /* First try complementing. */
1221 if ((temp = alpha_emit_set_const (subtarget, mode, ~ c, i)) != 0)
1222 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1224 /* Next try to form a constant and do a left shift. We can do this
1225 if some low-order bits are zero; the exact_log2 call below tells
1226 us that information. The bits we are shifting out could be any
1227 value, but here we'll just try the 0- and sign-extended forms of
1228 the constant. To try to increase the chance of having the same
1229 constant in more than one insn, start at the highest number of
1230 bits to shift, but try all possibilities in case a ZAPNOT will
1233 if ((bits = exact_log2 (c & - c)) > 0)
1234 for (; bits > 0; bits--)
1235 if ((temp = (alpha_emit_set_const
1237 (unsigned HOST_WIDE_INT) (c >> bits), i))) != 0
1238 || ((temp = (alpha_emit_set_const
1240 ((unsigned HOST_WIDE_INT) c) >> bits, i)))
1242 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1243 target, 0, OPTAB_WIDEN);
1245 /* Now try high-order zero bits. Here we try the shifted-in bits as
1246 all zero and all ones. Be careful to avoid shifting outside the
1247 mode and to avoid shifting outside the host wide int size. */
1248 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1249 confuse the recursive call and set all of the high 32 bits. */
1251 if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1252 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64))) > 0)
1253 for (; bits > 0; bits--)
1254 if ((temp = alpha_emit_set_const (subtarget, mode,
1256 || ((temp = (alpha_emit_set_const
1258 ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
1261 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1262 target, 1, OPTAB_WIDEN);
1264 /* Now try high-order 1 bits. We get that with a sign-extension.
1265 But one bit isn't enough here. Be careful to avoid shifting outside
1266 the mode and to avoid shifting outside the host wide int size. */
1268 if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1269 - floor_log2 (~ c) - 2)) > 0)
1270 for (; bits > 0; bits--)
1271 if ((temp = alpha_emit_set_const (subtarget, mode,
1273 || ((temp = (alpha_emit_set_const
1275 ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
1278 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1279 target, 0, OPTAB_WIDEN);
1285 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1286 fall back to a straightforward decomposition. We do this to avoid
1287 exponential run times encountered when looking for longer sequences
1288 with alpha_emit_set_const. */
/* Set TARGET to the 64-bit constant whose low word is C1 and high word
   is C2, by splitting it into sign-extended 16-bit pieces d1..d4 that
   the lda/ldah-style add patterns can materialize, then combining them
   with moves, adds, and a 32-bit shift.
   NOTE(review): this listing is elided; intervening lines are missing,
   so the per-piece conditionals are only partially visible.  */
1291 alpha_emit_set_long_const (target, c1, c2)
1293 HOST_WIDE_INT c1, c2;
1295 HOST_WIDE_INT d1, d2, d3, d4;
1297 /* Decompose the entire word */
1298 #if HOST_BITS_PER_WIDE_INT >= 64
/* On a 64-bit host, C2 must be the sign extension of C1; anything else
   cannot be represented.  */
1299 if (c2 != -(c1 < 0))
/* ((x & 0xffff) ^ 0x8000) - 0x8000 sign-extends the low 16 bits of x.  */
1301 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
/* Likewise, the 0x80000000 form sign-extends the low 32 bits.  */
1303 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1304 c1 = (c1 - d2) >> 32;
1305 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1307 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
/* 32-bit host: the low pieces come from C1, the high pieces from C2.  */
1311 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1313 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1317 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1319 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1324 /* Construct the high word */
1327 emit_move_insn (target, GEN_INT (d4));
1329 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1332 emit_move_insn (target, GEN_INT (d3));
1334 /* Shift it into place */
1335 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
1337 /* Add in the low bits. */
1339 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
1341 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
1346 /* Generate the comparison for a conditional branch. */
/* Fold the pending comparison (alpha_compare_op0/op1, recorded by the
   cmpxx patterns, with alpha_compare_fp_p saying whether it is FP) into
   a compare insn plus a branch condition, and return the rtx to use as
   the branch comparison.  cmp_code == NIL means no separate compare insn
   is needed and the branch tests the operands directly.  */
1349 alpha_emit_conditional_branch (code)
1352 enum rtx_code cmp_code, branch_code;
1353 enum machine_mode cmp_mode, branch_mode = VOIDmode;
1354 rtx op0 = alpha_compare_op0, op1 = alpha_compare_op1;
1357 /* The general case: fold the comparison code to the types of compares
1358 that we have, choosing the branch as necessary. */
1361 case EQ: case LE: case LT: case LEU: case LTU:
1362 /* We have these compares: */
1363 cmp_code = code, branch_code = NE;
1367 /* This must be reversed. */
1368 cmp_code = EQ, branch_code = EQ;
1371 case GE: case GT: case GEU: case GTU:
1372 /* For FP, we swap them, for INT, we reverse them. */
1373 if (alpha_compare_fp_p)
1375 cmp_code = swap_condition (code);
1377 tem = op0, op0 = op1, op1 = tem;
1381 cmp_code = reverse_condition (code);
1390 if (alpha_compare_fp_p)
1395 /* When we are not as concerned about non-finite values, and we
1396 are comparing against zero, we can branch directly. */
1397 if (op1 == CONST0_RTX (DFmode))
1398 cmp_code = NIL, branch_code = code;
1399 else if (op0 == CONST0_RTX (DFmode))
1401 /* Undo the swap we probably did just above. */
1402 tem = op0, op0 = op1, op1 = tem;
1403 branch_code = swap_condition (cmp_code);
1409 /* ??? We mark the branch mode to be CCmode to prevent the
1410 compare and branch from being combined, since the compare
1411 insn follows IEEE rules that the branch does not. */
1412 branch_mode = CCmode;
1419 /* The following optimizations are only for signed compares. */
1420 if (code != LEU && code != LTU && code != GEU && code != GTU)
1422 /* Whee. Compare and branch against 0 directly. */
1423 if (op1 == const0_rtx)
1424 cmp_code = NIL, branch_code = code;
1426 /* We want to use cmpcc/bcc when we can, since there is a zero delay
1427 bypass between logicals and br/cmov on EV5. But we don't want to
1428 force valid immediate constants into registers needlessly. */
1429 else if (GET_CODE (op1) == CONST_INT)
1431 HOST_WIDE_INT v = INTVAL (op1), n = -v;
/* 'I'/'K'/'L' are Alpha constraint letters for valid immediates; if -v
   fits where v does not, compare via an addition instead.  */
1433 if (! CONST_OK_FOR_LETTER_P (v, 'I')
1434 && (CONST_OK_FOR_LETTER_P (n, 'K')
1435 || CONST_OK_FOR_LETTER_P (n, 'L')))
1437 cmp_code = PLUS, branch_code = code;
1444 /* Force op0 into a register. */
1445 if (GET_CODE (op0) != REG)
1446 op0 = force_reg (cmp_mode, op0);
1448 /* Emit an initial compare instruction, if necessary. */
1450 if (cmp_code != NIL)
1452 tem = gen_reg_rtx (cmp_mode);
1453 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
1456 /* Return the branch comparison. */
1457 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
1461 /* Rewrite a comparison against zero CMP of the form
1462 (CODE (cc0) (const_int 0)) so it can be written validly in
1463 a conditional move (if_then_else CMP ...).
1464 If both of the operands that set cc0 are non-zero we must emit
1465 an insn to perform the compare (it can't be done within
1466 the conditional move). */
/* Returns the comparison rtx to embed in the if_then_else; reads the
   pending compare from alpha_compare_op0/op1 and alpha_compare_fp_p.  */
1468 alpha_emit_conditional_move (cmp, mode)
1470 enum machine_mode mode;
1472 enum rtx_code code = GET_CODE (cmp);
1473 enum rtx_code cmov_code = NE;
1474 rtx op0 = alpha_compare_op0;
1475 rtx op1 = alpha_compare_op1;
1476 enum machine_mode cmp_mode
1477 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
1478 enum machine_mode cmp_op_mode = alpha_compare_fp_p ? DFmode : DImode;
1479 enum machine_mode cmov_mode = VOIDmode;
/* FP/integer mismatch between the compare and the move destination
   cannot be handled here.  */
1482 if (alpha_compare_fp_p != FLOAT_MODE_P (mode))
1485 /* We may be able to use a conditional move directly.
1486 This avoids emitting spurious compares. */
1487 if (signed_comparison_operator (cmp, cmp_op_mode)
1488 && (!alpha_compare_fp_p || flag_fast_math)
1489 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
1490 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
1492 /* We can't put the comparison inside a conditional move;
1493 emit a compare instruction and put that inside the
1494 conditional move. Make sure we emit only comparisons we have;
1495 swap or reverse as necessary. */
1499 case EQ: case LE: case LT: case LEU: case LTU:
1500 /* We have these compares: */
1504 /* This must be reversed. */
1505 code = reverse_condition (code);
1509 case GE: case GT: case GEU: case GTU:
1510 /* These must be swapped. Make sure the new first operand is in
1512 code = swap_condition (code);
1513 tem = op0, op0 = op1, op1 = tem;
1514 op0 = force_reg (cmp_mode, op0);
1521 /* ??? We mark the branch mode to be CCmode to prevent the compare
1522 and cmov from being combined, since the compare insn follows IEEE
1523 rules that the cmov does not. */
1524 if (alpha_compare_fp_p && !flag_fast_math)
1527 tem = gen_reg_rtx (cmp_op_mode);
1528 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
1529 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
1532 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
1536 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
1537 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
1538 lda r3,X(r11) lda r3,X+2(r11)
1539 extwl r1,r3,r1 extql r1,r3,r1
1540 extwh r2,r3,r2 extqh r2,r3,r2
1541 or r1,r2,r1 or r1,r2,r1
1544 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
1545 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
1546 lda r3,X(r11) lda r3,X(r11)
1547 extll r1,r3,r1 extll r1,r3,r1
1548 extlh r2,r3,r2 extlh r2,r3,r2
1549 or r1,r2,r1 addl r1,r2,r1
1551 quad: ldq_u r1,X(r11)
/* Load SIZE bytes (2, 4, or 8) from unaligned MEM+OFS into TGT, using
   two aligned quadword loads plus extract-low/extract-high and an OR,
   per the sequences shown above.  SIGN requests sign extension (the
   special-cased path below handles sign && size == 2).  */
1560 alpha_expand_unaligned_load (tgt, mem, size, ofs, sign)
1562 HOST_WIDE_INT size, ofs;
1565 rtx meml, memh, addr, extl, exth;
1566 enum machine_mode mode;
1568 meml = gen_reg_rtx (DImode);
1569 memh = gen_reg_rtx (DImode);
1570 addr = gen_reg_rtx (DImode);
1571 extl = gen_reg_rtx (DImode);
1572 exth = gen_reg_rtx (DImode);
/* The AND with the address masks it down to the containing aligned
   quadword (ldq_u semantics).  */
1574 emit_move_insn (meml,
1575 change_address (mem, DImode,
1576 gen_rtx_AND (DImode,
1577 plus_constant (XEXP (mem, 0),
1581 emit_move_insn (memh,
1582 change_address (mem, DImode,
1583 gen_rtx_AND (DImode,
1584 plus_constant (XEXP (mem, 0),
/* Signed halfword: extract to the top of the register and arithmetic
   shift right 48 to sign-extend.  */
1588 if (sign && size == 2)
1590 emit_move_insn (addr, plus_constant (XEXP (mem, 0), ofs+2));
1592 emit_insn (gen_extxl (extl, meml, GEN_INT (64), addr));
1593 emit_insn (gen_extqh (exth, memh, addr));
1595 /* We must use tgt here for the target. Alpha-vms port fails if we use
1596 addr for the target, because addr is marked as a pointer and combine
1597 knows that pointers are always sign-extended 32 bit values. */
1598 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
1599 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
1600 addr, 1, OPTAB_WIDEN);
/* Generic path: extract low part, then the matching high part by size.  */
1604 emit_move_insn (addr, plus_constant (XEXP (mem, 0), ofs));
1605 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
1609 emit_insn (gen_extwh (exth, memh, addr));
1614 emit_insn (gen_extlh (exth, memh, addr));
1619 emit_insn (gen_extqh (exth, memh, addr));
/* Merge the two halves and copy into TGT in its own mode.  */
1626 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
1627 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
1632 emit_move_insn (tgt, gen_lowpart(GET_MODE (tgt), addr));
1635 /* Similarly, use ins and msk instructions to perform unaligned stores. */
/* Store the SIZE-byte value SRC (or zeros when SRC == const0_rtx) to the
   unaligned address DST+OFS: read both covering aligned quadwords, mask
   out the destination bytes, insert the shifted source bytes, and write
   the quadwords back (high before low — see comment below).  */
1638 alpha_expand_unaligned_store (dst, src, size, ofs)
1640 HOST_WIDE_INT size, ofs;
1642 rtx dstl, dsth, addr, insl, insh, meml, memh;
1644 dstl = gen_reg_rtx (DImode);
1645 dsth = gen_reg_rtx (DImode);
1646 insl = gen_reg_rtx (DImode);
1647 insh = gen_reg_rtx (DImode);
/* Aligned quadwords containing the low and high ends of the store.  */
1649 meml = change_address (dst, DImode,
1650 gen_rtx_AND (DImode,
1651 plus_constant (XEXP (dst, 0), ofs),
1653 memh = change_address (dst, DImode,
1654 gen_rtx_AND (DImode,
1655 plus_constant (XEXP (dst, 0),
1659 emit_move_insn (dsth, memh);
1660 emit_move_insn (dstl, meml);
1661 addr = copy_addr_to_reg (plus_constant (XEXP (dst, 0), ofs));
/* Position the source bytes; skipped entirely when storing zeros.  */
1663 if (src != const0_rtx)
1665 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
1666 GEN_INT (size*8), addr));
1671 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
1674 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
1677 emit_insn (gen_insql (insl, src, addr));
/* Clear the destination bytes out of the loaded quadwords.  */
1682 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
1687 emit_insn (gen_mskxl (dstl, dstl, GEN_INT (0xffff), addr));
1690 emit_insn (gen_mskxl (dstl, dstl, GEN_INT (0xffffffff), addr));
/* All-ones quadword mask; built differently on 32-bit hosts.  */
1694 #if HOST_BITS_PER_WIDE_INT == 32
1695 rtx msk = immed_double_const (0xffffffff, 0xffffffff, DImode);
1697 rtx msk = immed_double_const (0xffffffffffffffff, 0, DImode);
1699 emit_insn (gen_mskxl (dstl, dstl, msk, addr));
1704 if (src != const0_rtx)
1706 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
1707 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
1710 /* Must store high before low for degenerate case of aligned. */
1711 emit_move_insn (memh, dsth);
1712 emit_move_insn (meml, dstl);
1715 /* The block move code tries to maximize speed by separating loads and
1716 stores at the expense of register pressure: we load all of the data
1717 before we store it back out. There are two secondary effects worth
1718 mentioning, that this speeds copying to/from aligned and unaligned
1719 buffers, and that it makes the code significantly easier to write. */
1721 #define MAX_MOVE_WORDS 8
1723 /* Load an integral number of consecutive unaligned quadwords. */
/* Load WORDS consecutive unaligned quadwords from SMEM+OFS into
   OUT_REGS[0..WORDS-1].  Reads WORDS+1 overlapping aligned quadwords,
   then pairs extql/extqh fragments and ORs them together.  */
1726 alpha_expand_unaligned_load_words (out_regs, smem, words, ofs)
1729 HOST_WIDE_INT words, ofs;
1731 rtx const im8 = GEN_INT (-8);
1732 rtx const i64 = GEN_INT (64);
1733 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
1737 /* Generate all the tmp registers we need. */
1738 for (i = 0; i < words; ++i)
1740 data_regs[i] = out_regs[i];
1741 ext_tmps[i] = gen_reg_rtx (DImode);
1743 data_regs[words] = gen_reg_rtx (DImode);
/* Fold OFS into the address up front.  */
1746 smem = change_address (smem, GET_MODE (smem),
1747 plus_constant (XEXP (smem, 0), ofs));
1749 /* Load up all of the source data. */
1750 for (i = 0; i < words; ++i)
1752 emit_move_insn (data_regs[i],
1753 change_address (smem, DImode,
1754 gen_rtx_AND (DImode,
1755 plus_constant (XEXP(smem,0),
/* One extra quadword covers the tail that spills past the last word.  */
1759 emit_move_insn (data_regs[words],
1760 change_address (smem, DImode,
1761 gen_rtx_AND (DImode,
1762 plus_constant (XEXP(smem,0),
1766 /* Extract the half-word fragments. Unfortunately DEC decided to make
1767 extxh with offset zero a noop instead of zeroing the register, so
1768 we must take care of that edge condition ourselves with cmov. */
1770 sreg = copy_addr_to_reg (XEXP (smem, 0));
/* areg = low 3 bits of the address; zero means the extqh result must be
   forced to zero via the conditional move below.  */
1771 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
1773 for (i = 0; i < words; ++i)
1775 emit_insn (gen_extxl (data_regs[i], data_regs[i], i64, sreg));
1777 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
1778 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
1779 gen_rtx_IF_THEN_ELSE (DImode,
1780 gen_rtx_EQ (DImode, areg,
1782 const0_rtx, ext_tmps[i])));
1785 /* Merge the half-words into whole words. */
1786 for (i = 0; i < words; ++i)
1788 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
1789 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
1793 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
1794 may be NULL to store zeros. */
/* Store WORDS quadwords from DATA_REGS[0..WORDS-1] (or zeros when
   DATA_REGS == NULL) to the unaligned address DMEM+OFS.  Only the two
   end quadwords need read-modify-write; the interior ones are written
   whole.  */
1797 alpha_expand_unaligned_store_words (data_regs, dmem, words, ofs)
1800 HOST_WIDE_INT words, ofs;
1802 rtx const im8 = GEN_INT (-8);
1803 rtx const i64 = GEN_INT (64);
/* All-ones quadword, built per host word size.  */
1804 #if HOST_BITS_PER_WIDE_INT == 32
1805 rtx const im1 = immed_double_const (0xffffffff, 0xffffffff, DImode);
1807 rtx const im1 = immed_double_const (0xffffffffffffffff, 0, DImode);
1809 rtx ins_tmps[MAX_MOVE_WORDS];
1810 rtx st_tmp_1, st_tmp_2, dreg;
1811 rtx st_addr_1, st_addr_2;
1814 /* Generate all the tmp registers we need. */
1815 if (data_regs != NULL)
1816 for (i = 0; i < words; ++i)
1817 ins_tmps[i] = gen_reg_rtx(DImode);
1818 st_tmp_1 = gen_reg_rtx(DImode);
1819 st_tmp_2 = gen_reg_rtx(DImode);
/* Fold OFS into the address up front.  */
1822 dmem = change_address (dmem, GET_MODE (dmem),
1823 plus_constant (XEXP (dmem, 0), ofs));
/* Aligned quadwords holding the last (st_addr_2) and first (st_addr_1)
   partial destinations.  */
1826 st_addr_2 = change_address (dmem, DImode,
1827 gen_rtx_AND (DImode,
1828 plus_constant (XEXP(dmem,0),
1831 st_addr_1 = change_address (dmem, DImode,
1832 gen_rtx_AND (DImode,
1836 /* Load up the destination end bits. */
1837 emit_move_insn (st_tmp_2, st_addr_2);
1838 emit_move_insn (st_tmp_1, st_addr_1);
1840 /* Shift the input data into place. */
1841 dreg = copy_addr_to_reg (XEXP (dmem, 0));
1842 if (data_regs != NULL)
1844 for (i = words-1; i >= 0; --i)
1846 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
1847 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
/* Combine each word's low part with the previous word's high spill.  */
1849 for (i = words-1; i > 0; --i)
1851 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
1852 ins_tmps[i-1], ins_tmps[i-1], 1,
1857 /* Split and merge the ends with the destination data. */
1858 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
1859 emit_insn (gen_mskxl (st_tmp_1, st_tmp_1, im1, dreg));
1861 if (data_regs != NULL)
1863 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
1864 st_tmp_2, 1, OPTAB_WIDEN);
1865 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
1866 st_tmp_1, 1, OPTAB_WIDEN);
/* NOTE(review): stores appear to be emitted back-to-front (last quadword
   first) — presumably for the aligned degenerate case, as in
   alpha_expand_unaligned_store; confirm against the full source.  */
1870 emit_move_insn (st_addr_2, st_tmp_2);
1871 for (i = words-1; i > 0; --i)
1873 emit_move_insn (change_address (dmem, DImode,
1874 gen_rtx_AND (DImode,
1875 plus_constant(XEXP (dmem,0),
1878 data_regs ? ins_tmps[i-1] : const0_rtx);
1880 emit_move_insn (st_addr_1, st_tmp_1);
1884 /* Expand string/block move operations.
1886 operands[0] is the pointer to the destination.
1887 operands[1] is the pointer to the source.
1888 operands[2] is the number of bytes to move.
1889 operands[3] is the alignment. */
/* Strategy: read the whole block into DATA_REGS using the widest
   accesses the source alignment allows, then write the registers out
   using the widest accesses the destination alignment allows.  Bails
   out (presumably returning 0 to punt to a library call) for blocks
   larger than MAX_MOVE_WORDS quadwords.  */
1892 alpha_expand_block_move (operands)
1895 rtx bytes_rtx = operands[2];
1896 rtx align_rtx = operands[3];
1897 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
1898 HOST_WIDE_INT bytes = orig_bytes;
1899 HOST_WIDE_INT src_align = INTVAL (align_rtx);
1900 HOST_WIDE_INT dst_align = src_align;
1901 rtx orig_src = operands[1];
1902 rtx orig_dst = operands[0];
1903 rtx data_regs[2*MAX_MOVE_WORDS+16];
1905 int i, words, ofs, nregs = 0;
1909 if (bytes > MAX_MOVE_WORDS*8)
1912 /* Look for additional alignment information from recorded register info. */
1914 tmp = XEXP (orig_src, 0);
1915 if (GET_CODE (tmp) == REG)
1917 if (REGNO_POINTER_ALIGN (REGNO (tmp)) > src_align)
1918 src_align = REGNO_POINTER_ALIGN (REGNO (tmp));
1920 else if (GET_CODE (tmp) == PLUS
1921 && GET_CODE (XEXP (tmp, 0)) == REG
1922 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
/* reg+const address: alignment is limited by both the register's known
   alignment and the constant offset.  */
1924 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
1925 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
1929 if (a >= 8 && c % 8 == 0)
1931 else if (a >= 4 && c % 4 == 0)
1933 else if (a >= 2 && c % 2 == 0)
/* Same analysis for the destination pointer.  */
1938 tmp = XEXP (orig_dst, 0);
1939 if (GET_CODE (tmp) == REG)
1941 if (REGNO_POINTER_ALIGN (REGNO (tmp)) > dst_align)
1942 dst_align = REGNO_POINTER_ALIGN (REGNO (tmp));
1944 else if (GET_CODE (tmp) == PLUS
1945 && GET_CODE (XEXP (tmp, 0)) == REG
1946 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
1948 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
1949 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
1953 if (a >= 8 && c % 8 == 0)
1955 else if (a >= 4 && c % 4 == 0)
1957 else if (a >= 2 && c % 2 == 0)
1963 * Load the entire block into registers.
/* Source is a pseudo that never got a stack slot (ADDRESSOF): try to
   read directly from the register instead of through memory.  */
1966 if (GET_CODE (XEXP (orig_src, 0)) == ADDRESSOF)
1968 enum machine_mode mode;
1969 tmp = XEXP (XEXP (orig_src, 0), 0);
1971 /* Don't use the existing register if we're reading more than
1972 is held in the register. Nor if there is not a mode that
1973 handles the exact size. */
1974 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 1);
1976 && GET_MODE_SIZE (GET_MODE (tmp)) >= bytes)
1980 data_regs[nregs] = gen_lowpart (DImode, tmp);
1981 data_regs[nregs+1] = gen_highpart (DImode, tmp);
1985 data_regs[nregs++] = gen_lowpart (mode, tmp);
1989 /* No appropriate mode; fall back on memory. */
1990 orig_src = change_address (orig_src, GET_MODE (orig_src),
1991 copy_addr_to_reg (XEXP (orig_src, 0)));
/* Aligned quadword loads.  */
1995 if (src_align >= 8 && bytes >= 8)
1999 for (i = 0; i < words; ++i)
2000 data_regs[nregs+i] = gen_reg_rtx(DImode);
2002 for (i = 0; i < words; ++i)
2004 emit_move_insn (data_regs[nregs+i],
2005 change_address (orig_src, DImode,
2006 plus_constant (XEXP (orig_src, 0),
/* Aligned longword loads.  */
2014 if (src_align >= 4 && bytes >= 4)
2018 for (i = 0; i < words; ++i)
2019 data_regs[nregs+i] = gen_reg_rtx(SImode);
2021 for (i = 0; i < words; ++i)
2023 emit_move_insn (data_regs[nregs+i],
2024 change_address (orig_src, SImode,
2025 plus_constant (XEXP (orig_src, 0),
/* Unaligned multi-quadword loads (note the extra scratch register).  */
2037 for (i = 0; i < words+1; ++i)
2038 data_regs[nregs+i] = gen_reg_rtx(DImode);
2040 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
/* Remaining sub-word / unaligned tails.  */
2047 if (!TARGET_BWX && bytes >= 8)
2049 data_regs[nregs++] = tmp = gen_reg_rtx (DImode);
2050 alpha_expand_unaligned_load (tmp, orig_src, 8, ofs, 0);
2054 if (!TARGET_BWX && bytes >= 4)
2056 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
2057 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
2066 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
2067 emit_move_insn (tmp,
2068 change_address (orig_src, HImode,
2069 plus_constant (XEXP (orig_src, 0),
2073 } while (bytes >= 2);
2075 else if (!TARGET_BWX)
2077 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
2078 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
2085 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
2086 emit_move_insn (tmp,
2087 change_address (orig_src, QImode,
2088 plus_constant (XEXP (orig_src, 0),
/* Sanity check: we must not have overflowed the register array.  */
2095 if (nregs > (int)(sizeof(data_regs)/sizeof(*data_regs)))
2099 * Now save it back out again.
/* Destination is an un-spilled pseudo: try to write it directly.  */
2104 if (GET_CODE (XEXP (orig_dst, 0)) == ADDRESSOF)
2106 enum machine_mode mode;
2107 tmp = XEXP (XEXP (orig_dst, 0), 0);
2109 mode = mode_for_size (orig_bytes * BITS_PER_UNIT, MODE_INT, 1);
2110 if (GET_MODE (tmp) == mode)
2114 emit_move_insn (tmp, data_regs[0]);
2118 else if (nregs == 2 && mode == TImode)
2120 /* Undo the subregging done above when copying between
2121 two TImode registers. */
2122 if (GET_CODE (data_regs[0]) == SUBREG
2123 && GET_MODE (SUBREG_REG (data_regs[0])) == TImode)
2125 emit_move_insn (tmp, SUBREG_REG (data_regs[0]));
2132 emit_move_insn (gen_lowpart (DImode, tmp), data_regs[0]);
2133 emit_move_insn (gen_highpart (DImode, tmp), data_regs[1]);
2137 emit_no_conflict_block (seq, tmp, data_regs[0],
2138 data_regs[1], NULL_RTX);
2146 /* ??? If nregs > 1, consider reconstructing the word in regs. */
2147 /* ??? Optimize mode < dst_mode with strict_low_part. */
2149 /* No appropriate mode; fall back on memory. We can speed things
2150 up by recognizing extra alignment information. */
2151 orig_dst = change_address (orig_dst, GET_MODE (orig_dst),
2152 copy_addr_to_reg (XEXP (orig_dst, 0)));
2153 dst_align = GET_MODE_SIZE (GET_MODE (tmp));
2156 /* Write out the data in whatever chunks reading the source allowed. */
/* Aligned quadword stores.  */
2159 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
2161 emit_move_insn (change_address (orig_dst, DImode,
2162 plus_constant (XEXP (orig_dst, 0),
2171 /* If the source has remaining DImode regs, write them out in
/* ...longword pairs: the low half directly, the high half via a
   32-bit logical shift right.  */
2173 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
2175 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
2176 NULL_RTX, 1, OPTAB_WIDEN);
2178 emit_move_insn (change_address (orig_dst, SImode,
2179 plus_constant (XEXP (orig_dst, 0),
2181 gen_lowpart (SImode, data_regs[i]));
2182 emit_move_insn (change_address (orig_dst, SImode,
2183 plus_constant (XEXP (orig_dst, 0),
2185 gen_lowpart (SImode, tmp));
2190 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
2192 emit_move_insn (change_address(orig_dst, SImode,
2193 plus_constant (XEXP (orig_dst, 0),
/* Unaligned destination: store runs of DImode registers via the
   unaligned-store helpers.  */
2200 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
2202 /* Write out a remaining block of words using unaligned methods. */
2204 for (words = 1; i+words < nregs ; ++words)
2205 if (GET_MODE (data_regs[i+words]) != DImode)
2209 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
2211 alpha_expand_unaligned_store_words (data_regs+i, orig_dst, words, ofs);
2217 /* Due to the above, this won't be aligned. */
2218 /* ??? If we have more than one of these, consider constructing full
2219 words in registers and using alpha_expand_unaligned_store_words. */
2220 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
2222 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
2228 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
2230 emit_move_insn (change_address (orig_dst, HImode,
2231 plus_constant (XEXP (orig_dst, 0),
2238 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
2240 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
2244 while (i < nregs && GET_MODE (data_regs[i]) == QImode)
2246 emit_move_insn (change_address (orig_dst, QImode,
2247 plus_constant (XEXP (orig_dst, 0),
/* Expand a block-clear (memset-to-zero) operation.
   operands[0] is the destination, operands[1] the byte count,
   operands[2] the alignment.  Mirrors alpha_expand_block_move's
   structure: refine alignment, clear aligned chunks with direct
   stores of const0_rtx, then handle unaligned/trailing pieces.  */
2262 alpha_expand_block_clear (operands)
2265 rtx bytes_rtx = operands[1];
2266 rtx align_rtx = operands[2];
2267 HOST_WIDE_INT bytes = INTVAL (bytes_rtx);
2268 HOST_WIDE_INT align = INTVAL (align_rtx);
2269 rtx orig_dst = operands[0];
2271 HOST_WIDE_INT i, words, ofs = 0;
/* Punt on blocks larger than MAX_MOVE_WORDS quadwords.  */
2275 if (bytes > MAX_MOVE_WORDS*8)
2278 /* Look for stricter alignment. */
2280 tmp = XEXP (orig_dst, 0);
2281 if (GET_CODE (tmp) == REG)
2283 if (REGNO_POINTER_ALIGN (REGNO (tmp)) > align)
2284 align = REGNO_POINTER_ALIGN (REGNO (tmp));
2286 else if (GET_CODE (tmp) == PLUS
2287 && GET_CODE (XEXP (tmp, 0)) == REG
2288 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
2290 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
2291 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
2295 if (a >= 8 && c % 8 == 0)
2297 else if (a >= 4 && c % 4 == 0)
2299 else if (a >= 2 && c % 2 == 0)
/* Un-spilled pseudo destination: clear the register directly when a
   mode of the exact size exists.  */
2303 else if (GET_CODE (tmp) == ADDRESSOF)
2305 enum machine_mode mode;
2307 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 1);
2308 if (GET_MODE (XEXP (tmp, 0)) == mode)
2310 emit_move_insn (XEXP (tmp, 0), const0_rtx);
2314 /* No appropriate mode; fall back on memory. */
2315 orig_dst = change_address (orig_dst, GET_MODE (orig_dst),
2316 copy_addr_to_reg (tmp));
2317 align = GET_MODE_SIZE (GET_MODE (XEXP (tmp, 0)));
2320 /* Handle a block of contiguous words first. */
2322 if (align >= 8 && bytes >= 8)
2326 for (i = 0; i < words; ++i)
2328 emit_move_insn (change_address(orig_dst, DImode,
2329 plus_constant (XEXP (orig_dst, 0),
2337 if (align >= 4 && bytes >= 4)
2341 for (i = 0; i < words; ++i)
2343 emit_move_insn (change_address (orig_dst, SImode,
2344 plus_constant (XEXP (orig_dst, 0),
/* Unaligned middle: NULL data_regs means store zeros.  */
2356 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
2362 /* Next clean up any trailing pieces. We know from the contiguous
2363 block move that there are no aligned SImode or DImode hunks left. */
2365 if (!TARGET_BWX && bytes >= 8)
2367 alpha_expand_unaligned_store (orig_dst, const0_rtx, 8, ofs);
2371 if (!TARGET_BWX && bytes >= 4)
2373 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
2382 emit_move_insn (change_address (orig_dst, HImode,
2383 plus_constant (XEXP (orig_dst, 0),
2388 } while (bytes >= 2);
2390 else if (!TARGET_BWX)
2392 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
2399 emit_move_insn (change_address (orig_dst, QImode,
2400 plus_constant (XEXP (orig_dst, 0),
2411 /* Adjust the cost of a scheduling dependency. Return the new cost of
2412 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
/* Implements the ADJUST_COST scheduler hook for Alpha: tweaks latencies
   based on insn-attribute types and per-CPU (EV4/EV5) bypass rules
   quoted from DEC's hardware documentation.  */
2415 alpha_adjust_cost (insn, link, dep_insn, cost)
2422 enum attr_type insn_type, dep_insn_type;
2424 /* If the dependence is an anti-dependence, there is no cost. For an
2425 output dependence, there is sometimes a cost, but it doesn't seem
2426 worth handling those few cases. */
/* REG_NOTE_KIND != 0 means LINK is not a true data dependence.  */
2428 if (REG_NOTE_KIND (link) != 0)
2431 /* If we can't recognize the insns, we can't really do anything. */
2432 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
2435 insn_type = get_attr_type (insn);
2436 dep_insn_type = get_attr_type (dep_insn);
2438 /* Bring in the user-defined memory latency. */
2439 if (dep_insn_type == TYPE_ILD
2440 || dep_insn_type == TYPE_FLD
2441 || dep_insn_type == TYPE_LDSYM)
2442 cost += alpha_memory_latency-1;
2447 /* On EV4, if INSN is a store insn and DEP_INSN is setting the data
2448 being stored, we can sometimes lower the cost. */
2450 if ((insn_type == TYPE_IST || insn_type == TYPE_FST)
2451 && (set = single_set (dep_insn)) != 0
2452 && GET_CODE (PATTERN (insn)) == SET
2453 && rtx_equal_p (SET_DEST (set), SET_SRC (PATTERN (insn))))
2455 switch (dep_insn_type)
2459 /* No savings here. */
2463 /* In these cases, we save one cycle. */
2467 /* In all other cases, we save two cycles. */
2468 return MAX (0, cost - 2);
2472 /* Another case that needs adjustment is an arithmetic or logical
2473 operation. Its cost is usually one cycle, but we default it to
2474 two in the MD file. The only case that it is actually two is
2475 for the address in loads, stores, and jumps. */
2477 if (dep_insn_type == TYPE_IADD || dep_insn_type == TYPE_ILOG)
2492 /* The final case is when a compare feeds into an integer branch;
2493 the cost is only one cycle in that case. */
2495 if (dep_insn_type == TYPE_ICMP && insn_type == TYPE_IBR)
2500 /* And the lord DEC saith: "A special bypass provides an effective
2501 latency of 0 cycles for an ICMP or ILOG insn producing the test
2502 operand of an IBR or ICMOV insn." */
2504 if ((dep_insn_type == TYPE_ICMP || dep_insn_type == TYPE_ILOG)
2505 && (set = single_set (dep_insn)) != 0)
2507 /* A branch only has one input. This must be it. */
2508 if (insn_type == TYPE_IBR)
2510 /* A conditional move has three, make sure it is the test. */
2511 if (insn_type == TYPE_ICMOV
2512 && GET_CODE (set_src = PATTERN (insn)) == SET
2513 && GET_CODE (set_src = SET_SRC (set_src)) == IF_THEN_ELSE
2514 && rtx_equal_p (SET_DEST (set), XEXP (set_src, 0)))
2518 /* "The multiplier is unable to receive data from IEU bypass paths.
2519 The instruction issues at the expected time, but its latency is
2520 increased by the time it takes for the input data to become
2521 available to the multiplier" -- which happens in pipeline stage
2522 six, when results are committed to the register file. */
2524 if (insn_type == TYPE_IMUL)
2526 switch (dep_insn_type)
2528 /* These insns produce their results in pipeline stage five. */
2535 /* Other integer insns produce results in pipeline stage four. */
2543 /* There is additional latency to move the result of (most) FP
2544 operations anywhere but the FP register file. */
2546 if ((insn_type == TYPE_FST || insn_type == TYPE_FTOI)
2547 && (dep_insn_type == TYPE_FADD ||
2548 dep_insn_type == TYPE_FMUL ||
2549 dep_insn_type == TYPE_FCMOV))
2555 /* Otherwise, return the default cost. */
2559 /* Functions to save and restore alpha_return_addr_rtx. */
2561 struct machine_function
/* Save per-function machine-specific state (the cached return-address
   rtx) into P before compilation switches to a nested function.  */
2567 alpha_save_machine_status (p)
2570 struct machine_function *machine =
2571 (struct machine_function *) xmalloc (sizeof (struct machine_function));
2573 p->machine = machine;
2574 machine->ra_rtx = alpha_return_addr_rtx;
/* Restore the state saved by alpha_save_machine_status from P, freeing
   the saved record (the free itself is in an elided line; the pointer
   is cleared below).  */
2578 alpha_restore_machine_status (p)
2581 struct machine_function *machine = p->machine;
2583 alpha_return_addr_rtx = machine->ra_rtx;
2586 p->machine = (struct machine_function *)0;
2589 /* Do anything needed before RTL is emitted for each function. */
/* Resets the per-function cached rtx values and installs the
   save/restore hooks used for nested-function compilation.  */
2592 alpha_init_expanders ()
2594 alpha_return_addr_rtx = NULL_RTX;
2595 alpha_eh_epilogue_sp_ofs = NULL_RTX;
2597 /* Arrange to save and restore machine status around nested functions. */
2598 save_machine_status = alpha_save_machine_status;
2599 restore_machine_status = alpha_restore_machine_status;
2602 /* Start the ball rolling with RETURN_ADDR_RTX. */
/* Return (creating on first use) a pseudo holding the function's return
   address.  The pseudo is initialized from $26 (REG_RA) by an insn
   placed at the top of the prologue, and cached in
   alpha_return_addr_rtx for subsequent calls.  */
2605 alpha_return_addr (count, frame)
2607 rtx frame ATTRIBUTE_UNUSED;
2614 if (alpha_return_addr_rtx)
2615 return alpha_return_addr_rtx;
2617 /* No rtx yet. Invent one, and initialize it from $26 in the prologue. */
2618 alpha_return_addr_rtx = gen_reg_rtx (Pmode);
2619 init = gen_rtx_SET (VOIDmode, alpha_return_addr_rtx,
2620 gen_rtx_REG (Pmode, REG_RA));
2622 /* Emit the insn to the prologue with the other argument copies. */
2623 push_topmost_sequence ();
2624 emit_insn_after (init, get_insns ());
2625 pop_topmost_sequence ();
2627 return alpha_return_addr_rtx;
/* Return non-zero if $26 (REG_RA) is ever clobbered in the current
   function's insn stream; used to decide whether the return address
   must be saved.  Falls back to regs_ever_live when no return-address
   pseudo was created.  */
2631 alpha_ra_ever_killed ()
/* MI thunks manage the return address themselves.  */
2635 #ifdef ASM_OUTPUT_MI_THUNK
2636 if (current_function_is_thunk)
2639 if (!alpha_return_addr_rtx)
2640 return regs_ever_live[REG_RA];
/* Scan the full insn list (including pending sequences) for a set of $26.  */
2642 push_topmost_sequence ();
2644 pop_topmost_sequence ();
2646 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
2650 /* Print an operand. Recognize special options, documented below. */
2653 print_operand (file, x, code)
2663 /* Generates fp-rounding mode suffix: nothing for normal, 'c' for
2664 chopped, 'm' for minus-infinity, and 'd' for dynamic rounding
2665 mode. alpha_fprm controls which suffix is generated. */
2668 case ALPHA_FPRM_NORM:
2670 case ALPHA_FPRM_MINF:
2673 case ALPHA_FPRM_CHOP:
2676 case ALPHA_FPRM_DYN:
2683 /* Generates trap-mode suffix for instructions that accept the su
2684 suffix only (cmpt et al). */
2685 if (alpha_tp == ALPHA_TP_INSN)
2690 /* Generates trap-mode suffix for instructions that accept the
2691 v and sv suffix. The only instruction that needs this is cvtql. */
2700 case ALPHA_FPTM_SUI:
2707 /* Generates trap-mode suffix for instructions that accept the
2708 v, sv, and svi suffix. The only instruction that needs this
2720 case ALPHA_FPTM_SUI:
2721 fputs ("svi", file);
2727 /* Generates trap-mode suffix for instructions that accept the u, su,
2728 and sui suffix. This is the bulk of the IEEE floating point
2729 instructions (addt et al). */
2740 case ALPHA_FPTM_SUI:
2741 fputs ("sui", file);
2747 /* Generates trap-mode suffix for instructions that accept the sui
2748 suffix (cvtqt and cvtqs). */
2753 case ALPHA_FPTM_SU: /* cvtqt/cvtqs can't cause underflow */
2755 case ALPHA_FPTM_SUI:
2756 fputs ("sui", file);
2762 /* Generates single precision instruction suffix. */
2763 fprintf (file, "%c", (TARGET_FLOAT_VAX ? 'f' : 's'));
2767 /* Generates double precision instruction suffix. */
2768 fprintf (file, "%c", (TARGET_FLOAT_VAX ? 'g' : 't'));
2772 /* If this operand is the constant zero, write it as "$31". */
2773 if (GET_CODE (x) == REG)
2774 fprintf (file, "%s", reg_names[REGNO (x)]);
2775 else if (x == CONST0_RTX (GET_MODE (x)))
2776 fprintf (file, "$31");
2778 output_operand_lossage ("invalid %%r value");
2783 /* Similar, but for floating-point. */
2784 if (GET_CODE (x) == REG)
2785 fprintf (file, "%s", reg_names[REGNO (x)]);
2786 else if (x == CONST0_RTX (GET_MODE (x)))
2787 fprintf (file, "$f31");
2789 output_operand_lossage ("invalid %%R value");
2794 /* Write the 1's complement of a constant. */
2795 if (GET_CODE (x) != CONST_INT)
2796 output_operand_lossage ("invalid %%N value");
2798 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
2802 /* Write 1 << C, for a constant C. */
2803 if (GET_CODE (x) != CONST_INT)
2804 output_operand_lossage ("invalid %%P value");
2806 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
2810 /* Write the high-order 16 bits of a constant, sign-extended. */
2811 if (GET_CODE (x) != CONST_INT)
2812 output_operand_lossage ("invalid %%h value");
2814 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
2818 /* Write the low-order 16 bits of a constant, sign-extended. */
2819 if (GET_CODE (x) != CONST_INT)
2820 output_operand_lossage ("invalid %%L value");
2822 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
2823 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
2827 /* Write mask for ZAP insn. */
2828 if (GET_CODE (x) == CONST_DOUBLE)
2830 HOST_WIDE_INT mask = 0;
2831 HOST_WIDE_INT value;
2833 value = CONST_DOUBLE_LOW (x);
2834 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
2839 value = CONST_DOUBLE_HIGH (x);
2840 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
2843 mask |= (1 << (i + sizeof (int)));
2845 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
2848 else if (GET_CODE (x) == CONST_INT)
2850 HOST_WIDE_INT mask = 0, value = INTVAL (x);
2852 for (i = 0; i < 8; i++, value >>= 8)
2856 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
2859 output_operand_lossage ("invalid %%m value");
2863 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
2864 if (GET_CODE (x) != CONST_INT
2865 || (INTVAL (x) != 8 && INTVAL (x) != 16
2866 && INTVAL (x) != 32 && INTVAL (x) != 64))
2867 output_operand_lossage ("invalid %%M value");
2869 fprintf (file, "%s",
2870 (INTVAL (x) == 8 ? "b"
2871 : INTVAL (x) == 16 ? "w"
2872 : INTVAL (x) == 32 ? "l"
2877 /* Similar, except do it from the mask. */
2878 if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xff)
2879 fprintf (file, "b");
2880 else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffff)
2881 fprintf (file, "w");
2882 else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffffffff)
2883 fprintf (file, "l");
2884 #if HOST_BITS_PER_WIDE_INT == 32
2885 else if (GET_CODE (x) == CONST_DOUBLE
2886 && CONST_DOUBLE_HIGH (x) == 0
2887 && CONST_DOUBLE_LOW (x) == -1)
2888 fprintf (file, "l");
2889 else if (GET_CODE (x) == CONST_DOUBLE
2890 && CONST_DOUBLE_HIGH (x) == -1
2891 && CONST_DOUBLE_LOW (x) == -1)
2892 fprintf (file, "q");
2894 else if (GET_CODE (x) == CONST_INT && INTVAL (x) == -1)
2895 fprintf (file, "q");
2896 else if (GET_CODE (x) == CONST_DOUBLE
2897 && CONST_DOUBLE_HIGH (x) == 0
2898 && CONST_DOUBLE_LOW (x) == -1)
2899 fprintf (file, "q");
2902 output_operand_lossage ("invalid %%U value");
2906 /* Write the constant value divided by 8. */
2907 if (GET_CODE (x) != CONST_INT
2908 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
2909 && (INTVAL (x) & 7) != 8)
2910 output_operand_lossage ("invalid %%s value");
2912 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
2916 /* Same, except compute (64 - c) / 8 */
2918 if (GET_CODE (x) != CONST_INT
2919 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
2920 && (INTVAL (x) & 7) != 8)
2921 output_operand_lossage ("invalid %%s value");
2923 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
2926 case 'C': case 'D': case 'c': case 'd':
2927 /* Write out comparison name. */
2929 enum rtx_code c = GET_CODE (x);
2931 if (GET_RTX_CLASS (c) != '<')
2932 output_operand_lossage ("invalid %%C value");
2935 c = reverse_condition (c);
2936 else if (code == 'c')
2937 c = swap_condition (c);
2938 else if (code == 'd')
2939 c = swap_condition (reverse_condition (c));
2942 fprintf (file, "ule");
2944 fprintf (file, "ult");
2946 fprintf (file, "%s", GET_RTX_NAME (c));
2951 /* Write the divide or modulus operator. */
2952 switch (GET_CODE (x))
2955 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
2958 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
2961 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
2964 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
2967 output_operand_lossage ("invalid %%E value");
2973 /* Write "_u" for unaligned access. */
2974 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
2975 fprintf (file, "_u");
2979 if (GET_CODE (x) == REG)
2980 fprintf (file, "%s", reg_names[REGNO (x)]);
2981 else if (GET_CODE (x) == MEM)
2982 output_address (XEXP (x, 0));
2984 output_addr_const (file, x);
2988 output_operand_lossage ("invalid %%xn code");
/* Output a memory address operand for the assembler as "offset($basereg)".
   NOTE(review): this listing has lines elided (declarations, braces);
   comments below describe only the visible code.  */
2993 print_operand_address (file, addr)
2998 HOST_WIDE_INT offset = 0;
/* A top-level AND marks an unaligned access; the real address is its
   first operand.  */
3000 if (GET_CODE (addr) == AND)
3001 addr = XEXP (addr, 0);
/* Fold a (plus base const_int) into the displacement.  */
3003 if (GET_CODE (addr) == PLUS
3004 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
3006 offset = INTVAL (XEXP (addr, 1));
3007 addr = XEXP (addr, 0);
/* The base may be a plain REG, a SUBREG of a REG (adjust the regno by
   the subword), or an absolute CONST_INT address.  */
3009 if (GET_CODE (addr) == REG)
3010 basereg = REGNO (addr);
3011 else if (GET_CODE (addr) == SUBREG
3012 && GET_CODE (SUBREG_REG (addr)) == REG)
3013 basereg = REGNO (SUBREG_REG (addr)) + SUBREG_WORD (addr);
3014 else if (GET_CODE (addr) == CONST_INT)
3015 offset = INTVAL (addr);
3019 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
3020 fprintf (file, "($%d)", basereg);
3023 /* Emit RTL insns to initialize the variable parts of a trampoline at
3024 TRAMP. FNADDR is an RTX for the address of the function's pure
3025 code. CXT is an RTX for the static chain value for the function.
3027 The three offset parameters are for the individual template's
3028 layout. A JMPOFS < 0 indicates that the trampoline does not
3029 contain instructions at all.
3031 We assume here that a function will be called many more times than
3032 its address is taken (e.g., it might be passed to qsort), so we
3033 take the trouble to initialize the "hint" field in the JMP insn.
3034 Note that the hint field is PC (new) + 4 * bits 13:0. */
3037 alpha_initialize_trampoline (tramp, fnaddr, cxt, fnofs, cxtofs, jmpofs)
3038 rtx tramp, fnaddr, cxt;
3039 int fnofs, cxtofs, jmpofs;
3041 rtx temp, temp1, addr;
3042 /* VMS really uses DImode pointers in memory at this point. */
3043 enum machine_mode mode = TARGET_OPEN_VMS ? Pmode : ptr_mode;
3045 #ifdef POINTERS_EXTEND_UNSIGNED
3046 fnaddr = convert_memory_address (mode, fnaddr);
3047 cxt = convert_memory_address (mode, cxt);
3050 /* Store function address and CXT. */
3051 addr = memory_address (mode, plus_constant (tramp, fnofs));
3052 emit_move_insn (gen_rtx (MEM, mode, addr), fnaddr);
3053 addr = memory_address (mode, plus_constant (tramp, cxtofs));
3054 emit_move_insn (gen_rtx (MEM, mode, addr), cxt);
3056 /* This has been disabled since the hint only has a 32k range, and in
3057 no existing OS is the stack within 32k of the text segment. */
/* The "if (0 && ...)" below deliberately dead-codes the hint setup;
   kept for reference per the comment above.  */
3058 if (0 && jmpofs >= 0)
3060 /* Compute hint value. */
3061 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
3062 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
3064 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
3065 build_int_2 (2, 0), NULL_RTX, 1);
3066 temp = expand_and (gen_lowpart (SImode, temp), GEN_INT (0x3fff), 0);
3068 /* Merge in the hint. */
3069 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
3070 temp1 = force_reg (SImode, gen_rtx (MEM, SImode, addr));
3071 temp1 = expand_and (temp1, GEN_INT (0xffffc000), NULL_RTX);
3072 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
3074 emit_move_insn (gen_rtx (MEM, SImode, addr), temp1);
/* On systems that need it, ask libgcc to make the stack executable;
   then flush the instruction cache (imb) so the stores are visible
   to instruction fetch.  */
3077 #ifdef TRANSFER_FROM_TRAMPOLINE
3078 emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "__enable_execute_stack"),
3079 0, VOIDmode, 1, addr, Pmode);
3083 emit_insn (gen_imb ());
3086 /* Do what is necessary for `va_start'. The argument is ignored;
3087 We look at the current function to determine if stdarg or varargs
3088 is used and fill in an initial va_list. A pointer to this constructor
3092 alpha_builtin_saveregs (arglist)
3093 tree arglist ATTRIBUTE_UNUSED;
3095 rtx block, addr, dest, argsize;
3096 tree fntype = TREE_TYPE (current_function_decl);
/* stdarg if the last declared parameter type is not void (i.e. the
   prototype ends with "...").  */
3097 int stdarg = (TYPE_ARG_TYPES (fntype) != 0
3098 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
3099 != void_type_node));
3101 /* Compute the current position into the args, taking into account
3102 both registers and memory. Both of these are already included in
3105 argsize = GEN_INT (NUM_ARGS * UNITS_PER_WORD);
3107 /* For Unix, SETUP_INCOMING_VARARGS moves the starting address base up by 48,
3108 storing fp arg registers in the first 48 bytes, and the integer arg
3109 registers in the next 48 bytes. This is only done, however, if any
3110 integer registers need to be stored.
3112 If no integer registers need be stored, then we must subtract 48 in
3113 order to account for the integer arg registers which are counted in
3114 argsize above, but which are not actually stored on the stack. */
3116 if (TARGET_OPEN_VMS)
3117 addr = plus_constant (virtual_incoming_args_rtx,
3118 NUM_ARGS <= 5 + stdarg
3119 ? UNITS_PER_WORD : - 6 * UNITS_PER_WORD)
3121 addr = (NUM_ARGS <= 5 + stdarg
3122 ? plus_constant (virtual_incoming_args_rtx,
3124 : plus_constant (virtual_incoming_args_rtx,
3125 - (6 * UNITS_PER_WORD)));
3127 /* For VMS, we include the argsize, while on Unix, it's handled as
3128 a separate field. */
3129 if (TARGET_OPEN_VMS)
3130 addr = plus_constant (addr, INTVAL (argsize));
3132 addr = force_operand (addr, NULL_RTX);
3134 #ifdef POINTERS_EXTEND_UNSIGNED
3135 addr = convert_memory_address (ptr_mode, addr);
/* On VMS the address itself is the result (code elided in this
   listing); otherwise build a two-word va_list {__base, __offset}
   in a stack slot.  */
3138 if (TARGET_OPEN_VMS)
3142 /* Allocate the va_list constructor */
3143 block = assign_stack_local (BLKmode, 2 * UNITS_PER_WORD, BITS_PER_WORD);
3144 RTX_UNCHANGING_P (block) = 1;
3145 RTX_UNCHANGING_P (XEXP (block, 0)) = 1;
3147 /* Store the address of the first integer register in the __base
3150 dest = change_address (block, ptr_mode, XEXP (block, 0));
3151 emit_move_insn (dest, addr);
/* -fcheck-memory-usage support: tell the checker these bytes are
   readable/writable.  */
3153 if (current_function_check_memory_usage)
3154 emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
3156 GEN_INT (GET_MODE_SIZE (ptr_mode)),
3157 TYPE_MODE (sizetype),
3158 GEN_INT (MEMORY_USE_RW),
3159 TYPE_MODE (integer_type_node));
3161 /* Store the argsize as the __va_offset member. */
3162 dest = change_address (block, TYPE_MODE (integer_type_node),
3163 plus_constant (XEXP (block, 0),
3164 POINTER_SIZE/BITS_PER_UNIT));
3165 emit_move_insn (dest, argsize);
3167 if (current_function_check_memory_usage)
3168 emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
3170 GEN_INT (GET_MODE_SIZE
3171 (TYPE_MODE (integer_type_node))),
3172 TYPE_MODE (sizetype),
3173 GEN_INT (MEMORY_USE_RW),
3174 TYPE_MODE (integer_type_node));
3176 /* Return the address of the va_list constructor, but don't put it in a
3177 register. Doing so would fail when not optimizing and produce worse
3178 code when optimizing. */
3179 return XEXP (block, 0);
3183 /* This page contains routines that are used to determine what the function
3184 prologue and epilogue code will do and write them out. */
3186 /* Compute the size of the save area in the stack. */
3188 /* These variables are used for communication between the following functions.
3189 They indicate various things about the current function being compiled
3190 that are used to tell what kind of prologue, epilogue and procedure
3191 descriptior to generate. */
3193 /* Nonzero if we need a stack procedure. */
3194 static int vms_is_stack_procedure;
3196 /* Register number (either FP or SP) that is used to unwind the frame. */
3197 static int vms_unwind_regno;
3199 /* Register number used to save FP. We need not have one for RA since
3200 we don't modify it for register procedures. This is only defined
3201 for register frame procedures. */
3202 static int vms_save_fp_regno;
3204 /* Register number used to reference objects off our PV. */
3205 static int vms_base_regno;
3207 /* Compute register masks for saved registers. */
/* Fills *IMASKP/*FMASKP with bit masks of the integer and FP
   call-saved registers this function must save.  Bit (1L << n) of
   imask is integer reg n; bit (1L << (n-32)) of fmask is FP reg n.  */
3210 alpha_sa_mask (imaskP, fmaskP)
3211 unsigned long *imaskP;
3212 unsigned long *fmaskP;
3214 unsigned long imask = 0;
3215 unsigned long fmask = 0;
/* A thunk (ASM_OUTPUT_MI_THUNK) saves nothing.  */
3218 #ifdef ASM_OUTPUT_MI_THUNK
3219 if (!current_function_is_thunk)
/* VMS stack procedures additionally save the frame pointer.  */
3222 if (TARGET_OPEN_VMS && vms_is_stack_procedure)
3223 imask |= (1L << HARD_FRAME_POINTER_REGNUM);
3225 /* One for every register we have to save. */
3226 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3227 if (! fixed_regs[i] && ! call_used_regs[i]
3228 && regs_ever_live[i] && i != REG_RA)
3233 fmask |= (1L << (i - 32));
/* RA must be saved whenever anything else is, or whenever it may
   have been clobbered.  */
3236 if (imask || fmask || alpha_ra_ever_killed ())
3237 imask |= (1L << REG_RA);
3250 #ifdef ASM_OUTPUT_MI_THUNK
3251 if (current_function_is_thunk)
3256 /* One for every register we have to save. */
3257 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3258 if (! fixed_regs[i] && ! call_used_regs[i]
3259 && regs_ever_live[i] && i != REG_RA)
3263 if (TARGET_OPEN_VMS)
3265 /* Start by assuming we can use a register procedure if we don't
3266 make any calls (REG_RA not used) or need to save any
3267 registers and a stack procedure if we do. */
3268 vms_is_stack_procedure = sa_size != 0 || alpha_ra_ever_killed ();
3270 /* Decide whether to refer to objects off our PV via FP or PV.
3271 If we need FP for something else or if we receive a nonlocal
3272 goto (which expects PV to contain the value), we must use PV.
3273 Otherwise, start by assuming we can use FP. */
3274 vms_base_regno = (frame_pointer_needed
3275 || current_function_has_nonlocal_label
3276 || vms_is_stack_procedure
3277 || current_function_outgoing_args_size
3278 ? REG_PV : HARD_FRAME_POINTER_REGNUM);
3280 /* If we want to copy PV into FP, we need to find some register
3281 in which to save FP. */
3283 vms_save_fp_regno = -1;
3284 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
3285 for (i = 0; i < 32; i++)
3286 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
3287 vms_save_fp_regno = i;
3289 if (vms_save_fp_regno == -1)
3290 vms_base_regno = REG_PV, vms_is_stack_procedure = 1;
3292 /* Stack unwinding should be done via FP unless we use it for PV. */
3293 vms_unwind_regno = (vms_base_regno == REG_PV
3294 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
3296 /* If this is a stack procedure, allow space for saving FP and RA. */
3297 if (vms_is_stack_procedure)
3302 /* If some registers were saved but not RA, RA must also be saved,
3303 so leave space for it. */
3304 if (sa_size != 0 || alpha_ra_ever_killed ())
3307 /* Our size must be even (multiple of 16 bytes). */
/* Bytes reserved for saving the PV: 8 for a VMS stack procedure,
   otherwise 0.  */
3316 alpha_pv_save_size ()
3319 return vms_is_stack_procedure ? 8 : 0;
3326 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
/* VALID_MACHINE_DECL_ATTRIBUTE hook for VMS: accept the "overlaid"
   attribute provided it takes no arguments.  */
3330 vms_valid_decl_attribute_p (decl, attributes, identifier, args)
3331 tree decl ATTRIBUTE_UNUSED;
3332 tree attributes ATTRIBUTE_UNUSED;
3336 if (is_attribute_p ("overlaid", identifier))
3337 return (args == NULL_TREE);
/* Return nonzero if the current function needs its GP ($29) loaded,
   i.e. it contains an LDSYM or JSR type insn.  */
3342 alpha_does_function_need_gp ()
3346 /* We never need a GP for Windows/NT or VMS. */
3347 if (TARGET_WINDOWS_NT || TARGET_OPEN_VMS)
3350 #ifdef TARGET_PROFILING_NEEDS_GP
3355 #ifdef ASM_OUTPUT_MI_THUNK
3356 if (current_function_is_thunk)
3360 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
3361 Even if we are a static function, we still need to do this in case
3362 our address is taken and passed to something like qsort. */
/* Walk the topmost insn sequence looking for real insns whose
   attribute type is LDSYM or JSR.  */
3364 push_topmost_sequence ();
3365 insn = get_insns ();
3366 pop_topmost_sequence ();
3368 for (; insn; insn = NEXT_INSN (insn))
3369 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3370 && GET_CODE (PATTERN (insn)) != USE
3371 && GET_CODE (PATTERN (insn)) != CLOBBER)
3373 enum attr_type type = get_attr_type (insn);
3374 if (type == TYPE_LDSYM || type == TYPE_JSR)
3381 /* Write a version stamp. Don't write anything if we are running as a
3382 cross-compiler. Otherwise, use the versions in /usr/include/stamp.h. */
3389 alpha_write_verstamp (file)
3390 FILE *file ATTRIBUTE_UNUSED;
/* MS_STAMP/LS_STAMP come from stamp.h and exist only native;
   elided preprocessor guards in this listing control emission.  */
3393 fprintf (file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
3397 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
/* Collects the pending sequence, marks each emitted insn
   RTX_FRAME_RELATED_P (for DWARF2 CFI), emits it, and returns the
   emitted insn.  Used only via the FRP macro below.  */
3401 set_frame_related_p ()
3403 rtx seq = gen_sequence ();
3406 if (GET_CODE (seq) == SEQUENCE)
3408 int i = XVECLEN (seq, 0);
3410 RTX_FRAME_RELATED_P (XVECEXP (seq, 0, i)) = 1;
3411 return emit_insn (seq);
3415 seq = emit_insn (seq);
3416 RTX_FRAME_RELATED_P (seq) = 1;
/* FRP wraps an emission expression in start_sequence/…, marking the
   result frame-related.  */
3421 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
3423 /* Write function prologue. */
3425 /* On vms we have two kinds of functions:
3427 - stack frame (PROC_STACK)
3428 these are 'normal' functions with local vars and which are
3429 calling other functions
3430 - register frame (PROC_REGISTER)
3431 keeps all data in registers, needs no stack
3433 We must pass this to the assembler so it can generate the
3434 proper pdsc (procedure descriptor)
3435 This is done with the '.pdesc' command.
3437 On not-vms, we don't really differentiate between the two, as we can
3438 simply allocate stack without saving registers. */
/* Expand the RTL prologue: probe and allocate the stack frame, save
   call-saved registers, and establish FP/base registers per ABI
   (OSF/1, VMS, NT).  NOTE(review): this listing omits lines (braces,
   case arms); comments cover only visible code.  */
3441 alpha_expand_prologue ()
3443 /* Registers to save. */
3444 unsigned long imask = 0;
3445 unsigned long fmask = 0;
3446 /* Stack space needed for pushing registers clobbered by us. */
3447 HOST_WIDE_INT sa_size;
3448 /* Complete stack size needed. */
3449 HOST_WIDE_INT frame_size;
3450 /* Offset from base reg to register save area. */
3451 HOST_WIDE_INT reg_offset;
3455 sa_size = alpha_sa_size ();
3457 frame_size = get_frame_size ();
3458 if (TARGET_OPEN_VMS)
3459 frame_size = ALPHA_ROUND (sa_size
3460 + (vms_is_stack_procedure ? 8 : 0)
3462 + current_function_pretend_args_size);
3464 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
3466 + ALPHA_ROUND (frame_size
3467 + current_function_pretend_args_size));
3469 if (TARGET_OPEN_VMS)
3472 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
3474 alpha_sa_mask (&imask, &fmask);
3476 /* Adjust the stack by the frame size. If the frame size is > 4096
3477 bytes, we need to be sure we probe somewhere in the first and last
3478 4096 bytes (we can probably get away without the latter test) and
3479 every 8192 bytes in between. If the frame size is > 32768, we
3480 do this in a loop. Otherwise, we generate the explicit probe
3483 Note that we are only allowed to adjust sp once in the prologue. */
3485 if (frame_size <= 32768)
3487 if (frame_size > 4096)
3492 emit_insn (gen_probe_stack (GEN_INT (-probed)));
3493 while ((probed += 8192) < frame_size);
3495 /* We only have to do this probe if we aren't saving registers. */
3496 if (sa_size == 0 && probed + 4096 < frame_size)
3497 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
3500 if (frame_size != 0)
3502 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3503 GEN_INT (-frame_size))));
3508 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
3509 number of 8192 byte blocks to probe. We then probe each block
3510 in the loop and then set SP to the proper location. If the
3511 amount remaining is > 4096, we have to do one more probe if we
3512 are not saving any registers. */
3514 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
3515 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
3516 rtx ptr = gen_rtx_REG (DImode, 22);
3517 rtx count = gen_rtx_REG (DImode, 23);
3520 emit_move_insn (count, GEN_INT (blocks));
3521 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
3523 /* Because of the difficulty in emitting a new basic block this
3524 late in the compilation, generate the loop as a single insn. */
3525 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
3527 if (leftover > 4096 && sa_size == 0)
3529 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
3530 MEM_VOLATILE_P (last) = 1;
3531 emit_move_insn (last, const0_rtx);
3534 if (TARGET_WINDOWS_NT)
3536 /* For NT stack unwind (done by 'reverse execution'), it's
3537 not OK to take the result of a loop, even though the value
3538 is already in ptr, so we reload it via a single operation
3539 and subtract it to sp.
3541 Yes, that's correct -- we have to reload the whole constant
3542 into a temporary via ldah+lda then subtract from sp. To
3543 ensure we get ldah+lda, we use a special pattern. */
/* Split frame_size into sign-adjusted 16-bit lo and the hi
   remainder, matching the ldah+lda encoding.  */
3545 HOST_WIDE_INT lo, hi;
3546 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
3547 hi = frame_size - lo;
3549 emit_move_insn (ptr, GEN_INT (hi));
3550 emit_insn (gen_nt_lda (ptr, GEN_INT (lo)));
3551 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
3556 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
3557 GEN_INT (-leftover)));
3560 /* This alternative is special, because the DWARF code cannot
3561 possibly intuit through the loop above. So we invent this
3562 note it looks at instead. */
3563 RTX_FRAME_RELATED_P (seq) = 1;
3565 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3566 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
3567 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
3568 GEN_INT (-frame_size))),
3572 /* Cope with very large offsets to the register save area. */
/* If reg_offset won't fit a 16-bit displacement, bias $24 so all
   save-area accesses stay in range.  */
3573 sa_reg = stack_pointer_rtx;
3574 if (reg_offset + sa_size > 0x8000)
3576 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
3579 if (low + sa_size <= 0x8000)
3580 bias = reg_offset - low, reg_offset = low;
3582 bias = reg_offset, reg_offset = 0;
3584 sa_reg = gen_rtx_REG (DImode, 24);
3585 FRP (emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, GEN_INT (bias))));
3588 /* Save regs in stack order. Beginning with VMS PV. */
3589 if (TARGET_OPEN_VMS && vms_is_stack_procedure)
3591 mem = gen_rtx_MEM (DImode, stack_pointer_rtx);
3592 MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
3593 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_PV)));
3596 /* Save register RA next. */
3597 if (imask & (1L << REG_RA))
3599 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
3600 MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
3601 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
3602 imask &= ~(1L << REG_RA);
3606 /* Now save any other registers required to be saved. */
3607 for (i = 0; i < 32; i++)
3608 if (imask & (1L << i))
3610 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
3611 MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
3612 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, i)));
/* Same loop shape for FP regs; hard regs 32..63, saved in DFmode.  */
3616 for (i = 0; i < 32; i++)
3617 if (fmask & (1L << i))
3619 mem = gen_rtx_MEM (DFmode, plus_constant (sa_reg, reg_offset));
3620 MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
3621 FRP (emit_move_insn (mem, gen_rtx_REG (DFmode, i+32)));
3625 if (TARGET_OPEN_VMS)
3627 if (!vms_is_stack_procedure)
3629 /* Register frame procedures fave the fp. */
3630 FRP (emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
3631 hard_frame_pointer_rtx));
3634 if (vms_base_regno != REG_PV)
3635 FRP (emit_move_insn (gen_rtx_REG (DImode, vms_base_regno),
3636 gen_rtx_REG (DImode, REG_PV)));
3638 if (vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
3640 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
3643 /* If we have to allocate space for outgoing args, do it now. */
3644 if (current_function_outgoing_args_size != 0)
3646 FRP (emit_move_insn (stack_pointer_rtx,
3647 plus_constant (hard_frame_pointer_rtx,
3648 - ALPHA_ROUND (current_function_outgoing_args_size))));
3653 /* If we need a frame pointer, set it from the stack pointer. */
3654 if (frame_pointer_needed)
3656 if (TARGET_CAN_FAULT_IN_PROLOGUE)
3657 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
3660 /* This must always be the last instruction in the
3661 prologue, thus we emit a special move + clobber. */
3662 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
3663 stack_pointer_rtx, sa_reg)));
3668 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
3669 the prologue, for exception handling reasons, we cannot do this for
3670 any insn that might fault. We could prevent this for mems with a
3671 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
3672 have to prevent all such scheduling with a blockage.
3674 Linux, on the other hand, never bothered to implement OSF/1's
3675 exception handling, and so doesn't care about such things. Anyone
3676 planning to use dwarf2 frame-unwind info can also omit the blockage. */
3678 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
3679 emit_insn (gen_blockage ());
3682 /* Output the textual info surrounding the prologue. */
/* Emit the assembler text surrounding the function prologue: .ent,
   entry label, .frame/.mask/.fmask directives, GP load, and VMS
   .pdesc/linkage bookkeeping.  NOTE(review): lines are elided in this
   listing; comments describe only visible code.  */
3685 alpha_start_function (file, fnname, decl)
3688 tree decl ATTRIBUTE_UNUSED;
3690 unsigned long imask = 0;
3691 unsigned long fmask = 0;
3692 /* Stack space needed for pushing registers clobbered by us. */
3693 HOST_WIDE_INT sa_size;
3694 /* Complete stack size needed. */
3695 HOST_WIDE_INT frame_size;
3696 /* Offset from base reg to register save area. */
3697 HOST_WIDE_INT reg_offset;
/* +6 covers the VMS "..en" suffix plus the NUL.  */
3698 char *entry_label = (char *) alloca (strlen (fnname) + 6);
3701 sa_size = alpha_sa_size ();
/* Frame-size computation mirrors alpha_expand_prologue; the two must
   stay in sync.  */
3703 frame_size = get_frame_size ();
3704 if (TARGET_OPEN_VMS)
3705 frame_size = ALPHA_ROUND (sa_size
3706 + (vms_is_stack_procedure ? 8 : 0)
3708 + current_function_pretend_args_size);
3710 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
3712 + ALPHA_ROUND (frame_size
3713 + current_function_pretend_args_size));
3715 if (TARGET_OPEN_VMS)
3718 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
3720 alpha_sa_mask (&imask, &fmask);
3722 /* Ecoff can handle multiple .file directives, so put out file and lineno.
3723 We have to do that before the .ent directive as we cannot switch
3724 files within procedures with native ecoff because line numbers are
3725 linked to procedure descriptors.
3726 Outputting the lineno helps debugging of one line functions as they
3727 would otherwise get no line number at all. Please note that we would
3728 like to put out last_linenum from final.c, but it is not accessible. */
3730 if (write_symbols == SDB_DEBUG)
3732 ASM_OUTPUT_SOURCE_FILENAME (file,
3733 DECL_SOURCE_FILE (current_function_decl));
3734 if (debug_info_level != DINFO_LEVEL_TERSE)
3735 ASM_OUTPUT_SOURCE_LINE (file,
3736 DECL_SOURCE_LINE (current_function_decl));
3739 /* Issue function start and label. */
3740 if (TARGET_OPEN_VMS || !flag_inhibit_size_directive)
3742 fputs ("\t.ent ", file);
3743 assemble_name (file, fnname);
/* VMS entry point label is "<fnname>..en".  */
3747 strcpy (entry_label, fnname);
3748 if (TARGET_OPEN_VMS)
3749 strcat (entry_label, "..en");
3750 ASM_OUTPUT_LABEL (file, entry_label);
3751 inside_function = TRUE;
3753 if (TARGET_OPEN_VMS)
3754 fprintf (file, "\t.base $%d\n", vms_base_regno);
3756 if (!TARGET_OPEN_VMS && TARGET_IEEE_CONFORMANT
3757 && !flag_inhibit_size_directive)
3759 /* Set flags in procedure descriptor to request IEEE-conformant
3760 math-library routines. The value we set it to is PDSC_EXC_IEEE
3761 (/usr/include/pdsc.h). */
3762 fputs ("\t.eflag 48\n", file);
3765 /* Set up offsets to alpha virtual arg/local debugging pointer. */
3766 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
3767 alpha_arg_offset = -frame_size + 48;
3769 /* Describe our frame. If the frame size is larger than an integer,
3770 print it as zero to avoid an assembler error. We won't be
3771 properly describing such a frame, but that's the best we can do. */
3772 if (TARGET_OPEN_VMS)
3774 fprintf (file, "\t.frame $%d,", vms_unwind_regno);
3775 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
3776 frame_size >= (1l << 31) ? 0 : frame_size);
3777 fputs (",$26,", file);
3778 fprintf (file, HOST_WIDE_INT_PRINT_DEC, reg_offset);
3781 else if (!flag_inhibit_size_directive)
3783 fprintf (file, "\t.frame $%d,",
3784 (frame_pointer_needed
3785 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM));
3786 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
3787 frame_size >= (1l << 31) ? 0 : frame_size);
3788 fprintf (file, ",$26,%d\n", current_function_pretend_args_size);
3791 /* Describe which registers were spilled. */
3792 if (TARGET_OPEN_VMS)
3795 /* ??? Does VMS care if mask contains ra? The old code did'nt
3796 set it, so I don't here. */
3797 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1L << REG_RA));
3799 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
3800 if (!vms_is_stack_procedure)
3801 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
3803 else if (!flag_inhibit_size_directive)
3807 fprintf (file, "\t.mask 0x%lx,", imask);
3808 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
3809 frame_size >= (1l << 31) ? 0 : reg_offset - frame_size);
3812 for (i = 0; i < 32; ++i)
3813 if (imask & (1L << i))
3819 fprintf (file, "\t.fmask 0x%lx,", fmask);
3820 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
3821 frame_size >= (1l << 31) ? 0 : reg_offset - frame_size);
3826 /* Emit GP related things. It is rather unfortunate about the alignment
3827 issues surrounding a CODE_LABEL that forces us to do the label in
3829 if (!TARGET_OPEN_VMS && !TARGET_WINDOWS_NT)
3831 alpha_function_needs_gp = alpha_does_function_need_gp ();
3832 if (alpha_function_needs_gp)
3833 fputs ("\tldgp $29,0($27)\n", file);
3836 assemble_name (file, fnname);
3837 fputs ("..ng:\n", file);
3841 /* Ifdef'ed cause readonly_section and link_section are only
/* VMS-only: emit the procedure name string and .pdesc descriptor,
   and register the linkage entry.  */
3843 readonly_section ();
3844 fprintf (file, "\t.align 3\n");
3845 assemble_name (file, fnname); fputs ("..na:\n", file);
3846 fputs ("\t.ascii \"", file);
3847 assemble_name (file, fnname);
3848 fputs ("\\0\"\n", file);
3851 fprintf (file, "\t.align 3\n");
3852 fputs ("\t.name ", file);
3853 assemble_name (file, fnname);
3854 fputs ("..na\n", file);
3855 ASM_OUTPUT_LABEL (file, fnname);
3856 fprintf (file, "\t.pdesc ");
3857 assemble_name (file, fnname);
3858 fprintf (file, "..en,%s\n", vms_is_stack_procedure ? "stack" : "reg");
3859 alpha_need_linkage (fnname, 1);
3864 /* Emit the .prologue note at the scheduled end of the prologue. */
/* Emit the .prologue note at the scheduled end of the prologue; the
   argument on non-VMS/NT says whether GP ($29) was loaded.  */
3867 output_end_prologue (file)
3870 if (TARGET_OPEN_VMS)
3871 fputs ("\t.prologue\n", file);
3872 else if (TARGET_WINDOWS_NT)
3873 fputs ("\t.prologue 0\n", file);
3874 else if (!flag_inhibit_size_directive)
3875 fprintf (file, "\t.prologue %d\n", alpha_function_needs_gp);
3878 /* Write function epilogue. */
3880 /* ??? At some point we will want to support full unwind, and so will
3881 need to mark the epilogue as well. At the moment, we just confuse
3884 #define FRP(exp) exp
/* Expand the RTL epilogue: restore saved registers, deallocate the
   frame, restore FP, and return.  Must mirror alpha_expand_prologue's
   layout decisions.  NOTE(review): lines are elided in this listing;
   comments cover only visible code.  */
3887 alpha_expand_epilogue ()
3889 /* Registers to save. */
3890 unsigned long imask = 0;
3891 unsigned long fmask = 0;
3892 /* Stack space needed for pushing registers clobbered by us. */
3893 HOST_WIDE_INT sa_size;
3894 /* Complete stack size needed. */
3895 HOST_WIDE_INT frame_size;
3896 /* Offset from base reg to register save area. */
3897 HOST_WIDE_INT reg_offset;
3898 int fp_is_frame_pointer, fp_offset;
3899 rtx sa_reg, sa_reg_exp = NULL;
3900 rtx sp_adj1, sp_adj2, mem;
3903 sa_size = alpha_sa_size ();
3905 frame_size = get_frame_size ();
3906 if (TARGET_OPEN_VMS)
3907 frame_size = ALPHA_ROUND (sa_size
3908 + (vms_is_stack_procedure ? 8 : 0)
3910 + current_function_pretend_args_size);
3912 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
3914 + ALPHA_ROUND (frame_size
3915 + current_function_pretend_args_size));
3917 if (TARGET_OPEN_VMS)
3920 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
3922 alpha_sa_mask (&imask, &fmask);
3924 fp_is_frame_pointer = ((TARGET_OPEN_VMS && vms_is_stack_procedure)
3925 || (!TARGET_OPEN_VMS && frame_pointer_needed));
3929 /* If we have a frame pointer, restore SP from it. */
3930 if ((TARGET_OPEN_VMS
3931 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
3932 || (!TARGET_OPEN_VMS && frame_pointer_needed))
3934 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
3937 /* Cope with very large offsets to the register save area. */
/* Same biasing scheme as the prologue, but using $22 as scratch.  */
3938 sa_reg = stack_pointer_rtx;
3939 if (reg_offset + sa_size > 0x8000)
3941 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
3944 if (low + sa_size <= 0x8000)
3945 bias = reg_offset - low, reg_offset = low;
3947 bias = reg_offset, reg_offset = 0;
3949 sa_reg = gen_rtx_REG (DImode, 22);
3950 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
3952 FRP (emit_move_insn (sa_reg, sa_reg_exp));
3955 /* Restore registers in order, excepting a true frame pointer. */
/* RA is restored here unless an EH epilogue already handled SP.  */
3957 if (! alpha_eh_epilogue_sp_ofs)
3959 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
3960 MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
3961 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
3964 imask &= ~(1L << REG_RA);
3966 for (i = 0; i < 32; ++i)
3967 if (imask & (1L << i))
/* A true frame pointer must be restored last (after the SP
   adjustment); just record its save-slot offset here.  */
3969 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
3970 fp_offset = reg_offset;
3973 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
3974 MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
3975 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
3980 for (i = 0; i < 32; ++i)
3981 if (fmask & (1L << i))
3983 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
3984 MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
3985 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
3990 if (frame_size || alpha_eh_epilogue_sp_ofs)
3992 sp_adj1 = stack_pointer_rtx;
3994 if (alpha_eh_epilogue_sp_ofs)
3996 sp_adj1 = gen_rtx_REG (DImode, 23);
3997 emit_move_insn (sp_adj1,
3998 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
3999 alpha_eh_epilogue_sp_ofs));
4002 /* If the stack size is large, begin computation into a temporary
4003 register so as not to interfere with a potential fp restore,
4004 which must be consecutive with an SP restore. */
4005 if (frame_size < 32768)
4006 sp_adj2 = GEN_INT (frame_size);
4007 else if (frame_size < 0x40007fffL)
4009 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
4011 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
4012 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
4016 sp_adj1 = gen_rtx_REG (DImode, 23);
4017 FRP (emit_move_insn (sp_adj1, sp_adj2));
4019 sp_adj2 = GEN_INT (low);
4023 rtx tmp = gen_rtx_REG (DImode, 23);
4024 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3));
4027 /* We can't drop new things to memory this late, afaik,
4028 so build it up by pieces. */
4029 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
4030 -(frame_size < 0)));
4036 /* From now on, things must be in order. So emit blockages. */
4038 /* Restore the frame pointer. */
4039 if (fp_is_frame_pointer)
4041 emit_insn (gen_blockage ());
4042 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, fp_offset));
4043 MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
4044 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
4046 else if (TARGET_OPEN_VMS)
4048 emit_insn (gen_blockage ());
4049 FRP (emit_move_insn (hard_frame_pointer_rtx,
4050 gen_rtx_REG (DImode, vms_save_fp_regno)));
4053 /* Restore the stack pointer. */
4054 emit_insn (gen_blockage ());
4055 FRP (emit_move_insn (stack_pointer_rtx,
4056 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
/* VMS register procedures restore FP from its save register even
   when no frame was allocated.  */
4060 if (TARGET_OPEN_VMS && !vms_is_stack_procedure)
4062 emit_insn (gen_blockage ());
4063 FRP (emit_move_insn (hard_frame_pointer_rtx,
4064 gen_rtx_REG (DImode, vms_save_fp_regno)));
4069 emit_jump_insn (gen_return_internal ());
4072 /* Output the rest of the textual info surrounding the epilogue. */
/* Close out assembly output for a function: emit ".end NAME" (unless
   size directives are suppressed) and mark the function's SYMBOL_REF
   as locally defined so later calls can use a local calling sequence.
   NOTE(review): elided listing fragment -- return type, parameter
   declarations, and braces are missing from this view.  */
4075 alpha_end_function (file, fnname, decl)
4078 tree decl ATTRIBUTE_UNUSED;
4080 /* End the function. */
4081 if (!flag_inhibit_size_directive)
4083 fputs ("\t.end ", file);
4084 assemble_name (file, fnname);
/* No longer inside a function body for debug-output purposes.  */
4087 inside_function = FALSE;
4089 /* Show that we know this function if it is called again.
4091 Don't do this for global functions in object files destined for a
4092 shared library because the function may be overridden by the application
4093 or other libraries. Similarly, don't do this for weak functions. */
4095 if (!DECL_WEAK (current_function_decl)
4096 && (!flag_pic || !TREE_PUBLIC (current_function_decl)))
4097 SYMBOL_REF_FLAG (XEXP (DECL_RTL (current_function_decl), 0)) = 1;
4100 /* Debugging support. */
4104 /* Count the number of sdb related labels that are generated (to find block
4105 start and end boundaries). */
4107 int sdb_label_count = 0;
4109 /* Next label # for each statement. */
4111 static int sym_lineno = 0;
4113 /* Count the number of .file directives, so that .loc is up to date. */
4115 static int num_source_filenames = 0;
4117 /* Name of the file containing the current function. */
4119 static const char *current_function_file = "";
4121 /* Offsets to alpha virtual arg/local debugging pointers. */
4123 long alpha_arg_offset;
4124 long alpha_auto_offset;
4126 /* Emit a new filename to a stream. */
/* alpha_output_filename: record source file NAME on STREAM, either as
   an ECOFF ".file"/".loc" directive or, when write_symbols ==
   DBX_DEBUG, as a stabs N_SOL entry.  NOTE(review): elided listing
   fragment -- the parameter declarations and several control-flow
   lines are missing from this view.  */
4129 alpha_output_filename (stream, name)
4133 static int first_time = TRUE;
4134 char ltext_label_name[100];
/* First-time / mips-tfile path: emit a numbered ".file" directive.  */
4139 ++num_source_filenames;
4140 current_function_file = name;
4141 fprintf (stream, "\t.file\t%d ", num_source_filenames);
4142 output_quoted_string (stream, name);
4143 fprintf (stream, "\n");
4144 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
4145 fprintf (stream, "\t#@stabs\n");
4148 else if (write_symbols == DBX_DEBUG)
4150 ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext", 0);
4151 fprintf (stream, "%s ", ASM_STABS_OP);
4152 output_quoted_string (stream, name);
/* FIXED: "&ltext_label_name[1]" had been corrupted to
   "<ext_label_name[1]" (an HTML-entity mangling of "&lt").  The [1]
   skips the leading marker character that
   ASM_GENERATE_INTERNAL_LABEL prepends to the label name.  */
4153 fprintf (stream, ",%d,0,0,%s\n", N_SOL, &ltext_label_name[1]);
4156 else if (name != current_function_file
4157 && strcmp (name, current_function_file) != 0)
/* The filename changed mid-function; announce it.  */
4159 if (inside_function && ! TARGET_GAS)
4160 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
4163 ++num_source_filenames;
4164 current_function_file = name;
4165 fprintf (stream, "\t.file\t%d ", num_source_filenames);
4168 output_quoted_string (stream, name);
4169 fprintf (stream, "\n");
4173 /* Emit a linenumber to a stream. */
/* Emits a stabs N_SLINE pair for DBX_DEBUG, otherwise an ECOFF ".loc"
   keyed to the current ".file" number.  NOTE(review): elided listing
   fragment; parameter declarations and braces are missing here.  */
4176 alpha_output_lineno (stream, line)
4180 if (write_symbols == DBX_DEBUG)
4182 /* mips-tfile doesn't understand .stabd directives. */
4184 fprintf (stream, "$LM%d:\n\t%s %d,0,%d,$LM%d\n",
4185 sym_lineno, ASM_STABN_OP, N_SLINE, line, sym_lineno);
4188 fprintf (stream, "\n\t.loc\t%d %d\n", num_source_filenames, line);
4191 /* Structure to show the current status of registers and memory. */
/* NOTE(review): elided listing fragment -- in the full source these
   bitfields live in an inner struct instantiated twice ("used" and
   "defd"), as the member accesses in summarize_insn below suggest.  */
4193 struct shadow_summary
4196 unsigned long i : 31; /* Mask of int regs */
4197 unsigned long fp : 31; /* Mask of fp regs */
4198 unsigned long mem : 1; /* mem == imem | fpmem */
4202 static void summarize_insn PROTO((rtx, struct shadow_summary *, int));
4203 static void alpha_handle_trap_shadows PROTO((rtx));
4205 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
4206 to the summary structure. SET is nonzero if the insn is setting the
4207 object, otherwise zero. */
/* NOTE(review): elided listing fragment -- case labels, braces, and
   local declarations are missing between the visible lines.  */
4210 summarize_insn (x, sum, set)
4212 struct shadow_summary *sum;
4221 switch (GET_CODE (x))
4223 /* ??? Note that this case would be incorrect if the Alpha had a
4224 ZERO_EXTRACT in SET_DEST. */
/* SET: source is a use, destination is a def.  */
4226 summarize_insn (SET_SRC (x), sum, 0);
4227 summarize_insn (SET_DEST (x), sum, 1);
4231 summarize_insn (XEXP (x, 0), sum, 1);
4235 summarize_insn (XEXP (x, 0), sum, 0);
/* ASM_OPERANDS: all inputs are uses.  */
4239 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
4240 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
4244 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4245 summarize_insn (XVECEXP (x, 0, i), sum, 0);
4249 summarize_insn (SUBREG_REG (x), sum, 0);
/* REG: record the register in the int or fp mask.  Regs 31 and 63
   are the hardwired zero registers and are ignored.  */
4254 int regno = REGNO (x);
4255 unsigned long mask = 1UL << (regno % 32);
4257 if (regno == 31 || regno == 63)
4263 sum->defd.i |= mask;
4265 sum->defd.fp |= mask;
4270 sum->used.i |= mask;
4272 sum->used.fp |= mask;
4283 /* Find the regs used in memory address computation: */
4284 summarize_insn (XEXP (x, 0), sum, 0);
/* Constants touch nothing.  */
4287 case CONST_INT: case CONST_DOUBLE:
4288 case SYMBOL_REF: case LABEL_REF: case CONST:
4291 /* Handle common unary and binary ops for efficiency. */
4292 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
4293 case MOD: case UDIV: case UMOD: case AND: case IOR:
4294 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
4295 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
4296 case NE: case EQ: case GE: case GT: case LE:
4297 case LT: case GEU: case GTU: case LEU: case LTU:
4298 summarize_insn (XEXP (x, 0), sum, 0);
4299 summarize_insn (XEXP (x, 1), sum, 0);
4302 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
4303 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
4304 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
4305 case SQRT: case FFS:
4306 summarize_insn (XEXP (x, 0), sum, 0);
/* Default: walk the generic rtx format string.  */
4310 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
4311 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4312 switch (format_ptr[i])
4315 summarize_insn (XEXP (x, i), sum, 0);
4319 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4320 summarize_insn (XVECEXP (x, i, j), sum, 0);
4332 /* Ensure a sufficient number of `trapb' insns are in the code when
4333 the user requests code with a trap precision of functions or
4336 In naive mode, when the user requests a trap-precision of
4337 "instruction", a trapb is needed after every instruction that may
4338 generate a trap. This ensures that the code is resumption safe but
4341 When optimizations are turned on, we delay issuing a trapb as long
4342 as possible. In this context, a trap shadow is the sequence of
4343 instructions that starts with a (potentially) trap generating
4344 instruction and extends to the next trapb or call_pal instruction
4345 (but GCC never generates call_pal by itself). We can delay (and
4346 therefore sometimes omit) a trapb subject to the following
4349 (a) On entry to the trap shadow, if any Alpha register or memory
4350 location contains a value that is used as an operand value by some
4351 instruction in the trap shadow (live on entry), then no instruction
4352 in the trap shadow may modify the register or memory location.
4354 (b) Within the trap shadow, the computation of the base register
4355 for a memory load or store instruction may not involve using the
4356 result of an instruction that might generate an UNPREDICTABLE
4359 (c) Within the trap shadow, no register may be used more than once
4360 as a destination register. (This is to make life easier for the
4363 (d) The trap shadow may not include any branch instructions. */
/* Walk the insn stream and insert "trapb" instructions wherever one
   of the trap-shadow invariants (a)-(d) documented above would be
   violated, marking the group boundaries with TImode.  NOTE(review):
   elided listing fragment -- braces, several case labels, and the
   trapb-emission control flow between the visible lines are missing
   from this view.  */
4366 alpha_handle_trap_shadows (insns)
4369 struct shadow_summary shadow;
4370 int trap_pending, exception_nesting;
4374 exception_nesting = 0;
4377 shadow.used.mem = 0;
4378 shadow.defd = shadow.used;
4380 for (i = insns; i ; i = NEXT_INSN (i))
4382 if (GET_CODE (i) == NOTE)
4384 switch (NOTE_LINE_NUMBER (i))
4386 case NOTE_INSN_EH_REGION_BEG:
4387 exception_nesting++;
4392 case NOTE_INSN_EH_REGION_END:
4393 exception_nesting--;
4398 case NOTE_INSN_EPILOGUE_BEG:
4399 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
4404 else if (trap_pending)
/* A trap shadow is open; decide whether this insn forces a trapb.  */
4406 if (alpha_tp == ALPHA_TP_FUNC)
4408 if (GET_CODE (i) == JUMP_INSN
4409 && GET_CODE (PATTERN (i)) == RETURN)
4412 else if (alpha_tp == ALPHA_TP_INSN)
4416 struct shadow_summary sum;
4421 sum.defd = sum.used;
4423 switch (GET_CODE (i))
4426 /* Annoyingly, get_attr_trap will abort on these. */
4427 if (GET_CODE (PATTERN (i)) == USE
4428 || GET_CODE (PATTERN (i)) == CLOBBER)
4431 summarize_insn (PATTERN (i), &sum, 0);
4433 if ((sum.defd.i & shadow.defd.i)
4434 || (sum.defd.fp & shadow.defd.fp))
4436 /* (c) would be violated */
4440 /* Combine shadow with summary of current insn: */
4441 shadow.used.i |= sum.used.i;
4442 shadow.used.fp |= sum.used.fp;
4443 shadow.used.mem |= sum.used.mem;
4444 shadow.defd.i |= sum.defd.i;
4445 shadow.defd.fp |= sum.defd.fp;
4446 shadow.defd.mem |= sum.defd.mem;
4448 if ((sum.defd.i & shadow.used.i)
4449 || (sum.defd.fp & shadow.used.fp)
4450 || (sum.defd.mem & shadow.used.mem))
4452 /* (a) would be violated (also takes care of (b)) */
4453 if (get_attr_trap (i) == TRAP_YES
4454 && ((sum.defd.i & sum.used.i)
4455 || (sum.defd.fp & sum.used.fp)))
/* Emit the barrier and mark both insns as group boundaries.  */
4474 n = emit_insn_before (gen_trapb (), i);
4475 PUT_MODE (n, TImode);
4476 PUT_MODE (i, TImode);
4480 shadow.used.mem = 0;
4481 shadow.defd = shadow.used;
/* No shadow open: see if this insn starts one.  */
4486 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
4487 && GET_CODE (i) == INSN
4488 && GET_CODE (PATTERN (i)) != USE
4489 && GET_CODE (PATTERN (i)) != CLOBBER
4490 && get_attr_trap (i) == TRAP_YES)
4492 if (optimize && !trap_pending)
4493 summarize_insn (PATTERN (i), &shadow, 0);
4500 /* Alpha can only issue instruction groups simultaneously if they are
4501 suitably aligned. This is very processor-specific. */
/* NOTE(review): elided listing fragment -- the enumerator lists and
   closing braces of these two pipe enums are missing from this view.  */
4503 enum alphaev4_pipe {
4510 enum alphaev5_pipe {
/* Forward declarations for the insn-group alignment machinery.  */
4521 static enum alphaev4_pipe alphaev4_insn_pipe PROTO((rtx));
4522 static enum alphaev5_pipe alphaev5_insn_pipe PROTO((rtx));
4523 static rtx alphaev4_next_group PROTO((rtx, int*, int*));
4524 static rtx alphaev5_next_group PROTO((rtx, int*, int*));
4525 static rtx alphaev4_next_nop PROTO((int*));
4526 static rtx alphaev5_next_nop PROTO((int*));
4528 static void alpha_align_insns
4529 PROTO((rtx, int, rtx (*)(rtx, int*, int*), rtx (*)(int*), int));
/* Classify INSN into an EV4 issue pipe based on its attribute type.
   NOTE(review): elided fragment; the switch body is missing here.  */
4531 static enum alphaev4_pipe
4532 alphaev4_insn_pipe (insn)
4535 if (recog_memoized (insn) < 0)
4537 if (get_attr_length (insn) != 4)
4540 switch (get_attr_type (insn))
/* Classify INSN into an EV5 issue pipe based on its attribute type.
   NOTE(review): elided fragment; the switch body is missing here.  */
4573 static enum alphaev5_pipe
4574 alphaev5_insn_pipe (insn)
4577 if (recog_memoized (insn) < 0)
4579 if (get_attr_length (insn) != 4)
4582 switch (get_attr_type (insn))
4622 /* IN_USE is a mask of the slots currently filled within the insn group.
4623 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
4624 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
4626 LEN is, of course, the length of the group in bytes. */
/* Scan forward from INSN, accumulating an EV4 issue group; returns
   the first insn of the NEXT group and fills *PIN_USE / *PLEN.
   NOTE(review): elided listing fragment -- braces, "done"/"next"
   labels, and several slot-assignment lines are missing here.  */
4629 alphaev4_next_group (insn, pin_use, plen)
4631 int *pin_use, *plen;
4637 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i'
4638 || GET_CODE (PATTERN (insn)) == CLOBBER
4639 || GET_CODE (PATTERN (insn)) == USE)
4644 enum alphaev4_pipe pipe;
4646 pipe = alphaev4_insn_pipe (insn);
4650 /* Force complex instructions to start new groups. */
4654 /* If this is a completely unrecognized insn, it's an asm.
4655 We don't know how long it is, so record length as -1 to
4656 signal a needed realignment. */
4657 if (recog_memoized (insn) < 0)
4660 len = get_attr_length (insn);
/* Slot the insn into IB0/IB1, tracking the hardware's ability to
   swap an IBX-capable insn between the two.  */
4664 if (in_use & EV4_IB0)
4666 if (in_use & EV4_IB1)
4671 in_use |= EV4_IB0 | EV4_IBX;
4675 if (in_use & EV4_IB0)
4677 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
4685 if (in_use & EV4_IB1)
4695 /* Haifa doesn't do well scheduling branches. */
4696 if (GET_CODE (insn) == JUMP_INSN)
4700 insn = next_nonnote_insn (insn);
4702 if (!insn || GET_RTX_CLASS (GET_CODE (insn)) != 'i')
4705 /* Let Haifa tell us where it thinks insn group boundaries are. */
4706 if (GET_MODE (insn) == TImode)
4709 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
4714 insn = next_nonnote_insn (insn);
4722 /* IN_USE is a mask of the slots currently filled within the insn group.
4723 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
4724 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
4726 LEN is, of course, the length of the group in bytes. */
/* EV5 analogue of alphaev4_next_group: four slots (E0, E1, FA, FM)
   instead of two.  NOTE(review): elided listing fragment -- braces,
   labels, and several slot-assignment lines are missing here.  */
4729 alphaev5_next_group (insn, pin_use, plen)
4731 int *pin_use, *plen;
4737 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i'
4738 || GET_CODE (PATTERN (insn)) == CLOBBER
4739 || GET_CODE (PATTERN (insn)) == USE)
4744 enum alphaev5_pipe pipe;
4746 pipe = alphaev5_insn_pipe (insn);
4750 /* Force complex instructions to start new groups. */
4754 /* If this is a completely unrecognized insn, it's an asm.
4755 We don't know how long it is, so record length as -1 to
4756 signal a needed realignment. */
4757 if (recog_memoized (insn) < 0)
4760 len = get_attr_length (insn);
4763 /* ??? Most of the places below, we would like to abort, as
4764 it would indicate an error either in Haifa, or in the
4765 scheduling description. Unfortunately, Haifa never
4766 schedules the last instruction of the BB, so we don't
4767 have an accurate TI bit to go off. */
/* Integer slots E0/E1, with E01 marking a swappable insn.  */
4769 if (in_use & EV5_E0)
4771 if (in_use & EV5_E1)
4776 in_use |= EV5_E0 | EV5_E01;
4780 if (in_use & EV5_E0)
4782 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
4790 if (in_use & EV5_E1)
/* Floating-point slots FA/FM, with FAM marking a swappable insn.  */
4796 if (in_use & EV5_FA)
4798 if (in_use & EV5_FM)
4803 in_use |= EV5_FA | EV5_FAM;
4807 if (in_use & EV5_FA)
4813 if (in_use & EV5_FM)
4826 /* Haifa doesn't do well scheduling branches. */
4827 /* ??? If this is predicted not-taken, slotting continues, except
4828 that no more IBR, FBR, or JSR insns may be slotted. */
4829 if (GET_CODE (insn) == JUMP_INSN)
4833 insn = next_nonnote_insn (insn);
4835 if (!insn || GET_RTX_CLASS (GET_CODE (insn)) != 'i')
4838 /* Let Haifa tell us where it thinks insn group boundaries are. */
4839 if (GET_MODE (insn) == TImode)
4842 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
4847 insn = next_nonnote_insn (insn);
/* Return a nop rtx that fills the cheapest still-free EV4 issue slot,
   updating *PIN_USE.  NOTE(review): elided fragment -- the nop
   constructors and brace structure are missing from this view.  */
4856 alphaev4_next_nop (pin_use)
4859 int in_use = *pin_use;
4862 if (!(in_use & EV4_IB0))
4867 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
4872 else if (TARGET_FP && !(in_use & EV4_IB1))
/* Return a nop rtx that fills the cheapest still-free EV5 issue slot,
   updating *PIN_USE.  NOTE(review): elided fragment -- the nop
   constructors and brace structure are missing from this view.  */
4885 alphaev5_next_nop (pin_use)
4888 int in_use = *pin_use;
4891 if (!(in_use & EV5_E1))
4896 else if (TARGET_FP && !(in_use & EV5_FA))
4901 else if (TARGET_FP && !(in_use & EV5_FM))
4913 /* The instruction group alignment main loop. */
/* Walk INSNS, using NEXT_GROUP to recognize issue groups and
   NEXT_NOP to pad with issue-free nops, keeping each group within a
   MAX_ALIGN-byte fetch block.  GP_IN_USE describes the slots consumed
   by the implicit GP load at function entry.  NOTE(review): elided
   listing fragment -- declarations, braces, and parts of the loop
   body are missing between the visible lines.  */
4916 alpha_align_insns (insns, max_align, next_group, next_nop, gp_in_use)
4919 rtx (*next_group) PROTO((rtx, int*, int*));
4920 rtx (*next_nop) PROTO((int*));
4923 /* ALIGN is the known alignment for the insn group. */
4925 /* OFS is the offset of the current insn in the insn group. */
4927 int prev_in_use, in_use, len;
4930 /* Let shorten branches care for assigning alignments to code labels. */
4931 shorten_branches (insns);
4933 align = (FUNCTION_BOUNDARY/BITS_PER_UNIT < max_align
4934 ? FUNCTION_BOUNDARY/BITS_PER_UNIT : max_align);
4936 /* Account for the initial GP load, which happens before the scheduled
4937 prologue we emitted as RTL. */
4938 ofs = prev_in_use = 0;
4939 if (alpha_does_function_need_gp())
4941 ofs = 8 & (align - 1);
4942 prev_in_use = gp_in_use;
4946 if (GET_CODE (i) == NOTE)
4947 i = next_nonnote_insn (i);
4951 next = (*next_group)(i, &in_use, &len);
4953 /* When we see a label, resync alignment etc. */
4954 if (GET_CODE (i) == CODE_LABEL)
4956 int new_align = 1 << label_to_alignment (i);
4957 if (new_align >= align)
4959 align = new_align < max_align ? new_align : max_align;
4962 else if (ofs & (new_align-1))
4963 ofs = (ofs | (new_align-1)) + 1;
4968 /* Handle complex instructions specially. */
4969 else if (in_use == 0)
4971 /* Asms will have length < 0. This is a signal that we have
4972 lost alignment knowledge. Assume, however, that the asm
4973 will not mis-align instructions. */
4982 /* If the known alignment is smaller than the recognized insn group,
4983 realign the output. */
4984 else if (align < len)
4986 int new_log_align = len > 8 ? 4 : 3;
4989 where = prev_nonnote_insn (i);
4990 if (!where || GET_CODE (where) != CODE_LABEL)
4993 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
4994 align = 1 << new_log_align;
4998 /* If the group won't fit in the same INT16 as the previous,
4999 we need to add padding to keep the group together. Rather
5000 than simply leaving the insn filling to the assembler, we
5001 can make use of the knowledge of what sorts of instructions
5002 were issued in the previous group to make sure that all of
5003 the added nops are really free. */
5004 else if (ofs + len > align)
5006 int nop_count = (align - ofs) / 4;
5009 /* Insert nops before labels and branches to truly merge the
5010 execution of the nops with the previous instruction group. */
5011 where = prev_nonnote_insn (i);
5014 if (GET_CODE (where) == CODE_LABEL)
5016 rtx where2 = prev_nonnote_insn (where);
5017 if (where2 && GET_CODE (where2) == JUMP_INSN)
5020 else if (GET_CODE (where) != JUMP_INSN)
5027 emit_insn_before ((*next_nop)(&prev_in_use), where);
5028 while (--nop_count);
5032 ofs = (ofs + len) & (align - 1);
5033 prev_in_use = in_use;
5039 /* Machine dependent reorg pass. */
/* NOTE(review): elided listing fragment -- the function header
   (presumably the machine_dependent_reorg entry point) is missing
   from this view; only its body survives.  */
5045 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
5046 alpha_handle_trap_shadows (insns);
5049 /* Due to the number of extra trapb insns, don't bother fixing up
5050 alignment when trap precision is instruction. Moreover, we can
5051 only do our job when sched2 is run and Haifa is our scheduler. */
5052 if (optimize && !optimize_size
5053 && alpha_tp != ALPHA_TP_INSN
5054 && flag_schedule_insns_after_reload)
5056 if (alpha_cpu == PROCESSOR_EV4)
5057 alpha_align_insns (insns, 8, alphaev4_next_group,
5058 alphaev4_next_nop, EV4_IB0);
5059 else if (alpha_cpu == PROCESSOR_EV5)
5060 alpha_align_insns (insns, 16, alphaev5_next_group,
5061 alphaev5_next_nop, EV5_E01 | EV5_E0);
5067 /* Check a floating-point value for validity for a particular machine mode. */
/* Limit values for single-precision floats: entries 0-3 bound VAX F
   format, entries 4-7 bound (non-trapping) IEEE single.  Parsed once
   into float_values by check_float_value below.  */
5069 static char * const float_strings[] =
5071 /* These are for FLOAT_VAX. */
5072 "1.70141173319264430e+38", /* 2^127 (2^24 - 1) / 2^24 */
5073 "-1.70141173319264430e+38",
5074 "2.93873587705571877e-39", /* 2^-128 */
5075 "-2.93873587705571877e-39",
5076 /* These are for the default broken IEEE mode, which traps
5077 on infinity or denormal numbers. */
5078 "3.402823466385288598117e+38", /* 2^128 (1 - 2^-24) */
5079 "-3.402823466385288598117e+38",
5080 "1.1754943508222875079687e-38", /* 2^-126 */
5081 "-1.1754943508222875079687e-38",
5084 static REAL_VALUE_TYPE float_values[8];
5085 static int inited_float_values = 0;
/* Clamp the value at *D to the representable range of MODE, writing
   the saturated (or zero, for underflow) value back through D.
   No-op for the IEEE-conformant targets.  NOTE(review): elided
   listing fragment -- braces, the SFmode test, and the trailing
   return are missing from this view.  */
5088 check_float_value (mode, d, overflow)
5089 enum machine_mode mode;
5091 int overflow ATTRIBUTE_UNUSED;
5094 if (TARGET_IEEE || TARGET_IEEE_CONFORMANT || TARGET_IEEE_WITH_INEXACT)
/* Lazily parse the limit strings once.  */
5097 if (inited_float_values == 0)
5100 for (i = 0; i < 8; i++)
5101 float_values[i] = REAL_VALUE_ATOF (float_strings[i], DFmode);
5103 inited_float_values = 1;
5109 REAL_VALUE_TYPE *fvptr;
5111 if (TARGET_FLOAT_VAX)
5112 fvptr = &float_values[0];
5114 fvptr = &float_values[4];
5116 bcopy ((char *) d, (char *) &r, sizeof (REAL_VALUE_TYPE));
/* Saturate on overflow; flush to zero on underflow.  */
5117 if (REAL_VALUES_LESS (fvptr[0], r))
5119 bcopy ((char *) &fvptr[0], (char *) d,
5120 sizeof (REAL_VALUE_TYPE));
5123 else if (REAL_VALUES_LESS (r, fvptr[1]))
5125 bcopy ((char *) &fvptr[1], (char *) d,
5126 sizeof (REAL_VALUE_TYPE));
5129 else if (REAL_VALUES_LESS (dconst0, r)
5130 && REAL_VALUES_LESS (r, fvptr[2]))
5132 bcopy ((char *) &dconst0, (char *) d, sizeof (REAL_VALUE_TYPE));
5135 else if (REAL_VALUES_LESS (r, dconst0)
5136 && REAL_VALUES_LESS (fvptr[3], r))
5138 bcopy ((char *) &dconst0, (char *) d, sizeof (REAL_VALUE_TYPE));
5148 /* Return the VMS argument type corresponding to MODE. */
/* NOTE(review): elided fragment -- the switch over MODE (SFmode /
   DFmode cases, presumably) and the default return are missing.  */
5151 alpha_arg_type (mode)
5152 enum machine_mode mode;
5157 return TARGET_FLOAT_VAX ? FF : FS;
5159 return TARGET_FLOAT_VAX ? FD : FT;
5165 /* Return an rtx for an integer representing the VMS Argument Information
/* Packs the argument count into the low bits and each of the six
   3-bit per-argument type codes starting at bit 8.  */
5169 alpha_arg_info_reg_val (cum)
5170 CUMULATIVE_ARGS cum;
5172 unsigned HOST_WIDE_INT regval = cum.num_args;
5175 for (i = 0; i < 6; i++)
5176 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
5178 return GEN_INT (regval);
5181 /* Structure to collect function names for final output
/* Linked list of names needing VMS ".linkage" entries, built by
   alpha_need_linkage and flushed by alpha_write_linkage.
   NOTE(review): elided fragment -- the "name" member of struct
   alpha_links is missing from this view.  */
5184 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
5187 struct alpha_links {
5188 struct alpha_links *next;
5190 enum links_kind kind;
5193 static struct alpha_links *alpha_links_base = 0;
5195 /* Make (or fake) .linkage entry for function call.
5197 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition. */
/* Records NAME on the alpha_links list, upgrading an existing entry's
   kind when a use and a definition are both seen.  */
5200 alpha_need_linkage (name, is_local)
5205 struct alpha_links *lptr, *nptr;
5210 /* Is this name already defined ? */
5212 for (lptr = alpha_links_base; lptr; lptr = lptr->next)
5213 if (strcmp (lptr->name, name) == 0)
5217 /* Defined here but external assumed. */
5218 if (lptr->kind == KIND_EXTERN)
5219 lptr->kind = KIND_LOCAL;
5223 /* Used here but unused assumed. */
5224 if (lptr->kind == KIND_UNUSED)
5225 lptr->kind = KIND_LOCAL;
/* Not found: prepend a fresh entry.  */
5230 nptr = (struct alpha_links *) xmalloc (sizeof (struct alpha_links));
5231 nptr->next = alpha_links_base;
5232 nptr->name = xstrdup (name);
5234 /* Assume external if no definition. */
5235 nptr->kind = (is_local ? KIND_UNUSED : KIND_EXTERN);
5237 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
5238 get_identifier (name);
5240 alpha_links_base = nptr;
/* Emit the accumulated VMS linkage entries to STREAM, skipping names
   that were never actually referenced.  NOTE(review): elided
   fragment -- braces and the nptr advance are missing here.  */
5247 alpha_write_linkage (stream)
5250 struct alpha_links *lptr, *nptr;
5252 readonly_section ();
5254 fprintf (stream, "\t.align 3\n");
5256 for (lptr = alpha_links_base; lptr; lptr = nptr)
5260 if (lptr->kind == KIND_UNUSED
5261 || ! TREE_SYMBOL_REFERENCED (get_identifier (lptr->name)))
5264 fprintf (stream, "$%s..lk:\n", lptr->name);
5265 if (lptr->kind == KIND_LOCAL)
5267 /* Local and used, build linkage pair. */
5268 fprintf (stream, "\t.quad %s..en\n", lptr->name);
5269 fprintf (stream, "\t.quad %s\n", lptr->name);
5272 /* External and used, request linkage pair. */
5273 fprintf (stream, "\t.linkage %s\n", lptr->name);
/* Non-VMS stub: linkage bookkeeping is only needed on OpenVMS.  */
5280 alpha_need_linkage (name, is_local)
5281 char *name ATTRIBUTE_UNUSED;
5282 int is_local ATTRIBUTE_UNUSED;
5286 #endif /* OPEN_VMS */