1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 93-98, 1999 Free Software Foundation, Inc.
3 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
27 #include "hard-reg-set.h"
29 #include "insn-config.h"
30 #include "conditions.h"
31 #include "insn-flags.h"
33 #include "insn-attr.h"
45 extern char *version_string;
46 extern int rtx_equal_function_value_matters;
48 /* Specify which cpu to schedule for. */
50 enum processor_type alpha_cpu;
/* Printable names indexed by alpha_cpu; used in diagnostics below
   (array initializer elided from this listing).  */
51 static const char * const alpha_cpu_name[] =
56 /* Specify how accurate floating-point traps need to be. */
58 enum alpha_trap_precision alpha_tp;
60 /* Specify the floating-point rounding mode. */
62 enum alpha_fp_rounding_mode alpha_fprm;
64 /* Specify which things cause traps. */
66 enum alpha_fp_trap_mode alpha_fptm;
68 /* Strings decoded into the above options. */
70 const char *alpha_cpu_string;	/* -mcpu= */
71 const char *alpha_tp_string;	/* -mtrap-precision=[p|s|i] */
72 const char *alpha_fprm_string;	/* -mfp-rounding-mode=[n|m|c|d] */
73 const char *alpha_fptm_string;	/* -mfp-trap-mode=[n|u|su|sui] */
74 const char *alpha_mlat_string;	/* -mmemory-latency= */
76 /* Save information from a "cmpxx" operation until the branch or scc is
79 rtx alpha_compare_op0, alpha_compare_op1;
80 int alpha_compare_fp_p;
82 /* Define the information needed to modify the epilogue for EH. */
84 rtx alpha_eh_epilogue_sp_ofs;
86 /* Non-zero if inside of a function, because the Alpha asm can't
87 handle .files inside of functions. */
89 static int inside_function = FALSE;
91 /* If non-null, this rtx holds the return address for the function. */
93 static rtx alpha_return_addr_rtx;
95 /* The number of cycles of latency we should assume on memory reads. */
97 int alpha_memory_latency = 3;
99 /* Whether the function needs the GP. */
101 static int alpha_function_needs_gp;
103 /* The alias set for prologue/epilogue register save/restore. */
105 static int alpha_sr_alias_set;
107 /* Declarations of static functions. */
108 static void alpha_set_memflags_1
109 PROTO((rtx, int, int, int));
110 static rtx alpha_emit_set_const_1
111 PROTO((rtx, enum machine_mode, HOST_WIDE_INT, int));
112 static void alpha_expand_unaligned_load_words
113 PROTO((rtx *out_regs, rtx smem, HOST_WIDE_INT words, HOST_WIDE_INT ofs));
114 static void alpha_expand_unaligned_store_words
115 PROTO((rtx *out_regs, rtx smem, HOST_WIDE_INT words, HOST_WIDE_INT ofs));
116 static void alpha_sa_mask
117 PROTO((unsigned long *imaskP, unsigned long *fmaskP));
118 static int alpha_does_function_need_gp
122 /* Get the number of args of a function in one of two ways. */
/* NOTE(review): these two NUM_ARGS definitions are presumably chosen
   by a preprocessor conditional (VMS vs. OSF arg-info layout) that is
   elided from this listing -- confirm against the full file.  */
124 #define NUM_ARGS current_function_args_info.num_args
126 #define NUM_ARGS current_function_args_info
132 /* Parse target option strings. */
/* Default the scheduling CPU from the compiled-in TARGET_CPU_DEFAULT
   mask: EV6 beats EV5 beats EV4.  */
138 = TARGET_CPU_DEFAULT & MASK_CPU_EV6 ? PROCESSOR_EV6
139 : (TARGET_CPU_DEFAULT & MASK_CPU_EV5 ? PROCESSOR_EV5 : PROCESSOR_EV4);
/* -mcpu=: select the scheduling model and force the BWX/CIX/MAX
   instruction-subset flags to match what that chip implements.  */
141 if (alpha_cpu_string)
143 if (! strcmp (alpha_cpu_string, "ev4")
144 || ! strcmp (alpha_cpu_string, "21064"))
146 alpha_cpu = PROCESSOR_EV4;
147 target_flags &= ~ (MASK_BWX | MASK_CIX | MASK_MAX);
149 else if (! strcmp (alpha_cpu_string, "ev5")
150 || ! strcmp (alpha_cpu_string, "21164"))
152 alpha_cpu = PROCESSOR_EV5;
153 target_flags &= ~ (MASK_BWX | MASK_CIX | MASK_MAX);
155 else if (! strcmp (alpha_cpu_string, "ev56")
156 || ! strcmp (alpha_cpu_string, "21164a"))
158 alpha_cpu = PROCESSOR_EV5;
159 target_flags |= MASK_BWX;
160 target_flags &= ~ (MASK_CIX | MASK_MAX);
162 else if (! strcmp (alpha_cpu_string, "pca56")
163 || ! strcmp (alpha_cpu_string, "21164PC")
164 || ! strcmp (alpha_cpu_string, "21164pc"))
166 alpha_cpu = PROCESSOR_EV5;
167 target_flags |= MASK_BWX | MASK_MAX;
168 target_flags &= ~ MASK_CIX;
170 else if (! strcmp (alpha_cpu_string, "ev6")
171 || ! strcmp (alpha_cpu_string, "21264"))
173 alpha_cpu = PROCESSOR_EV6;
174 target_flags |= MASK_BWX | MASK_CIX | MASK_MAX;
177 error ("bad value `%s' for -mcpu switch", alpha_cpu_string);
/* Baseline FP behavior; refined below by -mieee* and the explicit
   -mtrap-precision / -mfp-rounding-mode / -mfp-trap-mode strings.  */
180 alpha_tp = ALPHA_TP_PROG;
181 alpha_fprm = ALPHA_FPRM_NORM;
182 alpha_fptm = ALPHA_FPTM_N;
186 alpha_tp = ALPHA_TP_INSN;
187 alpha_fptm = ALPHA_FPTM_SU;
190 if (TARGET_IEEE_WITH_INEXACT)
192 alpha_tp = ALPHA_TP_INSN;
193 alpha_fptm = ALPHA_FPTM_SUI;
198 if (! strcmp (alpha_tp_string, "p"))
199 alpha_tp = ALPHA_TP_PROG;
200 else if (! strcmp (alpha_tp_string, "f"))
201 alpha_tp = ALPHA_TP_FUNC;
202 else if (! strcmp (alpha_tp_string, "i"))
203 alpha_tp = ALPHA_TP_INSN;
205 error ("bad value `%s' for -mtrap-precision switch", alpha_tp_string);
208 if (alpha_fprm_string)
210 if (! strcmp (alpha_fprm_string, "n"))
211 alpha_fprm = ALPHA_FPRM_NORM;
212 else if (! strcmp (alpha_fprm_string, "m"))
213 alpha_fprm = ALPHA_FPRM_MINF;
214 else if (! strcmp (alpha_fprm_string, "c"))
215 alpha_fprm = ALPHA_FPRM_CHOP;
216 else if (! strcmp (alpha_fprm_string,"d"))
217 alpha_fprm = ALPHA_FPRM_DYN;
219 error ("bad value `%s' for -mfp-rounding-mode switch",
223 if (alpha_fptm_string)
225 if (strcmp (alpha_fptm_string, "n") == 0)
226 alpha_fptm = ALPHA_FPTM_N;
227 else if (strcmp (alpha_fptm_string, "u") == 0)
228 alpha_fptm = ALPHA_FPTM_U;
229 else if (strcmp (alpha_fptm_string, "su") == 0)
230 alpha_fptm = ALPHA_FPTM_SU;
231 else if (strcmp (alpha_fptm_string, "sui") == 0)
232 alpha_fptm = ALPHA_FPTM_SUI;
234 error ("bad value `%s' for -mfp-trap-mode switch", alpha_fptm_string);
237 /* Do some sanity checks on the above option. */
/* Software completion of FP traps only works with per-insn trap
   precision, so force it and warn.  */
239 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
240 && alpha_tp != ALPHA_TP_INSN)
242 warning ("fp software completion requires -mtrap-precision=i");
243 alpha_tp = ALPHA_TP_INSN;
/* VAX float formats support neither the minf/dynamic rounding modes
   nor the /sui trap qualifier; downgrade with a warning.  */
246 if (TARGET_FLOAT_VAX)
248 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
250 warning ("rounding mode not supported for VAX floats");
251 alpha_fprm = ALPHA_FPRM_NORM;
253 if (alpha_fptm == ALPHA_FPTM_SUI)
255 warning ("trap mode not supported for VAX floats");
256 alpha_fptm = ALPHA_FPTM_SU;
/* -mmemory-latency: either a plain cycle count, "L1".."L3" (cache
   level, translated through the per-CPU table below), or "main".  */
264 if (!alpha_mlat_string)
265 alpha_mlat_string = "L1";
267 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
268 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
270 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
271 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
272 && alpha_mlat_string[2] == '\0')
274 static int const cache_latency[][4] =
276 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
277 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
278 { 3, 13, -1 }, /* ev6 -- Ho hum, doesn't exist yet */
281 lat = alpha_mlat_string[1] - '0';
282 if (lat < 0 || lat > 3 || cache_latency[alpha_cpu][lat-1] == -1)
284 warning ("L%d cache latency unknown for %s",
285 lat, alpha_cpu_name[alpha_cpu]);
289 lat = cache_latency[alpha_cpu][lat-1];
291 else if (! strcmp (alpha_mlat_string, "main"))
293 /* Most current memories have about 370ns latency. This is
294 a reasonable guess for a fast cpu. */
299 warning ("bad value `%s' for -mmemory-latency", alpha_mlat_string);
303 alpha_memory_latency = lat;
306 /* Default the definition of "small data" to 8 bytes. */
310 /* Acquire a unique set number for our register saves and restores. */
311 alpha_sr_alias_set = new_alias_set ();
314 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
/* NOTE(review): the loop increment (shift of VALUE by one byte per
   iteration) and the return statements are elided from this listing;
   each byte of VALUE must be all-zeros or all-ones for a ZAP mask.  */
322 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
324 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
330 /* Returns 1 if OP is either the constant zero or a register. If a
331 register, it must be in the proper mode unless MODE is VOIDmode. */
334 reg_or_0_operand (op, mode)
336 enum machine_mode mode;
338 return op == const0_rtx || register_operand (op, mode);
341 /* Return 1 if OP is a constant in the range of 0-63 (for a shift) or
345 reg_or_6bit_operand (op, mode)
347 enum machine_mode mode;
349 return ((GET_CODE (op) == CONST_INT
350 && (unsigned HOST_WIDE_INT) INTVAL (op) < 64)
351 || register_operand (op, mode));
355 /* Return 1 if OP is an 8-bit constant or any register. */
358 reg_or_8bit_operand (op, mode)
360 enum machine_mode mode;
362 return ((GET_CODE (op) == CONST_INT
363 && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100)
364 || register_operand (op, mode));
367 /* Return 1 if OP is an 8-bit constant. */
370 cint8_operand (op, mode)
372 enum machine_mode mode ATTRIBUTE_UNUSED;
374 return ((GET_CODE (op) == CONST_INT
375 && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100));
378 /* Return 1 if the operand is a valid second operand to an add insn. */
381 add_operand (op, mode)
383 enum machine_mode mode;
385 if (GET_CODE (op) == CONST_INT)
386 /* Constraints I, J, O and P are covered by K. */
/* 'K' and 'L' are the machine constraint letters tested via
   CONST_OK_FOR_LETTER_P (defined in the target header).  */
387 return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'K')
388 || CONST_OK_FOR_LETTER_P (INTVAL (op), 'L'));
390 return register_operand (op, mode);
393 /* Return 1 if the operand is a valid second operand to a sign-extending
/* Sign-extending adds (addl/s4addl etc.) accept a narrower set of
   immediates than plain adds: constraint letters 'I' and 'O'.  */
397 sext_add_operand (op, mode)
399 enum machine_mode mode;
401 if (GET_CODE (op) == CONST_INT)
402 return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'I')
403 || CONST_OK_FOR_LETTER_P (INTVAL (op), 'O'));
405 return register_operand (op, mode);
408 /* Return 1 if OP is the constant 4 or 8. */
411 const48_operand (op, mode)
413 enum machine_mode mode ATTRIBUTE_UNUSED;
415 return (GET_CODE (op) == CONST_INT
416 && (INTVAL (op) == 4 || INTVAL (op) == 8));
419 /* Return 1 if OP is a valid first operand to an AND insn. */
422 and_operand (op, mode)
424 enum machine_mode mode;
/* A VOIDmode CONST_DOUBLE is a wide integer constant (narrow-host
   case); both halves must be ZAP masks.  */
426 if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)
427 return (zap_mask (CONST_DOUBLE_LOW (op))
428 && zap_mask (CONST_DOUBLE_HIGH (op)));
/* AND immediates: 8-bit literal, complement of an 8-bit literal
   (usable via BIC), or a whole-byte ZAP/ZAPNOT mask.  */
430 if (GET_CODE (op) == CONST_INT)
431 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
432 || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100
433 || zap_mask (INTVAL (op)));
435 return register_operand (op, mode);
438 /* Return 1 if OP is a valid first operand to an IOR or XOR insn. */
441 or_operand (op, mode)
443 enum machine_mode mode;
445 if (GET_CODE (op) == CONST_INT)
446 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
447 || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100);
449 return register_operand (op, mode);
452 /* Return 1 if OP is a constant that is the width, in bits, of an integral
453 mode smaller than DImode. */
456 mode_width_operand (op, mode)
458 enum machine_mode mode ATTRIBUTE_UNUSED;
460 return (GET_CODE (op) == CONST_INT
461 && (INTVAL (op) == 8 || INTVAL (op) == 16
462 || INTVAL (op) == 32 || INTVAL (op) == 64));
465 /* Return 1 if OP is a constant that is the width of an integral machine mode
466 smaller than an integer. */
469 mode_mask_operand (op, mode)
471 enum machine_mode mode ATTRIBUTE_UNUSED;
/* On a 32-bit host a DImode all-ones low half arrives as a
   CONST_DOUBLE; high == 0 is the 0xffffffff mask, high == -1 is the
   full-width mask.  */
473 #if HOST_BITS_PER_WIDE_INT == 32
474 if (GET_CODE (op) == CONST_DOUBLE)
475 return (CONST_DOUBLE_LOW (op) == -1
476 && (CONST_DOUBLE_HIGH (op) == -1
477 || CONST_DOUBLE_HIGH (op) == 0));
/* 64-bit host branch of the same check (the #else is elided from
   this listing).  */
479 if (GET_CODE (op) == CONST_DOUBLE)
480 return (CONST_DOUBLE_LOW (op) == -1 && CONST_DOUBLE_HIGH (op) == 0);
/* CONST_INT case: the byte, word, longword (and on 64-bit hosts,
   quadword) masks.  */
483 return (GET_CODE (op) == CONST_INT
484 && (INTVAL (op) == 0xff
485 || INTVAL (op) == 0xffff
486 || INTVAL (op) == (HOST_WIDE_INT)0xffffffff
487 #if HOST_BITS_PER_WIDE_INT == 64
493 /* Return 1 if OP is a multiple of 8 less than 64. */
496 mul8_operand (op, mode)
498 enum machine_mode mode ATTRIBUTE_UNUSED;
500 return (GET_CODE (op) == CONST_INT
501 && (unsigned HOST_WIDE_INT) INTVAL (op) < 64
502 && (INTVAL (op) & 7) == 0);
505 /* Return 1 if OP is the constant zero in floating-point. */
508 fp0_operand (op, mode)
510 enum machine_mode mode;
512 return (GET_MODE (op) == mode
513 && GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode));
516 /* Return 1 if OP is the floating-point constant zero or a register. */
519 reg_or_fp0_operand (op, mode)
521 enum machine_mode mode;
523 return fp0_operand (op, mode) || register_operand (op, mode);
526 /* Return 1 if OP is a hard floating-point register. */
529 hard_fp_register_operand (op, mode)
531 enum machine_mode mode;
/* Accept a REG in FLOAT_REGS, or recurse through a SUBREG of one.  */
533 return ((GET_CODE (op) == REG && REGNO_REG_CLASS (REGNO (op)) == FLOAT_REGS)
534 || (GET_CODE (op) == SUBREG
535 && hard_fp_register_operand (SUBREG_REG (op), mode)));
538 /* Return 1 if OP is a register or a constant integer. */
542 reg_or_cint_operand (op, mode)
544 enum machine_mode mode;
546 return (GET_CODE (op) == CONST_INT
547 || register_operand (op, mode));
550 /* Return 1 if OP is something that can be reloaded into a register;
551 if it is a MEM, it need not be valid. */
554 some_operand (op, mode)
556 enum machine_mode mode;
/* Reject a mode mismatch unless either side is VOIDmode.  */
558 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
561 switch (GET_CODE (op))
563 case REG: case MEM: case CONST_DOUBLE: case CONST_INT: case LABEL_REF:
564 case SYMBOL_REF: case CONST:
/* SUBREG case (case label elided from listing): look through to the
   inner expression, with no mode restriction.  */
568 return some_operand (SUBREG_REG (op), VOIDmode);
577 /* Return 1 if OP is a valid operand for the source of a move insn. */
580 input_operand (op, mode)
582 enum machine_mode mode;
584 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
/* FP constants/registers must match the mode exactly.  */
587 if (GET_MODE_CLASS (mode) == MODE_FLOAT && GET_MODE (op) != mode)
590 switch (GET_CODE (op))
595 /* This handles both the Windows/NT and OSF cases. */
596 return mode == ptr_mode || mode == DImode;
/* REG/SUBREG case (labels elided): a register is always valid
   input...  */
602 if (register_operand (op, mode))
604 /* ... fall through ... */
/* MEM case: sub-longword memory moves need the BWX extension.  */
606 return ((TARGET_BWX || (mode != HImode && mode != QImode))
607 && general_operand (op, mode));
/* CONST_DOUBLE case: only the FP zero constant is a valid source.  */
610 return GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode);
/* CONST_INT case.  */
613 return mode == QImode || mode == HImode || add_operand (op, mode);
625 /* Return 1 if OP is a SYMBOL_REF for a function known to be in this
629 current_file_function_operand (op, mode)
631 enum machine_mode mode ATTRIBUTE_UNUSED;
/* Profiling inserts out-of-file calls, so never treat a symbol as
   local when profiling; otherwise accept symbols marked local
   (SYMBOL_REF_FLAG) or the current function itself.  */
633 return (GET_CODE (op) == SYMBOL_REF
634 && ! profile_flag && ! profile_block_flag
635 && (SYMBOL_REF_FLAG (op)
636 || op == XEXP (DECL_RTL (current_function_decl), 0)));
639 /* Return 1 if OP is a valid operand for the MEM of a CALL insn. */
642 call_operand (op, mode)
644 enum machine_mode mode;
/* Direct calls via SYMBOL_REF, or indirect through a register --
   restricted to $27 on OSF (where the ABI requires the procedure
   value there), any register on VMS/NT.  */
649 return (GET_CODE (op) == SYMBOL_REF
650 || (GET_CODE (op) == REG
651 && (TARGET_OPEN_VMS || TARGET_WINDOWS_NT || REGNO (op) == 27)));
654 /* Return 1 if OP is a valid Alpha comparison operator. Here we know which
655 comparisons are valid in which insn. */
658 alpha_comparison_operator (op, mode)
660 enum machine_mode mode;
662 enum rtx_code code = GET_CODE (op);
/* Must be a comparison rtx of the requested mode.  */
664 if (mode != GET_MODE (op) || GET_RTX_CLASS (code) != '<')
/* The hardware provides cmpeq/cmple/cmplt, plus the unsigned
   variants cmpule/cmpult for DImode only.  */
667 return (code == EQ || code == LE || code == LT
668 || (mode == DImode && (code == LEU || code == LTU)));
671 /* Return 1 if OP is a valid Alpha swapped comparison operator. */
674 alpha_swapped_comparison_operator (op, mode)
676 enum machine_mode mode;
678 enum rtx_code code = GET_CODE (op);
680 if (mode != GET_MODE (op) || GET_RTX_CLASS (code) != '<')
/* Same insn set as above, tested after swapping the operands.  */
683 code = swap_condition (code);
684 return (code == EQ || code == LE || code == LT
685 || (mode == DImode && (code == LEU || code == LTU)));
688 /* Return 1 if OP is a signed comparison operation. */
691 signed_comparison_operator (op, mode)
693 enum machine_mode mode ATTRIBUTE_UNUSED;
/* Accept exactly the six signed comparison codes; the return
   statements and default case are elided from this listing.  */
695 switch (GET_CODE (op))
697 case EQ: case NE: case LE: case LT: case GE: case GT:
707 /* Return 1 if this is a divide or modulus operator. */
710 divmod_operator (op, mode)
712 enum machine_mode mode ATTRIBUTE_UNUSED;
714 switch (GET_CODE (op))
716 case DIV: case MOD: case UDIV: case UMOD:
726 /* Return 1 if this memory address is a known aligned register plus
727 a constant. It must be a valid address. This means that we can do
728 this as an aligned reference plus some offset.
730 Take into account what reload will do. */
733 aligned_memory_operand (op, mode)
735 enum machine_mode mode;
/* During reload a pseudo may stand for its stack slot; substitute
   the equivalent memory location before testing.  */
739 if (reload_in_progress)
742 if (GET_CODE (tmp) == SUBREG)
743 tmp = SUBREG_REG (tmp);
744 if (GET_CODE (tmp) == REG
745 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
747 op = reg_equiv_memory_loc[REGNO (tmp)];
753 if (GET_CODE (op) != MEM
754 || GET_MODE (op) != mode)
758 /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
759 sorts of constructs. Dig for the real base register. */
760 if (reload_in_progress
761 && GET_CODE (op) == PLUS
762 && GET_CODE (XEXP (op, 0)) == PLUS)
763 base = XEXP (XEXP (op, 0), 0);
766 if (! memory_address_p (mode, op))
768 base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);
/* Aligned means the base register is known to be at least
   4-byte (longword) aligned.  */
771 return (GET_CODE (base) == REG
772 && REGNO_POINTER_ALIGN (REGNO (base)) >= 4);
775 /* Similar, but return 1 if OP is a MEM which is not alignable. */
778 unaligned_memory_operand (op, mode)
780 enum machine_mode mode;
/* Same reload-pseudo substitution as aligned_memory_operand.  */
784 if (reload_in_progress)
787 if (GET_CODE (tmp) == SUBREG)
788 tmp = SUBREG_REG (tmp);
789 if (GET_CODE (tmp) == REG
790 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
792 op = reg_equiv_memory_loc[REGNO (tmp)];
798 if (GET_CODE (op) != MEM
799 || GET_MODE (op) != mode)
803 /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
804 sorts of constructs. Dig for the real base register. */
805 if (reload_in_progress
806 && GET_CODE (op) == PLUS
807 && GET_CODE (XEXP (op, 0)) == PLUS)
808 base = XEXP (XEXP (op, 0), 0);
811 if (! memory_address_p (mode, op))
813 base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);
/* Inverse of the aligned test: base alignment below 4 bytes.  */
816 return (GET_CODE (base) == REG
817 && REGNO_POINTER_ALIGN (REGNO (base)) < 4);
820 /* Return 1 if OP is either a register or an unaligned memory location. */
823 reg_or_unaligned_mem_operand (op, mode)
825 enum machine_mode mode;
827 return register_operand (op, mode) || unaligned_memory_operand (op, mode);
830 /* Return 1 if OP is any memory location. During reload a pseudo matches. */
833 any_memory_operand (op, mode)
835 enum machine_mode mode ATTRIBUTE_UNUSED;
/* Accept a MEM, a SUBREG of a REG, or -- while reloading -- a pseudo
   (possibly wrapped in a SUBREG) that may end up in a stack slot.  */
837 return (GET_CODE (op) == MEM
838 || (GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == REG)
839 || (reload_in_progress && GET_CODE (op) == REG
840 && REGNO (op) >= FIRST_PSEUDO_REGISTER)
841 || (reload_in_progress && GET_CODE (op) == SUBREG
842 && GET_CODE (SUBREG_REG (op)) == REG
843 && REGNO (SUBREG_REG (op)) >= FIRST_PSEUDO_REGISTER));
846 /* Returns 1 if OP is not an eliminable register.
848 This exists to cure a pathological abort in the s8addq (et al) patterns,
850 long foo () { long t; bar(); return (long) &t * 26107; }
852 which run afoul of a hack in reload to cure a (presumably) similar
853 problem with lea-type instructions on other targets. But there is
854 one of us and many of them, so work around the problem by selectively
855 preventing combine from making the optimization. */
858 reg_not_elim_operand (op, mode)
860 enum machine_mode mode;
/* Look through a SUBREG, then reject the frame and arg pointers,
   which reload may later eliminate to sp/fp + offset.  */
863 if (GET_CODE (op) == SUBREG)
864 inner = SUBREG_REG (op);
865 if (inner == frame_pointer_rtx || inner == arg_pointer_rtx)
868 return register_operand (op, mode);
871 /* Return 1 is OP is a memory location that is not a reference (using
872 an AND) to an unaligned location. Take into account what reload
876 normal_memory_operand (op, mode)
878 enum machine_mode mode ATTRIBUTE_UNUSED;
/* During reload, substitute a pseudo's equivalent memory location,
   as in aligned_memory_operand above.  */
880 if (reload_in_progress)
883 if (GET_CODE (tmp) == SUBREG)
884 tmp = SUBREG_REG (tmp);
885 if (GET_CODE (tmp) == REG
886 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
888 op = reg_equiv_memory_loc[REGNO (tmp)];
890 /* This may not have been assigned an equivalent address if it will
891 be eliminated. In that case, it doesn't matter what we do. */
/* An unaligned reference is represented as (mem (and addr -8));
   reject those.  */
897 return GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) != AND;
900 /* Accept a register, but not a subreg of any kind. This allows us to
901 avoid pathological cases in reload wrt data movement common in
902 int->fp conversion. */
905 reg_no_subreg_operand (op, mode)
907 enum machine_mode mode;
/* Reject SUBREGs outright (the early return is elided from this
   listing), then defer to the generic register test.  */
909 if (GET_CODE (op) == SUBREG)
911 return register_operand (op, mode);
914 /* Return 1 if this function can directly return via $26. */
/* True only after reload, with no register saves, no frame, and no
   outgoing/pretend argument space -- and never on VMS, whose calling
   convention always needs an epilogue.  */
919 return (! TARGET_OPEN_VMS && reload_completed && alpha_sa_size () == 0
920 && get_frame_size () == 0
921 && current_function_outgoing_args_size == 0
922 && current_function_pretend_args_size == 0);
925 /* REF is an alignable memory location. Place an aligned SImode
926 reference into *PALIGNED_MEM and the number of bits to shift into
927 *PBITNUM. SCRATCH is a free register for use in reloading out
928 of range stack slots. */
931 get_aligned_mem (ref, paligned_mem, pbitnum)
933 rtx *paligned_mem, *pbitnum;
936 HOST_WIDE_INT offset = 0;
938 if (GET_CODE (ref) != MEM)
/* During reload, resolve a not-yet-valid address through its pending
   replacement before decomposing it.  */
941 if (reload_in_progress
942 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
944 base = find_replacement (&XEXP (ref, 0));
946 if (! memory_address_p (GET_MODE (ref), base))
951 base = XEXP (ref, 0);
/* Split (plus base const) into base and accumulated offset.  */
954 if (GET_CODE (base) == PLUS)
955 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
/* The aligned longword containing REF is at offset rounded down to a
   multiple of 4; the byte position within it becomes the bit shift.  */
957 *paligned_mem = gen_rtx_MEM (SImode, plus_constant (base, offset & ~3));
958 MEM_COPY_ATTRIBUTES (*paligned_mem, ref);
959 RTX_UNCHANGING_P (*paligned_mem) = RTX_UNCHANGING_P (ref);
961 /* Sadly, we cannot use alias sets here because we may overlap other
962 data in a different alias set. */
963 /* MEM_ALIAS_SET (*paligned_mem) = MEM_ALIAS_SET (ref); */
965 *pbitnum = GEN_INT ((offset & 3) * 8);
968 /* Similar, but just get the address. Handle the two reload cases.
969 Add EXTRA_OFFSET to the address we return. */
972 get_unaligned_address (ref, extra_offset)
977 HOST_WIDE_INT offset = 0;
979 if (GET_CODE (ref) != MEM)
982 if (reload_in_progress
983 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
985 base = find_replacement (&XEXP (ref, 0));
987 if (! memory_address_p (GET_MODE (ref), base))
992 base = XEXP (ref, 0);
995 if (GET_CODE (base) == PLUS)
996 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
998 return plus_constant (base, offset + extra_offset);
1001 /* Subfunction of the following function. Update the flags of any MEM
1002 found in part of X. */
1005 alpha_set_memflags_1 (x, in_struct_p, volatile_p, unchanging_p)
1007 int in_struct_p, volatile_p, unchanging_p;
/* Recurse through SEQUENCE, INSN, and SET wrappers; apply the flags
   when a MEM is reached (case labels elided from this listing).  */
1011 switch (GET_CODE (x))
1015 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1016 alpha_set_memflags_1 (XVECEXP (x, 0, i), in_struct_p, volatile_p,
1021 alpha_set_memflags_1 (PATTERN (x), in_struct_p, volatile_p,
1026 alpha_set_memflags_1 (SET_DEST (x), in_struct_p, volatile_p,
1028 alpha_set_memflags_1 (SET_SRC (x), in_struct_p, volatile_p,
/* MEM case: copy the three flags onto this reference.  */
1033 MEM_IN_STRUCT_P (x) = in_struct_p;
1034 MEM_VOLATILE_P (x) = volatile_p;
1035 RTX_UNCHANGING_P (x) = unchanging_p;
1036 /* Sadly, we cannot use alias sets because the extra aliasing
1037 produced by the AND interferes. Given that two-byte quantities
1038 are the only thing we would be able to differentiate anyway,
1039 there does not seem to be any point in convoluting the early
1040 out of the alias check. */
1041 /* MEM_ALIAS_SET (x) = alias_set; */
1049 /* Given INSN, which is either an INSN or a SEQUENCE generated to
1050 perform a memory operation, look for any MEMs in either a SET_DEST or
1051 a SET_SRC and copy the in-struct, unchanging, and volatile flags from
1052 REF into each of the MEMs found. If REF is not a MEM, don't do
1056 alpha_set_memflags (insn, ref)
1060 int in_struct_p, volatile_p, unchanging_p;
1062 if (GET_CODE (ref) != MEM)
1065 in_struct_p = MEM_IN_STRUCT_P (ref);
1066 volatile_p = MEM_VOLATILE_P (ref);
1067 unchanging_p = RTX_UNCHANGING_P (ref);
1069 /* This is only called from alpha.md, after having had something
1070 generated from one of the insn patterns. So if everything is
1071 zero, the pattern is already up-to-date. */
1072 if (! in_struct_p && ! volatile_p && ! unchanging_p)
1075 alpha_set_memflags_1 (insn, in_struct_p, volatile_p, unchanging_p);
1078 /* Try to output insns to set TARGET equal to the constant C if it can be
1079 done in less than N insns. Do all computations in MODE. Returns the place
1080 where the output has been placed if it can be done and the insns have been
1081 emitted. If it would take more than N insns, zero is returned and no
1082 insns and emitted. */
1085 alpha_emit_set_const (target, mode, c, n)
1087 enum machine_mode mode;
/* Ask the worker for the shortest sequence first, so the cheapest
   achievable form wins.  */
1094 /* Try 1 insn, then 2, then up to N. */
1095 for (i = 1; i <= n; i++)
1096 if ((pat = alpha_emit_set_const_1 (target, mode, c, i)) != 0)
1102 /* Internal routine for the above to check for N or below insns. */
1105 alpha_emit_set_const_1 (target, mode, c, n)
1107 enum machine_mode mode;
1111 HOST_WIDE_INT new = c;
1113 /* Use a pseudo if highly optimizing and still generating RTL. */
1115 = (flag_expensive_optimizations && rtx_equal_function_value_matters
1119 #if HOST_BITS_PER_WIDE_INT == 64
1120 /* We are only called for SImode and DImode. If this is SImode, ensure that
1121 we are sign extended to a full word. This does not make any sense when
1122 cross-compiling on a narrow machine. */
1125 c = (c & 0xffffffff) - 2 * (c & 0x80000000);
1128 /* If this is a sign-extended 32-bit constant, we can do this in at most
1129 three insns, so do it if we have enough insns left. We always have
1130 a sign-extended 32-bit constant when compiling on a narrow machine. */
1132 if (HOST_BITS_PER_WIDE_INT != 64
1133 || c >> 31 == -1 || c >> 31 == 0)
/* Decompose C into a sign-extended 16-bit low part (LDA) and a
   16-bit high part (LDAH), plus a possible EXTRA ldah adjustment.  */
1135 HOST_WIDE_INT low = (c & 0xffff) - 2 * (c & 0x8000);
1136 HOST_WIDE_INT tmp1 = c - low;
1138 = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1139 HOST_WIDE_INT extra = 0;
1141 /* If HIGH will be interpreted as negative but the constant is
1142 positive, we must adjust it to do two ldha insns. */
1144 if ((high & 0x8000) != 0 && c >= 0)
1148 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
/* One-insn case: C fits a single lda/ldah.  */
1151 if (c == low || (low == 0 && extra == 0))
1153 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1154 but that meant that we can't handle INT_MIN on 32-bit machines
1155 (like NT/Alpha), because we recurse indefinitely through
1156 emit_move_insn to gen_movdi. So instead, since we know exactly
1157 what we want, create it explicitly. */
1160 target = gen_reg_rtx (mode);
1161 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
/* Two- or three-insn case: lda low, then one or two ldah-style
   16-bit-shifted adds.  */
1164 else if (n >= 2 + (extra != 0))
1166 temp = copy_to_suggested_reg (GEN_INT (low), subtarget, mode);
1169 temp = expand_binop (mode, add_optab, temp, GEN_INT (extra << 16),
1170 subtarget, 0, OPTAB_WIDEN);
1172 return expand_binop (mode, add_optab, temp, GEN_INT (high << 16),
1173 target, 0, OPTAB_WIDEN);
1177 /* If we couldn't do it that way, try some other methods. But if we have
1178 no instructions left, don't bother. Likewise, if this is SImode and
1179 we can't make pseudos, we can't do anything since the expand_binop
1180 and expand_unop calls will widen and try to make pseudos. */
1183 || (mode == SImode && ! rtx_equal_function_value_matters))
1186 #if HOST_BITS_PER_WIDE_INT == 64
1187 /* First, see if can load a value into the target that is the same as the
1188 constant except that all bytes that are 0 are changed to be 0xff. If we
1189 can, then we can do a ZAPNOT to obtain the desired constant. */
1191 for (i = 0; i < 64; i += 8)
1192 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1193 new |= (HOST_WIDE_INT) 0xff << i;
1195 /* We are only called for SImode and DImode. If this is SImode, ensure that
1196 we are sign extended to a full word. */
1199 new = (new & 0xffffffff) - 2 * (new & 0x80000000);
/* Load the all-ones-bytes variant in one fewer insn, then AND with
   the ZAPNOT mask to clear the zero bytes.  */
1202 && (temp = alpha_emit_set_const (subtarget, mode, new, n - 1)) != 0)
1203 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1204 target, 0, OPTAB_WIDEN);
1207 /* Next, see if we can load a related constant and then shift and possibly
1208 negate it to get the constant we want. Try this once each increasing
1209 numbers of insns. */
1211 for (i = 1; i < n; i++)
1213 /* First try complementing. */
1214 if ((temp = alpha_emit_set_const (subtarget, mode, ~ c, i)) != 0)
1215 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1217 /* Next try to form a constant and do a left shift. We can do this
1218 if some low-order bits are zero; the exact_log2 call below tells
1219 us that information. The bits we are shifting out could be any
1220 value, but here we'll just try the 0- and sign-extended forms of
1221 the constant. To try to increase the chance of having the same
1222 constant in more than one insn, start at the highest number of
1223 bits to shift, but try all possibilities in case a ZAPNOT will
1226 if ((bits = exact_log2 (c & - c)) > 0)
1227 for (; bits > 0; bits--)
1228 if ((temp = (alpha_emit_set_const
1230 (unsigned HOST_WIDE_INT) (c >> bits), i))) != 0
1231 || ((temp = (alpha_emit_set_const
1233 ((unsigned HOST_WIDE_INT) c) >> bits, i)))
1235 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1236 target, 0, OPTAB_WIDEN);
1238 /* Now try high-order zero bits. Here we try the shifted-in bits as
1239 all zero and all ones. Be careful to avoid shifting outside the
1240 mode and to avoid shifting outside the host wide int size. */
1241 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1242 confuse the recursive call and set all of the high 32 bits. */
1244 if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1245 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64))) > 0)
1246 for (; bits > 0; bits--)
1247 if ((temp = alpha_emit_set_const (subtarget, mode,
1249 || ((temp = (alpha_emit_set_const
1251 ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
1254 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1255 target, 1, OPTAB_WIDEN);
1257 /* Now try high-order 1 bits. We get that with a sign-extension.
1258 But one bit isn't enough here. Be careful to avoid shifting outside
1259 the mode and to avoid shifting outside the host wide int size. */
1261 if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1262 - floor_log2 (~ c) - 2)) > 0)
1263 for (; bits > 0; bits--)
1264 if ((temp = alpha_emit_set_const (subtarget, mode,
1266 || ((temp = (alpha_emit_set_const
1268 ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
1271 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1272 target, 0, OPTAB_WIDEN);
1278 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1279 fall back to a straight forward decomposition. We do this to avoid
1280 exponential run times encountered when looking for longer sequences
1281 with alpha_emit_set_const. */
1284 alpha_emit_set_long_const (target, c1, c2)
1286 HOST_WIDE_INT c1, c2;
1288 HOST_WIDE_INT d1, d2, d3, d4;
1290 /* Decompose the entire word */
/* On a wide host the 64-bit value lives entirely in C1; C2 must
   simply be its sign-extension, else the value is unrepresentable.
   D1/D2 are the low word's lda/ldah parts, D3/D4 the high word's.  */
1291 #if HOST_BITS_PER_WIDE_INT >= 64
1292 if (c2 != -(c1 < 0))
1294 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1296 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1297 c1 = (c1 - d2) >> 32;
1298 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1300 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
/* Narrow-host branch: low word in C1, high word in C2.  */
1304 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1306 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1310 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1312 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1317 /* Construct the high word */
1320 emit_move_insn (target, GEN_INT (d4));
1322 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1325 emit_move_insn (target, GEN_INT (d3));
1327 /* Shift it into place */
1328 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
1330 /* Add in the low bits. */
1332 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
1334 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
1339 /* Generate the comparison for a conditional branch. */
/* Emit the compare half of a conditional branch for the pending
   alpha_compare_op0/op1 comparison and return the rtx to use as the
   branch condition.  CODE is the user-visible comparison; it is folded
   into a (cmp_code, branch_code) pair matching the compares the
   hardware actually has.  cmp_code == NIL means "branch directly, no
   separate compare insn".
   NOTE(review): several interior lines (switch head, braces, aborts)
   are elided in this extract; code kept byte-identical.  */
1342 alpha_emit_conditional_branch (code)
1345 enum rtx_code cmp_code, branch_code;
1346 enum machine_mode cmp_mode, branch_mode = VOIDmode;
1347 rtx op0 = alpha_compare_op0, op1 = alpha_compare_op1;
1350 /* The general case: fold the comparison code to the types of compares
1351 that we have, choosing the branch as necessary. */
1354 case EQ: case LE: case LT: case LEU: case LTU:
1355 /* We have these compares: */
1356 cmp_code = code, branch_code = NE;
1360 /* This must be reversed. */
1361 cmp_code = EQ, branch_code = EQ;
1364 case GE: case GT: case GEU: case GTU:
1365 /* For FP, we swap them, for INT, we reverse them. */
1366 if (alpha_compare_fp_p)
1368 cmp_code = swap_condition (code);
1370 tem = op0, op0 = op1, op1 = tem;
1374 cmp_code = reverse_condition (code);
1383 if (alpha_compare_fp_p)
1388 /* When we are not as concerned about non-finite values, and we
1389 are comparing against zero, we can branch directly. */
1390 if (op1 == CONST0_RTX (DFmode))
1391 cmp_code = NIL, branch_code = code;
1392 else if (op0 == CONST0_RTX (DFmode))
1394 /* Undo the swap we probably did just above. */
1395 tem = op0, op0 = op1, op1 = tem;
1396 branch_code = swap_condition (cmp_code);
1402 /* ??? We mark the branch mode to be CCmode to prevent the
1403 compare and branch from being combined, since the compare
1404 insn follows IEEE rules that the branch does not. */
1405 branch_mode = CCmode;
1412 /* The following optimizations are only for signed compares. */
1413 if (code != LEU && code != LTU && code != GEU && code != GTU)
1415 /* Whee. Compare and branch against 0 directly. */
1416 if (op1 == const0_rtx)
1417 cmp_code = NIL, branch_code = code;
1419 /* We want to use cmpcc/bcc when we can, since there is a zero delay
1420 bypass between logicals and br/cmov on EV5. But we don't want to
1421 force valid immediate constants into registers needlessly. */
1422 else if (GET_CODE (op1) == CONST_INT)
1424 HOST_WIDE_INT v = INTVAL (op1), n = -v;
/* If V is not a valid compare immediate ('I') but -V is a valid
   add immediate ('K' or 'L'), compare via op0 + (-v) against 0.  */
1426 if (! CONST_OK_FOR_LETTER_P (v, 'I')
1427 && (CONST_OK_FOR_LETTER_P (n, 'K')
1428 || CONST_OK_FOR_LETTER_P (n, 'L')))
1430 cmp_code = PLUS, branch_code = code;
1437 /* Force op0 into a register. */
1438 if (GET_CODE (op0) != REG)
1439 op0 = force_reg (cmp_mode, op0);
1441 /* Emit an initial compare instruction, if necessary. */
1443 if (cmp_code != NIL)
1445 tem = gen_reg_rtx (cmp_mode);
1446 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
1449 /* Return the branch comparison. */
1450 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
1454 /* Rewrite a comparison against zero CMP of the form
1455 (CODE (cc0) (const_int 0)) so it can be written validly in
1456 a conditional move (if_then_else CMP ...).
1457 If both of the operands that set cc0 are non-zero we must emit
1458 an insn to perform the compare (it can't be done within
1459 the conditional move). */
/* Rewrite the pending comparison (alpha_compare_op0/op1, code taken
   from CMP) so it can be used as the test of a conditional move in
   mode MODE.  Returns either the comparison itself (when it can be
   used directly) or a new NE-against-zero test on an explicit compare
   result.  Returns 0 implicitly-elsewhere on mode mismatch (elided).
   NOTE(review): interior lines (braces, switch head, default case)
   are elided in this extract; code kept byte-identical.  */
1461 alpha_emit_conditional_move (cmp, mode)
1463 enum machine_mode mode;
1465 enum rtx_code code = GET_CODE (cmp);
1466 enum rtx_code cmov_code = NE;
1467 rtx op0 = alpha_compare_op0;
1468 rtx op1 = alpha_compare_op1;
1469 enum machine_mode cmp_mode
1470 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
1471 enum machine_mode cmp_op_mode = alpha_compare_fp_p ? DFmode : DImode;
1472 enum machine_mode cmov_mode = VOIDmode;
/* FP compare feeding an integer cmov (or vice versa) is not handled here.  */
1475 if (alpha_compare_fp_p != FLOAT_MODE_P (mode))
1478 /* We may be able to use a conditional move directly.
1479 This avoids emitting spurious compares. */
1480 if (signed_comparison_operator (cmp, cmp_op_mode)
1481 && (!alpha_compare_fp_p || flag_fast_math)
1482 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
1483 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
1485 /* We can't put the comparison inside a conditional move;
1486 emit a compare instruction and put that inside the
1487 conditional move. Make sure we emit only comparisons we have;
1488 swap or reverse as necessary. */
1492 case EQ: case LE: case LT: case LEU: case LTU:
1493 /* We have these compares: */
1497 /* This must be reversed. */
1498 code = reverse_condition (code);
1502 case GE: case GT: case GEU: case GTU:
1503 /* These must be swapped. Make sure the new first operand is in
1505 code = swap_condition (code);
1506 tem = op0, op0 = op1, op1 = tem;
1507 op0 = force_reg (cmp_mode, op0);
1514 /* ??? We mark the branch mode to be CCmode to prevent the compare
1515 and cmov from being combined, since the compare insn follows IEEE
1516 rules that the cmov does not. */
1517 if (alpha_compare_fp_p && !flag_fast_math)
1520 tem = gen_reg_rtx (cmp_op_mode);
1521 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
1522 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
1525 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
1529 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
1530 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
1531 lda r3,X(r11) lda r3,X+2(r11)
1532 extwl r1,r3,r1 extql r1,r3,r1
1533 extwh r2,r3,r2 extqh r2,r3,r2
1534 or r1,r2,r1 or r1,r2,r1
1537 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
1538 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
1539 lda r3,X(r11) lda r3,X(r11)
1540 extll r1,r3,r1 extll r1,r3,r1
1541 extlh r2,r3,r2 extlh r2,r3,r2
1542 or r1,r2,r1 addl r1,r2,r1
1544 quad: ldq_u r1,X(r11)
/* Load SIZE bytes (2, 4 or 8) at unaligned address MEM+OFS into TGT,
   using the ldq_u / ext[wlq][lh] / or sequence from the Architecture
   Handbook (see the comment block above this function).  SIGN requests
   sign extension; the size==2 signed case is done via extql/extqh plus
   an arithmetic right shift by 48.
   NOTE(review): interior lines (braces, AND masks, switch on size,
   final returns) are elided in this extract; code kept byte-identical.  */
1553 alpha_expand_unaligned_load (tgt, mem, size, ofs, sign)
1555 HOST_WIDE_INT size, ofs;
1558 rtx meml, memh, addr, extl, exth;
1559 enum machine_mode mode;
1561 meml = gen_reg_rtx (DImode);
1562 memh = gen_reg_rtx (DImode);
1563 addr = gen_reg_rtx (DImode);
1564 extl = gen_reg_rtx (DImode);
1565 exth = gen_reg_rtx (DImode);
/* Two overlapping aligned quadword loads covering the unaligned datum.  */
1567 emit_move_insn (meml,
1568 change_address (mem, DImode,
1569 gen_rtx_AND (DImode,
1570 plus_constant (XEXP (mem, 0),
1574 emit_move_insn (memh,
1575 change_address (mem, DImode,
1576 gen_rtx_AND (DImode,
1577 plus_constant (XEXP (mem, 0),
1581 if (sign && size == 2)
1583 emit_move_insn (addr, plus_constant (XEXP (mem, 0), ofs+2));
1585 emit_insn (gen_extxl (extl, meml, GEN_INT (64), addr));
1586 emit_insn (gen_extqh (exth, memh, addr));
1588 /* We must use tgt here for the target. Alpha-vms port fails if we use
1589 addr for the target, because addr is marked as a pointer and combine
1590 knows that pointers are always sign-extended 32 bit values. */
1591 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
1592 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
1593 addr, 1, OPTAB_WIDEN);
1597 emit_move_insn (addr, plus_constant (XEXP (mem, 0), ofs));
1598 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
/* High-part extract picked by SIZE (extwh / extlh / extqh).  */
1602 emit_insn (gen_extwh (exth, memh, addr));
1607 emit_insn (gen_extlh (exth, memh, addr));
1612 emit_insn (gen_extqh (exth, memh, addr));
1619 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
1620 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
1625 emit_move_insn (tgt, gen_lowpart(GET_MODE (tgt), addr));
1628 /* Similarly, use ins and msk instructions to perform unaligned stores. */
/* Store SIZE bytes (2, 4 or 8) of SRC to unaligned address DST+OFS
   via the ins*/msk*/or read-modify-write sequence: load both covering
   quadwords, insert/mask the new bytes, then store high before low.
   SRC == const0_rtx skips the insert step (store of zero).
   NOTE(review): interior lines (braces, AND masks, switch on size,
   #else/#endif) are elided in this extract; code kept byte-identical.  */
1631 alpha_expand_unaligned_store (dst, src, size, ofs)
1633 HOST_WIDE_INT size, ofs;
1635 rtx dstl, dsth, addr, insl, insh, meml, memh;
1637 dstl = gen_reg_rtx (DImode);
1638 dsth = gen_reg_rtx (DImode);
1639 insl = gen_reg_rtx (DImode);
1640 insh = gen_reg_rtx (DImode);
1642 meml = change_address (dst, DImode,
1643 gen_rtx_AND (DImode,
1644 plus_constant (XEXP (dst, 0), ofs),
1646 memh = change_address (dst, DImode,
1647 gen_rtx_AND (DImode,
1648 plus_constant (XEXP (dst, 0),
1652 emit_move_insn (dsth, memh);
1653 emit_move_insn (dstl, meml);
1654 addr = copy_addr_to_reg (plus_constant (XEXP (dst, 0), ofs));
1656 if (src != const0_rtx)
1658 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
1659 GEN_INT (size*8), addr));
/* Low-part insert picked by SIZE (inswl / insll / insql).  */
1664 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
1667 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
1670 emit_insn (gen_insql (insl, src, addr));
1675 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
/* Low-part mask constant picked by SIZE; the 8-byte case needs a
   64-bit all-ones constant, built per host word width below.  */
1680 emit_insn (gen_mskxl (dstl, dstl, GEN_INT (0xffff), addr));
1683 emit_insn (gen_mskxl (dstl, dstl, GEN_INT (0xffffffff), addr));
1687 #if HOST_BITS_PER_WIDE_INT == 32
1688 rtx msk = immed_double_const (0xffffffff, 0xffffffff, DImode);
1690 rtx msk = immed_double_const (0xffffffffffffffff, 0, DImode);
1692 emit_insn (gen_mskxl (dstl, dstl, msk, addr));
1697 if (src != const0_rtx)
1699 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
1700 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
1703 /* Must store high before low for degenerate case of aligned. */
1704 emit_move_insn (memh, dsth);
1705 emit_move_insn (meml, dstl);
1708 /* The block move code tries to maximize speed by separating loads and
1709 stores at the expense of register pressure: we load all of the data
1710 before we store it back out. There are two secondary effects worth
1711 mentioning, that this speeds copying to/from aligned and unaligned
1712 buffers, and that it makes the code significantly easier to write. */
1714 #define MAX_MOVE_WORDS 8
1716 /* Load an integral number of consecutive unaligned quadwords. */
/* Load WORDS consecutive unaligned quadwords from SMEM+OFS into
   OUT_REGS[0..WORDS-1].  Loads WORDS+1 aligned quadwords, extracts
   low/high fragments with extql/extqh, and ORs adjacent fragments
   together.  A cmov zeroes the high fragment when the address is in
   fact aligned (extqh with offset 0 is a noop, not a zero).
   NOTE(review): interior lines (braces, declarations of sreg/areg/i,
   AND masks) are elided in this extract; code kept byte-identical.  */
1719 alpha_expand_unaligned_load_words (out_regs, smem, words, ofs)
1722 HOST_WIDE_INT words, ofs;
1724 rtx const im8 = GEN_INT (-8);
1725 rtx const i64 = GEN_INT (64);
1726 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
1730 /* Generate all the tmp registers we need. */
1731 for (i = 0; i < words; ++i)
1733 data_regs[i] = out_regs[i];
1734 ext_tmps[i] = gen_reg_rtx (DImode);
1736 data_regs[words] = gen_reg_rtx (DImode);
1739 smem = change_address (smem, GET_MODE (smem),
1740 plus_constant (XEXP (smem, 0), ofs));
1742 /* Load up all of the source data. */
1743 for (i = 0; i < words; ++i)
1745 emit_move_insn (data_regs[i],
1746 change_address (smem, DImode,
1747 gen_rtx_AND (DImode,
1748 plus_constant (XEXP(smem,0),
/* One extra quadword covers the tail that spills past the last word.  */
1752 emit_move_insn (data_regs[words],
1753 change_address (smem, DImode,
1754 gen_rtx_AND (DImode,
1755 plus_constant (XEXP(smem,0),
1759 /* Extract the half-word fragments. Unfortunately DEC decided to make
1760 extxh with offset zero a noop instead of zeroing the register, so
1761 we must take care of that edge condition ourselves with cmov. */
1763 sreg = copy_addr_to_reg (XEXP (smem, 0));
1764 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
1766 for (i = 0; i < words; ++i)
1768 emit_insn (gen_extxl (data_regs[i], data_regs[i], i64, sreg));
1770 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
/* ext_tmps[i] = (addr & 7) == 0 ? 0 : ext_tmps[i]  -- the cmov fixup.  */
1771 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
1772 gen_rtx_IF_THEN_ELSE (DImode,
1773 gen_rtx_EQ (DImode, areg,
1775 const0_rtx, ext_tmps[i])));
1778 /* Merge the half-words into whole words. */
1779 for (i = 0; i < words; ++i)
1781 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
1782 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
1786 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
1787 may be NULL to store zeros. */
/* Store WORDS consecutive unaligned quadwords from DATA_REGS to
   DMEM+OFS; DATA_REGS == NULL stores zeros.  Shifts the data into
   place with insqh/insql, masks the two partially-covered end
   quadwords with mskqh/mskql, merges, and writes end quadwords
   before the interior ones.
   NOTE(review): interior lines (braces, declarations of i/dreg masks,
   #else/#endif) are elided in this extract; code kept byte-identical.  */
1790 alpha_expand_unaligned_store_words (data_regs, dmem, words, ofs)
1793 HOST_WIDE_INT words, ofs;
1795 rtx const im8 = GEN_INT (-8);
1796 rtx const i64 = GEN_INT (64);
1797 #if HOST_BITS_PER_WIDE_INT == 32
1798 rtx const im1 = immed_double_const (0xffffffff, 0xffffffff, DImode);
1800 rtx const im1 = immed_double_const (0xffffffffffffffff, 0, DImode);
1802 rtx ins_tmps[MAX_MOVE_WORDS];
1803 rtx st_tmp_1, st_tmp_2, dreg;
1804 rtx st_addr_1, st_addr_2;
1807 /* Generate all the tmp registers we need. */
1808 if (data_regs != NULL)
1809 for (i = 0; i < words; ++i)
1810 ins_tmps[i] = gen_reg_rtx(DImode);
1811 st_tmp_1 = gen_reg_rtx(DImode);
1812 st_tmp_2 = gen_reg_rtx(DImode);
1815 dmem = change_address (dmem, GET_MODE (dmem),
1816 plus_constant (XEXP (dmem, 0), ofs));
/* st_addr_2/st_addr_1: the last and first aligned quadwords touched.  */
1819 st_addr_2 = change_address (dmem, DImode,
1820 gen_rtx_AND (DImode,
1821 plus_constant (XEXP(dmem,0),
1824 st_addr_1 = change_address (dmem, DImode,
1825 gen_rtx_AND (DImode,
1829 /* Load up the destination end bits. */
1830 emit_move_insn (st_tmp_2, st_addr_2);
1831 emit_move_insn (st_tmp_1, st_addr_1);
1833 /* Shift the input data into place. */
1834 dreg = copy_addr_to_reg (XEXP (dmem, 0));
1835 if (data_regs != NULL)
1837 for (i = words-1; i >= 0; --i)
1839 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
1840 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
/* OR each word's high spill-over into the next word's slot.  */
1842 for (i = words-1; i > 0; --i)
1844 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
1845 ins_tmps[i-1], ins_tmps[i-1], 1,
1850 /* Split and merge the ends with the destination data. */
1851 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
1852 emit_insn (gen_mskxl (st_tmp_1, st_tmp_1, im1, dreg));
1854 if (data_regs != NULL)
1856 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
1857 st_tmp_2, 1, OPTAB_WIDEN);
1858 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
1859 st_tmp_1, 1, OPTAB_WIDEN);
/* Store the ends first, then the interior words, high to low.  */
1863 emit_move_insn (st_addr_2, st_tmp_2);
1864 for (i = words-1; i > 0; --i)
1866 emit_move_insn (change_address (dmem, DImode,
1867 gen_rtx_AND (DImode,
1868 plus_constant(XEXP (dmem,0),
1871 data_regs ? ins_tmps[i-1] : const0_rtx);
1873 emit_move_insn (st_addr_1, st_tmp_1);
1877 /* Expand string/block move operations.
1879 operands[0] is the pointer to the destination.
1880 operands[1] is the pointer to the source.
1881 operands[2] is the number of bytes to move.
1882 operands[3] is the alignment. */
/* Expand movstrqi: copy INTVAL(operands[2]) bytes from operands[1] to
   operands[0] with alignment hint operands[3].  Strategy (see comment
   above): load the entire block into registers first, then store it
   all back out, choosing aligned DImode/SImode moves or the unaligned
   helpers per the discovered source/destination alignment.  Returns
   nonzero on success, zero to punt to a library call (returns elided).
   NOTE(review): many interior lines (braces, returns, ofs updates,
   declarations of tmp/seq) are elided in this extract; code kept
   byte-identical.  */
1885 alpha_expand_block_move (operands)
1888 rtx bytes_rtx = operands[2];
1889 rtx align_rtx = operands[3];
1890 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
1891 HOST_WIDE_INT bytes = orig_bytes;
1892 HOST_WIDE_INT src_align = INTVAL (align_rtx);
1893 HOST_WIDE_INT dst_align = src_align;
1894 rtx orig_src = operands[1];
1895 rtx orig_dst = operands[0];
1896 rtx data_regs[2*MAX_MOVE_WORDS+16];
1898 int i, words, ofs, nregs = 0;
/* Punt large blocks to the library; bounded by the data_regs array.  */
1902 if (bytes > MAX_MOVE_WORDS*8)
1905 /* Look for additional alignment information from recorded register info. */
1907 tmp = XEXP (orig_src, 0);
1908 if (GET_CODE (tmp) == REG)
1910 if (REGNO_POINTER_ALIGN (REGNO (tmp)) > src_align)
1911 src_align = REGNO_POINTER_ALIGN (REGNO (tmp));
1913 else if (GET_CODE (tmp) == PLUS
1914 && GET_CODE (XEXP (tmp, 0)) == REG
1915 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
1917 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
1918 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
/* reg+const keeps the alignment the offset preserves.  */
1922 if (a >= 8 && c % 8 == 0)
1924 else if (a >= 4 && c % 4 == 0)
1926 else if (a >= 2 && c % 2 == 0)
/* Same alignment discovery for the destination.  */
1931 tmp = XEXP (orig_dst, 0);
1932 if (GET_CODE (tmp) == REG)
1934 if (REGNO_POINTER_ALIGN (REGNO (tmp)) > dst_align)
1935 dst_align = REGNO_POINTER_ALIGN (REGNO (tmp));
1937 else if (GET_CODE (tmp) == PLUS
1938 && GET_CODE (XEXP (tmp, 0)) == REG
1939 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
1941 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
1942 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
1946 if (a >= 8 && c % 8 == 0)
1948 else if (a >= 4 && c % 4 == 0)
1950 else if (a >= 2 && c % 2 == 0)
1956 * Load the entire block into registers.
/* Source is a pseudo that never got a stack slot (ADDRESSOF): read
   straight from the register when a mode of the right size exists.  */
1959 if (GET_CODE (XEXP (orig_src, 0)) == ADDRESSOF)
1961 enum machine_mode mode;
1962 tmp = XEXP (XEXP (orig_src, 0), 0);
1964 /* Don't use the existing register if we're reading more than
1965 is held in the register. Nor if there is not a mode that
1966 handles the exact size. */
1967 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 1);
1969 && GET_MODE_SIZE (GET_MODE (tmp)) >= bytes)
1973 data_regs[nregs] = gen_lowpart (DImode, tmp);
1974 data_regs[nregs+1] = gen_highpart (DImode, tmp);
1978 data_regs[nregs++] = gen_lowpart (mode, tmp);
1982 /* No appropriate mode; fall back on memory. */
1983 orig_src = change_address (orig_src, GET_MODE (orig_src),
1984 copy_addr_to_reg (XEXP (orig_src, 0)));
/* Aligned quadword loads.  */
1988 if (src_align >= 8 && bytes >= 8)
1992 for (i = 0; i < words; ++i)
1993 data_regs[nregs+i] = gen_reg_rtx(DImode);
1995 for (i = 0; i < words; ++i)
1997 emit_move_insn (data_regs[nregs+i],
1998 change_address (orig_src, DImode,
1999 plus_constant (XEXP (orig_src, 0),
/* Aligned longword loads.  */
2007 if (src_align >= 4 && bytes >= 4)
2011 for (i = 0; i < words; ++i)
2012 data_regs[nregs+i] = gen_reg_rtx(SImode);
2014 for (i = 0; i < words; ++i)
2016 emit_move_insn (data_regs[nregs+i],
2017 change_address (orig_src, SImode,
2018 plus_constant (XEXP (orig_src, 0),
/* Unaligned multi-word load (needs words+1 regs for the overhang).  */
2030 for (i = 0; i < words+1; ++i)
2031 data_regs[nregs+i] = gen_reg_rtx(DImode);
2033 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
/* Unaligned single-datum tails, largest first.  */
2040 if (!TARGET_BWX && bytes >= 8)
2042 data_regs[nregs++] = tmp = gen_reg_rtx (DImode);
2043 alpha_expand_unaligned_load (tmp, orig_src, 8, ofs, 0);
2047 if (!TARGET_BWX && bytes >= 4)
2049 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
2050 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
2059 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
2060 emit_move_insn (tmp,
2061 change_address (orig_src, HImode,
2062 plus_constant (XEXP (orig_src, 0),
2066 } while (bytes >= 2);
2068 else if (!TARGET_BWX)
2070 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
2071 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
2078 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
2079 emit_move_insn (tmp,
2080 change_address (orig_src, QImode,
2081 plus_constant (XEXP (orig_src, 0),
2088 if (nregs > (int)(sizeof(data_regs)/sizeof(*data_regs)))
2092 * Now save it back out again.
/* Destination is an ADDRESSOF pseudo: try to write the register
   directly, including the two-DImode-halves TImode case.  */
2097 if (GET_CODE (XEXP (orig_dst, 0)) == ADDRESSOF)
2099 enum machine_mode mode;
2100 tmp = XEXP (XEXP (orig_dst, 0), 0);
2102 mode = mode_for_size (orig_bytes * BITS_PER_UNIT, MODE_INT, 1);
2103 if (GET_MODE (tmp) == mode)
2107 emit_move_insn (tmp, data_regs[0]);
2111 else if (nregs == 2 && mode == TImode)
2113 /* Undo the subregging done above when copying between
2114 two TImode registers. */
2115 if (GET_CODE (data_regs[0]) == SUBREG
2116 && GET_MODE (SUBREG_REG (data_regs[0])) == TImode)
2118 emit_move_insn (tmp, SUBREG_REG (data_regs[0]));
2125 emit_move_insn (gen_lowpart (DImode, tmp), data_regs[0]);
2126 emit_move_insn (gen_highpart (DImode, tmp), data_regs[1]);
2127 seq = gen_sequence ();
2130 emit_no_conflict_block (seq, tmp, data_regs[0],
2131 data_regs[1], NULL_RTX);
2139 /* ??? If nregs > 1, consider reconstructing the word in regs. */
2140 /* ??? Optimize mode < dst_mode with strict_low_part. */
2142 /* No appropriate mode; fall back on memory. We can speed things
2143 up by recognizing extra alignment information. */
2144 orig_dst = change_address (orig_dst, GET_MODE (orig_dst),
2145 copy_addr_to_reg (XEXP (orig_dst, 0)));
2146 dst_align = GET_MODE_SIZE (GET_MODE (tmp));
2149 /* Write out the data in whatever chunks reading the source allowed. */
2152 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
2154 emit_move_insn (change_address (orig_dst, DImode,
2155 plus_constant (XEXP (orig_dst, 0),
2164 /* If the source has remaining DImode regs, write them out in
/* ...two SImode pieces (high half obtained by a logical shift).  */
2166 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
2168 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
2169 NULL_RTX, 1, OPTAB_WIDEN);
2171 emit_move_insn (change_address (orig_dst, SImode,
2172 plus_constant (XEXP (orig_dst, 0),
2174 gen_lowpart (SImode, data_regs[i]));
2175 emit_move_insn (change_address (orig_dst, SImode,
2176 plus_constant (XEXP (orig_dst, 0),
2178 gen_lowpart (SImode, tmp));
2183 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
2185 emit_move_insn (change_address(orig_dst, SImode,
2186 plus_constant (XEXP (orig_dst, 0),
2193 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
2195 /* Write out a remaining block of words using unaligned methods. */
2197 for (words = 1; i+words < nregs ; ++words)
2198 if (GET_MODE (data_regs[i+words]) != DImode)
2202 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
2204 alpha_expand_unaligned_store_words (data_regs+i, orig_dst, words, ofs);
2210 /* Due to the above, this won't be aligned. */
2211 /* ??? If we have more than one of these, consider constructing full
2212 words in registers and using alpha_expand_unaligned_store_words. */
2213 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
2215 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
2221 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
2223 emit_move_insn (change_address (orig_dst, HImode,
2224 plus_constant (XEXP (orig_dst, 0),
2231 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
2233 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
2237 while (i < nregs && GET_MODE (data_regs[i]) == QImode)
2239 emit_move_insn (change_address (orig_dst, QImode,
2240 plus_constant (XEXP (orig_dst, 0),
/* Expand clrstrqi: zero INTVAL(operands[1]) bytes at operands[0] with
   alignment hint operands[2].  Mirrors alpha_expand_block_move's
   strategy with const0_rtx as the data source: aligned DImode/SImode
   stores of zero first, then the unaligned store helpers for the
   remainder.  Returns nonzero on success, zero to punt (returns
   elided).
   NOTE(review): many interior lines (braces, returns, ofs updates,
   declaration of tmp) are elided in this extract; code kept
   byte-identical.  */
2255 alpha_expand_block_clear (operands)
2258 rtx bytes_rtx = operands[1];
2259 rtx align_rtx = operands[2];
2260 HOST_WIDE_INT bytes = INTVAL (bytes_rtx);
2261 HOST_WIDE_INT align = INTVAL (align_rtx);
2262 rtx orig_dst = operands[0];
2264 HOST_WIDE_INT i, words, ofs = 0;
2268 if (bytes > MAX_MOVE_WORDS*8)
2271 /* Look for stricter alignment. */
2273 tmp = XEXP (orig_dst, 0);
2274 if (GET_CODE (tmp) == REG)
2276 if (REGNO_POINTER_ALIGN (REGNO (tmp)) > align)
2277 align = REGNO_POINTER_ALIGN (REGNO (tmp));
2279 else if (GET_CODE (tmp) == PLUS
2280 && GET_CODE (XEXP (tmp, 0)) == REG
2281 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
2283 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
2284 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
2288 if (a >= 8 && c % 8 == 0)
2290 else if (a >= 4 && c % 4 == 0)
2292 else if (a >= 2 && c % 2 == 0)
/* ADDRESSOF pseudo: clear the register directly when sizes match.  */
2296 else if (GET_CODE (tmp) == ADDRESSOF)
2298 enum machine_mode mode;
2300 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 1);
2301 if (GET_MODE (XEXP (tmp, 0)) == mode)
2303 emit_move_insn (XEXP (tmp, 0), const0_rtx);
2307 /* No appropriate mode; fall back on memory. */
2308 orig_dst = change_address (orig_dst, GET_MODE (orig_dst),
2309 copy_addr_to_reg (tmp));
2310 align = GET_MODE_SIZE (GET_MODE (XEXP (tmp, 0)));
2313 /* Handle a block of contiguous words first. */
2315 if (align >= 8 && bytes >= 8)
2319 for (i = 0; i < words; ++i)
2321 emit_move_insn (change_address(orig_dst, DImode,
2322 plus_constant (XEXP (orig_dst, 0),
2330 if (align >= 4 && bytes >= 4)
2334 for (i = 0; i < words; ++i)
2336 emit_move_insn (change_address (orig_dst, SImode,
2337 plus_constant (XEXP (orig_dst, 0),
/* Unaligned words: NULL data_regs means "store zeros".  */
2349 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
2355 /* Next clean up any trailing pieces. We know from the contiguous
2356 block move that there are no aligned SImode or DImode hunks left. */
2358 if (!TARGET_BWX && bytes >= 8)
2360 alpha_expand_unaligned_store (orig_dst, const0_rtx, 8, ofs);
2364 if (!TARGET_BWX && bytes >= 4)
2366 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
2375 emit_move_insn (change_address (orig_dst, HImode,
2376 plus_constant (XEXP (orig_dst, 0),
2381 } while (bytes >= 2);
2383 else if (!TARGET_BWX)
2385 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
2392 emit_move_insn (change_address (orig_dst, QImode,
2393 plus_constant (XEXP (orig_dst, 0),
2404 /* Adjust the cost of a scheduling dependency. Return the new cost of
2405 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
/* ADJUST_COST scheduler hook: return the adjusted cost of the
   dependency LINK between INSN and DEP_INSN.  Adds the user-requested
   memory latency for loads, then applies EV4/EV5-specific bypass and
   latency corrections keyed on the insn attribute types.
   NOTE(review): interior lines (parameter declarations, braces,
   returns, case labels, the EV4/EV5 dispatch) are elided in this
   extract; code kept byte-identical.  */
2408 alpha_adjust_cost (insn, link, dep_insn, cost)
2415 enum attr_type insn_type, dep_insn_type;
2417 /* If the dependence is an anti-dependence, there is no cost. For an
2418 output dependence, there is sometimes a cost, but it doesn't seem
2419 worth handling those few cases. */
2421 if (REG_NOTE_KIND (link) != 0)
2424 /* If we can't recognize the insns, we can't really do anything. */
2425 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
2428 insn_type = get_attr_type (insn);
2429 dep_insn_type = get_attr_type (dep_insn);
2431 /* Bring in the user-defined memory latency. */
2432 if (dep_insn_type == TYPE_ILD
2433 || dep_insn_type == TYPE_FLD
2434 || dep_insn_type == TYPE_LDSYM)
2435 cost += alpha_memory_latency-1;
2440 /* On EV4, if INSN is a store insn and DEP_INSN is setting the data
2441 being stored, we can sometimes lower the cost. */
2443 if ((insn_type == TYPE_IST || insn_type == TYPE_FST)
2444 && (set = single_set (dep_insn)) != 0
2445 && GET_CODE (PATTERN (insn)) == SET
2446 && rtx_equal_p (SET_DEST (set), SET_SRC (PATTERN (insn))))
2448 switch (dep_insn_type)
2452 /* No savings here. */
2456 /* In these cases, we save one cycle. */
2460 /* In all other cases, we save two cycles. */
2461 return MAX (0, cost - 2);
2465 /* Another case that needs adjustment is an arithmetic or logical
2466 operation. Its cost is usually one cycle, but we default it to
2467 two in the MD file. The only case that it is actually two is
2468 for the address in loads, stores, and jumps. */
2470 if (dep_insn_type == TYPE_IADD || dep_insn_type == TYPE_ILOG)
2485 /* The final case is when a compare feeds into an integer branch;
2486 the cost is only one cycle in that case. */
2488 if (dep_insn_type == TYPE_ICMP && insn_type == TYPE_IBR)
2493 /* And the lord DEC saith: "A special bypass provides an effective
2494 latency of 0 cycles for an ICMP or ILOG insn producing the test
2495 operand of an IBR or ICMOV insn." */
2497 if ((dep_insn_type == TYPE_ICMP || dep_insn_type == TYPE_ILOG)
2498 && (set = single_set (dep_insn)) != 0)
2500 /* A branch only has one input. This must be it. */
2501 if (insn_type == TYPE_IBR)
2503 /* A conditional move has three, make sure it is the test. */
2504 if (insn_type == TYPE_ICMOV
2505 && GET_CODE (set_src = PATTERN (insn)) == SET
2506 && GET_CODE (set_src = SET_SRC (set_src)) == IF_THEN_ELSE
2507 && rtx_equal_p (SET_DEST (set), XEXP (set_src, 0)))
2511 /* "The multiplier is unable to receive data from IEU bypass paths.
2512 The instruction issues at the expected time, but its latency is
2513 increased by the time it takes for the input data to become
2514 available to the multiplier" -- which happens in pipeline stage
2515 six, when results are committed to the register file. */
2517 if (insn_type == TYPE_IMUL)
2519 switch (dep_insn_type)
2521 /* These insns produce their results in pipeline stage five. */
2528 /* Other integer insns produce results in pipeline stage four. */
2536 /* There is additional latency to move the result of (most) FP
2537 operations anywhere but the FP register file. */
2539 if ((insn_type == TYPE_FST || insn_type == TYPE_FTOI)
2540 && (dep_insn_type == TYPE_FADD ||
2541 dep_insn_type == TYPE_FMUL ||
2542 dep_insn_type == TYPE_FCMOV))
2548 /* Otherwise, return the default cost. */
2552 /* Functions to save and restore alpha_return_addr_rtx. */
/* Per-function machine-specific state hung off struct function.
   NOTE(review): the struct body is elided in this extract; it holds
   at least an ra_rtx member, per the save/restore functions below —
   confirm against the full source.  */
2554 struct machine_function
/* save_machine_status hook: allocate a machine_function record on P
   and stash the current alpha_return_addr_rtx in it, so nested
   function compilation can be resumed later.
   NOTE(review): return type, parameter declaration and braces are
   elided in this extract; code kept byte-identical.  */
2560 alpha_save_machine_status (p)
2563 struct machine_function *machine =
2564 (struct machine_function *) xmalloc (sizeof (struct machine_function));
2566 p->machine = machine;
2567 machine->ra_rtx = alpha_return_addr_rtx;
/* restore_machine_status hook: restore alpha_return_addr_rtx from P's
   machine record and drop the record (free elided in this extract).
   NOTE(review): return type, parameter declaration, braces and the
   free call are elided; code kept byte-identical.  */
2571 alpha_restore_machine_status (p)
2574 struct machine_function *machine = p->machine;
2576 alpha_return_addr_rtx = machine->ra_rtx;
2579 p->machine = (struct machine_function *)0;
2582 /* Do anything needed before RTL is emitted for each function. */
/* Per-function initialization before RTL generation: reset the cached
   return-address and EH epilogue SP offset rtxs, and install the
   save/restore hooks used around nested functions.
   NOTE(review): return type and braces are elided in this extract;
   code kept byte-identical.  */
2585 alpha_init_expanders ()
2587 alpha_return_addr_rtx = NULL_RTX;
2588 alpha_eh_epilogue_sp_ofs = NULL_RTX;
2590 /* Arrange to save and restore machine status around nested functions. */
2591 save_machine_status = alpha_save_machine_status;
2592 restore_machine_status = alpha_restore_machine_status;
2595 /* Start the ball rolling with RETURN_ADDR_RTX. */
/* Implement RETURN_ADDR_RTX.  On first use, create a pseudo, arrange
   for it to be loaded from $26 (REG_RA) at the top of the prologue,
   and cache it in alpha_return_addr_rtx for subsequent calls.
   NOTE(review): the COUNT != 0 early-out, declarations and braces are
   elided in this extract; code kept byte-identical.  */
2598 alpha_return_addr (count, frame)
2600 rtx frame ATTRIBUTE_UNUSED;
2607 if (alpha_return_addr_rtx)
2608 return alpha_return_addr_rtx;
2610 /* No rtx yet. Invent one, and initialize it from $26 in the prologue. */
2611 alpha_return_addr_rtx = gen_reg_rtx (Pmode);
2612 init = gen_rtx_SET (VOIDmode, alpha_return_addr_rtx,
2613 gen_rtx_REG (Pmode, REG_RA));
2615 /* Emit the insn to the prologue with the other argument copies. */
2616 push_topmost_sequence ();
2617 emit_insn_after (init, get_insns ());
2618 pop_topmost_sequence ();
2620 return alpha_return_addr_rtx;
/* Return nonzero if $26 (REG_RA) is ever explicitly clobbered in the
   current function's insn stream.  When alpha_return_addr was used,
   regs_ever_live is unreliable (the pseudo hides the use), so scan
   the insns instead.
   NOTE(review): declarations (top), #endif and braces are elided in
   this extract; code kept byte-identical.  */
2624 alpha_ra_ever_killed ()
2628 #ifdef ASM_OUTPUT_MI_THUNK
2629 if (current_function_is_thunk)
2632 if (!alpha_return_addr_rtx)
2633 return regs_ever_live[REG_RA];
2635 push_topmost_sequence ();
2637 pop_topmost_sequence ();
2639 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
2643 /* Print an operand. Recognize special options, documented below. */
2646 print_operand (file, x, code)
2656 /* Generates fp-rounding mode suffix: nothing for normal, 'c' for
2657 chopped, 'm' for minus-infinity, and 'd' for dynamic rounding
2658 mode. alpha_fprm controls which suffix is generated. */
2661 case ALPHA_FPRM_NORM:
2663 case ALPHA_FPRM_MINF:
2666 case ALPHA_FPRM_CHOP:
2669 case ALPHA_FPRM_DYN:
2676 /* Generates trap-mode suffix for instructions that accept the su
2677 suffix only (cmpt et al). */
2678 if (alpha_tp == ALPHA_TP_INSN)
2683 /* Generates trap-mode suffix for instructions that accept the
2684 v and sv suffix. The only instruction that needs this is cvtql. */
2693 case ALPHA_FPTM_SUI:
2700 /* Generates trap-mode suffix for instructions that accept the
2701 v, sv, and svi suffix. The only instruction that needs this
2713 case ALPHA_FPTM_SUI:
2714 fputs ("svi", file);
2720 /* Generates trap-mode suffix for instructions that accept the u, su,
2721 and sui suffix. This is the bulk of the IEEE floating point
2722 instructions (addt et al). */
2733 case ALPHA_FPTM_SUI:
2734 fputs ("sui", file);
2740 /* Generates trap-mode suffix for instructions that accept the sui
2741 suffix (cvtqt and cvtqs). */
2746 case ALPHA_FPTM_SU: /* cvtqt/cvtqs can't cause underflow */
2748 case ALPHA_FPTM_SUI:
2749 fputs ("sui", file);
2755 /* Generates single precision instruction suffix. */
2756 fprintf (file, "%c", (TARGET_FLOAT_VAX ? 'f' : 's'));
2760 /* Generates double precision instruction suffix. */
2761 fprintf (file, "%c", (TARGET_FLOAT_VAX ? 'g' : 't'));
2765 /* If this operand is the constant zero, write it as "$31". */
2766 if (GET_CODE (x) == REG)
2767 fprintf (file, "%s", reg_names[REGNO (x)]);
2768 else if (x == CONST0_RTX (GET_MODE (x)))
2769 fprintf (file, "$31");
2771 output_operand_lossage ("invalid %%r value");
2776 /* Similar, but for floating-point. */
2777 if (GET_CODE (x) == REG)
2778 fprintf (file, "%s", reg_names[REGNO (x)]);
2779 else if (x == CONST0_RTX (GET_MODE (x)))
2780 fprintf (file, "$f31");
2782 output_operand_lossage ("invalid %%R value");
2787 /* Write the 1's complement of a constant. */
2788 if (GET_CODE (x) != CONST_INT)
2789 output_operand_lossage ("invalid %%N value");
2791 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
2795 /* Write 1 << C, for a constant C. */
2796 if (GET_CODE (x) != CONST_INT)
2797 output_operand_lossage ("invalid %%P value");
2799 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
2803 /* Write the high-order 16 bits of a constant, sign-extended. */
2804 if (GET_CODE (x) != CONST_INT)
2805 output_operand_lossage ("invalid %%h value");
2807 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
2811 /* Write the low-order 16 bits of a constant, sign-extended. */
2812 if (GET_CODE (x) != CONST_INT)
2813 output_operand_lossage ("invalid %%L value");
2815 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
2816 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
2820 /* Write mask for ZAP insn. */
2821 if (GET_CODE (x) == CONST_DOUBLE)
2823 HOST_WIDE_INT mask = 0;
2824 HOST_WIDE_INT value;
2826 value = CONST_DOUBLE_LOW (x);
2827 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
2832 value = CONST_DOUBLE_HIGH (x);
2833 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
2836 mask |= (1 << (i + sizeof (int)));
2838 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
2841 else if (GET_CODE (x) == CONST_INT)
2843 HOST_WIDE_INT mask = 0, value = INTVAL (x);
2845 for (i = 0; i < 8; i++, value >>= 8)
2849 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
2852 output_operand_lossage ("invalid %%m value");
2856 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
2857 if (GET_CODE (x) != CONST_INT
2858 || (INTVAL (x) != 8 && INTVAL (x) != 16
2859 && INTVAL (x) != 32 && INTVAL (x) != 64))
2860 output_operand_lossage ("invalid %%M value");
2862 fprintf (file, "%s",
2863 (INTVAL (x) == 8 ? "b"
2864 : INTVAL (x) == 16 ? "w"
2865 : INTVAL (x) == 32 ? "l"
2870 /* Similar, except do it from the mask. */
2871 if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xff)
2872 fprintf (file, "b");
2873 else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffff)
2874 fprintf (file, "w");
2875 else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffffffff)
2876 fprintf (file, "l");
2877 #if HOST_BITS_PER_WIDE_INT == 32
2878 else if (GET_CODE (x) == CONST_DOUBLE
2879 && CONST_DOUBLE_HIGH (x) == 0
2880 && CONST_DOUBLE_LOW (x) == -1)
2881 fprintf (file, "l");
2882 else if (GET_CODE (x) == CONST_DOUBLE
2883 && CONST_DOUBLE_HIGH (x) == -1
2884 && CONST_DOUBLE_LOW (x) == -1)
2885 fprintf (file, "q");
2887 else if (GET_CODE (x) == CONST_INT && INTVAL (x) == -1)
2888 fprintf (file, "q");
2889 else if (GET_CODE (x) == CONST_DOUBLE
2890 && CONST_DOUBLE_HIGH (x) == 0
2891 && CONST_DOUBLE_LOW (x) == -1)
2892 fprintf (file, "q");
2895 output_operand_lossage ("invalid %%U value");
2899 /* Write the constant value divided by 8. */
2900 if (GET_CODE (x) != CONST_INT
2901 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
2902 && (INTVAL (x) & 7) != 8)
2903 output_operand_lossage ("invalid %%s value");
2905 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
2909 /* Same, except compute (64 - c) / 8 */
2911 if (GET_CODE (x) != CONST_INT
2912 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
2913 && (INTVAL (x) & 7) != 8)
2914 output_operand_lossage ("invalid %%s value");
2916 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
2919 case 'C': case 'D': case 'c': case 'd':
2920 /* Write out comparison name. */
2922 enum rtx_code c = GET_CODE (x);
2924 if (GET_RTX_CLASS (c) != '<')
2925 output_operand_lossage ("invalid %%C value");
2928 c = reverse_condition (c);
2929 else if (code == 'c')
2930 c = swap_condition (c);
2931 else if (code == 'd')
2932 c = swap_condition (reverse_condition (c));
2935 fprintf (file, "ule");
2937 fprintf (file, "ult");
2939 fprintf (file, "%s", GET_RTX_NAME (c));
2944 /* Write the divide or modulus operator. */
2945 switch (GET_CODE (x))
2948 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
2951 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
2954 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
2957 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
2960 output_operand_lossage ("invalid %%E value");
2966 /* Write "_u" for unaligned access. */
2967 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
2968 fprintf (file, "_u");
2972 if (GET_CODE (x) == REG)
2973 fprintf (file, "%s", reg_names[REGNO (x)]);
2974 else if (GET_CODE (x) == MEM)
2975 output_address (XEXP (x, 0));
2977 output_addr_const (file, x);
2981 output_operand_lossage ("invalid %%xn code");
/* Output the RTL address ADDR to FILE in the Alpha assembler syntax
   "offset($basereg)".  NOTE(review): this listing elides the parameter
   declarations and the initialization of `basereg' — confirm against the
   full source.  */
2986 print_operand_address (file, addr)
2991 HOST_WIDE_INT offset = 0;
/* Unaligned accesses wrap the address in an AND that masks the low bits;
   print the underlying address instead.  */
2993 if (GET_CODE (addr) == AND)
2994 addr = XEXP (addr, 0);
/* Split a (plus reg const_int) address into base and constant offset.  */
2996 if (GET_CODE (addr) == PLUS
2997 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
2999 offset = INTVAL (XEXP (addr, 1));
3000 addr = XEXP (addr, 0);
/* Determine the base register: a REG, a SUBREG of a REG (adjusting the
   register number by the subword), or no register for a bare constant.  */
3002 if (GET_CODE (addr) == REG)
3003 basereg = REGNO (addr);
3004 else if (GET_CODE (addr) == SUBREG
3005 && GET_CODE (SUBREG_REG (addr)) == REG)
3006 basereg = REGNO (SUBREG_REG (addr)) + SUBREG_WORD (addr);
3007 else if (GET_CODE (addr) == CONST_INT)
3008 offset = INTVAL (addr);
/* Emit the decimal offset followed by "($N)".  */
3012 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
3013 fprintf (file, "($%d)", basereg);
3016 /* Emit RTL insns to initialize the variable parts of a trampoline at
3017 TRAMP. FNADDR is an RTX for the address of the function's pure
3018 code. CXT is an RTX for the static chain value for the function.
3020 The three offset parameters are for the individual template's
3021 layout. A JMPOFS < 0 indicates that the trampoline does not
3022 contain instructions at all.
3024 We assume here that a function will be called many more times than
3025 its address is taken (e.g., it might be passed to qsort), so we
3026 take the trouble to initialize the "hint" field in the JMP insn.
3027 Note that the hint field is PC (new) + 4 * bits 13:0. */
/* Emit RTL to fill in the variable parts of a trampoline at TRAMP.
   FNADDR is the target function's address, CXT the static chain value;
   FNOFS/CXTOFS/JMPOFS give the template's field offsets (JMPOFS < 0
   means the template holds no instructions).  See the block comment
   above for the hint-field rationale.  */
3030 alpha_initialize_trampoline (tramp, fnaddr, cxt, fnofs, cxtofs, jmpofs)
3031 rtx tramp, fnaddr, cxt;
3032 int fnofs, cxtofs, jmpofs;
3034 rtx temp, temp1, addr;
3035 /* VMS really uses DImode pointers in memory at this point. */
3036 enum machine_mode mode = TARGET_OPEN_VMS ? Pmode : ptr_mode;
3038 #ifdef POINTERS_EXTEND_UNSIGNED
3039 fnaddr = convert_memory_address (mode, fnaddr);
3040 cxt = convert_memory_address (mode, cxt);
3043 /* Store function address and CXT. */
3044 addr = memory_address (mode, plus_constant (tramp, fnofs));
3045 emit_move_insn (gen_rtx (MEM, mode, addr), fnaddr);
3046 addr = memory_address (mode, plus_constant (tramp, cxtofs));
3047 emit_move_insn (gen_rtx (MEM, mode, addr), cxt);
3049 /* This has been disabled since the hint only has a 32k range, and in
3050 no existing OS is the stack within 32k of the text segment. */
3051 if (0 && jmpofs >= 0)
3053 /* Compute hint value. */
3054 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
3055 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
3057 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
3058 build_int_2 (2, 0), NULL_RTX, 1);
3059 temp = expand_and (gen_lowpart (SImode, temp), GEN_INT (0x3fff), 0);
3061 /* Merge in the hint. */
3062 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
3063 temp1 = force_reg (SImode, gen_rtx (MEM, SImode, addr));
/* Clear the old 14-bit hint field, then OR in the new hint.  */
3064 temp1 = expand_and (temp1, GEN_INT (0xffffc000), NULL_RTX);
3065 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
3067 emit_move_insn (gen_rtx (MEM, SImode, addr), temp1);
3070 #ifdef TRANSFER_FROM_TRAMPOLINE
3071 emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "__enable_execute_stack"),
3072 0, VOIDmode, 1, addr, Pmode);
/* Flush the instruction cache so the freshly written trampoline code is
   visible to instruction fetch (Alpha `imb').  */
3076 emit_insn (gen_imb ());
3079 /* Do what is necessary for `va_start'. The argument is ignored;
3080 We look at the current function to determine if stdarg or varargs
3081 is used and fill in an initial va_list. A pointer to this constructor
/* Implement `__builtin_saveregs': build the initial va_list for the
   current (stdarg or varargs) function and return its address as an rtx.
   The va_list is a two-word block: __base (pointer past the named args)
   and __va_offset (bytes of args already consumed).  ARGLIST is ignored.  */
3085 alpha_builtin_saveregs (arglist)
3086 tree arglist ATTRIBUTE_UNUSED;
3088 rtx block, addr, dest, argsize;
3089 tree fntype = TREE_TYPE (current_function_decl);
/* stdarg functions end their arg list with something other than
   void_type_node; varargs functions have no such terminator.  */
3090 int stdarg = (TYPE_ARG_TYPES (fntype) != 0
3091 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
3092 != void_type_node));
3094 /* Compute the current position into the args, taking into account
3095 both registers and memory. Both of these are already included in
3098 argsize = GEN_INT (NUM_ARGS * UNITS_PER_WORD);
3100 /* For Unix, SETUP_INCOMING_VARARGS moves the starting address base up by 48,
3101 storing fp arg registers in the first 48 bytes, and the integer arg
3102 registers in the next 48 bytes. This is only done, however, if any
3103 integer registers need to be stored.
3105 If no integer registers need be stored, then we must subtract 48 in
3106 order to account for the integer arg registers which are counted in
3107 argsize above, but which are not actually stored on the stack. */
3109 if (TARGET_OPEN_VMS)
3110 addr = plus_constant (virtual_incoming_args_rtx,
3111 NUM_ARGS <= 5 + stdarg
3112 ? UNITS_PER_WORD : - 6 * UNITS_PER_WORD)
3114 addr = (NUM_ARGS <= 5 + stdarg
3115 ? plus_constant (virtual_incoming_args_rtx,
3117 : plus_constant (virtual_incoming_args_rtx,
3118 - (6 * UNITS_PER_WORD)));
3120 /* For VMS, we include the argsize, while on Unix, it's handled as
3121 a separate field. */
3122 if (TARGET_OPEN_VMS)
3123 addr = plus_constant (addr, INTVAL (argsize));
3125 addr = force_operand (addr, NULL_RTX);
3127 #ifdef POINTERS_EXTEND_UNSIGNED
3128 addr = convert_memory_address (ptr_mode, addr);
3131 if (TARGET_OPEN_VMS)
3135 /* Allocate the va_list constructor */
3136 block = assign_stack_local (BLKmode, 2 * UNITS_PER_WORD, BITS_PER_WORD);
3137 RTX_UNCHANGING_P (block) = 1;
3138 RTX_UNCHANGING_P (XEXP (block, 0)) = 1;
3140 /* Store the address of the first integer register in the __base
3143 dest = change_address (block, ptr_mode, XEXP (block, 0));
3144 emit_move_insn (dest, addr);
/* Under -fcheck-memory-usage, tell the checker the __base slot is now
   valid read/write memory.  */
3146 if (current_function_check_memory_usage)
3147 emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
3149 GEN_INT (GET_MODE_SIZE (ptr_mode)),
3150 TYPE_MODE (sizetype),
3151 GEN_INT (MEMORY_USE_RW),
3152 TYPE_MODE (integer_type_node));
3154 /* Store the argsize as the __va_offset member. */
3155 dest = change_address (block, TYPE_MODE (integer_type_node),
3156 plus_constant (XEXP (block, 0),
3157 POINTER_SIZE/BITS_PER_UNIT));
3158 emit_move_insn (dest, argsize);
3160 if (current_function_check_memory_usage)
3161 emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
3163 GEN_INT (GET_MODE_SIZE
3164 (TYPE_MODE (integer_type_node))),
3165 TYPE_MODE (sizetype),
3166 GEN_INT (MEMORY_USE_RW),
3167 TYPE_MODE (integer_type_node));
3169 /* Return the address of the va_list constructor, but don't put it in a
3170 register. Doing so would fail when not optimizing and produce worse
3171 code when optimizing. */
3172 return XEXP (block, 0);
3176 /* This page contains routines that are used to determine what the function
3177 prologue and epilogue code will do and write them out. */
3179 /* Compute the size of the save area in the stack. */
3181 /* These variables are used for communication between the following functions.
3182 They indicate various things about the current function being compiled
3183 that are used to tell what kind of prologue, epilogue and procedure
3184 descriptor to generate. */
3186 /* Nonzero if we need a stack procedure. */
3187 static int vms_is_stack_procedure;
3189 /* Register number (either FP or SP) that is used to unwind the frame. */
3190 static int vms_unwind_regno;
3192 /* Register number used to save FP. We need not have one for RA since
3193 we don't modify it for register procedures. This is only defined
3194 for register frame procedures. */
3195 static int vms_save_fp_regno;
3197 /* Register number used to reference objects off our PV. */
3198 static int vms_base_regno;
3200 /* Compute register masks for saved registers. */
/* Compute the bit masks of integer (*IMASKP) and floating (*FMASKP)
   hard registers that the current function must save in its prologue.
   Bit i of the float mask corresponds to hard register i+32.  */
3203 alpha_sa_mask (imaskP, fmaskP)
3204 unsigned long *imaskP;
3205 unsigned long *fmaskP;
3207 unsigned long imask = 0;
3208 unsigned long fmask = 0;
/* MI thunks save nothing themselves.  */
3211 #ifdef ASM_OUTPUT_MI_THUNK
3212 if (!current_function_is_thunk)
/* A VMS stack procedure additionally saves the frame pointer.  */
3215 if (TARGET_OPEN_VMS && vms_is_stack_procedure)
3216 imask |= (1L << HARD_FRAME_POINTER_REGNUM);
3218 /* One for every register we have to save. */
3219 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3220 if (! fixed_regs[i] && ! call_used_regs[i]
3221 && regs_ever_live[i] && i != REG_RA)
3226 fmask |= (1L << (i - 32));
/* RA must be saved whenever anything else is, or whenever it may have
   been clobbered (e.g. by a call).  */
3229 if (imask || fmask || alpha_ra_ever_killed ())
3230 imask |= (1L << REG_RA);
/* NOTE(review): this is the body of alpha_sa_size — its header line is
   elided from this listing.  It computes the size of the register save
   area and, for VMS, decides the procedure kind and the FP/PV/unwind
   register assignments recorded in the vms_* statics above.  */
#ifdef ASM_OUTPUT_MI_THUNK
3244 if (current_function_is_thunk)
3249 /* One for every register we have to save. */
3250 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3251 if (! fixed_regs[i] && ! call_used_regs[i]
3252 && regs_ever_live[i] && i != REG_RA)
3256 if (TARGET_OPEN_VMS)
3258 /* Start by assuming we can use a register procedure if we don't
3259 make any calls (REG_RA not used) or need to save any
3260 registers and a stack procedure if we do. */
3261 vms_is_stack_procedure = sa_size != 0 || alpha_ra_ever_killed ();
3263 /* Decide whether to refer to objects off our PV via FP or PV.
3264 If we need FP for something else or if we receive a nonlocal
3265 goto (which expects PV to contain the value), we must use PV.
3266 Otherwise, start by assuming we can use FP. */
3267 vms_base_regno = (frame_pointer_needed
3268 || current_function_has_nonlocal_label
3269 || vms_is_stack_procedure
3270 || current_function_outgoing_args_size
3271 ? REG_PV : HARD_FRAME_POINTER_REGNUM)
3273 /* If we want to copy PV into FP, we need to find some register
3274 in which to save FP. */
3276 vms_save_fp_regno = -1;
3277 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
/* Any call-clobbered, non-fixed, otherwise-unused integer register will do.  */
3278 for (i = 0; i < 32; i++)
3279 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
3280 vms_save_fp_regno = i;
/* No scratch register available: fall back to a stack procedure.  */
3282 if (vms_save_fp_regno == -1)
3283 vms_base_regno = REG_PV, vms_is_stack_procedure = 1;
3285 /* Stack unwinding should be done via FP unless we use it for PV. */
3286 vms_unwind_regno = (vms_base_regno == REG_PV
3287 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
3289 /* If this is a stack procedure, allow space for saving FP and RA. */
3290 if (vms_is_stack_procedure)
3295 /* If some registers were saved but not RA, RA must also be saved,
3296 so leave space for it. */
3297 if (sa_size != 0 || alpha_ra_ever_killed ())
3300 /* Our size must be even (multiple of 16 bytes). */
/* Return the number of bytes reserved for saving the PV: 8 for a VMS
   stack procedure, otherwise 0.  */
3309 alpha_pv_save_size ()
3312 return vms_is_stack_procedure ? 8 : 0;
/* NOTE(review): body of alpha_using_fp (header elided from this listing):
   nonzero when the frame is unwound via the hard frame pointer.  */
3319 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
/* VALID_MACHINE_DECL_ATTRIBUTE hook for VMS: accept the argument-less
   "overlaid" attribute on DECL; reject everything else.  */
3323 vms_valid_decl_attribute_p (decl, attributes, identifier, args)
3324 tree decl ATTRIBUTE_UNUSED;
3325 tree attributes ATTRIBUTE_UNUSED;
3329 if (is_attribute_p ("overlaid", identifier))
3330 return (args == NULL_TREE);
/* Return nonzero if the current function needs the GP register loaded,
   i.e. it contains an insn that references a symbol via the GP (LDSYM)
   or makes a call (JSR).  */
3335 alpha_does_function_need_gp ()
3339 /* We never need a GP for Windows/NT or VMS. */
3340 if (TARGET_WINDOWS_NT || TARGET_OPEN_VMS)
3343 #ifdef TARGET_PROFILING_NEEDS_GP
3348 #ifdef ASM_OUTPUT_MI_THUNK
3349 if (current_function_is_thunk)
3353 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
3354 Even if we are a static function, we still need to do this in case
3355 our address is taken and passed to something like qsort. */
/* Scan the whole insn chain, including any pending sequences.  */
3357 push_topmost_sequence ();
3358 insn = get_insns ();
3359 pop_topmost_sequence ();
3361 for (; insn; insn = NEXT_INSN (insn))
3362 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3363 && GET_CODE (PATTERN (insn)) != USE
3364 && GET_CODE (PATTERN (insn)) != CLOBBER)
3366 enum attr_type type = get_attr_type (insn);
3367 if (type == TYPE_LDSYM || type == TYPE_JSR)
3374 /* Write a version stamp. Don't write anything if we are running as a
3375 cross-compiler. Otherwise, use the versions in /usr/include/stamp.h. */
/* Emit the .verstamp directive (native OSF builds only; see comment above).  */
3382 alpha_write_verstamp (file)
3383 FILE *file ATTRIBUTE_UNUSED;
3386 fprintf (file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
3390 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
/* End the sequence opened by the FRP macro, mark every emitted insn as
   frame-related for unwind info, and emit the whole sequence.  */
3394 set_frame_related_p ()
3396 rtx seq = gen_sequence ();
3399 if (GET_CODE (seq) == SEQUENCE)
3401 int i = XVECLEN (seq, 0);
3403 RTX_FRAME_RELATED_P (XVECEXP (seq, 0, i)) = 1;
3404 return emit_insn (seq);
3408 seq = emit_insn (seq);
3409 RTX_FRAME_RELATED_P (seq) = 1;
/* Wrap EXP in a sequence so all insns it emits get marked frame-related.  */
3414 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
3416 /* Write function prologue. */
3418 /* On vms we have two kinds of functions:
3420 - stack frame (PROC_STACK)
3421 these are 'normal' functions with local vars and which are
3422 calling other functions
3423 - register frame (PROC_REGISTER)
3424 keeps all data in registers, needs no stack
3426 We must pass this to the assembler so it can generate the
3427 proper pdsc (procedure descriptor)
3428 This is done with the '.pdesc' command.
3430 On not-vms, we don't really differentiate between the two, as we can
3431 simply allocate stack without saving registers. */
/* Expand the function prologue as RTL: probe and allocate the stack
   frame, save call-saved registers, and set up FP/PV per ABI (OSF/1,
   Windows NT, or OpenVMS).  */
3434 alpha_expand_prologue ()
3436 /* Registers to save. */
3437 unsigned long imask = 0;
3438 unsigned long fmask = 0;
3439 /* Stack space needed for pushing registers clobbered by us. */
3440 HOST_WIDE_INT sa_size;
3441 /* Complete stack size needed. */
3442 HOST_WIDE_INT frame_size;
3443 /* Offset from base reg to register save area. */
3444 HOST_WIDE_INT reg_offset;
3448 sa_size = alpha_sa_size ();
3450 frame_size = get_frame_size ();
3451 if (TARGET_OPEN_VMS)
3452 frame_size = ALPHA_ROUND (sa_size
3453 + (vms_is_stack_procedure ? 8 : 0)
3455 + current_function_pretend_args_size);
3457 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
3459 + ALPHA_ROUND (frame_size
3460 + current_function_pretend_args_size));
3462 if (TARGET_OPEN_VMS)
3465 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
3467 alpha_sa_mask (&imask, &fmask);
3469 /* Adjust the stack by the frame size. If the frame size is > 4096
3470 bytes, we need to be sure we probe somewhere in the first and last
3471 4096 bytes (we can probably get away without the latter test) and
3472 every 8192 bytes in between. If the frame size is > 32768, we
3473 do this in a loop. Otherwise, we generate the explicit probe
3476 Note that we are only allowed to adjust sp once in the prologue. */
3478 if (frame_size <= 32768)
3480 if (frame_size > 4096)
3485 emit_insn (gen_probe_stack (GEN_INT (-probed)));
3486 while ((probed += 8192) < frame_size);
3488 /* We only have to do this probe if we aren't saving registers. */
3489 if (sa_size == 0 && probed + 4096 < frame_size)
3490 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
3493 if (frame_size != 0)
3495 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3496 GEN_INT (-frame_size))));
3501 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
3502 number of 8192 byte blocks to probe. We then probe each block
3503 in the loop and then set SP to the proper location. If the
3504 amount remaining is > 4096, we have to do one more probe if we
3505 are not saving any registers. */
3507 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
3508 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
3509 rtx ptr = gen_rtx_REG (DImode, 22);
3510 rtx count = gen_rtx_REG (DImode, 23);
3513 emit_move_insn (count, GEN_INT (blocks));
3514 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
3516 /* Because of the difficulty in emitting a new basic block this
3517 late in the compilation, generate the loop as a single insn. */
3518 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
3520 if (leftover > 4096 && sa_size == 0)
3522 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
3523 MEM_VOLATILE_P (last) = 1;
3524 emit_move_insn (last, const0_rtx);
3527 if (TARGET_WINDOWS_NT)
3529 /* For NT stack unwind (done by 'reverse execution'), it's
3530 not OK to take the result of a loop, even though the value
3531 is already in ptr, so we reload it via a single operation
3532 and subtract it to sp.
3534 Yes, that's correct -- we have to reload the whole constant
3535 into a temporary via ldah+lda then subtract from sp. To
3536 ensure we get ldah+lda, we use a special pattern. */
3538 HOST_WIDE_INT lo, hi;
3539 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
3540 hi = frame_size - lo;
3542 emit_move_insn (ptr, GEN_INT (hi));
3543 emit_insn (gen_nt_lda (ptr, GEN_INT (lo)));
3544 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
3549 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
3550 GEN_INT (-leftover)));
3553 /* This alternative is special, because the DWARF code cannot
3554 possibly intuit through the loop above. So we invent this
3555 note it looks at instead. */
3556 RTX_FRAME_RELATED_P (seq) = 1;
3558 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3559 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
3560 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
3561 GEN_INT (-frame_size))),
3565 /* Cope with very large offsets to the register save area. */
3566 sa_reg = stack_pointer_rtx;
3567 if (reg_offset + sa_size > 0x8000)
/* Fold as much of the offset as fits in a signed 16-bit displacement
   into REG_OFFSET; put the rest into a bias added once into $24.  */
3569 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
3572 if (low + sa_size <= 0x8000)
3573 bias = reg_offset - low, reg_offset = low;
3575 bias = reg_offset, reg_offset = 0;
3577 sa_reg = gen_rtx_REG (DImode, 24);
3578 FRP (emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, GEN_INT (bias))));
3581 /* Save regs in stack order. Beginning with VMS PV. */
3582 if (TARGET_OPEN_VMS && vms_is_stack_procedure)
3584 mem = gen_rtx_MEM (DImode, stack_pointer_rtx);
3585 MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
3586 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_PV)));
3589 /* Save register RA next. */
3590 if (imask & (1L << REG_RA))
3592 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
3593 MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
3594 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
3595 imask &= ~(1L << REG_RA);
3599 /* Now save any other registers required to be saved. */
3600 for (i = 0; i < 32; i++)
3601 if (imask & (1L << i))
3603 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
3604 MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
3605 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, i)));
3609 for (i = 0; i < 32; i++)
3610 if (fmask & (1L << i))
3612 mem = gen_rtx_MEM (DFmode, plus_constant (sa_reg, reg_offset));
3613 MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
3614 FRP (emit_move_insn (mem, gen_rtx_REG (DFmode, i+32)));
3618 if (TARGET_OPEN_VMS)
3620 if (!vms_is_stack_procedure)
3622 /* Register frame procedures save the fp. */
3623 FRP (emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
3624 hard_frame_pointer_rtx));
3627 if (vms_base_regno != REG_PV)
3628 FRP (emit_move_insn (gen_rtx_REG (DImode, vms_base_regno),
3629 gen_rtx_REG (DImode, REG_PV)));
3631 if (vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
3633 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
3636 /* If we have to allocate space for outgoing args, do it now. */
3637 if (current_function_outgoing_args_size != 0)
3639 FRP (emit_move_insn (stack_pointer_rtx,
3640 plus_constant (hard_frame_pointer_rtx,
3641 - ALPHA_ROUND (current_function_outgoing_args_size))));
3646 /* If we need a frame pointer, set it from the stack pointer. */
3647 if (frame_pointer_needed)
3649 if (TARGET_CAN_FAULT_IN_PROLOGUE)
3650 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
3653 /* This must always be the last instruction in the
3654 prologue, thus we emit a special move + clobber. */
3655 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
3656 stack_pointer_rtx, sa_reg)));
3661 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
3662 the prologue, for exception handling reasons, we cannot do this for
3663 any insn that might fault. We could prevent this for mems with a
3664 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
3665 have to prevent all such scheduling with a blockage.
3667 Linux, on the other hand, never bothered to implement OSF/1's
3668 exception handling, and so doesn't care about such things. Anyone
3669 planning to use dwarf2 frame-unwind info can also omit the blockage. */
3671 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
3672 emit_insn (gen_blockage ());
3675 /* Output the textual info surrounding the prologue. */
/* Output the assembler text surrounding the function prologue to FILE:
   .ent/.frame/.mask/.fmask directives, the entry label(s), GP setup for
   OSF, and the procedure descriptor machinery for VMS.  FNNAME is the
   assembler name of the function.  */
3678 alpha_start_function (file, fnname, decl)
3681 tree decl ATTRIBUTE_UNUSED;
3683 unsigned long imask = 0;
3684 unsigned long fmask = 0;
3685 /* Stack space needed for pushing registers clobbered by us. */
3686 HOST_WIDE_INT sa_size;
3687 /* Complete stack size needed. */
3688 HOST_WIDE_INT frame_size;
3689 /* Offset from base reg to register save area. */
3690 HOST_WIDE_INT reg_offset;
/* Room for fnname plus the VMS "..en" suffix and a NUL.  */
3691 char *entry_label = (char *) alloca (strlen (fnname) + 6);
3694 sa_size = alpha_sa_size ();
3696 frame_size = get_frame_size ();
3697 if (TARGET_OPEN_VMS)
3698 frame_size = ALPHA_ROUND (sa_size
3699 + (vms_is_stack_procedure ? 8 : 0)
3701 + current_function_pretend_args_size);
3703 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
3705 + ALPHA_ROUND (frame_size
3706 + current_function_pretend_args_size));
3708 if (TARGET_OPEN_VMS)
3711 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
3713 alpha_sa_mask (&imask, &fmask);
3715 /* Ecoff can handle multiple .file directives, so put out file and lineno.
3716 We have to do that before the .ent directive as we cannot switch
3717 files within procedures with native ecoff because line numbers are
3718 linked to procedure descriptors.
3719 Outputting the lineno helps debugging of one line functions as they
3720 would otherwise get no line number at all. Please note that we would
3721 like to put out last_linenum from final.c, but it is not accessible. */
3723 if (write_symbols == SDB_DEBUG)
3725 ASM_OUTPUT_SOURCE_FILENAME (file,
3726 DECL_SOURCE_FILE (current_function_decl));
3727 if (debug_info_level != DINFO_LEVEL_TERSE)
3728 ASM_OUTPUT_SOURCE_LINE (file,
3729 DECL_SOURCE_LINE (current_function_decl));
3732 /* Issue function start and label. */
3733 if (TARGET_OPEN_VMS || !flag_inhibit_size_directive)
3735 fputs ("\t.ent ", file);
3736 assemble_name (file, fnname);
3740 strcpy (entry_label, fnname);
3741 if (TARGET_OPEN_VMS)
3742 strcat (entry_label, "..en");
3743 ASM_OUTPUT_LABEL (file, entry_label);
3744 inside_function = TRUE;
3746 if (TARGET_OPEN_VMS)
3747 fprintf (file, "\t.base $%d\n", vms_base_regno);
3749 if (!TARGET_OPEN_VMS && TARGET_IEEE_CONFORMANT
3750 && !flag_inhibit_size_directive)
3752 /* Set flags in procedure descriptor to request IEEE-conformant
3753 math-library routines. The value we set it to is PDSC_EXC_IEEE
3754 (/usr/include/pdsc.h). */
3755 fputs ("\t.eflag 48\n", file);
3758 /* Set up offsets to alpha virtual arg/local debugging pointer. */
3759 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
3760 alpha_arg_offset = -frame_size + 48;
3762 /* Describe our frame. If the frame size is larger than an integer,
3763 print it as zero to avoid an assembler error. We won't be
3764 properly describing such a frame, but that's the best we can do. */
3765 if (TARGET_OPEN_VMS)
3767 fprintf (file, "\t.frame $%d,", vms_unwind_regno);
3768 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
3769 frame_size >= (1l << 31) ? 0 : frame_size);
3770 fputs (",$26,", file);
3771 fprintf (file, HOST_WIDE_INT_PRINT_DEC, reg_offset);
3774 else if (!flag_inhibit_size_directive)
3776 fprintf (file, "\t.frame $%d,",
3777 (frame_pointer_needed
3778 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM));
3779 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
3780 frame_size >= (1l << 31) ? 0 : frame_size);
3781 fprintf (file, ",$26,%d\n", current_function_pretend_args_size);
3784 /* Describe which registers were spilled. */
3785 if (TARGET_OPEN_VMS)
3788 /* ??? Does VMS care if mask contains ra? The old code didn't
3789 set it, so I don't here. */
3790 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1L << REG_RA));
3792 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
3793 if (!vms_is_stack_procedure)
3794 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
3796 else if (!flag_inhibit_size_directive)
3800 fprintf (file, "\t.mask 0x%lx,", imask);
3801 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
3802 frame_size >= (1l << 31) ? 0 : reg_offset - frame_size);
3805 for (i = 0; i < 32; ++i)
3806 if (imask & (1L << i))
3812 fprintf (file, "\t.fmask 0x%lx,", fmask);
3813 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
3814 frame_size >= (1l << 31) ? 0 : reg_offset - frame_size);
3819 /* Emit GP related things. It is rather unfortunate about the alignment
3820 issues surrounding a CODE_LABEL that forces us to do the label in
3822 if (!TARGET_OPEN_VMS && !TARGET_WINDOWS_NT)
3824 alpha_function_needs_gp = alpha_does_function_need_gp ();
3825 if (alpha_function_needs_gp)
3826 fputs ("\tldgp $29,0($27)\n", file);
3829 assemble_name (file, fnname);
3830 fputs ("..ng:\n", file);
3834 /* Ifdef'ed cause readonly_section and link_section are only
3836 readonly_section ();
3837 fprintf (file, "\t.align 3\n");
3838 assemble_name (file, fnname); fputs ("..na:\n", file);
3839 fputs ("\t.ascii \"", file);
3840 assemble_name (file, fnname);
3841 fputs ("\\0\"\n", file);
3844 fprintf (file, "\t.align 3\n");
3845 fputs ("\t.name ", file);
3846 assemble_name (file, fnname);
3847 fputs ("..na\n", file);
3848 ASM_OUTPUT_LABEL (file, fnname);
3849 fprintf (file, "\t.pdesc ");
3850 assemble_name (file, fnname);
3851 fprintf (file, "..en,%s\n", vms_is_stack_procedure ? "stack" : "reg");
3852 alpha_need_linkage (fnname, 1);
3857 /* Emit the .prologue note at the scheduled end of the prologue. */
/* Emit the .prologue directive to FILE, with the ABI-appropriate form:
   bare on VMS, "0" on NT, and the needs-GP flag elsewhere.  */
3860 output_end_prologue (file)
3863 if (TARGET_OPEN_VMS)
3864 fputs ("\t.prologue\n", file);
3865 else if (TARGET_WINDOWS_NT)
3866 fputs ("\t.prologue 0\n", file);
3867 else if (!flag_inhibit_size_directive)
3868 fprintf (file, "\t.prologue %d\n", alpha_function_needs_gp);
3871 /* Write function epilogue. */
3873 /* ??? At some point we will want to support full unwind, and so will
3874 need to mark the epilogue as well. At the moment, we just confuse
3877 #define FRP(exp) exp
/* Expand the function epilogue as RTL: restore saved registers, undo the
   stack adjustment, and emit the return.  Mirrors alpha_expand_prologue's
   frame layout computation.  */
3880 alpha_expand_epilogue ()
3882 /* Registers to save. */
3883 unsigned long imask = 0;
3884 unsigned long fmask = 0;
3885 /* Stack space needed for pushing registers clobbered by us. */
3886 HOST_WIDE_INT sa_size;
3887 /* Complete stack size needed. */
3888 HOST_WIDE_INT frame_size;
3889 /* Offset from base reg to register save area. */
3890 HOST_WIDE_INT reg_offset;
3891 int fp_is_frame_pointer, fp_offset;
3892 rtx sa_reg, sa_reg_exp = NULL;
3893 rtx sp_adj1, sp_adj2, mem;
3896 sa_size = alpha_sa_size ();
3898 frame_size = get_frame_size ();
3899 if (TARGET_OPEN_VMS)
3900 frame_size = ALPHA_ROUND (sa_size
3901 + (vms_is_stack_procedure ? 8 : 0)
3903 + current_function_pretend_args_size);
3905 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
3907 + ALPHA_ROUND (frame_size
3908 + current_function_pretend_args_size));
3910 if (TARGET_OPEN_VMS)
3913 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
3915 alpha_sa_mask (&imask, &fmask);
3917 fp_is_frame_pointer = ((TARGET_OPEN_VMS && vms_is_stack_procedure)
3918 || (!TARGET_OPEN_VMS && frame_pointer_needed));
3922 /* If we have a frame pointer, restore SP from it. */
3923 if ((TARGET_OPEN_VMS
3924 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
3925 || (!TARGET_OPEN_VMS && frame_pointer_needed))
3927 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
3930 /* Cope with very large offsets to the register save area. */
3931 sa_reg = stack_pointer_rtx;
3932 if (reg_offset + sa_size > 0x8000)
/* Same biasing scheme as the prologue, but into $22 here.  */
3934 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
3937 if (low + sa_size <= 0x8000)
3938 bias = reg_offset - low, reg_offset = low;
3940 bias = reg_offset, reg_offset = 0;
3942 sa_reg = gen_rtx_REG (DImode, 22);
3943 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
3945 FRP (emit_move_insn (sa_reg, sa_reg_exp));
3948 /* Restore registers in order, excepting a true frame pointer. */
3950 if (! alpha_eh_epilogue_sp_ofs)
3952 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
3953 MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
3954 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
3957 imask &= ~(1L << REG_RA);
3959 for (i = 0; i < 32; ++i)
3960 if (imask & (1L << i))
/* Defer a true frame pointer's restore; just remember where it lives.  */
3962 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
3963 fp_offset = reg_offset;
3966 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
3967 MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
3968 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
3973 for (i = 0; i < 32; ++i)
3974 if (fmask & (1L << i))
3976 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
3977 MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
3978 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
3983 if (frame_size || alpha_eh_epilogue_sp_ofs)
3985 sp_adj1 = stack_pointer_rtx;
3987 if (alpha_eh_epilogue_sp_ofs)
3989 sp_adj1 = gen_rtx_REG (DImode, 23);
3990 emit_move_insn (sp_adj1,
3991 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
3992 alpha_eh_epilogue_sp_ofs));
3995 /* If the stack size is large, begin computation into a temporary
3996 register so as not to interfere with a potential fp restore,
3997 which must be consecutive with an SP restore. */
3998 if (frame_size < 32768)
3999 sp_adj2 = GEN_INT (frame_size);
4000 else if (frame_size < 0x40007fffL)
4002 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
4004 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
/* Reuse the already-computed biased save-area base when it matches.  */
4005 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
4009 sp_adj1 = gen_rtx_REG (DImode, 23);
4010 FRP (emit_move_insn (sp_adj1, sp_adj2));
4012 sp_adj2 = GEN_INT (low);
4016 rtx tmp = gen_rtx_REG (DImode, 23);
4017 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3));
4020 /* We can't drop new things to memory this late, afaik,
4021 so build it up by pieces. */
4022 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
4023 -(frame_size < 0)));
4029 /* From now on, things must be in order. So emit blockages. */
4031 /* Restore the frame pointer. */
4032 if (fp_is_frame_pointer)
4034 emit_insn (gen_blockage ());
4035 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, fp_offset));
4036 MEM_ALIAS_SET (mem) = alpha_sr_alias_set;
4037 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
4039 else if (TARGET_OPEN_VMS)
4041 emit_insn (gen_blockage ());
4042 FRP (emit_move_insn (hard_frame_pointer_rtx,
4043 gen_rtx_REG (DImode, vms_save_fp_regno)));
4046 /* Restore the stack pointer. */
4047 emit_insn (gen_blockage ());
4048 FRP (emit_move_insn (stack_pointer_rtx,
4049 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
4053 if (TARGET_OPEN_VMS && !vms_is_stack_procedure)
4055 emit_insn (gen_blockage ());
4056 FRP (emit_move_insn (hard_frame_pointer_rtx,
4057 gen_rtx_REG (DImode, vms_save_fp_regno)));
4062 emit_jump_insn (gen_return_internal ());
4065 /* Output the rest of the textual info surrounding the epilogue. */
/* Emit the trailing assembler for a function: the .end directive and
   bookkeeping that lets later calls to this function be recognized.
   NOTE(review): this excerpt elides lines (e.g. the FILE * and fnname
   parameter declarations and the braces) — view is incomplete.  */
4068 alpha_end_function (file, fnname, decl)
4071 tree decl ATTRIBUTE_UNUSED;
4073 /* End the function. */
4074 if (!flag_inhibit_size_directive)
4076 fputs ("\t.end ", file);
4077 assemble_name (file, fnname);
/* We are no longer emitting the body of a function.  */
4080 inside_function = FALSE;
4082 /* Show that we know this function if it is called again.
4084 Don't do this for global functions in object files destined for a
4085 shared library because the function may be overridden by the application
4086 or other libraries. Similarly, don't do this for weak functions. */
4088 if (!DECL_WEAK (current_function_decl)
4089 && (!flag_pic || !TREE_PUBLIC (current_function_decl)))
4090 SYMBOL_REF_FLAG (XEXP (DECL_RTL (current_function_decl), 0)) = 1;
4093 /* Debugging support. */
4097 /* Count the number of sdb related labels that are generated (to find block
4098 start and end boundaries). */
4100 int sdb_label_count = 0;
4102 /* Next label # for each statement. */
4104 static int sym_lineno = 0;
4106 /* Count the number of .file directives, so that .loc is up to date. */
4108 static int num_source_filenames = 0;
4110 /* Name of the file containing the current function. */
4112 static const char *current_function_file = "";
4114 /* Offsets to alpha virtual arg/local debugging pointers. */
4116 long alpha_arg_offset;
4117 long alpha_auto_offset;
4119 /* Emit a new filename to a stream. */
/* Track the current source file and emit the appropriate .file (or,
   for DBX debugging, .stabs N_SOL) directive to STREAM so that later
   .loc directives refer to the right file number.  */
4122 alpha_output_filename (stream, name)
4126 static int first_time = TRUE;
4127 char ltext_label_name[100];
4132 ++num_source_filenames;
4133 current_function_file = name;
4134 fprintf (stream, "\t.file\t%d ", num_source_filenames);
4135 output_quoted_string (stream, name);
4136 fprintf (stream, "\n");
4137 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
4138 fprintf (stream, "\t#@stabs\n");
4141 else if (write_symbols == DBX_DEBUG)
4143 ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext", 0);
4144 fprintf (stream, "%s ", ASM_STABS_OP);
4145 output_quoted_string (stream, name);
/* BUGFIX: "<ext_label_name[1]" was a mis-decoding of
   "&ltext_label_name[1]" — the leading "&lt" of the address-of
   expression was eaten by an HTML-entity pass.  The [1] skips the
   label's leading '*'/'$' prefix character.  */
4146 fprintf (stream, ",%d,0,0,%s\n", N_SOL, &ltext_label_name[1]);
4149 else if (name != current_function_file
4150 && strcmp (name, current_function_file) != 0)
4152 if (inside_function && ! TARGET_GAS)
4153 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
4156 ++num_source_filenames;
4157 current_function_file = name;
4158 fprintf (stream, "\t.file\t%d ", num_source_filenames);
4161 output_quoted_string (stream, name);
4162 fprintf (stream, "\n");
4166 /* Emit a linenumber to a stream. */
/* For DBX debugging emit a $LMn label plus a .stabn N_SLINE referring to
   it; otherwise fall through to a plain .loc directive keyed by the
   running .file counter.  (Braces and the sym_lineno increment are
   elided in this view.)  */
4169 alpha_output_lineno (stream, line)
4173 if (write_symbols == DBX_DEBUG)
4175 /* mips-tfile doesn't understand .stabd directives. */
4177 fprintf (stream, "$LM%d:\n\t%s %d,0,%d,$LM%d\n",
4178 sym_lineno, ASM_STABN_OP, N_SLINE, line, sym_lineno);
4181 fprintf (stream, "\n\t.loc\t%d %d\n", num_source_filenames, line);
4184 /* Structure to show the current status of registers and memory. */
/* Bitmask summary used by the trap-shadow pass: 31-bit masks for the
   integer and FP register files plus a single combined memory bit.
   (The used/defd member declarations are elided in this view.)  */
4186 struct shadow_summary
4189 unsigned long i : 31; /* Mask of int regs */
4190 unsigned long fp : 31; /* Mask of fp regs */
4191 unsigned long mem : 1; /* mem == imem | fpmem */
/* Forward declarations for the trap-shadow machinery below.  */
4195 static void summarize_insn PROTO((rtx, struct shadow_summary *, int));
4196 static void alpha_handle_trap_shadows PROTO((rtx));
4198 /* Summary the effects of expression X on the machine. Update SUM, a pointer
4199 to the summary structure. SET is nonzero if the insn is setting the
4200 object, otherwise zero. */
/* Recursively walk rtx X, ORing registers and memory it reads into
   sum->used and those it writes into sum->defd.  SET is nonzero when X
   is being written.  NOTE(review): the case labels (SET, CLOBBER, USE,
   ASM_OPERANDS, PARALLEL, SUBREG, REG, MEM, ...) are elided in this
   excerpt; the recursion targets below imply them.  */
4203 summarize_insn (x, sum, set)
4205 struct shadow_summary *sum;
4214 switch (GET_CODE (x))
4216 /* ??? Note that this case would be incorrect if the Alpha had a
4217 ZERO_EXTRACT in SET_DEST. */
4219 summarize_insn (SET_SRC (x), sum, 0);
4220 summarize_insn (SET_DEST (x), sum, 1);
4224 summarize_insn (XEXP (x, 0), sum, 1);
4228 summarize_insn (XEXP (x, 0), sum, 0);
4232 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
4233 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
4237 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4238 summarize_insn (XVECEXP (x, 0, i), sum, 0);
4242 summarize_insn (SUBREG_REG (x), sum, 0);
/* Registers: FP regs live at regno 32..63, so regno % 32 indexes the
   per-file 31-bit mask; regs 31 and 63 are the hardwired zero regs.  */
4247 int regno = REGNO (x);
4248 unsigned long mask = 1UL << (regno % 32);
4250 if (regno == 31 || regno == 63)
4256 sum->defd.i |= mask;
4258 sum->defd.fp |= mask;
4263 sum->used.i |= mask;
4265 sum->used.fp |= mask;
4276 /* Find the regs used in memory address computation: */
4277 summarize_insn (XEXP (x, 0), sum, 0);
4280 case CONST_INT: case CONST_DOUBLE:
4281 case SYMBOL_REF: case LABEL_REF: case CONST:
4284 /* Handle common unary and binary ops for efficiency. */
4285 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
4286 case MOD: case UDIV: case UMOD: case AND: case IOR:
4287 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
4288 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
4289 case NE: case EQ: case GE: case GT: case LE:
4290 case LT: case GEU: case GTU: case LEU: case LTU:
4291 summarize_insn (XEXP (x, 0), sum, 0);
4292 summarize_insn (XEXP (x, 1), sum, 0);
4295 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
4296 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
4297 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
4298 case SQRT: case FFS:
4299 summarize_insn (XEXP (x, 0), sum, 0);
/* Default: walk the generic rtx format string ('e' = subexpression,
   'E' = vector of subexpressions).  */
4303 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
4304 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4305 switch (format_ptr[i])
4308 summarize_insn (XEXP (x, i), sum, 0);
4312 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4313 summarize_insn (XVECEXP (x, i, j), sum, 0);
4325 /* Ensure a sufficient number of `trapb' insns are in the code when
4326 the user requests code with a trap precision of functions or
4329 In naive mode, when the user requests a trap-precision of
4330 "instruction", a trapb is needed after every instruction that may
4331 generate a trap. This ensures that the code is resumption safe but
4334 When optimizations are turned on, we delay issuing a trapb as long
4335 as possible. In this context, a trap shadow is the sequence of
4336 instructions that starts with a (potentially) trap generating
4337 instruction and extends to the next trapb or call_pal instruction
4338 (but GCC never generates call_pal by itself). We can delay (and
4339 therefore sometimes omit) a trapb subject to the following
4342 (a) On entry to the trap shadow, if any Alpha register or memory
4343 location contains a value that is used as an operand value by some
4344 instruction in the trap shadow (live on entry), then no instruction
4345 in the trap shadow may modify the register or memory location.
4347 (b) Within the trap shadow, the computation of the base register
4348 for a memory load or store instruction may not involve using the
4349 result of an instruction that might generate an UNPREDICTABLE
4352 (c) Within the trap shadow, no register may be used more than once
4353 as a destination register. (This is to make life easier for the
4356 (d) The trap shadow may not include any branch instructions. */
/* Walk the insn chain and insert trapb insns where required to satisfy
   the trap-shadow rules (a)-(d) documented above, tracking pending
   trap-capable insns with a shadow_summary.  */
4359 alpha_handle_trap_shadows (insns)
4362 struct shadow_summary shadow;
4363 int trap_pending, exception_nesting;
4367 exception_nesting = 0;
4370 shadow.used.mem = 0;
4371 shadow.defd = shadow.used;
4373 for (i = insns; i ; i = NEXT_INSN (i))
4375 if (GET_CODE (i) == NOTE)
4377 switch (NOTE_LINE_NUMBER (i))
4379 case NOTE_INSN_EH_REGION_BEG:
4380 exception_nesting++;
4385 case NOTE_INSN_EH_REGION_END:
4386 exception_nesting--;
4391 case NOTE_INSN_EPILOGUE_BEG:
4392 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
4397 else if (trap_pending)
/* A trap shadow is open; decide whether insn I may extend it or
   whether a trapb must be emitted first.  */
4399 if (alpha_tp == ALPHA_TP_FUNC)
4401 if (GET_CODE (i) == JUMP_INSN
4402 && GET_CODE (PATTERN (i)) == RETURN)
4405 else if (alpha_tp == ALPHA_TP_INSN)
4409 struct shadow_summary sum;
4414 sum.defd = sum.used;
4416 switch (GET_CODE (i))
4419 /* Annoyingly, get_attr_trap will abort on these. */
4420 if (GET_CODE (PATTERN (i)) == USE
4421 || GET_CODE (PATTERN (i)) == CLOBBER)
4424 summarize_insn (PATTERN (i), &sum, 0);
4426 if ((sum.defd.i & shadow.defd.i)
4427 || (sum.defd.fp & shadow.defd.fp))
4429 /* (c) would be violated */
4433 /* Combine shadow with summary of current insn: */
4434 shadow.used.i |= sum.used.i;
4435 shadow.used.fp |= sum.used.fp;
4436 shadow.used.mem |= sum.used.mem;
4437 shadow.defd.i |= sum.defd.i;
4438 shadow.defd.fp |= sum.defd.fp;
4439 shadow.defd.mem |= sum.defd.mem;
4441 if ((sum.defd.i & shadow.used.i)
4442 || (sum.defd.fp & shadow.used.fp)
4443 || (sum.defd.mem & shadow.used.mem))
4445 /* (a) would be violated (also takes care of (b)) */
4446 if (get_attr_trap (i) == TRAP_YES
4447 && ((sum.defd.i & sum.used.i)
4448 || (sum.defd.fp & sum.used.fp)))
/* Close the shadow: emit a trapb before I.  TImode marks both insns
   as starting new issue groups for the alignment pass.  */
4467 n = emit_insn_before (gen_trapb (), i);
4468 PUT_MODE (n, TImode);
4469 PUT_MODE (i, TImode);
4473 shadow.used.mem = 0;
4474 shadow.defd = shadow.used;
/* No shadow open: a trap-capable insn starts a new one.  */
4479 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
4480 && GET_CODE (i) == INSN
4481 && GET_CODE (PATTERN (i)) != USE
4482 && GET_CODE (PATTERN (i)) != CLOBBER
4483 && get_attr_trap (i) == TRAP_YES
4485 if (optimize && !trap_pending)
4486 summarize_insn (PATTERN (i), &shadow, 0);
4493 /* Alpha can only issue instruction groups simultaneously if they are
4494 suitably aligned. This is very processor-specific. */
/* Issue-slot classifications for EV4 and EV5; enumerator bodies are
   elided in this excerpt.  */
4496 enum alphaev4_pipe {
4503 enum alphaev5_pipe {
4514 static enum alphaev4_pipe alphaev4_insn_pipe PROTO((rtx));
4515 static enum alphaev5_pipe alphaev5_insn_pipe PROTO((rtx));
4516 static rtx alphaev4_next_group PROTO((rtx, int*, int*));
4517 static rtx alphaev5_next_group PROTO((rtx, int*, int*));
4518 static rtx alphaev4_next_nop PROTO((int*));
4519 static rtx alphaev5_next_nop PROTO((int*));
4521 static void alpha_align_insns
4522 PROTO((rtx, int, rtx (*)(rtx, int*, int*), rtx (*)(int*), int));
/* Classify INSN into an EV4 issue pipe based on its scheduling type
   attribute.  (The switch arms are elided in this excerpt.)  */
4524 static enum alphaev4_pipe
4525 alphaev4_insn_pipe (insn)
4528 if (recog_memoized (insn) < 0)
4530 if (get_attr_length (insn) != 4)
4533 switch (get_attr_type (insn))
/* Classify INSN into an EV5 issue pipe based on its scheduling type
   attribute.  (The switch arms are elided in this excerpt.)  */
4566 static enum alphaev5_pipe
4567 alphaev5_insn_pipe (insn)
4570 if (recog_memoized (insn) < 0)
4572 if (get_attr_length (insn) != 4)
4575 switch (get_attr_type (insn))
4615 /* IN_USE is a mask of the slots currently filled within the insn group.
4616 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
4617 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
4619 LEN is, of course, the length of the group in bytes. */
/* Scan forward from INSN collecting one EV4 issue group; return the
   first insn of the NEXT group, storing the slot mask in *PIN_USE and
   the byte length in *PLEN.  */
4622 alphaev4_next_group (insn, pin_use, plen)
4624 int *pin_use, *plen;
4630 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i'
4631 || GET_CODE (PATTERN (insn)) == CLOBBER
4632 || GET_CODE (PATTERN (insn)) == USE)
4637 enum alphaev4_pipe pipe;
4639 pipe = alphaev4_insn_pipe (insn);
4643 /* Force complex instructions to start new groups. */
4647 /* If this is a completely unrecognized insn, it's an asm.
4648 We don't know how long it is, so record length as -1 to
4649 signal a needed realignment. */
4650 if (recog_memoized (insn) < 0)
4653 len = get_attr_length (insn);
/* Slot-allocation per pipe; IBX insns may be moved by hardware from
   IB0 to IB1 if IB1 is still free.  */
4657 if (in_use & EV4_IB0)
4659 if (in_use & EV4_IB1)
4664 in_use |= EV4_IB0 | EV4_IBX;
4668 if (in_use & EV4_IB0)
4670 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
4678 if (in_use & EV4_IB1)
4688 /* Haifa doesn't do well scheduling branches. */
4689 if (GET_CODE (insn) == JUMP_INSN)
4693 insn = next_nonnote_insn (insn);
4695 if (!insn || GET_RTX_CLASS (GET_CODE (insn)) != 'i')
4698 /* Let Haifa tell us where it thinks insn group boundaries are. */
4699 if (GET_MODE (insn) == TImode)
4702 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
4707 insn = next_nonnote_insn (insn);
4715 /* IN_USE is a mask of the slots currently filled within the insn group.
4716 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
4717 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
4719 LEN is, of course, the length of the group in bytes. */
/* EV5 analogue of alphaev4_next_group: collect one issue group and
   return the first insn of the next one.  */
4722 alphaev5_next_group (insn, pin_use, plen)
4724 int *pin_use, *plen;
4730 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i'
4731 || GET_CODE (PATTERN (insn)) == CLOBBER
4732 || GET_CODE (PATTERN (insn)) == USE)
4737 enum alphaev5_pipe pipe;
4739 pipe = alphaev5_insn_pipe (insn);
4743 /* Force complex instructions to start new groups. */
4747 /* If this is a completely unrecognized insn, it's an asm.
4748 We don't know how long it is, so record length as -1 to
4749 signal a needed realignment. */
4750 if (recog_memoized (insn) < 0)
4753 len = get_attr_length (insn);
4756 /* ??? Most of the places below, we would like to abort, as
4757 it would indicate an error either in Haifa, or in the
4758 scheduling description. Unfortunately, Haifa never
4759 schedules the last instruction of the BB, so we don't
4760 have an accurate TI bit to go off. */
4762 if (in_use & EV5_E0)
4764 if (in_use & EV5_E1)
4769 in_use |= EV5_E0 | EV5_E01;
4773 if (in_use & EV5_E0)
4775 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
4783 if (in_use & EV5_E1)
/* FP pipes: FAM insns may issue down either FA or FM.  */
4789 if (in_use & EV5_FA)
4791 if (in_use & EV5_FM)
4796 in_use |= EV5_FA | EV5_FAM;
4800 if (in_use & EV5_FA)
4806 if (in_use & EV5_FM)
4819 /* Haifa doesn't do well scheduling branches. */
4820 /* ??? If this is predicted not-taken, slotting continues, except
4821 that no more IBR, FBR, or JSR insns may be slotted. */
4822 if (GET_CODE (insn) == JUMP_INSN)
4826 insn = next_nonnote_insn (insn);
4828 if (!insn || GET_RTX_CLASS (GET_CODE (insn)) != 'i')
4831 /* Let Haifa tell us where it thinks insn group boundaries are. */
4832 if (GET_MODE (insn) == TImode)
4835 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
4840 insn = next_nonnote_insn (insn);
/* Return a nop insn filling the cheapest still-free EV4 slot, updating
   *PIN_USE; prefers IB0, then the hardware-swappable IBX case, then an
   FP nop in IB1 when FP is enabled.  (Return statements elided.)  */
4849 alphaev4_next_nop (pin_use)
4852 int in_use = *pin_use;
4855 if (!(in_use & EV4_IB0))
4860 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
4865 else if (TARGET_FP && !(in_use & EV4_IB1))
/* EV5 analogue of alphaev4_next_nop: pick a nop for the cheapest free
   slot (E1 first, then FA/FM FP nops).  (Return statements elided.)  */
4878 alphaev5_next_nop (pin_use)
4881 int in_use = *pin_use;
4884 if (!(in_use & EV5_E1))
4889 else if (TARGET_FP && !(in_use & EV5_FA))
4894 else if (TARGET_FP && !(in_use & EV5_FM))
4906 /* The instruction group alignment main loop. */
/* Walk the function keeping issue groups (identified by NEXT_GROUP)
   from straddling MAX_ALIGN-byte fetch boundaries, inserting .align
   directives or free nops (from NEXT_NOP) as needed.  GP_IN_USE is the
   slot mask consumed by the implicit initial GP load.  */
4909 alpha_align_insns (insns, max_align, next_group, next_nop, gp_in_use)
4912 rtx (*next_group) PROTO((rtx, int*, int*));
4913 rtx (*next_nop) PROTO((int*));
4916 /* ALIGN is the known alignment for the insn group. */
4918 /* OFS is the offset of the current insn in the insn group. */
4920 int prev_in_use, in_use, len;
4923 /* Let shorten branches care for assigning alignments to code labels. */
4924 shorten_branches (insns);
4926 align = (FUNCTION_BOUNDARY/BITS_PER_UNIT < max_align
4927 ? FUNCTION_BOUNDARY/BITS_PER_UNIT : max_align);
4929 /* Account for the initial GP load, which happens before the scheduled
4930 prologue we emitted as RTL. */
4931 ofs = prev_in_use = 0;
4932 if (alpha_does_function_need_gp())
4934 ofs = 8 & (align - 1);
4935 prev_in_use = gp_in_use;
4939 if (GET_CODE (i) == NOTE)
4940 i = next_nonnote_insn (i);
4944 next = (*next_group)(i, &in_use, &len);
4946 /* When we see a label, resync alignment etc. */
4947 if (GET_CODE (i) == CODE_LABEL)
4949 int new_align = 1 << label_to_alignment (i);
4950 if (new_align >= align)
4952 align = new_align < max_align ? new_align : max_align;
4955 else if (ofs & (new_align-1))
4956 ofs = (ofs | (new_align-1)) + 1;
4961 /* Handle complex instructions specially. */
4962 else if (in_use == 0)
4964 /* Asms will have length < 0. This is a signal that we have
4965 lost alignment knowledge. Assume, however, that the asm
4966 will not mis-align instructions. */
4975 /* If the known alignment is smaller than the recognized insn group,
4976 realign the output. */
4977 else if (align < len)
4979 int new_log_align = len > 8 ? 4 : 3;
4982 where = prev_nonnote_insn (i);
4983 if (!where || GET_CODE (where) != CODE_LABEL)
4986 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
4987 align = 1 << new_log_align;
4991 /* If the group won't fit in the same INT16 as the previous,
4992 we need to add padding to keep the group together. Rather
4993 than simply leaving the insn filling to the assembler, we
4994 can make use of the knowledge of what sorts of instructions
4995 were issued in the previous group to make sure that all of
4996 the added nops are really free. */
4997 else if (ofs + len > align)
4999 int nop_count = (align - ofs) / 4;
5002 /* Insert nops before labels and branches to truly merge the
5003 execution of the nops with the previous instruction group. */
5004 where = prev_nonnote_insn (i);
5007 if (GET_CODE (where) == CODE_LABEL)
5009 rtx where2 = prev_nonnote_insn (where);
5010 if (where2 && GET_CODE (where2) == JUMP_INSN)
5013 else if (GET_CODE (where) != JUMP_INSN)
5020 emit_insn_before ((*next_nop)(&prev_in_use), where);
5021 while (--nop_count);
5025 ofs = (ofs + len) & (align - 1);
5026 prev_in_use = in_use;
5032 /* Machine dependent reorg pass. */
/* NOTE(review): the function header (presumably alpha_reorg (insns))
   is elided in this excerpt; only the body remains.  Runs the
   trap-shadow fixup, then the per-CPU group-alignment pass.  */
5038 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
5039 alpha_handle_trap_shadows (insns);
5042 /* Due to the number of extra trapb insns, don't bother fixing up
5043 alignment when trap precision is instruction. Moreover, we can
5044 only do our job when sched2 is run and Haifa is our scheduler. */
5045 if (optimize && !optimize_size
5046 && alpha_tp != ALPHA_TP_INSN
5047 && flag_schedule_insns_after_reload)
5049 if (alpha_cpu == PROCESSOR_EV4)
5050 alpha_align_insns (insns, 8, alphaev4_next_group,
5051 alphaev4_next_nop, EV4_IB0);
5052 else if (alpha_cpu == PROCESSOR_EV5)
5053 alpha_align_insns (insns, 16, alphaev5_next_group,
5054 alphaev5_next_nop, EV5_E01 | EV5_E0);
5060 /* Check a floating-point value for validity for a particular machine mode. */
/* Representable single-precision extrema: entries 0-3 are the VAX F
   limits, entries 4-7 the IEEE single limits (no infinities/denormals
   in the default trapping mode).  Parsed lazily into float_values.  */
5062 static char * const float_strings[] =
5064 /* These are for FLOAT_VAX. */
5065 "1.70141173319264430e+38", /* 2^127 (2^24 - 1) / 2^24 */
5066 "-1.70141173319264430e+38",
5067 "2.93873587705571877e-39", /* 2^-128 */
5068 "-2.93873587705571877e-39",
5069 /* These are for the default broken IEEE mode, which traps
5070 on infinity or denormal numbers. */
5071 "3.402823466385288598117e+38", /* 2^128 (1 - 2^-24) */
5072 "-3.402823466385288598117e+38",
5073 "1.1754943508222875079687e-38", /* 2^-126 */
5074 "-1.1754943508222875079687e-38",
/* Parsed REAL_VALUE_TYPE forms of float_strings; filled on first use.  */
5077 static REAL_VALUE_TYPE float_values[8];
5078 static int inited_float_values = 0;
/* Clamp *D to the representable range for MODE: values beyond the
   positive/negative maxima saturate to the limit, and values in the
   denormal gap around zero flush to zero.  No clamping is done for
   IEEE-conformant targets.  (Braces/returns elided in this view.)  */
5081 check_float_value (mode, d, overflow)
5082 enum machine_mode mode;
5084 int overflow ATTRIBUTE_UNUSED;
5087 if (TARGET_IEEE || TARGET_IEEE_CONFORMANT || TARGET_IEEE_WITH_INEXACT)
5090 if (inited_float_values == 0)
5093 for (i = 0; i < 8; i++)
5094 float_values[i] = REAL_VALUE_ATOF (float_strings[i], DFmode);
5096 inited_float_values = 1;
5102 REAL_VALUE_TYPE *fvptr;
/* Select the VAX or IEEE limit quadruple.  */
5104 if (TARGET_FLOAT_VAX)
5105 fvptr = &float_values[0];
5107 fvptr = &float_values[4];
5109 bcopy ((char *) d, (char *) &r, sizeof (REAL_VALUE_TYPE));
5110 if (REAL_VALUES_LESS (fvptr[0], r))
5112 bcopy ((char *) &fvptr[0], (char *) d,
5113 sizeof (REAL_VALUE_TYPE));
5116 else if (REAL_VALUES_LESS (r, fvptr[1]))
5118 bcopy ((char *) &fvptr[1], (char *) d,
5119 sizeof (REAL_VALUE_TYPE));
5122 else if (REAL_VALUES_LESS (dconst0, r)
5123 && REAL_VALUES_LESS (r, fvptr[2]))
5125 bcopy ((char *) &dconst0, (char *) d, sizeof (REAL_VALUE_TYPE));
5128 else if (REAL_VALUES_LESS (r, dconst0)
5129 && REAL_VALUES_LESS (fvptr[3], r))
5131 bcopy ((char *) &dconst0, (char *) d, sizeof (REAL_VALUE_TYPE));
5141 /* Return the VMS argument type corresponding to MODE. */
/* SFmode maps to FF/FS, DFmode to FD/FT, depending on the VAX-vs-IEEE
   float format; other cases are elided in this excerpt.  */
5144 alpha_arg_type (mode)
5145 enum machine_mode mode;
5150 return TARGET_FLOAT_VAX ? FF : FS;
5152 return TARGET_FLOAT_VAX ? FD : FT;
5158 /* Return an rtx for an integer representing the VMS Argument Information
/* Packs the argument count into the low byte and each of the six
   3-bit argument-type codes at bit position i*3 + 8.  */
5162 alpha_arg_info_reg_val (cum)
5163 CUMULATIVE_ARGS cum;
5165 unsigned HOST_WIDE_INT regval = cum.num_args;
5168 for (i = 0; i < 6; i++)
5169 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
5171 return GEN_INT (regval);
5174 /* Structure to collect function names for final output
/* Singly-linked list of symbols needing VMS .linkage entries; KIND
   tracks whether each was only declared, defined locally, or external.
   (The name member is elided in this excerpt.)  */
5177 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
5180 struct alpha_links {
5181 struct alpha_links *next;
5183 enum links_kind kind;
/* Head of the linkage list, newest entry first.  */
5186 static struct alpha_links *alpha_links_base = 0;
5188 /* Make (or fake) .linkage entry for function call.
5190 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition. */
5193 alpha_need_linkage (name, is_local)
5198 struct alpha_links *lptr, *nptr;
5203 /* Is this name already defined ? */
5205 for (lptr = alpha_links_base; lptr; lptr = lptr->next)
5206 if (strcmp (lptr->name, name) == 0)
5210 /* Defined here but external assumed. */
5211 if (lptr->kind == KIND_EXTERN)
5212 lptr->kind = KIND_LOCAL;
5216 /* Used here but unused assumed. */
5217 if (lptr->kind == KIND_UNUSED)
5218 lptr->kind = KIND_LOCAL;
/* Not yet listed: create a new entry at the head of the list.  */
5223 nptr = (struct alpha_links *) xmalloc (sizeof (struct alpha_links));
5224 nptr->next = alpha_links_base;
5225 nptr->name = xstrdup (name);
5227 /* Assume external if no definition. */
5228 nptr->kind = (is_local ? KIND_UNUSED : KIND_EXTERN);
5230 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
5231 get_identifier (name);
5233 alpha_links_base = nptr;
/* Emit the accumulated VMS .linkage entries to STREAM: a $name..lk
   label followed by a quadword pair for locally-defined symbols or a
   .linkage request for external ones; unreferenced entries are
   skipped.  */
5240 alpha_write_linkage (stream)
5243 struct alpha_links *lptr, *nptr;
5245 readonly_section ();
5247 fprintf (stream, "\t.align 3\n");
5249 for (lptr = alpha_links_base; lptr; lptr = nptr)
5253 if (lptr->kind == KIND_UNUSED
5254 || ! TREE_SYMBOL_REFERENCED (get_identifier (lptr->name)))
5257 fprintf (stream, "$%s..lk:\n", lptr->name);
5258 if (lptr->kind == KIND_LOCAL)
5260 /* Local and used, build linkage pair. */
5261 fprintf (stream, "\t.quad %s..en\n", lptr->name);
5262 fprintf (stream, "\t.quad %s\n", lptr->name);
5265 /* External and used, request linkage pair. */
5266 fprintf (stream, "\t.linkage %s\n", lptr->name);
/* Non-VMS stub: linkage tracking is only needed on OpenVMS, so this
   version ignores its arguments and does nothing.  */
5273 alpha_need_linkage (name, is_local)
5274 char *name ATTRIBUTE_UNUSED;
5275 int is_local ATTRIBUTE_UNUSED;
5279 #endif /* OPEN_VMS */