/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
26 #include "coretypes.h"
31 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
36 #include "insn-attr.h"
47 #include "integrate.h"
50 #include "target-def.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55 #include "tree-gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
/* Specify which cpu to schedule for.  */
enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */
enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};

/* Specify how accurate floating-point traps need to be.  */
enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */
enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */
enum alpha_fp_trap_mode alpha_fptm;

/* Save information from a "cmpxx" operation until the branch or scc is
   emitted.  */
struct alpha_compare alpha_compare;

/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */
static int inside_function = FALSE;
/* The number of cycles of latency we should assume on memory reads.  */
int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */
static int alpha_function_needs_gp;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) int alpha_sr_alias_set;

/* The assembler name of the current function.  */
static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};

static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),		/* fp_add */
    COSTS_N_INSNS (6),		/* fp_mult */
    COSTS_N_INSNS (34),		/* fp_div_sf */
    COSTS_N_INSNS (63),		/* fp_div_df */
    COSTS_N_INSNS (23),		/* int_mult_si */
    COSTS_N_INSNS (23),		/* int_mult_di */
    COSTS_N_INSNS (2),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (97),		/* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (15),		/* fp_div_sf */
    COSTS_N_INSNS (22),		/* fp_div_df */
    COSTS_N_INSNS (8),		/* int_mult_si */
    COSTS_N_INSNS (12),		/* int_mult_di */
    COSTS_N_INSNS (1) + 1,	/* int_shift */
    COSTS_N_INSNS (1),		/* int_cmov */
    COSTS_N_INSNS (83),		/* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (12),		/* fp_div_sf */
    COSTS_N_INSNS (15),		/* fp_div_df */
    COSTS_N_INSNS (7),		/* int_mult_si */
    COSTS_N_INSNS (7),		/* int_mult_di */
    COSTS_N_INSNS (1),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (86),		/* int_div */
  },
};
/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one of them.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),		/* fp_add */
  COSTS_N_INSNS (1),		/* fp_mult */
  COSTS_N_INSNS (1),		/* fp_div_sf */
  COSTS_N_INSNS (1) + 1,	/* fp_div_df */
  COSTS_N_INSNS (1) + 1,	/* int_mult_si */
  COSTS_N_INSNS (1) + 2,	/* int_mult_di */
  COSTS_N_INSNS (1),		/* int_shift */
  COSTS_N_INSNS (1),		/* int_cmov */
  COSTS_N_INSNS (6),		/* int_div */
};
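/* Illustrative note (not in the original sources): the fractional "+ N"
   above makes an operation look slightly more expensive than one canonical
   insn without rounding it up to two, so when optimizing for size, ties
   still break toward the genuinely single-insn alternatives like shifts.  */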
189 /* Get the number of args of a function in one of two ways. */
190 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
191 #define NUM_ARGS current_function_args_info.num_args
193 #define NUM_ARGS current_function_args_info
/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *, tree);
#endif

static void unicosmk_output_deferred_case_vectors (FILE *);
static void unicosmk_gen_dsib (unsigned long *);
static void unicosmk_output_ssib (FILE *, const char *);
static int unicosmk_need_dex (rtx);
/* Implement TARGET_HANDLE_OPTION.  */

static bool
alpha_handle_option (size_t code, const char *arg, int value)
{
  switch (code)
    {
    case OPT_mfp_regs:
      if (value == 0)
        target_flags |= MASK_SOFT_FP;
      break;

    case OPT_mieee:
    case OPT_mieee_with_inexact:
      target_flags |= MASK_IEEE_CONFORMANT;
      break;

    case OPT_mtls_size_:
      if (value != 16 && value != 32 && value != 64)
        error ("bad value %qs for -mtls-size switch", arg);
      break;
    }

  return true;
}
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_FUNDAMENTAL_TYPE.  */

static const char *
alpha_mangle_fundamental_type (tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* Parse target option strings.  */

void
override_options (void)
{
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
  } cpu_table[] = {
    { "ev4",	PROCESSOR_EV4, 0 },
    { "ev45",	PROCESSOR_EV4, 0 },
    { "21064",	PROCESSOR_EV4, 0 },
    { "ev5",	PROCESSOR_EV5, 0 },
    { "21164",	PROCESSOR_EV5, 0 },
    { "ev56",	PROCESSOR_EV5, MASK_BWX },
    { "21164a",	PROCESSOR_EV5, MASK_BWX },
    { "pca56",	PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "ev6",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "21264",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "ev67",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
    { "21264a",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
    { 0, 0, 0 }
  };

  int i;
  /* Unicos/Mk doesn't have shared libraries.  */
  if (TARGET_ABI_UNICOSMK && flag_pic)
    {
      warning (0, "-f%s ignored for Unicos/Mk (not supported)",
	       (flag_pic > 1) ? "PIC" : "pic");
      flag_pic = 0;
    }

  /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
     floating-point instructions.  Make that the default for this target.  */
  if (TARGET_ABI_UNICOSMK)
    alpha_fprm = ALPHA_FPRM_DYN;
  else
    alpha_fprm = ALPHA_FPRM_NORM;

  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;
  /* We cannot use su and sui qualifiers for conversion instructions on
     Unicos/Mk.  I'm not sure if this is due to assembler or hardware
     limitations.  Right now, we issue a warning if -mieee is specified
     and then ignore it; eventually, we should either get it right or
     disable the option altogether.  */

  if (TARGET_IEEE)
    {
      if (TARGET_ABI_UNICOSMK)
	warning (0, "-mieee not supported on Unicos/Mk");
      else
	{
	  alpha_tp = ALPHA_TP_INSN;
	  alpha_fptm = ALPHA_FPTM_SU;
	}
    }

  if (TARGET_IEEE_WITH_INEXACT)
    {
      if (TARGET_ABI_UNICOSMK)
	warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
      else
	{
	  alpha_tp = ALPHA_TP_INSN;
	  alpha_fptm = ALPHA_FPTM_SUI;
	}
    }
  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
	alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
	alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
	alpha_tp = ALPHA_TP_INSN;
      else
	error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
    }

  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
	alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
	alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
	alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
	alpha_fprm = ALPHA_FPRM_DYN;
      else
	error ("bad value %qs for -mfp-rounding-mode switch",
	       alpha_fprm_string);
    }

  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
	alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
	alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
	alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
	alpha_fptm = ALPHA_FPTM_SUI;
      else
	error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
    }
  if (alpha_cpu_string)
    {
      for (i = 0; cpu_table [i].name; i++)
	if (! strcmp (alpha_cpu_string, cpu_table [i].name))
	  {
	    alpha_tune = alpha_cpu = cpu_table [i].processor;
	    target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
	    target_flags |= cpu_table [i].flags;
	    break;
	  }
      if (! cpu_table [i].name)
	error ("bad value %qs for -mcpu switch", alpha_cpu_string);
    }

  if (alpha_tune_string)
    {
      for (i = 0; cpu_table [i].name; i++)
	if (! strcmp (alpha_tune_string, cpu_table [i].name))
	  {
	    alpha_tune = cpu_table [i].processor;
	    break;
	  }
      if (! cpu_table [i].name)
	error ("bad value %qs for -mcpu switch", alpha_tune_string);
    }
  /* Do some sanity checks on the above options.  */

  if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
    {
      warning (0, "trap mode not supported on Unicos/Mk");
      alpha_fptm = ALPHA_FPTM_N;
    }

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
    {
      warning (0, "fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (alpha_cpu == PROCESSOR_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
	 arithmetic traps.  Which means we can do software completion
	 without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }
  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
	{
	  warning (0, "rounding mode not supported for VAX floats");
	  alpha_fprm = ALPHA_FPRM_NORM;
	}
      if (alpha_fptm == ALPHA_FPTM_SUI)
	{
	  warning (0, "trap mode not supported for VAX floats");
	  alpha_fptm = ALPHA_FPTM_SU;
	}
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
	warning (0, "128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }
  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
	&& (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
	     && ISDIGIT ((unsigned char)alpha_mlat_string[1])
	     && alpha_mlat_string[2] == '\0')
      {
	static int const cache_latency[][4] =
	{
	  { 3, 30, -1 },	/* ev4 -- Bcache is a guess */
	  { 2, 12, 38 },	/* ev5 -- Bcache from PC164 LMbench numbers */
	  { 3, 12, 30 },	/* ev6 -- Bcache from DS20 LMbench.  */
	};

	lat = alpha_mlat_string[1] - '0';
	if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
	  {
	    warning (0, "L%d cache latency unknown for %s",
		     lat, alpha_cpu_name[alpha_tune]);
	    lat = 3;
	  }
	else
	  lat = cache_latency[alpha_tune][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
	/* Most current memories have about 370ns latency.  This is
	   a reasonable guess for a fast cpu.  */
	lat = 150;
      }
    else
      {
	warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
	lat = 3;
      }

    alpha_memory_latency = lat;
  }
  /* Default the definition of "small data" to 8 bytes.  */
  if (!g_switch_set)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize and also if
     we are writing ECOFF symbols to work around a bug in DEC's assembler.  */
  if (optimize > 0 && write_symbols != SDB_DEBUG)
    {
      if (align_loops <= 0)
	align_loops = 16;
      if (align_jumps <= 0)
	align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;
  /* Acquire a unique set number for our register saves and restores.  */
  alpha_sr_alias_set = new_alias_set ();

  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
}
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
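/* Illustrative examples (not in the original sources): 0x00000000ffffffff
   and 0xffff0000ffff0000 are zap masks -- every byte is 0x00 or 0xff --
   while 0x0000000000ff00f0 is not, since its low byte 0xf0 is only
   partially set.  ZAP/ZAPNOT clear or keep whole bytes, so only such
   masks correspond to a single zap insn.  */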
/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
    default:
      gcc_unreachable ();
    }
}
/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG
	  && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
	{
	  op = reg_equiv_memory_loc[REGNO (tmp)];
	  if (op == 0)
	    return 0;
	}
    }
  return op;
}
/* Implements CONST_OK_FOR_LETTER_P.  Return true if the value matches
   the range defined for C in [I-P].  */

bool
alpha_const_ok_for_letter_p (HOST_WIDE_INT value, int c)
{
  switch (c)
    {
    case 'I':
      /* An unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) value < 0x100;
    case 'J':
      /* The constant zero.  */
      return value == 0;
    case 'K':
      /* A signed 16 bit constant.  */
      return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
    case 'L':
      /* A shifted signed 16 bit constant appropriate for LDAH.  */
      return ((value & 0xffff) == 0
	      && ((value) >> 31 == -1 || value >> 31 == 0));
    case 'M':
      /* A constant that can be AND'ed with using a ZAP insn.  */
      return zap_mask (value);
    case 'N':
      /* A complemented unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
    case 'O':
      /* A negated unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) (- value) < 0x100;
    case 'P':
      /* The constant 1, 2 or 3.  */
      return value == 1 || value == 2 || value == 3;

    default:
      return false;
    }
}
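/* Worked example (illustrative, not in the original sources): 0x7fff
   satisfies 'K' (a signed 16-bit LDA operand), and 0x12340000 satisfies
   'L' (its low 16 bits are zero and it fits a sign-extended LDAH
   operand), while 0x12345678 satisfies neither and must be built with
   an LDAH/LDA pair.  */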
/* Implements CONST_DOUBLE_OK_FOR_LETTER_P.  Return true if VALUE
   matches for C in [GH].  */

bool
alpha_const_double_ok_for_letter_p (rtx value, int c)
{
  switch (c)
    {
    case 'G':
      /* The floating point zero constant.  */
      return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
	      && value == CONST0_RTX (GET_MODE (value)));

    case 'H':
      /* A valid operand of a ZAP insn.  */
      return (GET_MODE (value) == VOIDmode
	      && zap_mask (CONST_DOUBLE_LOW (value))
	      && zap_mask (CONST_DOUBLE_HIGH (value)));

    default:
      return false;
    }
}
/* Implements EXTRA_CONSTRAINT.  Return true if VALUE
   matches for C in [QRSTUW].  */

bool
alpha_extra_constraint (rtx value, int c)
{
  switch (c)
    {
    case 'Q':
      return normal_memory_operand (value, VOIDmode);
    case 'R':
      return direct_call_operand (value, Pmode);
    case 'S':
      return (GET_CODE (value) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
    case 'T':
      return GET_CODE (value) == HIGH;
    case 'U':
      return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
    case 'W':
      return (GET_CODE (value) == CONST_VECTOR
	      && value == CONST0_RTX (GET_MODE (value)));

    default:
      return false;
    }
}
/* The scalar modes supported differ from the default check-what-c-supports
   version in that sometimes TFmode is available even when long double
   indicates only DFmode.  On unicosmk, we have the situation that HImode
   doesn't map to any C type, but of course we still support that.  */

static bool
alpha_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode: /* via optabs.c */
      return true;

    case SFmode:
    case DFmode:
      return true;

    case TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}

/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (enum machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}
/* Return 1 if this function can directly return via $26.  */

int
direct_return (void)
{
  return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
	  && reload_completed
	  && alpha_sa_size () == 0
	  && get_frame_size () == 0
	  && current_function_outgoing_args_size == 0
	  && current_function_pretend_args_size == 0);
}
/* Return the ADDR_VEC associated with a tablejump insn.  */

rtx
alpha_tablejump_addr_vec (rtx insn)
{
  rtx tmp;

  tmp = JUMP_LABEL (insn);
  if (!tmp)
    return NULL_RTX;
  tmp = NEXT_INSN (tmp);
  if (!tmp)
    return NULL_RTX;
  if (GET_CODE (tmp) == JUMP_INSN
      && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
    return PATTERN (tmp);
  return NULL_RTX;
}

/* Return the label of the predicted edge, or CONST0_RTX if we don't know.  */

rtx
alpha_tablejump_best_label (rtx insn)
{
  rtx jump_table = alpha_tablejump_addr_vec (insn);
  rtx best_label = NULL_RTX;

  /* ??? Once the CFG doesn't keep getting completely rebuilt, look
     there for edge frequency counts from profile data.  */

  if (jump_table)
    {
      int n_labels = XVECLEN (jump_table, 1);
      int best_count = -1;
      int i, j;

      for (i = 0; i < n_labels; i++)
	{
	  int count = 1;

	  for (j = i + 1; j < n_labels; j++)
	    if (XEXP (XVECEXP (jump_table, 1, i), 0)
		== XEXP (XVECEXP (jump_table, 1, j), 0))
	      count++;

	  if (count > best_count)
	    best_count = count, best_label = XVECEXP (jump_table, 1, i);
	}
    }

  return best_label ? best_label : const0_rtx;
}
/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return 0;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}
/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}
/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".sdata") == 0
	  || strcmp (section, ".sbss") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
	 in sdata because it might be too big when completed.  */
      if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
	return true;
    }

  return false;
}
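/* Example (illustrative, not in the original sources): with the default
   -G 8 limit established in override_options, a 4-byte "int" variable
   lands in .sdata/.sbss and can be addressed with a 16-bit displacement
   from $gp, while a 16-byte array stays in the normal data sections.  */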
#if TARGET_ABI_OPEN_VMS
static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname [symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF		\
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST		\
       && GET_CODE (XEXP (X, 0)) == PLUS	\
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif
/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */
bool
alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x))
    return (strict
	    ? STRICT_REG_OK_FOR_BASE_P (x)
	    : NONSTRICT_REG_OK_FOR_BASE_P (x));

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (x))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	x = SUBREG_REG (x);

      if (REG_P (x))
	{
	  if (! strict
	      && NONSTRICT_REG_OK_FP_BASE_P (x)
	      && GET_CODE (ofs) == CONST_INT)
	    return true;
	  if ((strict
	       ? STRICT_REG_OK_FOR_BASE_P (x)
	       : NONSTRICT_REG_OK_FOR_BASE_P (x))
	      && CONSTANT_ADDRESS_P (ofs))
	    return true;
	}
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as
     are small data symbols.  */
  else if (TARGET_EXPLICIT_RELOCS)
    {
      if (small_symbolic_operand (x, Pmode))
	return true;

      if (GET_CODE (x) == LO_SUM)
	{
	  rtx ofs = XEXP (x, 1);
	  x = XEXP (x, 0);

	  /* Discard non-paradoxical subregs.  */
	  if (GET_CODE (x) == SUBREG
	      && (GET_MODE_SIZE (GET_MODE (x))
		  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	    x = SUBREG_REG (x);

	  /* Must have a valid base register.  */
	  if (! (REG_P (x)
		 && (strict
		     ? STRICT_REG_OK_FOR_BASE_P (x)
		     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
	    return false;

	  /* The symbol must be local.  */
	  if (local_symbolic_operand (ofs, Pmode)
	      || dtp32_symbolic_operand (ofs, Pmode)
	      || tp32_symbolic_operand (ofs, Pmode))
	    return true;
	}
    }

  return false;
}
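/* Examples (illustrative, not in the original sources): (reg $16),
   (plus (reg $16) (const_int 8)), and -- for DImode only --
   (and (plus (reg $16) (const_int 11)) (const_int -8)) are all accepted;
   the AND form is the address shape produced for ldq_u.  */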
/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

rtx
alpha_legitimize_address (rtx x, rtx scratch,
			  enum machine_mode mode ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (!no_new_pseudos
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (!no_new_pseudos
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
			       XEXP (XEXP (XEXP (x, 1), 0), 0),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }
  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.  */
  if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, insn, dest, seq;

      switch (tls_symbolic_operand_type (x))
	{
	case TLS_MODEL_NONE:
	  break;

	case TLS_MODEL_GLOBAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  dest = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
	  insn = gen_call_value_osf_tlsgd (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  CONST_OR_PURE_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  emit_libcall_block (insn, dest, r0, x);
	  return dest;

	case TLS_MODEL_LOCAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  scratch = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
	  insn = gen_call_value_osf_tlsldm (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  CONST_OR_PURE_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				UNSPEC_TLSLDM_CALL);
	  emit_libcall_block (insn, scratch, r0, eqv);

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);

	  if (alpha_tls_size == 64)
	    {
	      dest = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
	      emit_insn (gen_adddi3 (dest, dest, scratch));
	      return dest;
	    }
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, scratch, insn);
	      scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, scratch, eqv);

	case TLS_MODEL_INITIAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);
	  scratch = gen_reg_rtx (Pmode);
	  dest = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
	  emit_insn (gen_adddi3 (dest, tp, scratch));
	  return dest;

	case TLS_MODEL_LOCAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, tp, insn);
	      tp = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, tp, eqv);

	default:
	  gcc_unreachable ();
	}
      if (local_symbolic_operand (x, Pmode))
	{
	  if (small_symbolic_operand (x, Pmode))
	    return x;
	  else
	    {
	      if (!no_new_pseudos)
		scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch,
				      gen_rtx_HIGH (Pmode, x)));
	      return gen_rtx_LO_SUM (Pmode, scratch, x);
	    }
	}
    }

  return NULL;

 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
			       (no_new_pseudos ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
			       (no_new_pseudos ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);

    return plus_constant (x, low);
  }
}
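/* Worked example of the addend split (illustrative, not in the original
   sources): for addend 0x12345, low = 0x2345 and high = 0x10000, so we
   add the high part with a single ldah and fold the 16-bit low part into
   the final displacement.  The "((v & 0xffff) ^ 0x8000) - 0x8000" idiom
   sign-extends a 16-bit field: for addend 0x9000 it yields low = -0x7000,
   and the high part grows to 0x10000 to compensate.  */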
/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}
/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}
static int
some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  return small_symbolic_operand (x, Pmode) != 0;
}

static int
split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  if (small_symbolic_operand (x, Pmode))
    {
      x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
      *px = x;
      return -1;
    }

  return 0;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
  return x;
}
/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}
/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
				 enum machine_mode mode ATTRIBUTE_UNUSED,
				 int opnum, int type,
				 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
	return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
	 in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
				      GEN_INT (high)),
			GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, type);
      return x;
    }

  return NULL_RTX;
}
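/* Example (illustrative, not in the original sources):
   (plus (reg $9) (const_int 0x12340)) is rewritten as
   (plus (plus (reg $9) (const_int 0x10000)) (const_int 0x2340));
   the inner PLUS is reloaded with a single ldah and the 16-bit
   remainder stays in the mem displacement.  */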
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (optimize_size)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_tune];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
	 nearly anywhere with no cost.  If it is a valid operand for an
	 ADD or AND, likewise return 0 if we know it will be used in that
	 context.  Otherwise, return 2 since it might be used there later.
	 All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
	{
	  *total = 0;
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	*total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
	       || (outer_code == AND && and_operand (x, VOIDmode)))
	*total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
	*total = 2;
      else
	*total = COSTS_N_INSNS (2);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
	/* Estimate of cost for call_pal rduniq.  */
	/* ??? How many insns do we emit here?  More than one...  */
	*total = COSTS_N_INSNS (15);
      else
	/* Otherwise we do a load from the GOT.  */
	*total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
      return true;

    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
	       && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
	{
	  *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
		    + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
	  return true;
	}
      return false;

    case MULT:
      if (float_mode_p)
	*total = cost_data->fp_mult;
      else if (mode == DImode)
	*total = cost_data->int_mult_di;
      else
	*total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) <= 3)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else
	*total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
	*total = cost_data->int_div;
      else if (mode == SFmode)
	*total = cost_data->fp_div_sf;
      else
	*total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    case FLOAT_EXTEND:
      if (GET_CODE (XEXP (x, 0)) == MEM)
	*total = 0;
      else
	*total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}
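/* Example (illustrative, not in the original sources): (plus (mult X 8) Y)
   maps to a single s8addq, which is why the PLUS case above charges one
   insn plus the cost of the sub-operands when the multiplier is a const48
   operand (4 or 8).  */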
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  SCRATCH is a free register for use in reloading out
   of range stack slots.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT disp, offset;

  gcc_assert (GET_CODE (ref) == MEM);

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  else
    disp = 0;

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
  if (MEM_ALIGN (ref) >= 32)
    offset = 0;
  else
    offset = disp & 3;

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  if (WORDS_BIG_ENDIAN)
    offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
  else
    offset *= 8;
  *pbitnum = GEN_INT (offset);
}
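/* Example (illustrative, not in the original sources): for a little-endian
   HImode reference at byte offset 2 within its word, *PALIGNED_MEM becomes
   the enclosing SImode word and *PBITNUM is 16, so the halfword is reached
   by shifting the loaded word right 16 bits (or inserting at bit 16 on a
   store).  */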
/* Similar, but just get the address.  Handle the two reload cases.
   Add EXTRA_OFFSET to the address we return.  */

rtx
get_unaligned_address (rtx ref, int extra_offset)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  gcc_assert (GET_CODE (ref) == MEM);

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset + extra_offset);
}
/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of CLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class (rtx x, enum reg_class class)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return class;

  /* These sorts of constants we can easily drop to memory.  */
  if (GET_CODE (x) == CONST_INT
      || GET_CODE (x) == CONST_DOUBLE
      || GET_CODE (x) == CONST_VECTOR)
    {
      if (class == FLOAT_REGS)
	return NO_REGS;
      if (class == ALL_REGS)
	return GENERAL_REGS;
      return class;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (class == ALL_REGS ? GENERAL_REGS : class);

  return class;
}
/* Loading and storing HImode or QImode values to and from memory
   usually requires a scratch register.  The exceptions are loading
   QImode and HImode from an aligned address to a general register
   unless byte instructions are permitted.

   We also cannot load an unaligned address or a paradoxical SUBREG
   into an FP register.

   We also cannot do integral arithmetic into FP regs, as might result
   from register elimination into a DImode fp register.  */

enum reg_class
alpha_secondary_reload_class (enum reg_class class, enum machine_mode mode,
			      rtx x, int in)
{
  if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
    {
      if (GET_CODE (x) == MEM
	  || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
	  || (GET_CODE (x) == SUBREG
	      && (GET_CODE (SUBREG_REG (x)) == MEM
		  || (GET_CODE (SUBREG_REG (x)) == REG
		      && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
	{
	  if (!in || !aligned_memory_operand (x, mode))
	    return GENERAL_REGS;
	}
    }

  if (class == FLOAT_REGS)
    {
      if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
	return GENERAL_REGS;

      if (GET_CODE (x) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (x))
	      > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	return GENERAL_REGS;

      if (in && INTEGRAL_MODE_P (mode)
	  && ! (memory_operand (x, mode) || x == const0_rtx))
	return GENERAL_REGS;
    }

  return NO_REGS;
}
/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static int
alpha_set_memflags_1 (rtx *xp, void *data)
{
  rtx x = *xp, orig = (rtx) data;

  if (GET_CODE (x) != MEM)
    return 0;

  MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
  MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  MEM_READONLY_P (x) = MEM_READONLY_P (orig);

  /* Sadly, we cannot use alias sets because the extra aliasing
     produced by the AND interferes.  Given that two-byte quantities
     are the only thing we would be able to differentiate anyway,
     there does not seem to be any point in convoluting the early
     out of the alias check.  */

  return -1;
}
/* Given INSN, which is an INSN list or the PATTERN of a single insn
   generated to perform a memory operation, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx insn, rtx ref)
{
  rtx *base_ptr;

  if (GET_CODE (ref) != MEM)
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_IN_STRUCT_P (ref)
      && !MEM_SCALAR_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  if (INSN_P (insn))
    base_ptr = &PATTERN (insn);
  else
    base_ptr = &insn;
  for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
}
static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
				 int, bool);

/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */
static rtx
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
			HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
  rtx temp, insn;

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */

  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
	 positive, we must adjust it to do two ldha insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
	{
	  extra = 0x4000;
	  tmp1 -= 0x40000000;
	  high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
	}

      if (c == low || (low == 0 && extra == 0))
	{
	  /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
	     but that meant that we can't handle INT_MIN on 32-bit machines
	     (like NT/Alpha), because we recurse indefinitely through
	     emit_move_insn to gen_movdi.  So instead, since we know exactly
	     what we want, create it explicitly.  */

	  if (no_output)
	    return pc_rtx;
	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
	  return target;
	}
      else if (n >= 2 + (extra != 0))
	{
	  if (no_output)
	    return pc_rtx;
	  if (no_new_pseudos)
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
	      temp = target;
	    }
	  else
	    temp = copy_to_suggested_reg (GEN_INT (high << 16),
					  subtarget, mode);

	  /* As of 2002-02-23, addsi3 is only available when not optimizing.
	     This means that if we go through expand_binop, we'll try to
	     generate extensions, etc, which will require new pseudos, which
	     will fail during some split phases.  The SImode add patterns
	     still exist, but are not named.  So build the insns by hand.  */

	  if (extra != 0)
	    {
	      if (! subtarget)
		subtarget = gen_reg_rtx (mode);
	      insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
	      insn = gen_rtx_SET (VOIDmode, subtarget, insn);
	      emit_insn (insn);
	      temp = subtarget;
	    }

	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	  insn = gen_rtx_SET (VOIDmode, target, insn);
	  emit_insn (insn);
	  return target;
	}
    }
  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && no_new_pseudos))
    return 0;

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if minus some low bits, we've an easy load of
	 high bits.  */

      new = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new != 0)
	{
	  temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
	  if (temp)
	    {
	      if (no_output)
		return temp;
	      return expand_binop (mode, add_optab, temp, GEN_INT (new),
				   target, 0, OPTAB_WIDEN);
	    }
	}

      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_unop (mode, one_cmpl_optab, temp, target, 0);
	}

      /* Next try to form a constant and do a left shift.  We can do this
	 if some low-order bits are zero; the exact_log2 call below tells
	 us that information.  The bits we are shifting out could be any
	 value, but here we'll just try the 0- and sign-extended forms of
	 the constant.  To try to increase the chance of having the same
	 constant in more than one insn, start at the highest number of
	 bits to shift, but try all possibilities in case a ZAPNOT will
	 be useful.  */

      bits = exact_log2 (c & -c);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c >> bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp && c < 0)
	      {
		new = (unsigned HOST_WIDE_INT)c >> bits;
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }

      /* Now try high-order zero bits.  Here we try the shifted-in bits as
	 all zero and all ones.  Be careful to avoid shifting outside the
	 mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
	 confuse the recursive call and set all of the high 32 bits.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp)
	      {
		new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
				     target, 1, OPTAB_WIDEN);
	      }
	  }

      /* Now try high-order 1 bits.  We get that with a sign-extension.
	 But one bit isn't enough here.  Be careful to avoid shifting outside
	 the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (~ c) - 2);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp)
	      {
		new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }
    }
#if HOST_BITS_PER_WIDE_INT == 64
  /* Finally, see if we can load a value into the target that is the same as
     the constant except that all bytes that are 0 are changed to be 0xff.
     If we can, then we can do a ZAPNOT to obtain the desired constant.  */

  new = c;
  for (i = 0; i < 64; i += 8)
    if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new != c)
    {
      temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
			       target, 0, OPTAB_WIDEN);
	}
    }
#endif

  return 0;
}
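/* Example of the ZAPNOT fallback (illustrative, not in the original
   sources): for c = 0x00ff00ff00ff00ff, NEW becomes all ones (-1), which
   loads in a single lda; ANDing with c is then a one-insn zapnot with
   byte mask 0x55, yielding the constant in two insns total.  */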
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */

static rtx
alpha_emit_set_const (rtx target, enum machine_mode mode,
		      HOST_WIDE_INT c, int n, bool no_output)
{
  enum machine_mode orig_mode = mode;
  rtx orig_target = target;
  rtx result = 0;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, and
     we can't load this constant in one insn, do this in DImode.  */
  if (no_new_pseudos && mode == SImode
      && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
    {
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
      if (result)
	return result;

      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
    {
      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
      if (result)
	{
	  rtx insn, set;

	  if (no_output)
	    return result;

	  insn = get_last_insn ();
	  set = single_set (insn);
	  if (! CONSTANT_P (SET_SRC (set)))
	    set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
	  break;
	}
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result)
    {
      if (result == target)
	result = orig_target;
      else if (mode != orig_mode)
	result = gen_lowpart (orig_mode, result);
    }

  return result;
}
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

static rtx
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word */
#if HOST_BITS_PER_WIDE_INT >= 64
  gcc_assert (c2 == -(c1 < 0));
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d4);
#else
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d2);
  c2 += (d2 < 0);
  d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
  c2 -= d3;
  d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c2 == d4);
#endif

  /* Construct the high word */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
	emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}
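/* Worked example (illustrative, not in the original sources): for
   C = 0x123456789abcdef0 the 64-bit decomposition gives d1 = -0x2110,
   d2 = -0x65430000, d3 = 0x5679, d4 = 0x12340000.  We load d4 + d3
   (0x12345679) with ldah/lda, shift it left 32 bits, then add d2 and d1
   to materialize the full constant.  */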
/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
   the low 64 bits.  */

static void
alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
{
  HOST_WIDE_INT i0, i1;

  if (GET_CODE (x) == CONST_VECTOR)
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);

  if (GET_CODE (x) == CONST_INT)
    {
      i0 = INTVAL (x);
      i1 = -(i0 < 0);
    }
  else if (HOST_BITS_PER_WIDE_INT >= 64)
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = -(i0 < 0);
    }
  else
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = CONST_DOUBLE_HIGH (x);
    }

  *p0 = i0;
  *p1 = i1;
}
/* Implement LEGITIMATE_CONSTANT_P.  This is all constants for which we
   are willing to load the value into a register via a move pattern.
   Normally this is all symbolic constants, integral constants that
   take three or fewer instructions, and floating-point zero.  */

bool
alpha_legitimate_constant_p (rtx x)
{
  enum machine_mode mode = GET_MODE (x);
  HOST_WIDE_INT i0, i1;

  switch (GET_CODE (x))
    {
    case CONST:
    case LABEL_REF:
    case HIGH:
      return true;

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      return SYMBOL_REF_TLS_MODEL (x) == 0;

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	return true;
      if (FLOAT_MODE_P (mode))
	return false;
      goto do_integer;

    case CONST_VECTOR:
      if (x == CONST0_RTX (mode))
	return true;
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
	return false;
      if (GET_MODE_SIZE (mode) != 8)
	return false;
      goto do_integer;

    case CONST_INT:
    do_integer:
      if (TARGET_BUILD_CONSTANTS)
	return true;
      alpha_extract_integer (x, &i0, &i1);
      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
	return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
      return false;

    default:
      return false;
    }
}
/* Operand 1 is known to be a constant, and should require more than one
   instruction to load.  Emit that multi-part load.  */

bool
alpha_split_const_mov (enum machine_mode mode, rtx *operands)
{
  HOST_WIDE_INT i0, i1;
  rtx temp = NULL_RTX;

  alpha_extract_integer (operands[1], &i0, &i1);

  if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
    temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);

  if (!temp && TARGET_BUILD_CONSTANTS)
    temp = alpha_emit_set_long_const (operands[0], i0, i1);

  if (temp)
    {
      if (!rtx_equal_p (operands[0], temp))
	emit_move_insn (operands[0], temp);
      return true;
    }

  return false;
}
/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

bool
alpha_expand_mov (enum machine_mode mode, rtx *operands)
{
  /* If the output is not a register, the input must be.  */
  if (GET_CODE (operands[0]) == MEM
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Allow legitimize_address to perform some simplifications.  */
  if (mode == Pmode && symbolic_operand (operands[1], mode))
    {
      rtx tmp;

      tmp = alpha_legitimize_address (operands[1], operands[0], mode);
      if (tmp)
	{
	  if (tmp == operands[0])
	    return true;
	  operands[1] = tmp;
	  return false;
	}
    }

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
    return false;

  /* Split large integers.  */
  if (GET_CODE (operands[1]) == CONST_INT
      || GET_CODE (operands[1]) == CONST_DOUBLE
      || GET_CODE (operands[1]) == CONST_VECTOR)
    {
      if (alpha_split_const_mov (mode, operands))
	return true;
    }

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  operands[1] = force_const_mem (mode, operands[1]);
  if (reload_in_progress)
    {
      emit_move_insn (operands[0], XEXP (operands[1], 0));
      operands[1] = copy_rtx (operands[1]);
      XEXP (operands[1], 0) = operands[0];
    }
  else
    operands[1] = validize_mem (operands[1]);
  return false;
}
/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

bool
alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
{
  /* If the output is not a register, the input must be.  */
  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (GET_CODE (operands[1]) == MEM
      || (GET_CODE (operands[1]) == SUBREG
	  && GET_CODE (SUBREG_REG (operands[1])) == MEM)
      || (reload_in_progress && GET_CODE (operands[1]) == REG
	  && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
      || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
	  && GET_CODE (SUBREG_REG (operands[1])) == REG
	  && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
    {
      if (aligned_memory_operand (operands[1], mode))
	{
	  if (reload_in_progress)
	    {
	      emit_insn ((mode == QImode
			  ? gen_reload_inqi_help
			  : gen_reload_inhi_help)
			 (operands[0], operands[1],
			  gen_rtx_REG (SImode, REGNO (operands[0]))));
	    }
	  else
	    {
	      rtx aligned_mem, bitnum;
	      rtx scratch = gen_reg_rtx (SImode);
	      rtx subtarget;
	      bool copyout;

	      get_aligned_mem (operands[1], &aligned_mem, &bitnum);

	      subtarget = operands[0];
	      if (GET_CODE (subtarget) == REG)
		subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	      else
		subtarget = gen_reg_rtx (DImode), copyout = true;

	      emit_insn ((mode == QImode
			  ? gen_aligned_loadqi
			  : gen_aligned_loadhi)
			 (subtarget, aligned_mem, bitnum, scratch));

	      if (copyout)
		emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	    }
	}
      else
	{
	  /* Don't pass these as parameters since that makes the generated
	     code depend on parameter evaluation order which will cause
	     bootstrap failures.  */

	  rtx temp1, temp2, seq, subtarget;
	  bool copyout;

	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);

	  subtarget = operands[0];
	  if (GET_CODE (subtarget) == REG)
	    subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	  else
	    subtarget = gen_reg_rtx (DImode), copyout = true;

	  seq = ((mode == QImode
		  ? gen_unaligned_loadqi
		  : gen_unaligned_loadhi)
		 (subtarget, get_unaligned_address (operands[1], 0),
		  temp1, temp2));
	  alpha_set_memflags (seq, operands[1]);
	  emit_insn (seq);

	  if (copyout)
	    emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	}
      return true;
    }
2332 if (GET_CODE (operands[0]) == MEM
2333 || (GET_CODE (operands[0]) == SUBREG
2334 && GET_CODE (SUBREG_REG (operands[0])) == MEM)
2335 || (reload_in_progress && GET_CODE (operands[0]) == REG
2336 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
2337 || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
2338 && GET_CODE (SUBREG_REG (operands[0])) == REG
2339 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
2341 if (aligned_memory_operand (operands[0], mode))
2343 rtx aligned_mem, bitnum;
2344 rtx temp1 = gen_reg_rtx (SImode);
2345 rtx temp2 = gen_reg_rtx (SImode);
2347 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2349 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2354 rtx temp1 = gen_reg_rtx (DImode);
2355 rtx temp2 = gen_reg_rtx (DImode);
2356 rtx temp3 = gen_reg_rtx (DImode);
2357 rtx seq = ((mode == QImode
2358 ? gen_unaligned_storeqi
2359 : gen_unaligned_storehi)
2360 (get_unaligned_address (operands[0], 0),
2361 operands[1], temp1, temp2, temp3));
2363 alpha_set_memflags (seq, operands[0]);
2372 /* Implement the movmisalign patterns. One of the operands is a memory
2373 location that is not naturally aligned. Emit instructions to load or store it. */
2376 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2378 /* Honor misaligned loads; these are the cases we promised to handle. */
2379 if (MEM_P (operands[1]))
2383 if (register_operand (operands[0], mode))
2386 tmp = gen_reg_rtx (mode);
2388 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2389 if (tmp != operands[0])
2390 emit_move_insn (operands[0], tmp);
2392 else if (MEM_P (operands[0]))
2394 if (!reg_or_0_operand (operands[1], mode))
2395 operands[1] = force_reg (mode, operands[1]);
2396 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2402 /* Generate an unsigned DImode to FP conversion. This is the same code
2403 optabs would emit if we didn't have TFmode patterns.
2405 For SFmode, this is the only construction I've found that can pass
2406 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2407 intermediates will work, because you'll get intermediate rounding
2408 that ruins the end result. Some of this could be fixed by turning
2409 on round-to-positive-infinity, but that requires diddling the fpsr,
2410 which kills performance. I tried turning this around and converting
2411 to a negative number, so that I could turn on /m, but either I did
2412 it wrong or there's something else going on, because I wound up with the exact
2413 same single-bit error. There is a branch-less form of this same code:
2424 fcmoveq $f10,$f11,$f0
2426 I'm not using it because it's the same number of instructions as
2427 this branch-full form, and it has more serialized long latency
2428 instructions on the critical path.
2430 For DFmode, we can avoid rounding errors by breaking up the word
2431 into two pieces, converting them separately, and adding them back:
2433 LC0: .long 0,0x5f800000
2438 cpyse $f11,$f31,$f10
2439 cpyse $f31,$f11,$f11
2447 This doesn't seem to be a clear-cut win over the optabs form.
2448 It probably all depends on the distribution of numbers being
2449 converted -- in the optabs form, all inputs except those with the
2450 high bit set have a much lower minimum execution time. */
2453 alpha_emit_floatuns (rtx operands[2])
2455 rtx neglab, donelab, i0, i1, f0, in, out;
2456 enum machine_mode mode;
2459 in = force_reg (DImode, operands[1]);
2460 mode = GET_MODE (out);
2461 neglab = gen_label_rtx ();
2462 donelab = gen_label_rtx ();
2463 i0 = gen_reg_rtx (DImode);
2464 i1 = gen_reg_rtx (DImode);
2465 f0 = gen_reg_rtx (mode);
2467 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2469 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2470 emit_jump_insn (gen_jump (donelab));
2473 emit_label (neglab);
2475 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2476 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2477 emit_insn (gen_iordi3 (i0, i0, i1));
2478 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2479 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2481 emit_label (donelab);
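/* Illustrative C equivalent of the expansion above (a sketch of the
   logic only, not the emitted RTL; IN is the unsigned 64-bit input):

	if ((int64_t) in >= 0)
	  out = (double) (int64_t) in;
	else
	  {
	    uint64_t i0 = (in >> 1) | (in & 1);
	    double f0 = (double) (int64_t) i0;
	    out = f0 + f0;
	  }

   The shift halves the value so the signed conversion is exact, the
   low bit is kept as a sticky bit so rounding is unaffected, and the
   final addition doubles the result back into range. */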
2484 /* Generate the comparison for a conditional branch. */
2487 alpha_emit_conditional_branch (enum rtx_code code)
2489 enum rtx_code cmp_code, branch_code;
2490 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2491 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2494 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2496 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2498 alpha_compare.fp_p = 0;
2501 /* The general case: fold the comparison code to the types of compares
2502 that we have, choosing the branch as necessary. */
2505 case EQ: case LE: case LT: case LEU: case LTU:
2507 /* We have these compares: */
2508 cmp_code = code, branch_code = NE;
2513 /* These must be reversed. */
2514 cmp_code = reverse_condition (code), branch_code = EQ;
2517 case GE: case GT: case GEU: case GTU:
2518 /* For FP we swap them; for INT we reverse them. */
2519 if (alpha_compare.fp_p)
2521 cmp_code = swap_condition (code);
2523 tem = op0, op0 = op1, op1 = tem;
2527 cmp_code = reverse_condition (code);
2536 if (alpha_compare.fp_p)
2539 if (flag_unsafe_math_optimizations)
2541 /* When we are not as concerned about non-finite values, and we
2542 are comparing against zero, we can branch directly. */
2543 if (op1 == CONST0_RTX (DFmode))
2544 cmp_code = UNKNOWN, branch_code = code;
2545 else if (op0 == CONST0_RTX (DFmode))
2547 /* Undo the swap we probably did just above. */
2548 tem = op0, op0 = op1, op1 = tem;
2549 branch_code = swap_condition (cmp_code);
2555 /* ??? We mark the branch mode to be CCmode to prevent the
2556 compare and branch from being combined, since the compare
2557 insn follows IEEE rules that the branch does not. */
2558 branch_mode = CCmode;
2565 /* The following optimizations are only for signed compares. */
2566 if (code != LEU && code != LTU && code != GEU && code != GTU)
2568 /* Whee. Compare and branch against 0 directly. */
2569 if (op1 == const0_rtx)
2570 cmp_code = UNKNOWN, branch_code = code;
2572 /* If the constant doesn't fit into an immediate, but can
2573 be generated by lda/ldah, we adjust the argument and
2574 compare against zero, so we can use beq/bne directly. */
2575 /* ??? Don't do this when comparing against symbols, otherwise
2576 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2577 be declared false out of hand (at least for non-weak). */
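	 /* Illustrative example, assuming the usual Alpha constraint
	    letters ('I' = unsigned 8-bit immediate; 'K'/'L' = values
	    loadable by lda/ldah): "x == 1000" does not fit 'I', but
	    -1000 fits 'K', so we emit roughly

		lda	t,-1000(x)
		beq	t,L

	    where the branch is taken iff x == 1000. */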
2578 else if (GET_CODE (op1) == CONST_INT
2579 && (code == EQ || code == NE)
2580 && !(symbolic_operand (op0, VOIDmode)
2581 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2583 HOST_WIDE_INT v = INTVAL (op1), n = -v;
2585 if (! CONST_OK_FOR_LETTER_P (v, 'I')
2586 && (CONST_OK_FOR_LETTER_P (n, 'K')
2587 || CONST_OK_FOR_LETTER_P (n, 'L')))
2589 cmp_code = PLUS, branch_code = code;
2595 if (!reg_or_0_operand (op0, DImode))
2596 op0 = force_reg (DImode, op0);
2597 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2598 op1 = force_reg (DImode, op1);
2601 /* Emit an initial compare instruction, if necessary. */
2603 if (cmp_code != UNKNOWN)
2605 tem = gen_reg_rtx (cmp_mode);
2606 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2609 /* Zero the operands. */
2610 memset (&alpha_compare, 0, sizeof (alpha_compare));
2612 /* Return the branch comparison. */
2613 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2616 /* Certain simplifications can be done to make invalid setcc operations
2617 valid. Return the final comparison, or NULL if we can't make it work. */
2620 alpha_emit_setcc (enum rtx_code code)
2622 enum rtx_code cmp_code;
2623 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2624 int fp_p = alpha_compare.fp_p;
2627 /* Zero the operands. */
2628 memset (&alpha_compare, 0, sizeof (alpha_compare));
2630 if (fp_p && GET_MODE (op0) == TFmode)
2632 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2637 if (fp_p && !TARGET_FIX)
2640 /* The general case: fold the comparison code to the types of compares
2641 that we have, choosing the branch as necessary. */
2646 case EQ: case LE: case LT: case LEU: case LTU:
2648 /* We have these compares. */
2650 cmp_code = code, code = NE;
2654 if (!fp_p && op1 == const0_rtx)
2659 cmp_code = reverse_condition (code);
2663 case GE: case GT: case GEU: case GTU:
2664 /* These normally need swapping, but for integer zero we have
2665 special patterns that recognize swapped operands. */
2666 if (!fp_p && op1 == const0_rtx)
2668 code = swap_condition (code);
2670 cmp_code = code, code = NE;
2671 tmp = op0, op0 = op1, op1 = tmp;
2680 if (!register_operand (op0, DImode))
2681 op0 = force_reg (DImode, op0);
2682 if (!reg_or_8bit_operand (op1, DImode))
2683 op1 = force_reg (DImode, op1);
2686 /* Emit an initial compare instruction, if necessary. */
2687 if (cmp_code != UNKNOWN)
2689 enum machine_mode mode = fp_p ? DFmode : DImode;
2691 tmp = gen_reg_rtx (mode);
2692 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2693 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2695 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2699 /* Return the setcc comparison. */
2700 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2704 /* Rewrite a comparison against zero CMP of the form
2705 (CODE (cc0) (const_int 0)) so it can be written validly in
2706 a conditional move (if_then_else CMP ...).
2707 If both of the operands that set cc0 are nonzero we must emit
2708 an insn to perform the compare (it can't be done within
2709 the conditional move). */
2712 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2714 enum rtx_code code = GET_CODE (cmp);
2715 enum rtx_code cmov_code = NE;
2716 rtx op0 = alpha_compare.op0;
2717 rtx op1 = alpha_compare.op1;
2718 int fp_p = alpha_compare.fp_p;
2719 enum machine_mode cmp_mode
2720 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2721 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2722 enum machine_mode cmov_mode = VOIDmode;
2723 int local_fast_math = flag_unsafe_math_optimizations;
2726 /* Zero the operands. */
2727 memset (&alpha_compare, 0, sizeof (alpha_compare));
2729 if (fp_p != FLOAT_MODE_P (mode))
2731 enum rtx_code cmp_code;
2736 /* If we have fp<->int register move instructions, do a cmov by
2737 performing the comparison in fp registers, and move the
2738 zero/nonzero value to integer registers, where we can then
2739 use a normal cmov, or vice-versa. */
2743 case EQ: case LE: case LT: case LEU: case LTU:
2744 /* We have these compares. */
2745 cmp_code = code, code = NE;
2749 /* This must be reversed. */
2750 cmp_code = EQ, code = EQ;
2753 case GE: case GT: case GEU: case GTU:
2754 /* These normally need swapping, but for integer zero we have
2755 special patterns that recognize swapped operands. */
2756 if (!fp_p && op1 == const0_rtx)
2757 cmp_code = code, code = NE;
2760 cmp_code = swap_condition (code);
2762 tem = op0, op0 = op1, op1 = tem;
2770 tem = gen_reg_rtx (cmp_op_mode);
2771 emit_insn (gen_rtx_SET (VOIDmode, tem,
2772 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2775 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2776 op0 = gen_lowpart (cmp_op_mode, tem);
2777 op1 = CONST0_RTX (cmp_op_mode);
2779 local_fast_math = 1;
2782 /* We may be able to use a conditional move directly.
2783 This avoids emitting spurious compares. */
2784 if (signed_comparison_operator (cmp, VOIDmode)
2785 && (!fp_p || local_fast_math)
2786 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2787 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2789 /* We can't put the comparison inside the conditional move;
2790 emit a compare instruction and put that inside the
2791 conditional move. Make sure we emit only comparisons we have;
2792 swap or reverse as necessary. */
2799 case EQ: case LE: case LT: case LEU: case LTU:
2800 /* We have these compares: */
2804 /* This must be reversed. */
2805 code = reverse_condition (code);
2809 case GE: case GT: case GEU: case GTU:
2810 /* These must be swapped. */
2811 if (op1 != CONST0_RTX (cmp_mode))
2813 code = swap_condition (code);
2814 tem = op0, op0 = op1, op1 = tem;
2824 if (!reg_or_0_operand (op0, DImode))
2825 op0 = force_reg (DImode, op0);
2826 if (!reg_or_8bit_operand (op1, DImode))
2827 op1 = force_reg (DImode, op1);
2830 /* ??? We mark the branch mode to be CCmode to prevent the compare
2831 and cmov from being combined, since the compare insn follows IEEE
2832 rules that the cmov does not. */
2833 if (fp_p && !local_fast_math)
2836 tem = gen_reg_rtx (cmp_op_mode);
2837 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2838 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2841 /* Simplify a conditional move of two constants into a setcc with
2842 arithmetic. This is done with a splitter since combine would
2843 just undo the work if done during code generation. It also catches
2844 cases we wouldn't have before cse. */
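/* Some illustrative examples ("cmpXX" stands for whatever setcc insn
   the condition requires):

	d = c ? 8 : 0		cmpXX c,t	sll t,3,d
	d = c ? -1 : 0		cmpXX c,t	negq t,d
	d = c ? 5 : 1		cmpXX c,t	s4addq t,1,d  */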
2847 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2848 rtx t_rtx, rtx f_rtx)
2850 HOST_WIDE_INT t, f, diff;
2851 enum machine_mode mode;
2852 rtx target, subtarget, tmp;
2854 mode = GET_MODE (dest);
2859 if (((code == NE || code == EQ) && diff < 0)
2860 || (code == GE || code == GT))
2862 code = reverse_condition (code);
2863 diff = t, t = f, f = diff;
2867 subtarget = target = dest;
2870 target = gen_lowpart (DImode, dest);
2871 if (! no_new_pseudos)
2872 subtarget = gen_reg_rtx (DImode);
2876 /* Below, we must be careful to use copy_rtx on target and subtarget
2877 in intermediate insns, as they may be a subreg rtx, which may not be considered valid. */
2880 if (f == 0 && exact_log2 (diff) > 0
2881 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2882 viable over a longer latency cmove. On EV5, the E0 slot is a
2883 scarce resource, and on EV4 shift has the same latency as a cmove. */
2884 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2886 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2887 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2889 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2890 GEN_INT (exact_log2 (t)));
2891 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2893 else if (f == 0 && t == -1)
2895 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2896 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2898 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2900 else if (diff == 1 || diff == 4 || diff == 8)
2904 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2905 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2908 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2911 add_op = GEN_INT (f);
2912 if (sext_add_operand (add_op, mode))
2914 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2916 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2917 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2929 /* Look up the X_floating library function name for the given operation. */
2932 struct xfloating_op GTY(())
2934 const enum rtx_code code;
2935 const char *const GTY((skip)) osf_func;
2936 const char *const GTY((skip)) vms_func;
2940 static GTY(()) struct xfloating_op xfloating_ops[] =
2942 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2943 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2944 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2945 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2946 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2947 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2948 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2949 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2950 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2951 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2952 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2953 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2954 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2955 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2956 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2959 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2961 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2962 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2966 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2968 struct xfloating_op *ops = xfloating_ops;
2969 long n = ARRAY_SIZE (xfloating_ops);
2972 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2974 /* How irritating. Nothing to key off for the main table. */
2975 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2978 n = ARRAY_SIZE (vax_cvt_ops);
2981 for (i = 0; i < n; ++i, ++ops)
2982 if (ops->code == code)
2984 rtx func = ops->libcall;
2987 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2988 ? ops->vms_func : ops->osf_func);
2989 ops->libcall = func;
2997 /* Most X_floating operations take the rounding mode as an argument.
2998 Compute that here. */
3001 alpha_compute_xfloating_mode_arg (enum rtx_code code,
3002 enum alpha_fp_rounding_mode round)
3008 case ALPHA_FPRM_NORM:
3011 case ALPHA_FPRM_MINF:
3014 case ALPHA_FPRM_CHOP:
3017 case ALPHA_FPRM_DYN:
3023 /* XXX For reference, round to +inf is mode = 3. */
3026 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3032 /* Emit an X_floating library function call.
3034 Note that these functions do not follow normal calling conventions:
3035 TFmode arguments are passed in two integer registers (as opposed to
3036 indirect); TFmode return values appear in R16+R17.
3038 FUNC is the function to call.
3039 TARGET is where the output belongs.
3040 OPERANDS are the inputs.
3041 NOPERANDS is the count of inputs.
3042 EQUIV is the expression equivalent for the function.
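
   As an illustrative example (the actual register numbers are computed
   in the loop below, not hard-coded here): for a TFmode addition, X is
   passed in $16:$17, Y in $18:$19, the rounding-mode argument in the
   next integer register, and

	jsr $26,_OtsAddX

   returns the TFmode result in $16:$17.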
3046 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3047 int noperands, rtx equiv)
3049 rtx usage = NULL_RTX, tmp, reg;
3054 for (i = 0; i < noperands; ++i)
3056 switch (GET_MODE (operands[i]))
3059 reg = gen_rtx_REG (TFmode, regno);
3064 reg = gen_rtx_REG (DFmode, regno + 32);
3069 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
3072 reg = gen_rtx_REG (DImode, regno);
3080 emit_move_insn (reg, operands[i]);
3081 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3084 switch (GET_MODE (target))
3087 reg = gen_rtx_REG (TFmode, 16);
3090 reg = gen_rtx_REG (DFmode, 32);
3093 reg = gen_rtx_REG (DImode, 0);
3099 tmp = gen_rtx_MEM (QImode, func);
3100 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3101 const0_rtx, const0_rtx));
3102 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3103 CONST_OR_PURE_CALL_P (tmp) = 1;
3108 emit_libcall_block (tmp, target, reg, equiv);
3111 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3114 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3118 rtx out_operands[3];
3120 func = alpha_lookup_xfloating_lib_func (code);
3121 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3123 out_operands[0] = operands[1];
3124 out_operands[1] = operands[2];
3125 out_operands[2] = GEN_INT (mode);
3126 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3127 gen_rtx_fmt_ee (code, TFmode, operands[1],
3131 /* Emit an X_floating library function call for a comparison. */
3134 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3136 enum rtx_code cmp_code, res_code;
3137 rtx func, out, operands[2];
3139 /* X_floating library comparison functions return
3140 -1 unordered
3141 0 false
3142 1 true
3143 Convert the compare against the raw return value. */
3171 func = alpha_lookup_xfloating_lib_func (cmp_code);
3175 out = gen_reg_rtx (DImode);
3177 /* ??? Strange mode for equiv because what's actually returned
3178 is -1,0,1, not a proper boolean value. */
3179 alpha_emit_xfloating_libcall (func, out, operands, 2,
3180 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3185 /* Emit an X_floating library function call for a conversion. */
3188 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3190 int noperands = 1, mode;
3191 rtx out_operands[2];
3193 enum rtx_code code = orig_code;
3195 if (code == UNSIGNED_FIX)
3198 func = alpha_lookup_xfloating_lib_func (code);
3200 out_operands[0] = operands[1];
3205 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3206 out_operands[1] = GEN_INT (mode);
3209 case FLOAT_TRUNCATE:
3210 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3211 out_operands[1] = GEN_INT (mode);
3218 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3219 gen_rtx_fmt_e (orig_code,
3220 GET_MODE (operands[0]),
3224 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3225 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3226 guarantee that the sequence
3227 set (OP[0] OP[2])
3228 set (OP[1] OP[3])
3229 is valid. Naturally, output operand ordering is little-endian.
3230 This is used by *movtf_internal and *movti_internal. */
3233 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3236 switch (GET_CODE (operands[1]))
3239 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3240 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3244 operands[3] = adjust_address (operands[1], DImode, 8);
3245 operands[2] = adjust_address (operands[1], DImode, 0);
3250 gcc_assert (operands[1] == CONST0_RTX (mode));
3251 operands[2] = operands[3] = const0_rtx;
3258 switch (GET_CODE (operands[0]))
3261 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3262 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3266 operands[1] = adjust_address (operands[0], DImode, 8);
3267 operands[0] = adjust_address (operands[0], DImode, 0);
3274 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3277 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3278 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3282 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3283 op2 is a register containing the sign bit, operation is the
3284 logical operation to be performed. */
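/* E.g. (a sketch, per the expanders in alpha.md): negtf2 passes an XOR
   so the sign bit of the high word is flipped, while abstf2 passes an
   ANDNOT (bic) so it is cleared; the low word is simply copied. */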
3287 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3289 rtx high_bit = operands[2];
3293 alpha_split_tmode_pair (operands, TFmode, false);
3295 /* Detect three flavors of operand overlap. */
3297 if (rtx_equal_p (operands[0], operands[2]))
3299 else if (rtx_equal_p (operands[1], operands[2]))
3301 if (rtx_equal_p (operands[0], high_bit))
3308 emit_move_insn (operands[0], operands[2]);
3310 /* ??? If the destination overlaps both source tf and high_bit, then
3311 assume source tf is dead in its entirety and use the other half
3312 for a scratch register. Otherwise "scratch" is just the proper
3313 destination register. */
3314 scratch = operands[move < 2 ? 1 : 3];
3316 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3320 emit_move_insn (operands[0], operands[2]);
3322 emit_move_insn (operands[1], scratch);
3326 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3327 unaligned data:
3330 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3331 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3332 lda r3,X(r11) lda r3,X+2(r11)
3333 extwl r1,r3,r1 extql r1,r3,r1
3334 extwh r2,r3,r2 extqh r2,r3,r2
3335 or r1,r2,r1 or r1,r2,r1
3338 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3339 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3340 lda r3,X(r11) lda r3,X(r11)
3341 extll r1,r3,r1 extll r1,r3,r1
3342 extlh r2,r3,r2 extlh r2,r3,r2
3343 or r1,r2,r1 addl r1,r2,r1
3345 quad: ldq_u r1,X(r11)
3354 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3355 HOST_WIDE_INT ofs, int sign)
3357 rtx meml, memh, addr, extl, exth, tmp, mema;
3358 enum machine_mode mode;
3360 if (TARGET_BWX && size == 2)
3362 meml = adjust_address (mem, QImode, ofs);
3363 memh = adjust_address (mem, QImode, ofs+1);
3364 if (BYTES_BIG_ENDIAN)
3365 tmp = meml, meml = memh, memh = tmp;
3366 extl = gen_reg_rtx (DImode);
3367 exth = gen_reg_rtx (DImode);
3368 emit_insn (gen_zero_extendqidi2 (extl, meml));
3369 emit_insn (gen_zero_extendqidi2 (exth, memh));
3370 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3371 NULL, 1, OPTAB_LIB_WIDEN);
3372 addr = expand_simple_binop (DImode, IOR, extl, exth,
3373 NULL, 1, OPTAB_LIB_WIDEN);
3375 if (sign && GET_MODE (tgt) != HImode)
3377 addr = gen_lowpart (HImode, addr);
3378 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3382 if (GET_MODE (tgt) != DImode)
3383 addr = gen_lowpart (GET_MODE (tgt), addr);
3384 emit_move_insn (tgt, addr);
3389 meml = gen_reg_rtx (DImode);
3390 memh = gen_reg_rtx (DImode);
3391 addr = gen_reg_rtx (DImode);
3392 extl = gen_reg_rtx (DImode);
3393 exth = gen_reg_rtx (DImode);
3395 mema = XEXP (mem, 0);
3396 if (GET_CODE (mema) == LO_SUM)
3397 mema = force_reg (Pmode, mema);
3399 /* AND addresses cannot be in any alias set, since they may implicitly
3400 alias surrounding code. Ideally we'd have some alias set that
3401 covered all types except those with alignment 8 or higher. */
3403 tmp = change_address (mem, DImode,
3404 gen_rtx_AND (DImode,
3405 plus_constant (mema, ofs),
3407 set_mem_alias_set (tmp, 0);
3408 emit_move_insn (meml, tmp);
3410 tmp = change_address (mem, DImode,
3411 gen_rtx_AND (DImode,
3412 plus_constant (mema, ofs + size - 1),
3414 set_mem_alias_set (tmp, 0);
3415 emit_move_insn (memh, tmp);
3417 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3419 emit_move_insn (addr, plus_constant (mema, -1));
3421 emit_insn (gen_extqh_be (extl, meml, addr));
3422 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3424 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3425 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3426 addr, 1, OPTAB_WIDEN);
3428 else if (sign && size == 2)
3430 emit_move_insn (addr, plus_constant (mema, ofs+2));
3432 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3433 emit_insn (gen_extqh_le (exth, memh, addr));
3435 /* We must use tgt here for the target. The Alpha/VMS port fails if we use
3436 addr for the target, because addr is marked as a pointer and combine
3437 knows that pointers are always sign-extended 32-bit values. */
3438 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3439 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3440 addr, 1, OPTAB_WIDEN);
3444 if (WORDS_BIG_ENDIAN)
3446 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3450 emit_insn (gen_extwh_be (extl, meml, addr));
3455 emit_insn (gen_extlh_be (extl, meml, addr));
3460 emit_insn (gen_extqh_be (extl, meml, addr));
3467 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3471 emit_move_insn (addr, plus_constant (mema, ofs));
3472 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3476 emit_insn (gen_extwh_le (exth, memh, addr));
3481 emit_insn (gen_extlh_le (exth, memh, addr));
3486 emit_insn (gen_extqh_le (exth, memh, addr));
3495 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3496 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3501 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3504 /* Similarly, use ins and msk instructions to perform unaligned stores. */
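/* As an illustrative sketch (mirroring the Handbook-style load examples
   above; r4 is the data to store, little-endian, 2-byte case):

	ldq_u	r1,X(r11)
	ldq_u	r2,X+1(r11)
	lda	r3,X(r11)
	inswl	r4,r3,r5
	inswh	r4,r3,r6
	mskwl	r1,r3,r1
	mskwh	r2,r3,r2
	or	r1,r5,r1
	or	r2,r6,r2
	stq_u	r2,X+1(r11)
	stq_u	r1,X(r11)

   The high quadword is stored before the low one so that the degenerate
   aligned case remains correct (see the end of this function). */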
3507 alpha_expand_unaligned_store (rtx dst, rtx src,
3508 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3510 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3512 if (TARGET_BWX && size == 2)
3514 if (src != const0_rtx)
3516 dstl = gen_lowpart (QImode, src);
3517 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3518 NULL, 1, OPTAB_LIB_WIDEN);
3519 dsth = gen_lowpart (QImode, dsth);
3522 dstl = dsth = const0_rtx;
3524 meml = adjust_address (dst, QImode, ofs);
3525 memh = adjust_address (dst, QImode, ofs+1);
3526 if (BYTES_BIG_ENDIAN)
3527 addr = meml, meml = memh, memh = addr;
3529 emit_move_insn (meml, dstl);
3530 emit_move_insn (memh, dsth);
3534 dstl = gen_reg_rtx (DImode);
3535 dsth = gen_reg_rtx (DImode);
3536 insl = gen_reg_rtx (DImode);
3537 insh = gen_reg_rtx (DImode);
3539 dsta = XEXP (dst, 0);
3540 if (GET_CODE (dsta) == LO_SUM)
3541 dsta = force_reg (Pmode, dsta);
3543 /* AND addresses cannot be in any alias set, since they may implicitly
3544 alias surrounding code. Ideally we'd have some alias set that
3545 covered all types except those with alignment 8 or higher. */
3547 meml = change_address (dst, DImode,
3548 gen_rtx_AND (DImode,
3549 plus_constant (dsta, ofs),
3551 set_mem_alias_set (meml, 0);
3553 memh = change_address (dst, DImode,
3554 gen_rtx_AND (DImode,
3555 plus_constant (dsta, ofs + size - 1),
3557 set_mem_alias_set (memh, 0);
3559 emit_move_insn (dsth, memh);
3560 emit_move_insn (dstl, meml);
3561 if (WORDS_BIG_ENDIAN)
3563 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3565 if (src != const0_rtx)
3570 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3573 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3576 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3579 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3580 GEN_INT (size*8), addr));
3586 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3590 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3591 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3595 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3599 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3603 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3605 if (src != CONST0_RTX (GET_MODE (src)))
3607 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3608 GEN_INT (size*8), addr));
3613 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3616 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3619 emit_insn (gen_insql_le (insl, src, addr));
3624 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3629 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3633 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3634 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3638 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3643 if (src != CONST0_RTX (GET_MODE (src)))
3645 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3646 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3649 if (WORDS_BIG_ENDIAN)
3651 emit_move_insn (meml, dstl);
3652 emit_move_insn (memh, dsth);
3656 /* Must store high before low for the degenerate aligned case. */
3657 emit_move_insn (memh, dsth);
3658 emit_move_insn (meml, dstl);
3662 /* The block move code tries to maximize speed by separating loads and
3663 stores at the expense of register pressure: we load all of the data
3664 before we store it back out. Two secondary effects are worth
3665 mentioning: this speeds copying between aligned and unaligned
3666 buffers, and it makes the code significantly easier to write. */
3668 #define MAX_MOVE_WORDS 8
3670 /* Load an integral number of consecutive unaligned quadwords. */
3673 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3674 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3676 rtx const im8 = GEN_INT (-8);
3677 rtx const i64 = GEN_INT (64);
3678 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3679 rtx sreg, areg, tmp, smema;
3682 smema = XEXP (smem, 0);
3683 if (GET_CODE (smema) == LO_SUM)
3684 smema = force_reg (Pmode, smema);
3686 /* Generate all the tmp registers we need. */
3687 for (i = 0; i < words; ++i)
3689 data_regs[i] = out_regs[i];
3690 ext_tmps[i] = gen_reg_rtx (DImode);
3692 data_regs[words] = gen_reg_rtx (DImode);
3695 smem = adjust_address (smem, GET_MODE (smem), ofs);
3697 /* Load up all of the source data. */
3698 for (i = 0; i < words; ++i)
3700 tmp = change_address (smem, DImode,
3701 gen_rtx_AND (DImode,
3702 plus_constant (smema, 8*i),
3704 set_mem_alias_set (tmp, 0);
3705 emit_move_insn (data_regs[i], tmp);
3708 tmp = change_address (smem, DImode,
3709 gen_rtx_AND (DImode,
3710 plus_constant (smema, 8*words - 1),
3712 set_mem_alias_set (tmp, 0);
3713 emit_move_insn (data_regs[words], tmp);
3715 /* Extract the half-word fragments. Unfortunately DEC decided to make
3716 extxh with offset zero a noop instead of zeroing the register, so
3717 we must take care of that edge condition ourselves with cmov. */
3719 sreg = copy_addr_to_reg (smema);
3720 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3722 if (WORDS_BIG_ENDIAN)
3723 emit_move_insn (sreg, plus_constant (sreg, 7));
3724 for (i = 0; i < words; ++i)
3726 if (WORDS_BIG_ENDIAN)
3728 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3729 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3733 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3734 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3736 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3737 gen_rtx_IF_THEN_ELSE (DImode,
3738 gen_rtx_EQ (DImode, areg,
3740 const0_rtx, ext_tmps[i])));
3743 /* Merge the half-words into whole words. */
3744 for (i = 0; i < words; ++i)
3746 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3747 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3751 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3752 may be NULL to store zeros. */
3755 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3756 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3758 rtx const im8 = GEN_INT (-8);
3759 rtx const i64 = GEN_INT (64);
3760 rtx ins_tmps[MAX_MOVE_WORDS];
3761 rtx st_tmp_1, st_tmp_2, dreg;
3762 rtx st_addr_1, st_addr_2, dmema;
3765 dmema = XEXP (dmem, 0);
3766 if (GET_CODE (dmema) == LO_SUM)
3767 dmema = force_reg (Pmode, dmema);
3769 /* Generate all the tmp registers we need. */
3770 if (data_regs != NULL)
3771 for (i = 0; i < words; ++i)
3772 ins_tmps[i] = gen_reg_rtx (DImode);
3773 st_tmp_1 = gen_reg_rtx (DImode);
3774 st_tmp_2 = gen_reg_rtx (DImode);
3777 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3779 st_addr_2 = change_address (dmem, DImode,
3780 gen_rtx_AND (DImode,
3781 plus_constant (dmema, words*8 - 1),
3783 set_mem_alias_set (st_addr_2, 0);
3785 st_addr_1 = change_address (dmem, DImode,
3786 gen_rtx_AND (DImode, dmema, im8));
3787 set_mem_alias_set (st_addr_1, 0);
3789 /* Load up the destination end bits. */
3790 emit_move_insn (st_tmp_2, st_addr_2);
3791 emit_move_insn (st_tmp_1, st_addr_1);
3793 /* Shift the input data into place. */
3794 dreg = copy_addr_to_reg (dmema);
3795 if (WORDS_BIG_ENDIAN)
3796 emit_move_insn (dreg, plus_constant (dreg, 7));
3797 if (data_regs != NULL)
3799 for (i = words-1; i >= 0; --i)
3801 if (WORDS_BIG_ENDIAN)
3803 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3804 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3808 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3809 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3812 for (i = words-1; i > 0; --i)
3814 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3815 ins_tmps[i-1], ins_tmps[i-1], 1,
3820 /* Split and merge the ends with the destination data. */
3821 if (WORDS_BIG_ENDIAN)
3823 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3824 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3828 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3829 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3832 if (data_regs != NULL)
3834 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3835 st_tmp_2, 1, OPTAB_WIDEN);
3836 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3837 st_tmp_1, 1, OPTAB_WIDEN);
3841 if (WORDS_BIG_ENDIAN)
3842 emit_move_insn (st_addr_1, st_tmp_1);
3844 emit_move_insn (st_addr_2, st_tmp_2);
3845 for (i = words-1; i > 0; --i)
3847 rtx tmp = change_address (dmem, DImode,
3848 gen_rtx_AND (DImode,
3849 plus_constant (dmema,
3850 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3852 set_mem_alias_set (tmp, 0);
3853 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3855 if (WORDS_BIG_ENDIAN)
3856 emit_move_insn (st_addr_2, st_tmp_2);
3858 emit_move_insn (st_addr_1, st_tmp_1);
3862 /* Expand string/block move operations.
3864 operands[0] is the pointer to the destination.
3865 operands[1] is the pointer to the source.
3866 operands[2] is the number of bytes to move.
3867 operands[3] is the alignment. */
3870 alpha_expand_block_move (rtx operands[])
3872 rtx bytes_rtx = operands[2];
3873 rtx align_rtx = operands[3];
3874 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3875 HOST_WIDE_INT bytes = orig_bytes;
3876 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3877 HOST_WIDE_INT dst_align = src_align;
3878 rtx orig_src = operands[1];
3879 rtx orig_dst = operands[0];
3880 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3882 unsigned int i, words, ofs, nregs = 0;
3884 if (orig_bytes <= 0)
3886 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3889 /* Look for additional alignment information from recorded register info. */
3891 tmp = XEXP (orig_src, 0);
3892 if (GET_CODE (tmp) == REG)
3893 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3894 else if (GET_CODE (tmp) == PLUS
3895 && GET_CODE (XEXP (tmp, 0)) == REG
3896 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3898 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3899 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3903 if (a >= 64 && c % 8 == 0)
3905 else if (a >= 32 && c % 4 == 0)
3907 else if (a >= 16 && c % 2 == 0)
3912 tmp = XEXP (orig_dst, 0);
3913 if (GET_CODE (tmp) == REG)
3914 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3915 else if (GET_CODE (tmp) == PLUS
3916 && GET_CODE (XEXP (tmp, 0)) == REG
3917 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3919 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3920 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3924 if (a >= 64 && c % 8 == 0)
3926 else if (a >= 32 && c % 4 == 0)
3928 else if (a >= 16 && c % 2 == 0)
3934 if (src_align >= 64 && bytes >= 8)
3938 for (i = 0; i < words; ++i)
3939 data_regs[nregs + i] = gen_reg_rtx (DImode);
3941 for (i = 0; i < words; ++i)
3942 emit_move_insn (data_regs[nregs + i],
3943 adjust_address (orig_src, DImode, ofs + i * 8));
3950 if (src_align >= 32 && bytes >= 4)
3954 for (i = 0; i < words; ++i)
3955 data_regs[nregs + i] = gen_reg_rtx (SImode);
3957 for (i = 0; i < words; ++i)
3958 emit_move_insn (data_regs[nregs + i],
3959 adjust_address (orig_src, SImode, ofs + i * 4));
3970 for (i = 0; i < words+1; ++i)
3971 data_regs[nregs + i] = gen_reg_rtx (DImode);
3973 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3981 if (! TARGET_BWX && bytes >= 4)
3983 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3984 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3991 if (src_align >= 16)
3994 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3995 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3998 } while (bytes >= 2);
4000 else if (! TARGET_BWX)
4002 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
4003 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
4011 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
4012 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
4017 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
4019 /* Now save it back out again. */
4023 /* Write out the data in whatever chunks reading the source allowed. */
4024 if (dst_align >= 64)
4026 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4028 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
4035 if (dst_align >= 32)
4037 /* If the source has remaining DImode regs, write them out in two pieces. */
4039 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4041 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4042 NULL_RTX, 1, OPTAB_WIDEN);
4044 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4045 gen_lowpart (SImode, data_regs[i]));
4046 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4047 gen_lowpart (SImode, tmp));
4052 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4054 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4061 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4063 /* Write out a remaining block of words using unaligned methods. */
4065 for (words = 1; i + words < nregs; words++)
4066 if (GET_MODE (data_regs[i + words]) != DImode)
4070 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4072 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4079 /* Due to the above, this won't be aligned. */
4080 /* ??? If we have more than one of these, consider constructing full
4081 words in registers and using alpha_expand_unaligned_store_words. */
4082 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4084 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4089 if (dst_align >= 16)
4090 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4092 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4097 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4099 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4104 /* The remainder must be byte copies. */
4107 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4108 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4117 alpha_expand_block_clear (rtx operands[])
4119 rtx bytes_rtx = operands[1];
4120 rtx align_rtx = operands[3];
4121 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4122 HOST_WIDE_INT bytes = orig_bytes;
4123 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4124 HOST_WIDE_INT alignofs = 0;
4125 rtx orig_dst = operands[0];
4127 int i, words, ofs = 0;
4129 if (orig_bytes <= 0)
4131 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4134 /* Look for stricter alignment. */
4135 tmp = XEXP (orig_dst, 0);
4136 if (GET_CODE (tmp) == REG)
4137 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4138 else if (GET_CODE (tmp) == PLUS
4139 && GET_CODE (XEXP (tmp, 0)) == REG
4140 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4142 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4143 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4148 align = a, alignofs = 8 - c % 8;
4150 align = a, alignofs = 4 - c % 4;
4152 align = a, alignofs = 2 - c % 2;
4156 /* Handle an unaligned prefix first. */
4160 #if HOST_BITS_PER_WIDE_INT >= 64
4161 /* Given that alignofs is bounded by align, the only time BWX could
4162 generate three stores is for a 7-byte fill. Prefer two individual
4163 stores over a load/mask/store sequence. */
4164 if ((!TARGET_BWX || alignofs == 7)
4166 && !(alignofs == 4 && bytes >= 4))
4168 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4169 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4173 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4174 set_mem_alias_set (mem, 0);
4176 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4177 if (bytes < alignofs)
4179 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4190 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4191 NULL_RTX, 1, OPTAB_WIDEN);
4193 emit_move_insn (mem, tmp);
4197 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4199 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4204 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4206 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4211 if (alignofs == 4 && bytes >= 4)
4213 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4219 /* If we've not used the extra lead alignment information by now,
4220 we won't be able to. Downgrade align to match what's left over. */
4223 alignofs = alignofs & -alignofs;
4224 align = MIN (align, alignofs * BITS_PER_UNIT);
4228 /* Handle a block of contiguous long-words. */
4230 if (align >= 64 && bytes >= 8)
4234 for (i = 0; i < words; ++i)
4235 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4242 /* If the block is large and appropriately aligned, emit a single
4243 store followed by a sequence of stq_u insns. */
4245 if (align >= 32 && bytes > 16)
4249 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4253 orig_dsta = XEXP (orig_dst, 0);
4254 if (GET_CODE (orig_dsta) == LO_SUM)
4255 orig_dsta = force_reg (Pmode, orig_dsta);
4258 for (i = 0; i < words; ++i)
4261 = change_address (orig_dst, DImode,
4262 gen_rtx_AND (DImode,
4263 plus_constant (orig_dsta, ofs + i*8),
4265 set_mem_alias_set (mem, 0);
4266 emit_move_insn (mem, const0_rtx);
4269 /* Depending on the alignment, the first stq_u may have overlapped
4270 with the initial stl, which means that the last stq_u didn't
4271 write as much as it would appear. Leave those questionable bytes out of the count. */
4273 bytes -= words * 8 - 4;
4274 ofs += words * 8 - 4;
4277 /* Handle a smaller block of aligned words. */
4279 if ((align >= 64 && bytes == 4)
4280 || (align == 32 && bytes >= 4))
4284 for (i = 0; i < words; ++i)
4285 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4292 /* An unaligned block uses stq_u stores for as many as possible. */
4298 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4304 /* Next clean up any trailing pieces. */
4306 #if HOST_BITS_PER_WIDE_INT >= 64
4307 /* Count the number of bits in BYTES for which aligned stores could be emitted. */
4310 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align; i <<= 1)
4314 /* If we have appropriate alignment (and it wouldn't take too many
4315 instructions otherwise), mask out the bytes we need. */
4316 if (TARGET_BWX ? words > 2 : bytes > 0)
4323 mem = adjust_address (orig_dst, DImode, ofs);
4324 set_mem_alias_set (mem, 0);
4326 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4328 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4329 NULL_RTX, 1, OPTAB_WIDEN);
4331 emit_move_insn (mem, tmp);
4334 else if (align >= 32 && bytes < 4)
4339 mem = adjust_address (orig_dst, SImode, ofs);
4340 set_mem_alias_set (mem, 0);
4342 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4344 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4345 NULL_RTX, 1, OPTAB_WIDEN);
4347 emit_move_insn (mem, tmp);
4353 if (!TARGET_BWX && bytes >= 4)
4355 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4365 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4369 } while (bytes >= 2);
4371 else if (! TARGET_BWX)
4373 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4381 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4389 /* Returns a mask so that zap(x, value) == x & mask. */
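/* For example, VALUE == 0x0f selects bytes 0-3 for zapping, so the
   returned mask is 0xffffffff00000000 (illustrative). */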
4392 alpha_expand_zap_mask (HOST_WIDE_INT value)
4397 if (HOST_BITS_PER_WIDE_INT >= 64)
4399 HOST_WIDE_INT mask = 0;
4401 for (i = 7; i >= 0; --i)
4404 if (!((value >> i) & 1))
4408 result = gen_int_mode (mask, DImode);
4412 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4414 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4416 for (i = 7; i >= 4; --i)
4419 if (!((value >> i) & 1))
4423 for (i = 3; i >= 0; --i)
4426 if (!((value >> i) & 1))
4430 result = immed_double_const (mask_lo, mask_hi, DImode);
4437 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4438 enum machine_mode mode,
4439 rtx op0, rtx op1, rtx op2)
4441 op0 = gen_lowpart (mode, op0);
4443 if (op1 == const0_rtx)
4444 op1 = CONST0_RTX (mode);
4446 op1 = gen_lowpart (mode, op1);
4448 if (op2 == const0_rtx)
4449 op2 = CONST0_RTX (mode);
4451 op2 = gen_lowpart (mode, op2);
4453 emit_insn ((*gen) (op0, op1, op2));
4456 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4457 COND is true. Mark the jump as unlikely to be taken. */
4460 emit_unlikely_jump (rtx cond, rtx label)
4462 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4465 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4466 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4467 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4470 /* A subroutine of the atomic operation splitters. Emit a load-locked
4471 instruction in MODE. */
4474 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4476 rtx (*fn) (rtx, rtx) = NULL;
4478 fn = gen_load_locked_si;
4479 else if (mode == DImode)
4480 fn = gen_load_locked_di;
4481 emit_insn (fn (reg, mem));
4484 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4485 instruction in MODE. */
4488 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4490 rtx (*fn) (rtx, rtx, rtx) = NULL;
4492 fn = gen_store_conditional_si;
4493 else if (mode == DImode)
4494 fn = gen_store_conditional_di;
4495 emit_insn (fn (res, mem, val));
4498 /* A subroutine of the atomic operation splitters. Emit an insxl
4499 instruction in MODE. */
4502 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4504 rtx ret = gen_reg_rtx (DImode);
4505 rtx (*fn) (rtx, rtx, rtx);
4507 if (WORDS_BIG_ENDIAN)
4521 emit_insn (fn (ret, op1, op2));
4526 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4527 to perform. MEM is the memory on which to operate. VAL is the second
4528 operand of the binary operator. BEFORE and AFTER are optional locations to
4529 return the value of MEM either before or after the operation. SCRATCH is
4530 a scratch register. */
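/* The expansion below is the usual load-locked/store-conditional loop;
   an illustrative DImode sketch ("<op>" is the requested operation):

	mb
    1:	ldq_l	before,0(mem)
	<op>	before,val,scratch
	stq_c	scratch,0(mem)
	beq	scratch,1b
	mb

   where the backward branch is marked very unlikely to be taken. */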
4533 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4534 rtx before, rtx after, rtx scratch)
4536 enum machine_mode mode = GET_MODE (mem);
4537 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4539 emit_insn (gen_memory_barrier ());
4541 label = gen_label_rtx ();
4543 label = gen_rtx_LABEL_REF (DImode, label);
4547 emit_load_locked (mode, before, mem);
4550 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4552 x = gen_rtx_fmt_ee (code, mode, before, val);
4554 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4555 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4557 emit_store_conditional (mode, cond, mem, scratch);
4559 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4560 emit_unlikely_jump (x, label);
4562 emit_insn (gen_memory_barrier ());
4565 /* Expand a compare and swap operation. */
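/* An illustrative sketch of the sequence generated below (DImode):

	mb
    1:	ldq_l	ret,0(mem)
	cmpeq	ret,oldval,cond
	beq	cond,2f
	mov	newval,scratch
	stq_c	scratch,0(mem)
	beq	scratch,1b
	mb
    2:

   Both branches are marked unlikely; a failed comparison branches
   around the trailing barrier. */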
4568 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4571 enum machine_mode mode = GET_MODE (mem);
4572 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4574 emit_insn (gen_memory_barrier ());
4576 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4577 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4578 emit_label (XEXP (label1, 0));
4580 emit_load_locked (mode, retval, mem);
4582 x = gen_lowpart (DImode, retval);
4583 if (oldval == const0_rtx)
4584 x = gen_rtx_NE (DImode, x, const0_rtx);
4587 x = gen_rtx_EQ (DImode, x, oldval);
4588 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4589 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4591 emit_unlikely_jump (x, label2);
4593 emit_move_insn (scratch, newval);
4594 emit_store_conditional (mode, cond, mem, scratch);
4596 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4597 emit_unlikely_jump (x, label1);
4599 emit_insn (gen_memory_barrier ());
4600 emit_label (XEXP (label2, 0));
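/* Expand a compare and swap operation on a QImode or HImode quantity.
   There is no byte- or word-sized load-locked/store-conditional insn,
   so we operate on the aligned quadword containing the value: ADDR is
   rounded down with AND, the new value is positioned within the word
   with insbl/inswl, and the split code below extracts, compares, masks
   and merges inside the LL/SC loop. */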
4604 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4606 enum machine_mode mode = GET_MODE (mem);
4607 rtx addr, align, wdst;
4608 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4610 addr = force_reg (DImode, XEXP (mem, 0));
4611 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4612 NULL_RTX, 1, OPTAB_DIRECT);
4614 oldval = convert_modes (DImode, mode, oldval, 1);
4615 newval = emit_insxl (mode, newval, addr);
4617 wdst = gen_reg_rtx (DImode);
4619 fn5 = gen_sync_compare_and_swapqi_1;
4621 fn5 = gen_sync_compare_and_swaphi_1;
4622 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4624 emit_move_insn (dst, gen_lowpart (mode, wdst));
4628 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4629 rtx oldval, rtx newval, rtx align,
4630 rtx scratch, rtx cond)
4632 rtx label1, label2, mem, width, mask, x;
4634 mem = gen_rtx_MEM (DImode, align);
4635 MEM_VOLATILE_P (mem) = 1;
4637 emit_insn (gen_memory_barrier ());
4638 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4639 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4640 emit_label (XEXP (label1, 0));
4642 emit_load_locked (DImode, scratch, mem);
4644 width = GEN_INT (GET_MODE_BITSIZE (mode));
4645 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4646 if (WORDS_BIG_ENDIAN)
4647 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4649 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4651 if (oldval == const0_rtx)
4652 x = gen_rtx_NE (DImode, dest, const0_rtx);
4655 x = gen_rtx_EQ (DImode, dest, oldval);
4656 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4657 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4659 emit_unlikely_jump (x, label2);
4661 if (WORDS_BIG_ENDIAN)
4662 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4664 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4665 emit_insn (gen_iordi3 (scratch, scratch, newval));
4667 emit_store_conditional (DImode, scratch, mem, scratch);
4669 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4670 emit_unlikely_jump (x, label1);
4672 emit_insn (gen_memory_barrier ());
4673 emit_label (XEXP (label2, 0));
4676 /* Expand an atomic exchange operation. */
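/* Illustrative sketch of the loop generated below (DImode):

	mb
    1:	ldq_l	ret,0(mem)
	mov	val,scratch
	stq_c	scratch,0(mem)
	beq	scratch,1b

   i.e. VAL is stored unconditionally and the old contents returned. */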
4679 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4681 enum machine_mode mode = GET_MODE (mem);
4682 rtx label, x, cond = gen_lowpart (DImode, scratch);
4684 emit_insn (gen_memory_barrier ());
4686 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4687 emit_label (XEXP (label, 0));
4689 emit_load_locked (mode, retval, mem);
4690 emit_move_insn (scratch, val);
4691 emit_store_conditional (mode, cond, mem, scratch);
4693 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4694 emit_unlikely_jump (x, label);
4698 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4700 enum machine_mode mode = GET_MODE (mem);
4701 rtx addr, align, wdst;
4702 rtx (*fn4) (rtx, rtx, rtx, rtx);
4704 /* Force the address into a register. */
4705 addr = force_reg (DImode, XEXP (mem, 0));
4707 /* Align it to a multiple of 8. */
4708 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4709 NULL_RTX, 1, OPTAB_DIRECT);
4711 /* Insert val into the correct byte location within the word. */
4712 val = emit_insxl (mode, val, addr);
4714 wdst = gen_reg_rtx (DImode);
4716 fn4 = gen_sync_lock_test_and_setqi_1;
4718 fn4 = gen_sync_lock_test_and_sethi_1;
4719 emit_insn (fn4 (wdst, addr, val, align));
4721 emit_move_insn (dst, gen_lowpart (mode, wdst));
4725 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4726 rtx val, rtx align, rtx scratch)
4728 rtx label, mem, width, mask, x;
4730 mem = gen_rtx_MEM (DImode, align);
4731 MEM_VOLATILE_P (mem) = 1;
4733 emit_insn (gen_memory_barrier ());
4734 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4735 emit_label (XEXP (label, 0));
4737 emit_load_locked (DImode, scratch, mem);
4739 width = GEN_INT (GET_MODE_BITSIZE (mode));
4740 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4741 if (WORDS_BIG_ENDIAN)
4743 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4744 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4748 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4749 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4751 emit_insn (gen_iordi3 (scratch, scratch, val));
4753 emit_store_conditional (DImode, scratch, mem, scratch);
4755 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4756 emit_unlikely_jump (x, label);
4759 /* Adjust the cost of a scheduling dependency. Return the new cost of
4760 the dependency LINK of INSN on DEP_INSN. COST is the current cost. */
4763 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4765 enum attr_type insn_type, dep_insn_type;
4767 /* If the dependence is an anti-dependence, there is no cost. For an
4768 output dependence, there is sometimes a cost, but it doesn't seem
4769 worth handling those few cases. */
4770 if (REG_NOTE_KIND (link) != 0)
4773 /* If we can't recognize the insns, we can't really do anything. */
4774 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4777 insn_type = get_attr_type (insn);
4778 dep_insn_type = get_attr_type (dep_insn);
4780 /* Bring in the user-defined memory latency. */
4781 if (dep_insn_type == TYPE_ILD
4782 || dep_insn_type == TYPE_FLD
4783 || dep_insn_type == TYPE_LDSYM)
4784 cost += alpha_memory_latency-1;
4786 /* Everything else is handled in DFA bypasses now. */
4791 /* The number of instructions that can be issued per cycle. */
4794 alpha_issue_rate (void)
4796 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4799 /* How many alternative schedules to try. This should be as wide as the
4800 scheduling freedom in the DFA, but no wider. Making this value too
4801 large results in extra work for the scheduler.
4803 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4804 alternative schedules. For EV5, we can choose between E0/E1 and
4805 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4808 alpha_multipass_dfa_lookahead (void)
4810 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4813 /* Machine-specific function data. */
4815 struct machine_function GTY(())
4818 /* List of call information words for calls from this function. */
4819 struct rtx_def *first_ciw;
4820 struct rtx_def *last_ciw;
4823 /* List of deferred case vectors. */
4824 struct rtx_def *addr_list;
4827 const char *some_ld_name;
4829 /* For TARGET_LD_BUGGY_LDGP. */
  struct rtx_def *gp_save_rtx;
};
4833 /* How to allocate a 'struct machine_function'. */
4835 static struct machine_function *
4836 alpha_init_machine_status (void)
4838 return ((struct machine_function *)
4839 ggc_alloc_cleared (sizeof (struct machine_function)));
4842 /* Functions to save and restore alpha_return_addr_rtx. */
4844 /* Start the ball rolling with RETURN_ADDR_RTX. */
rtx
alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
{
  if (count != 0)
    return const0_rtx;

  return get_hard_reg_initial_val (Pmode, REG_RA);
}
4855 /* Return or create a memory slot containing the gp value for the current
4856 function. Needed only if TARGET_LD_BUGGY_LDGP. */
static rtx
alpha_gp_save_rtx (void)
{
  rtx seq, m = cfun->machine->gp_save_rtx;

  if (m == NULL)
    {
      start_sequence ();

      m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
      m = validize_mem (m);
      emit_move_insn (m, pic_offset_table_rtx);

      seq = get_insns ();
      end_sequence ();
      emit_insn_after (seq, entry_of_function ());

      cfun->machine->gp_save_rtx = m;
    }

  return m;
}
static int
alpha_ra_ever_killed (void)
{
  rtx top;
4886 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4887 return regs_ever_live[REG_RA];
  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();
4893 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4897 /* Return the trap mode suffix applicable to the current
4898 instruction, or NULL. */
4901 get_trap_mode_suffix (void)
4903 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4907 case TRAP_SUFFIX_NONE:
4910 case TRAP_SUFFIX_SU:
4911 if (alpha_fptm >= ALPHA_FPTM_SU)
4915 case TRAP_SUFFIX_SUI:
4916 if (alpha_fptm >= ALPHA_FPTM_SUI)
4920 case TRAP_SUFFIX_V_SV:
4928 case ALPHA_FPTM_SUI:
4934 case TRAP_SUFFIX_V_SV_SVI:
4943 case ALPHA_FPTM_SUI:
4950 case TRAP_SUFFIX_U_SU_SUI:
4959 case ALPHA_FPTM_SUI:
4972 /* Return the rounding mode suffix applicable to the current
4973 instruction, or NULL. */
4976 get_round_mode_suffix (void)
4978 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4982 case ROUND_SUFFIX_NONE:
4984 case ROUND_SUFFIX_NORMAL:
4987 case ALPHA_FPRM_NORM:
4989 case ALPHA_FPRM_MINF:
4991 case ALPHA_FPRM_CHOP:
4993 case ALPHA_FPRM_DYN:
5000 case ROUND_SUFFIX_C:
5009 /* Locate some local-dynamic symbol still in use by this function
5010 so that we can print its name in some movdi_er_tlsldm pattern. */
static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;
5017 if (GET_CODE (x) == SYMBOL_REF
5018 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}
static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;
5032 if (cfun->machine->some_ld_name)
5033 return cfun->machine->some_ld_name;
5035 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5038 return cfun->machine->some_ld_name;
5043 /* Print an operand. Recognize special options, documented below. */
5046 print_operand (FILE *file, rtx x, int code)
5053 /* Print the assembler name of the current function. */
5054 assemble_name (file, alpha_fnname);
5058 assemble_name (file, get_some_local_dynamic_name ());
5063 const char *trap = get_trap_mode_suffix ();
5064 const char *round = get_round_mode_suffix ();
5067 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5068 (trap ? trap : ""), (round ? round : ""));
      /* Generates the single-precision instruction suffix.  */
5074 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
      /* Generates the double-precision instruction suffix.  */
5079 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
      /* Generates a nop after a noreturn call at the very end of the
	 function.  */
5085 if (next_real_insn (current_output_insn) == 0)
5086 fprintf (file, "\n\tnop");
5090 if (alpha_this_literal_sequence_number == 0)
5091 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5092 fprintf (file, "%d", alpha_this_literal_sequence_number);
5096 if (alpha_this_gpdisp_sequence_number == 0)
5097 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5098 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5102 if (GET_CODE (x) == HIGH)
5103 output_addr_const (file, XEXP (x, 0));
      else
	output_operand_lossage ("invalid %%H value");
5112 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5114 x = XVECEXP (x, 0, 0);
5115 lituse = "lituse_tlsgd";
5117 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5119 x = XVECEXP (x, 0, 0);
5120 lituse = "lituse_tlsldm";
5122 else if (GET_CODE (x) == CONST_INT)
5123 lituse = "lituse_jsr";
5126 output_operand_lossage ("invalid %%J value");
5130 if (x != const0_rtx)
5131 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5139 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5140 lituse = "lituse_jsrdirect";
5142 lituse = "lituse_jsr";
5145 gcc_assert (INTVAL (x) != 0);
5146 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5150 /* If this operand is the constant zero, write it as "$31". */
5151 if (GET_CODE (x) == REG)
5152 fprintf (file, "%s", reg_names[REGNO (x)]);
5153 else if (x == CONST0_RTX (GET_MODE (x)))
5154 fprintf (file, "$31");
      else
	output_operand_lossage ("invalid %%r value");
5160 /* Similar, but for floating-point. */
5161 if (GET_CODE (x) == REG)
5162 fprintf (file, "%s", reg_names[REGNO (x)]);
5163 else if (x == CONST0_RTX (GET_MODE (x)))
5164 fprintf (file, "$f31");
      else
	output_operand_lossage ("invalid %%R value");
5170 /* Write the 1's complement of a constant. */
5171 if (GET_CODE (x) != CONST_INT)
5172 output_operand_lossage ("invalid %%N value");
5174 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5178 /* Write 1 << C, for a constant C. */
5179 if (GET_CODE (x) != CONST_INT)
5180 output_operand_lossage ("invalid %%P value");
5182 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5186 /* Write the high-order 16 bits of a constant, sign-extended. */
5187 if (GET_CODE (x) != CONST_INT)
5188 output_operand_lossage ("invalid %%h value");
5190 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5194 /* Write the low-order 16 bits of a constant, sign-extended. */
5195 if (GET_CODE (x) != CONST_INT)
5196 output_operand_lossage ("invalid %%L value");
5198 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5199 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
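      /* For example, for X == 0x18001 this prints -32767: the low
	 sixteen bits 0x8001 have bit 15 set, so subtracting 2 * 0x8000
	 sign-extends them.  */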
5203 /* Write mask for ZAP insn. */
5204 if (GET_CODE (x) == CONST_DOUBLE)
5206 HOST_WIDE_INT mask = 0;
5207 HOST_WIDE_INT value;
5209 value = CONST_DOUBLE_LOW (x);
	  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
	       i++, value >>= 8)
	    if (value & 0xff)
	      mask |= (1 << i);

	  value = CONST_DOUBLE_HIGH (x);
	  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
	       i++, value >>= 8)
	    if (value & 0xff)
	      mask |= (1 << (i + sizeof (int)));
5221 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5224 else if (GET_CODE (x) == CONST_INT)
5226 HOST_WIDE_INT mask = 0, value = INTVAL (x);
	  for (i = 0; i < 8; i++, value >>= 8)
	    if (value & 0xff)
	      mask |= (1 << i);

	  fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
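	  /* E.g. the constant 0x00ff00ff yields the mask 0x5: bytes 0
	     and 2 are nonzero, so bits 0 and 2 are set in the ZAP byte
	     mask.  */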
      else
	output_operand_lossage ("invalid %%m value");
5239 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5240 if (GET_CODE (x) != CONST_INT
5241 || (INTVAL (x) != 8 && INTVAL (x) != 16
5242 && INTVAL (x) != 32 && INTVAL (x) != 64))
5243 output_operand_lossage ("invalid %%M value");
5245 fprintf (file, "%s",
5246 (INTVAL (x) == 8 ? "b"
5247 : INTVAL (x) == 16 ? "w"
5248 : INTVAL (x) == 32 ? "l"
5253 /* Similar, except do it from the mask. */
5254 if (GET_CODE (x) == CONST_INT)
5256 HOST_WIDE_INT value = INTVAL (x);
5263 if (value == 0xffff)
5268 if (value == 0xffffffff)
5279 else if (HOST_BITS_PER_WIDE_INT == 32
5280 && GET_CODE (x) == CONST_DOUBLE
5281 && CONST_DOUBLE_LOW (x) == 0xffffffff
5282 && CONST_DOUBLE_HIGH (x) == 0)
      else
	output_operand_lossage ("invalid %%U value");
5291 /* Write the constant value divided by 8 for little-endian mode or
5292 (56 - value) / 8 for big-endian mode. */
5294 if (GET_CODE (x) != CONST_INT
5295 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5298 || (INTVAL (x) & 7) != 0)
5299 output_operand_lossage ("invalid %%s value");
5301 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5303 ? (56 - INTVAL (x)) / 8
5308 /* Same, except compute (64 - c) / 8 */
      if (GET_CODE (x) != CONST_INT
	  || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
	  || (INTVAL (x) & 7) != 0)
5313 output_operand_lossage ("invalid %%s value");
5315 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5320 /* On Unicos/Mk systems: use a DEX expression if the symbol
5321 clashes with a register name. */
5322 int dex = unicosmk_need_dex (x);
5324 fprintf (file, "DEX(%d)", dex);
5326 output_addr_const (file, x);
5330 case 'C': case 'D': case 'c': case 'd':
5331 /* Write out comparison name. */
5333 enum rtx_code c = GET_CODE (x);
5335 if (!COMPARISON_P (x))
5336 output_operand_lossage ("invalid %%C value");
5338 else if (code == 'D')
5339 c = reverse_condition (c);
5340 else if (code == 'c')
5341 c = swap_condition (c);
5342 else if (code == 'd')
5343 c = swap_condition (reverse_condition (c));
	if (c == LEU)
	  fprintf (file, "ule");
	else if (c == LTU)
	  fprintf (file, "ult");
	else if (c == UNORDERED)
	  fprintf (file, "un");
	else
	  fprintf (file, "%s", GET_RTX_NAME (c));
5357 /* Write the divide or modulus operator. */
      switch (GET_CODE (x))
	{
	case DIV:
	  fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
	  break;
	case UDIV:
	  fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
	  break;
	case MOD:
	  fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
	  break;
	case UMOD:
	  fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
	  break;
	default:
	  output_operand_lossage ("invalid %%E value");
	  break;
	}
5379 /* Write "_u" for unaligned access. */
5380 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5381 fprintf (file, "_u");
5385 if (GET_CODE (x) == REG)
5386 fprintf (file, "%s", reg_names[REGNO (x)]);
5387 else if (GET_CODE (x) == MEM)
5388 output_address (XEXP (x, 0));
5389 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5391 switch (XINT (XEXP (x, 0), 1))
5395 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5398 output_operand_lossage ("unknown relocation unspec");
      else
	output_addr_const (file, x);
5407 output_operand_lossage ("invalid %%xn code");
5412 print_operand_address (FILE *file, rtx addr)
  int basereg = 31;
  HOST_WIDE_INT offset = 0;
5417 if (GET_CODE (addr) == AND)
5418 addr = XEXP (addr, 0);
5420 if (GET_CODE (addr) == PLUS
5421 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5423 offset = INTVAL (XEXP (addr, 1));
5424 addr = XEXP (addr, 0);
5427 if (GET_CODE (addr) == LO_SUM)
5429 const char *reloc16, *reloclo;
5430 rtx op1 = XEXP (addr, 1);
5432 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5434 op1 = XEXP (op1, 0);
5435 switch (XINT (op1, 1))
5439 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5443 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5446 output_operand_lossage ("unknown relocation unspec");
5450 output_addr_const (file, XVECEXP (op1, 0, 0));
5455 reloclo = "gprellow";
5456 output_addr_const (file, op1);
5460 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5462 addr = XEXP (addr, 0);
      switch (GET_CODE (addr))
	{
	case REG:
	  basereg = REGNO (addr);
	  break;

	case SUBREG:
	  basereg = subreg_regno (addr);
	  break;

	default:
	  gcc_unreachable ();
	}
5477 fprintf (file, "($%d)\t\t!%s", basereg,
5478 (basereg == 29 ? reloc16 : reloclo));
  switch (GET_CODE (addr))
    {
    case REG:
      basereg = REGNO (addr);
      break;

    case SUBREG:
      basereg = subreg_regno (addr);
      break;

    case CONST_INT:
      offset = INTVAL (addr);
      break;
5496 #if TARGET_ABI_OPEN_VMS
5498 fprintf (file, "%s", XSTR (addr, 0));
5502 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5503 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5504 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5505 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5506 INTVAL (XEXP (XEXP (addr, 0), 1)));
5514 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5517 /* Emit RTL insns to initialize the variable parts of a trampoline at
5518 TRAMP. FNADDR is an RTX for the address of the function's pure
5519 code. CXT is an RTX for the static chain value for the function.
5521 The three offset parameters are for the individual template's
5522 layout. A JMPOFS < 0 indicates that the trampoline does not
5523 contain instructions at all.
5525 We assume here that a function will be called many more times than
5526 its address is taken (e.g., it might be passed to qsort), so we
5527 take the trouble to initialize the "hint" field in the JMP insn.
5528 Note that the hint field is PC (new) + 4 * bits 13:0. */
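/* As a sketch (mirroring the disabled block in the function below),
   the hint is computed as

     hint = ((fnaddr - (tramp + jmpofs + 4)) >> 2) & 0x3fff;

   i.e. the instruction-word offset from the updated PC, truncated to
   the 14 bits the JMP encoding provides.  */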
5531 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5532 int fnofs, int cxtofs, int jmpofs)
5534 rtx temp, temp1, addr;
5535 /* VMS really uses DImode pointers in memory at this point. */
5536 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5538 #ifdef POINTERS_EXTEND_UNSIGNED
5539 fnaddr = convert_memory_address (mode, fnaddr);
5540 cxt = convert_memory_address (mode, cxt);
5543 /* Store function address and CXT. */
5544 addr = memory_address (mode, plus_constant (tramp, fnofs));
5545 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5546 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5547 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5549 /* This has been disabled since the hint only has a 32k range, and in
5550 no existing OS is the stack within 32k of the text segment. */
5551 if (0 && jmpofs >= 0)
5553 /* Compute hint value. */
5554 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5555 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5557 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5558 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5559 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5560 GEN_INT (0x3fff), 0);
5562 /* Merge in the hint. */
5563 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5564 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5565 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5566 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5568 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5571 #ifdef ENABLE_EXECUTE_STACK
5572 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5573 0, VOIDmode, 1, tramp, Pmode);
5577 emit_insn (gen_imb ());
5580 /* Determine where to put an argument to a function.
5581 Value is zero to push the argument on the stack,
5582 or a hard register in which to store the argument.
5584 MODE is the argument's machine mode.
5585 TYPE is the data type of the argument (as a tree).
   This is null for libcalls where that information may not be
   available.
5588 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5589 the preceding args and about the function being called.
5590 NAMED is nonzero if this argument is a named parameter
5591 (otherwise it is an extra parameter matching an ellipsis).
5593 On Alpha the first 6 words of args are normally in registers
5594 and the rest are pushed. */
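/* Concretely, on OSF/1 the first six integer arguments are passed in
   $16-$21 and floating-point arguments in $f16-$f21 (hard registers
   48-53); argument words beyond the sixth go on the stack.  */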
5597 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5598 int named ATTRIBUTE_UNUSED)
5603 /* Don't get confused and pass small structures in FP registers. */
  if (type && AGGREGATE_TYPE_P (type))
    basereg = 16;
5608 #ifdef ENABLE_CHECKING
5609 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5611 gcc_assert (!COMPLEX_MODE_P (mode));
5614 /* Set up defaults for FP operands passed in FP registers, and
5615 integral operands passed in integer registers. */
  if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
    basereg = 32 + 16;
  else
    basereg = 16;
5622 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5623 the three platforms, so we can't avoid conditional compilation. */
5624 #if TARGET_ABI_OPEN_VMS
5626 if (mode == VOIDmode)
5627 return alpha_arg_info_reg_val (cum);
5629 num_args = cum.num_args;
5631 || targetm.calls.must_pass_in_stack (mode, type))
5634 #elif TARGET_ABI_UNICOSMK
5638 /* If this is the last argument, generate the call info word (CIW). */
5639 /* ??? We don't include the caller's line number in the CIW because
     I don't know how to determine it if debug info is turned off.  */
5641 if (mode == VOIDmode)
5650 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5651 if (cum.reg_args_type[i])
5652 lo |= (1 << (7 - i));
5654 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5657 lo |= cum.num_reg_words;
5659 #if HOST_BITS_PER_WIDE_INT == 32
5660 hi = (cum.num_args << 20) | cum.num_arg_words;
5662 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5663 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5666 ciw = immed_double_const (lo, hi, DImode);
5668 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5669 UNSPEC_UMK_LOAD_CIW);
5672 size = ALPHA_ARG_SIZE (mode, type, named);
5673 num_args = cum.num_reg_words;
5675 || cum.num_reg_words + size > 6
5676 || targetm.calls.must_pass_in_stack (mode, type))
5678 else if (type && TYPE_MODE (type) == BLKmode)
5682 reg1 = gen_rtx_REG (DImode, num_args + 16);
5683 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5685 /* The argument fits in two registers. Note that we still need to
5686 reserve a register for empty structures. */
5690 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5693 reg2 = gen_rtx_REG (DImode, num_args + 17);
5694 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5695 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5699 #elif TARGET_ABI_OSF
5705 /* VOID is passed as a special flag for "last argument". */
    if (type == void_type_node)
      basereg = 16;
    else if (targetm.calls.must_pass_in_stack (mode, type))
      return NULL_RTX;
5712 #error Unhandled ABI
5715 return gen_rtx_REG (mode, num_args + basereg);
5719 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5720 enum machine_mode mode ATTRIBUTE_UNUSED,
5721 tree type ATTRIBUTE_UNUSED,
5722 bool named ATTRIBUTE_UNUSED)
5726 #if TARGET_ABI_OPEN_VMS
5727 if (cum->num_args < 6
5728 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5729 words = 6 - cum->num_args;
5730 #elif TARGET_ABI_UNICOSMK
5731 /* Never any split arguments. */
5732 #elif TARGET_ABI_OSF
  if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
    words = 6 - *cum;
5736 #error Unhandled ABI
5739 return words * UNITS_PER_WORD;
5743 /* Return true if TYPE must be returned in memory, instead of in registers. */
5746 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
5748 enum machine_mode mode = VOIDmode;
5753 mode = TYPE_MODE (type);
5755 /* All aggregates are returned in memory. */
  if (AGGREGATE_TYPE_P (type))
    return true;
5760 size = GET_MODE_SIZE (mode);
5761 switch (GET_MODE_CLASS (mode))
5763 case MODE_VECTOR_FLOAT:
      /* Pass all float vectors in memory, like an aggregate.  */
      return true;
5767 case MODE_COMPLEX_FLOAT:
5768 /* We judge complex floats on the size of their element,
5769 not the size of the whole type. */
5770 size = GET_MODE_UNIT_SIZE (mode);
5775 case MODE_COMPLEX_INT:
5776 case MODE_VECTOR_INT:
5780 /* ??? We get called on all sorts of random stuff from
5781 aggregate_value_p. We must return something, but it's not
	 clear what's safe to return.  Pretend it's a struct I know
	 nothing about.  */
      return true;
5787 /* Otherwise types must fit in one register. */
5788 return size > UNITS_PER_WORD;
5791 /* Return true if TYPE should be passed by invisible reference. */
5794 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5795 enum machine_mode mode,
5796 tree type ATTRIBUTE_UNUSED,
5797 bool named ATTRIBUTE_UNUSED)
5799 return mode == TFmode || mode == TCmode;
5802 /* Define how to find the value returned by a function. VALTYPE is the
5803 data type of the value (as a tree). If the precise function being
5804 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5805 MODE is set instead of VALTYPE for libcalls.
5807 On Alpha the value is found in $0 for integer functions and
5808 $f0 for floating-point functions. */
5811 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5812 enum machine_mode mode)
5814 unsigned int regnum, dummy;
5815 enum mode_class class;
5817 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5820 mode = TYPE_MODE (valtype);
5822 class = GET_MODE_CLASS (mode);
5826 PROMOTE_MODE (mode, dummy, valtype);
5829 case MODE_COMPLEX_INT:
5830 case MODE_VECTOR_INT:
5838 case MODE_COMPLEX_FLOAT:
5840 enum machine_mode cmode = GET_MODE_INNER (mode);
5842 return gen_rtx_PARALLEL
5845 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5847 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5848 GEN_INT (GET_MODE_SIZE (cmode)))));
5855 return gen_rtx_REG (mode, regnum);
5858 /* TCmode complex values are passed by invisible reference. We
5859 should not split these values. */
5862 alpha_split_complex_arg (tree type)
5864 return TYPE_MODE (type) != TCmode;
5868 alpha_build_builtin_va_list (void)
5870 tree base, ofs, space, record, type_decl;
5872 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5873 return ptr_type_node;
5875 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5876 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5877 TREE_CHAIN (record) = type_decl;
5878 TYPE_NAME (record) = type_decl;
5880 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5882 /* Dummy field to prevent alignment warnings. */
5883 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5884 DECL_FIELD_CONTEXT (space) = record;
5885 DECL_ARTIFICIAL (space) = 1;
5886 DECL_IGNORED_P (space) = 1;
5888 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5890 DECL_FIELD_CONTEXT (ofs) = record;
5891 TREE_CHAIN (ofs) = space;
5893 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5895 DECL_FIELD_CONTEXT (base) = record;
5896 TREE_CHAIN (base) = ofs;
5898 TYPE_FIELDS (record) = base;
5899 layout_type (record);
5901 va_list_gpr_counter_field = ofs;
5906 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5907 and constant additions. */
5910 va_list_skip_additions (tree lhs)
5914 if (TREE_CODE (lhs) != SSA_NAME)
5919 stmt = SSA_NAME_DEF_STMT (lhs);
5921 if (TREE_CODE (stmt) == PHI_NODE)
5924 if (TREE_CODE (stmt) != MODIFY_EXPR
5925 || TREE_OPERAND (stmt, 0) != lhs)
5928 rhs = TREE_OPERAND (stmt, 1);
5929 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5930 rhs = TREE_OPERAND (rhs, 0);
5932 if ((TREE_CODE (rhs) != NOP_EXPR
5933 && TREE_CODE (rhs) != CONVERT_EXPR
5934 && (TREE_CODE (rhs) != PLUS_EXPR
5935 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5936 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5937 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5940 lhs = TREE_OPERAND (rhs, 0);
5944 /* Check if LHS = RHS statement is
5945 LHS = *(ap.__base + ap.__offset + cst)
5948 + ((ap.__offset + cst <= 47)
5949 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5950 If the former, indicate that GPR registers are needed,
5951 if the latter, indicate that FPR registers are needed.
5952 On alpha, cfun->va_list_gpr_size is used as size of the needed
5953 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if
5954 GPR registers are needed and bit 1 set if FPR registers are needed.
   Return true if va_list references should not be scanned for the
   current statement.  */
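/* As a hypothetical source-level illustration of the two shapes:

     i = va_arg (ap, int);     // reads *(ap.__base + ap.__offset)
     d = va_arg (ap, double);  // reads *(ap.__base + ap.__offset - 48)
			       // while the offset is inside the FP area

   Only the bit for the matching register class is set in
   cfun->va_list_fpr_size.  */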
5959 alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
5961 tree base, offset, arg1, arg2;
5964 if (TREE_CODE (rhs) != INDIRECT_REF
5965 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5968 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5969 if (lhs == NULL_TREE
5970 || TREE_CODE (lhs) != PLUS_EXPR)
5973 base = TREE_OPERAND (lhs, 0);
5974 if (TREE_CODE (base) == SSA_NAME)
5975 base = va_list_skip_additions (base);
5977 if (TREE_CODE (base) != COMPONENT_REF
5978 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5980 base = TREE_OPERAND (lhs, 0);
5981 if (TREE_CODE (base) == SSA_NAME)
5982 base = va_list_skip_additions (base);
5984 if (TREE_CODE (base) != COMPONENT_REF
5985 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5991 base = get_base_address (base);
5992 if (TREE_CODE (base) != VAR_DECL
5993 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5996 offset = TREE_OPERAND (lhs, offset_arg);
5997 if (TREE_CODE (offset) == SSA_NAME)
5998 offset = va_list_skip_additions (offset);
6000 if (TREE_CODE (offset) == PHI_NODE)
6004 if (PHI_NUM_ARGS (offset) != 2)
6007 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
6008 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
6009 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
6015 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
6018 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
6021 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
6022 if (TREE_CODE (arg2) == MINUS_EXPR)
6024 if (sub < -48 || sub > -32)
6027 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
6031 if (TREE_CODE (arg1) == SSA_NAME)
6032 arg1 = va_list_skip_additions (arg1);
6034 if (TREE_CODE (arg1) != COMPONENT_REF
6035 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6036 || get_base_address (arg1) != base)
6039 /* Need floating point regs. */
6040 cfun->va_list_fpr_size |= 2;
6042 else if (TREE_CODE (offset) != COMPONENT_REF
6043 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6044 || get_base_address (offset) != base)
6047 /* Need general regs. */
6048 cfun->va_list_fpr_size |= 1;
6052 si->va_list_escapes = true;
/* Perform any needed actions for a function that is receiving a
6058 variable number of arguments. */
6061 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6062 tree type, int *pretend_size, int no_rtl)
6064 CUMULATIVE_ARGS cum = *pcum;
6066 /* Skip the current argument. */
6067 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
6069 #if TARGET_ABI_UNICOSMK
6070 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6071 arguments on the stack. Unfortunately, it doesn't always store the first
6072 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6073 with stdargs as we always have at least one named argument there. */
6074 if (cum.num_reg_words < 6)
6078 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6079 emit_insn (gen_arg_home_umk ());
6083 #elif TARGET_ABI_OPEN_VMS
6084 /* For VMS, we allocate space for all 6 arg registers plus a count.
6086 However, if NO registers need to be saved, don't allocate any space.
6087 This is not only because we won't need the space, but because AP
6088 includes the current_pretend_args_size and we don't want to mess up
6089 any ap-relative addresses already made. */
6090 if (cum.num_args < 6)
6094 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6095 emit_insn (gen_arg_home ());
6097 *pretend_size = 7 * UNITS_PER_WORD;
6100 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6101 only push those that are remaining. However, if NO registers need to
6102 be saved, don't allocate any space. This is not only because we won't
6103 need the space, but because AP includes the current_pretend_args_size
6104 and we don't want to mess up any ap-relative addresses already made.
6106 If we are not to use the floating-point registers, save the integer
6107 registers where we would put the floating-point registers. This is
6108 not the most efficient way to implement varargs with just one register
     class, but it isn't worth doing anything more efficient in this
     rare case.  */
6116 int count, set = get_varargs_alias_set ();
6119 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
      if (count > 6 - cum)
	count = 6 - cum;
6123 /* Detect whether integer registers or floating-point registers
6124 are needed by the detected va_arg statements. See above for
6125 how these values are computed. Note that the "escape" value
	 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
	 these bits set.  */
6128 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6130 if (cfun->va_list_fpr_size & 1)
6132 tmp = gen_rtx_MEM (BLKmode,
6133 plus_constant (virtual_incoming_args_rtx,
6134 (cum + 6) * UNITS_PER_WORD));
6135 MEM_NOTRAP_P (tmp) = 1;
6136 set_mem_alias_set (tmp, set);
6137 move_block_from_reg (16 + cum, tmp, count);
6140 if (cfun->va_list_fpr_size & 2)
6142 tmp = gen_rtx_MEM (BLKmode,
6143 plus_constant (virtual_incoming_args_rtx,
6144 cum * UNITS_PER_WORD));
6145 MEM_NOTRAP_P (tmp) = 1;
6146 set_mem_alias_set (tmp, set);
6147 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6150 *pretend_size = 12 * UNITS_PER_WORD;
6155 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6157 HOST_WIDE_INT offset;
6158 tree t, offset_field, base_field;
  if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
    return;
6163 if (TARGET_ABI_UNICOSMK)
6164 std_expand_builtin_va_start (valist, nextarg);
6166 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6167 up by 48, storing fp arg registers in the first 48 bytes, and the
6168 integer arg registers in the next 48 bytes. This is only done,
6169 however, if any integer registers need to be stored.
6171 If no integer registers need be stored, then we must subtract 48
6172 in order to account for the integer arg registers which are counted
6173 in argsize above, but which are not actually stored on the stack.
6174 Must further be careful here about structures straddling the last
6175 integer argument register; that futzes with pretend_args_size,
6176 which changes the meaning of AP. */
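/* A sketch of the resulting layout (OSF/1, assuming both register
   classes were saved by TARGET_SETUP_INCOMING_VARARGS):

     save_area +  0 .. 47 : $f16-$f21, the homed FP arg registers
     save_area + 48 .. 95 : $16-$21, the homed integer arg registers

   __base is biased to save_area + 48, so an integer argument lives at
   __base + __offset while an FP argument in the save area lives at
   __base + __offset - 48.  */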
  if (NUM_ARGS < 6)
    offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
  else
    offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6183 if (TARGET_ABI_OPEN_VMS)
6185 nextarg = plus_constant (nextarg, offset);
6186 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6187 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
6188 make_tree (ptr_type_node, nextarg));
6189 TREE_SIDE_EFFECTS (t) = 1;
6191 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6195 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6196 offset_field = TREE_CHAIN (base_field);
6198 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6199 valist, base_field, NULL_TREE);
6200 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6201 valist, offset_field, NULL_TREE);
6203 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6204 t = build2 (PLUS_EXPR, ptr_type_node, t,
6205 build_int_cst (NULL_TREE, offset));
6206 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6207 TREE_SIDE_EFFECTS (t) = 1;
6208 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6210 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6211 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6212 TREE_SIDE_EFFECTS (t) = 1;
6213 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6218 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6220 tree type_size, ptr_type, addend, t, addr, internal_post;
6222 /* If the type could not be passed in registers, skip the block
6223 reserved for the registers. */
6224 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6226 t = build_int_cst (TREE_TYPE (offset), 6*8);
6227 t = build2 (MODIFY_EXPR, TREE_TYPE (offset), offset,
6228 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
6229 gimplify_and_add (t, pre_p);
  addend = offset;
  ptr_type = build_pointer_type (type);
6235 if (TREE_CODE (type) == COMPLEX_TYPE)
6237 tree real_part, imag_part, real_temp;
      real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
					   offset, pre_p);
6242 /* Copy the value into a new temporary, lest the formal temporary
6243 be reused out from under us. */
6244 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
      imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
					   offset, pre_p);
6249 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6251 else if (TREE_CODE (type) == REAL_TYPE)
6253 tree fpaddend, cond, fourtyeight;
6255 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6256 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6257 addend, fourtyeight);
6258 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
      addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
			    fpaddend, addend);
    }
6263 /* Build the final address and force that value into a temporary. */
6264 addr = build2 (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6265 fold_convert (ptr_type, addend));
6266 internal_post = NULL;
6267 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6268 append_to_statement_list (internal_post, pre_p);
6270 /* Update the offset field. */
6271 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
  if (type_size == NULL || TREE_OVERFLOW (type_size))
    t = size_zero_node;
  else
    {
      t = size_binop (PLUS_EXPR, type_size, size_int (7));
      t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
      t = size_binop (MULT_EXPR, t, size_int (8));
    }
6280 t = fold_convert (TREE_TYPE (offset), t);
6281 t = build2 (MODIFY_EXPR, void_type_node, offset,
6282 build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6283 gimplify_and_add (t, pre_p);
6285 return build_va_arg_indirect_ref (addr);
6289 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
  tree offset_field, base_field, offset, base, t, r;
  bool indirect;
6294 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6295 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6297 base_field = TYPE_FIELDS (va_list_type_node);
6298 offset_field = TREE_CHAIN (base_field);
6299 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6300 valist, base_field, NULL_TREE);
6301 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6302 valist, offset_field, NULL_TREE);
6304 /* Pull the fields of the structure out into temporaries. Since we never
6305 modify the base field, we can use a formal temporary. Sign-extend the
6306 offset field so that it's the proper width for pointer arithmetic. */
6307 base = get_formal_tmp_var (base_field, pre_p);
6309 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6310 offset = get_initialized_tmp_var (t, pre_p, NULL);
6312 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
  if (indirect)
    type = build_pointer_type (type);
6316 /* Find the value. Note that this will be a stable indirection, or
6317 a composite of stable indirections in the case of complex. */
6318 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6320 /* Stuff the offset temporary back into its field. */
6321 t = build2 (MODIFY_EXPR, void_type_node, offset_field,
6322 fold_convert (TREE_TYPE (offset_field), offset));
6323 gimplify_and_add (t, pre_p);
  if (indirect)
    r = build_va_arg_indirect_ref (r);

  return r;
}
6335 ALPHA_BUILTIN_CMPBGE,
6336 ALPHA_BUILTIN_EXTBL,
6337 ALPHA_BUILTIN_EXTWL,
6338 ALPHA_BUILTIN_EXTLL,
6339 ALPHA_BUILTIN_EXTQL,
6340 ALPHA_BUILTIN_EXTWH,
6341 ALPHA_BUILTIN_EXTLH,
6342 ALPHA_BUILTIN_EXTQH,
6343 ALPHA_BUILTIN_INSBL,
6344 ALPHA_BUILTIN_INSWL,
6345 ALPHA_BUILTIN_INSLL,
6346 ALPHA_BUILTIN_INSQL,
6347 ALPHA_BUILTIN_INSWH,
6348 ALPHA_BUILTIN_INSLH,
6349 ALPHA_BUILTIN_INSQH,
6350 ALPHA_BUILTIN_MSKBL,
6351 ALPHA_BUILTIN_MSKWL,
6352 ALPHA_BUILTIN_MSKLL,
6353 ALPHA_BUILTIN_MSKQL,
6354 ALPHA_BUILTIN_MSKWH,
6355 ALPHA_BUILTIN_MSKLH,
6356 ALPHA_BUILTIN_MSKQH,
6357 ALPHA_BUILTIN_UMULH,
6359 ALPHA_BUILTIN_ZAPNOT,
6360 ALPHA_BUILTIN_AMASK,
6361 ALPHA_BUILTIN_IMPLVER,
6363 ALPHA_BUILTIN_THREAD_POINTER,
6364 ALPHA_BUILTIN_SET_THREAD_POINTER,
6367 ALPHA_BUILTIN_MINUB8,
6368 ALPHA_BUILTIN_MINSB8,
6369 ALPHA_BUILTIN_MINUW4,
6370 ALPHA_BUILTIN_MINSW4,
6371 ALPHA_BUILTIN_MAXUB8,
6372 ALPHA_BUILTIN_MAXSB8,
6373 ALPHA_BUILTIN_MAXUW4,
6374 ALPHA_BUILTIN_MAXSW4,
6378 ALPHA_BUILTIN_UNPKBL,
6379 ALPHA_BUILTIN_UNPKBW,
6384 ALPHA_BUILTIN_CTPOP,
6389 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6390 CODE_FOR_builtin_cmpbge,
6391 CODE_FOR_builtin_extbl,
6392 CODE_FOR_builtin_extwl,
6393 CODE_FOR_builtin_extll,
6394 CODE_FOR_builtin_extql,
6395 CODE_FOR_builtin_extwh,
6396 CODE_FOR_builtin_extlh,
6397 CODE_FOR_builtin_extqh,
6398 CODE_FOR_builtin_insbl,
6399 CODE_FOR_builtin_inswl,
6400 CODE_FOR_builtin_insll,
6401 CODE_FOR_builtin_insql,
6402 CODE_FOR_builtin_inswh,
6403 CODE_FOR_builtin_inslh,
6404 CODE_FOR_builtin_insqh,
6405 CODE_FOR_builtin_mskbl,
6406 CODE_FOR_builtin_mskwl,
6407 CODE_FOR_builtin_mskll,
6408 CODE_FOR_builtin_mskql,
6409 CODE_FOR_builtin_mskwh,
6410 CODE_FOR_builtin_msklh,
6411 CODE_FOR_builtin_mskqh,
6412 CODE_FOR_umuldi3_highpart,
6413 CODE_FOR_builtin_zap,
6414 CODE_FOR_builtin_zapnot,
6415 CODE_FOR_builtin_amask,
6416 CODE_FOR_builtin_implver,
6417 CODE_FOR_builtin_rpcc,
6422 CODE_FOR_builtin_minub8,
6423 CODE_FOR_builtin_minsb8,
6424 CODE_FOR_builtin_minuw4,
6425 CODE_FOR_builtin_minsw4,
6426 CODE_FOR_builtin_maxub8,
6427 CODE_FOR_builtin_maxsb8,
6428 CODE_FOR_builtin_maxuw4,
6429 CODE_FOR_builtin_maxsw4,
6430 CODE_FOR_builtin_perr,
6431 CODE_FOR_builtin_pklb,
6432 CODE_FOR_builtin_pkwb,
6433 CODE_FOR_builtin_unpkbl,
6434 CODE_FOR_builtin_unpkbw,
6439 CODE_FOR_popcountdi2
struct alpha_builtin_def
{
  const char *name;
  enum alpha_builtin code;
  unsigned int target_mask;
  bool is_const;
};
6450 static struct alpha_builtin_def const zero_arg_builtins[] = {
6451 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6452 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6455 static struct alpha_builtin_def const one_arg_builtins[] = {
6456 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6457 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6458 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6459 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6460 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6461 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6462 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6463 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6466 static struct alpha_builtin_def const two_arg_builtins[] = {
6467 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6468 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6469 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6470 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6471 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6472 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6473 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6474 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6475 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6476 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6477 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6478 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6479 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6480 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6481 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6482 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6483 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6484 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6485 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6486 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6487 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6488 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6489 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6490 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6491 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6492 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6493 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6494 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6495 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6496 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6497 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6498 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6499 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6500 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6503 static GTY(()) tree alpha_v8qi_u;
6504 static GTY(()) tree alpha_v8qi_s;
6505 static GTY(()) tree alpha_v4hi_u;
6506 static GTY(()) tree alpha_v4hi_s;
6509 alpha_init_builtins (void)
6511 const struct alpha_builtin_def *p;
6512 tree dimode_integer_type_node;
6513 tree ftype, attrs[2];
6516 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6518 attrs[0] = tree_cons (get_identifier ("nothrow"), NULL, NULL);
6519 attrs[1] = tree_cons (get_identifier ("const"), NULL, attrs[0]);
6521 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6523 p = zero_arg_builtins;
6524 for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
6525 if ((target_flags & p->target_mask) == p->target_mask)
6526 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6527 NULL, attrs[p->is_const]);
6529 ftype = build_function_type_list (dimode_integer_type_node,
6530 dimode_integer_type_node, NULL_TREE);
6532 p = one_arg_builtins;
6533 for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
6534 if ((target_flags & p->target_mask) == p->target_mask)
6535 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6536 NULL, attrs[p->is_const]);
6538 ftype = build_function_type_list (dimode_integer_type_node,
6539 dimode_integer_type_node,
6540 dimode_integer_type_node, NULL_TREE);
6542 p = two_arg_builtins;
6543 for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
6544 if ((target_flags & p->target_mask) == p->target_mask)
6545 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6546 NULL, attrs[p->is_const]);
6548 ftype = build_function_type (ptr_type_node, void_list_node);
6549 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
6550 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6553 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6554 lang_hooks.builtin_function ("__builtin_set_thread_pointer", ftype,
6555 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6558 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6559 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6560 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6561 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6564 /* Expand an expression EXP that calls a built-in function,
6565 with result going to TARGET if that's convenient
6566 (and in mode MODE if that's convenient).
6567 SUBTARGET may be used as the target for computing one of EXP's operands.
6568 IGNORE is nonzero if the value is to be ignored. */
6571 alpha_expand_builtin (tree exp, rtx target,
6572 rtx subtarget ATTRIBUTE_UNUSED,
6573 enum machine_mode mode ATTRIBUTE_UNUSED,
6574 int ignore ATTRIBUTE_UNUSED)
6578 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
6579 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6580 tree arglist = TREE_OPERAND (exp, 1);
6581 enum insn_code icode;
6582 rtx op[MAX_ARGS], pat;
6586 if (fcode >= ALPHA_BUILTIN_max)
6587 internal_error ("bad builtin fcode");
  icode = code_for_builtin[fcode];
  if (icode == 0)
    internal_error ("bad builtin fcode");
6592 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
  for (arglist = TREE_OPERAND (exp, 1), arity = 0;
       arglist;
       arglist = TREE_CHAIN (arglist), arity++)
6598 const struct insn_operand_data *insn_op;
6600 tree arg = TREE_VALUE (arglist);
      if (arg == error_mark_node)
	return NULL_RTX;
      if (arity > MAX_ARGS)
	return NULL_RTX;
6606 insn_op = &insn_data[icode].operand[arity + nonvoid];
6608 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6610 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6611 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
      enum machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
	  || GET_MODE (target) != tmode
	  || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6620 target = gen_reg_rtx (tmode);
6626 pat = GEN_FCN (icode) (target);
      if (nonvoid)
	pat = GEN_FCN (icode) (target, op[0]);
      else
	pat = GEN_FCN (icode) (op[0]);
6635 pat = GEN_FCN (icode) (target, op[0], op[1]);
/* Several bits below assume HWI >= 64 bits.  This should be enforced
   at configure time.  */
6653 #if HOST_BITS_PER_WIDE_INT < 64
6654 # error "HOST_WIDE_INT too small"
6657 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6658 with an 8 bit output vector. OPINT contains the integer operands; bit N
6659 of OP_CONST is set if OPINT[N] is valid. */
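/* For example, __builtin_alpha_cmpbge (x, 0) folds to 0xff for any x,
   since every byte compares unsigned-greater-or-equal against zero;
   that is the op_const == 2 special case below.  */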
6662 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6667 for (i = 0, val = 0; i < 8; ++i)
6669 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6670 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6674 return build_int_cst (long_integer_type_node, val);
6676 else if (op_const == 2 && opint[1] == 0)
6677 return build_int_cst (long_integer_type_node, 0xff);
6681 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6682 specialized form of an AND operation. Other byte manipulation instructions
6683 are defined in terms of this instruction, so this is also used as a
6684 subroutine for other builtins.
6686 OP contains the tree operands; OPINT contains the extracted integer values.
   Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
6688 OPINT may be considered. */
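/* E.g. zapnot (x, 0x0f) keeps only the low four bytes, so with both
   operands constant it folds to x & 0xffffffff.  */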
6691 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6696 unsigned HOST_WIDE_INT mask = 0;
6699 for (i = 0; i < 8; ++i)
6700 if ((opint[1] >> i) & 1)
6701 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6704 return build_int_cst (long_integer_type_node, opint[0] & mask);
6707 return fold (build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6708 build_int_cst (long_integer_type_node, mask)));
6710 else if ((op_const & 1) && opint[0] == 0)
6711 return build_int_cst (long_integer_type_node, 0);
6715 /* Fold the builtins for the EXT family of instructions. */
6718 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6719 long op_const, unsigned HOST_WIDE_INT bytemask,
6723 tree *zap_op = NULL;
6727 unsigned HOST_WIDE_INT loc;
6730 if (BYTES_BIG_ENDIAN)
6738 unsigned HOST_WIDE_INT temp = opint[0];
6751 opint[1] = bytemask;
6752 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6755 /* Fold the builtins for the INS family of instructions. */
6758 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6759 long op_const, unsigned HOST_WIDE_INT bytemask,
6762 if ((op_const & 1) && opint[0] == 0)
6763 return build_int_cst (long_integer_type_node, 0);
6767 unsigned HOST_WIDE_INT temp, loc, byteloc;
6768 tree *zap_op = NULL;
6771 if (BYTES_BIG_ENDIAN)
6778 byteloc = (64 - (loc * 8)) & 0x3f;
6795 opint[1] = bytemask;
6796 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6803 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6804 long op_const, unsigned HOST_WIDE_INT bytemask,
6809 unsigned HOST_WIDE_INT loc;
6812 if (BYTES_BIG_ENDIAN)
6819 opint[1] = bytemask ^ 0xff;
6822 return alpha_fold_builtin_zapnot (op, opint, op_const);
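/* Fold the builtin for the UMULH instruction, which yields the high
   64 bits of the full 128-bit product; e.g. umulh (1UL << 63, 2)
   is 1.  */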
6826 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6832 unsigned HOST_WIDE_INT l;
6835 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6837 #if HOST_BITS_PER_WIDE_INT > 64
6841 return build_int_cst (long_integer_type_node, h);
6845 opint[1] = opint[0];
6848 /* Note that (X*1) >> 64 == 0. */
6849 if (opint[1] == 0 || opint[1] == 1)
6850 return build_int_cst (long_integer_type_node, 0);
6857 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6859 tree op0 = fold_convert (vtype, op[0]);
6860 tree op1 = fold_convert (vtype, op[1]);
6861 tree val = fold (build2 (code, vtype, op0, op1));
6862 return fold_convert (long_integer_type_node, val);
6866 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6868 unsigned HOST_WIDE_INT temp = 0;
6874 for (i = 0; i < 8; ++i)
6876 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6877 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6884 return build_int_cst (long_integer_type_node, temp);
6888 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6890 unsigned HOST_WIDE_INT temp;
6895 temp = opint[0] & 0xff;
6896 temp |= (opint[0] >> 24) & 0xff00;
6898 return build_int_cst (long_integer_type_node, temp);
6902 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6904 unsigned HOST_WIDE_INT temp;
6909 temp = opint[0] & 0xff;
6910 temp |= (opint[0] >> 8) & 0xff00;
6911 temp |= (opint[0] >> 16) & 0xff0000;
6912 temp |= (opint[0] >> 24) & 0xff000000;
6914 return build_int_cst (long_integer_type_node, temp);
6918 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6920 unsigned HOST_WIDE_INT temp;
6925 temp = opint[0] & 0xff;
6926 temp |= (opint[0] & 0xff00) << 24;
6928 return build_int_cst (long_integer_type_node, temp);
6932 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6934 unsigned HOST_WIDE_INT temp;
6939 temp = opint[0] & 0xff;
6940 temp |= (opint[0] & 0x0000ff00) << 8;
6941 temp |= (opint[0] & 0x00ff0000) << 16;
6942 temp |= (opint[0] & 0xff000000) << 24;
6944 return build_int_cst (long_integer_type_node, temp);
6948 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6950 unsigned HOST_WIDE_INT temp;
6958 temp = exact_log2 (opint[0] & -opint[0]);
6960 return build_int_cst (long_integer_type_node, temp);
6964 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6966 unsigned HOST_WIDE_INT temp;
6974 temp = 64 - floor_log2 (opint[0]) - 1;
6976 return build_int_cst (long_integer_type_node, temp);
6980 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6982 unsigned HOST_WIDE_INT temp, op;
      op = opint[0];
      temp = 0;
      while (op)
	temp++, op &= op - 1;
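      /* Each iteration clears the lowest set bit (Kernighan's popcount
	 loop), so the loop runs once per set bit.  */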
6992 return build_int_cst (long_integer_type_node, temp);
6995 /* Fold one of our builtin functions. */
6998 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
7000 tree op[MAX_ARGS], t;
7001 unsigned HOST_WIDE_INT opint[MAX_ARGS];
7002 long op_const = 0, arity = 0;
7004 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
7006 tree arg = TREE_VALUE (t);
      if (arg == error_mark_node)
	return NULL;
      if (arity >= MAX_ARGS)
	return NULL;

      op[arity] = arg;
      opint[arity] = 0;
      if (TREE_CODE (arg) == INTEGER_CST)
7016 op_const |= 1L << arity;
7017 opint[arity] = int_cst_value (arg);
7021 switch (DECL_FUNCTION_CODE (fndecl))
7023 case ALPHA_BUILTIN_CMPBGE:
7024 return alpha_fold_builtin_cmpbge (opint, op_const);
7026 case ALPHA_BUILTIN_EXTBL:
7027 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7028 case ALPHA_BUILTIN_EXTWL:
7029 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7030 case ALPHA_BUILTIN_EXTLL:
7031 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7032 case ALPHA_BUILTIN_EXTQL:
7033 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7034 case ALPHA_BUILTIN_EXTWH:
7035 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7036 case ALPHA_BUILTIN_EXTLH:
7037 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7038 case ALPHA_BUILTIN_EXTQH:
7039 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7041 case ALPHA_BUILTIN_INSBL:
7042 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7043 case ALPHA_BUILTIN_INSWL:
7044 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7045 case ALPHA_BUILTIN_INSLL:
7046 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7047 case ALPHA_BUILTIN_INSQL:
7048 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7049 case ALPHA_BUILTIN_INSWH:
7050 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7051 case ALPHA_BUILTIN_INSLH:
7052 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7053 case ALPHA_BUILTIN_INSQH:
7054 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7056 case ALPHA_BUILTIN_MSKBL:
7057 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7058 case ALPHA_BUILTIN_MSKWL:
7059 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7060 case ALPHA_BUILTIN_MSKLL:
7061 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7062 case ALPHA_BUILTIN_MSKQL:
7063 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7064 case ALPHA_BUILTIN_MSKWH:
7065 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7066 case ALPHA_BUILTIN_MSKLH:
7067 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7068 case ALPHA_BUILTIN_MSKQH:
7069 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7071 case ALPHA_BUILTIN_UMULH:
7072 return alpha_fold_builtin_umulh (opint, op_const);
7074 case ALPHA_BUILTIN_ZAP:
7077 case ALPHA_BUILTIN_ZAPNOT:
7078 return alpha_fold_builtin_zapnot (op, opint, op_const);
7080 case ALPHA_BUILTIN_MINUB8:
7081 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7082 case ALPHA_BUILTIN_MINSB8:
7083 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7084 case ALPHA_BUILTIN_MINUW4:
7085 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7086 case ALPHA_BUILTIN_MINSW4:
7087 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7088 case ALPHA_BUILTIN_MAXUB8:
7089 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7090 case ALPHA_BUILTIN_MAXSB8:
7091 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7092 case ALPHA_BUILTIN_MAXUW4:
7093 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7094 case ALPHA_BUILTIN_MAXSW4:
7095 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7097 case ALPHA_BUILTIN_PERR:
7098 return alpha_fold_builtin_perr (opint, op_const);
7099 case ALPHA_BUILTIN_PKLB:
7100 return alpha_fold_builtin_pklb (opint, op_const);
7101 case ALPHA_BUILTIN_PKWB:
7102 return alpha_fold_builtin_pkwb (opint, op_const);
7103 case ALPHA_BUILTIN_UNPKBL:
7104 return alpha_fold_builtin_unpkbl (opint, op_const);
7105 case ALPHA_BUILTIN_UNPKBW:
7106 return alpha_fold_builtin_unpkbw (opint, op_const);
7108 case ALPHA_BUILTIN_CTTZ:
7109 return alpha_fold_builtin_cttz (opint, op_const);
7110 case ALPHA_BUILTIN_CTLZ:
7111 return alpha_fold_builtin_ctlz (opint, op_const);
7112 case ALPHA_BUILTIN_CTPOP:
7113 return alpha_fold_builtin_ctpop (opint, op_const);
7115 case ALPHA_BUILTIN_AMASK:
7116 case ALPHA_BUILTIN_IMPLVER:
7117 case ALPHA_BUILTIN_RPCC:
7118 case ALPHA_BUILTIN_THREAD_POINTER:
7119 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7120 /* None of these are foldable at compile-time. */
7126 /* This page contains routines that are used to determine what the function
   prologue and epilogue code will do and to write them out.  */
7129 /* Compute the size of the save area in the stack. */
7131 /* These variables are used for communication between the following functions.
7132 They indicate various things about the current function being compiled
7133 that are used to tell what kind of prologue, epilogue and procedure
7134 descriptor to generate. */
/* The kind of procedure (null, register, or stack) we need.  */
7137 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7138 static enum alpha_procedure_types alpha_procedure_type;
7140 /* Register number (either FP or SP) that is used to unwind the frame. */
7141 static int vms_unwind_regno;
7143 /* Register number used to save FP. We need not have one for RA since
7144 we don't modify it for register procedures. This is only defined
7145 for register frame procedures. */
7146 static int vms_save_fp_regno;
7148 /* Register number used to reference objects off our PV. */
7149 static int vms_base_regno;
7151 /* Compute register masks for saved registers. */
7154 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7156 unsigned long imask = 0;
7157 unsigned long fmask = 0;
7160 /* When outputting a thunk, we don't have valid register life info,
     but assemble_start_function wants to output .frame and .mask
     directives.  */
7163 if (current_function_is_thunk)
7170 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7171 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7173 /* One for every register we have to save. */
7174 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7175 if (! fixed_regs[i] && ! call_used_regs[i]
7176 && regs_ever_live[i] && i != REG_RA
7177 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
      {
	if (i < 32)
	  imask |= (1UL << i);
	else
	  fmask |= (1UL << (i - 32));
      }
7185 /* We need to restore these for the handler. */
7186 if (current_function_calls_eh_return)
7190 unsigned regno = EH_RETURN_DATA_REGNO (i);
	if (regno == INVALID_REGNUM)
	  break;
7193 imask |= 1UL << regno;
7197 /* If any register spilled, then spill the return address also. */
7198 /* ??? This is required by the Digital stack unwind specification
7199 and isn't needed if we're doing Dwarf2 unwinding. */
7200 if (imask || fmask || alpha_ra_ever_killed ())
7201 imask |= (1UL << REG_RA);
7208 alpha_sa_size (void)
  unsigned long mask[2];
  int sa_size = 0;
  int i, j;
7214 alpha_sa_mask (&mask[0], &mask[1]);
7216 if (TARGET_ABI_UNICOSMK)
      if (mask[0] || mask[1])
	sa_size = 14;
7223 for (j = 0; j < 2; ++j)
7224 for (i = 0; i < 32; ++i)
	  if ((mask[j] >> i) & 1)
	    sa_size++;
7229 if (TARGET_ABI_UNICOSMK)
7231 /* We might not need to generate a frame if we don't make any calls
7232 (including calls to __T3E_MISMATCH if this is a vararg function),
7233 don't have any local variables which require stack slots, don't
7234 use alloca and have not determined that we need a frame for other
7235 reasons. */
7237 alpha_procedure_type
7238 = (sa_size || get_frame_size() != 0
7239 || current_function_outgoing_args_size
7240 || current_function_stdarg || current_function_calls_alloca
7241 || frame_pointer_needed)
7242 ? PT_STACK : PT_REGISTER;
7244 /* Always reserve space for saving callee-saved registers if we
7245 need a frame as required by the calling convention. */
7246 if (alpha_procedure_type == PT_STACK)
7249 else if (TARGET_ABI_OPEN_VMS)
7251 /* Start by assuming we can use a register procedure if we don't
7252 make any calls (REG_RA not used) or need to save any
7253 registers and a stack procedure if we do. */
7254 if ((mask[0] >> REG_RA) & 1)
7255 alpha_procedure_type = PT_STACK;
7256 else if (get_frame_size() != 0)
7257 alpha_procedure_type = PT_REGISTER;
7258 else
7259 alpha_procedure_type = PT_NULL;
7261 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7262 made the final decision on stack procedure vs register procedure. */
7263 if (alpha_procedure_type == PT_STACK)
7266 /* Decide whether to refer to objects off our PV via FP or PV.
7267 If we need FP for something else or if we receive a nonlocal
7268 goto (which expects PV to contain the value), we must use PV.
7269 Otherwise, start by assuming we can use FP. */
7271 vms_base_regno
7272 = (frame_pointer_needed
7273 || current_function_has_nonlocal_label
7274 || alpha_procedure_type == PT_STACK
7275 || current_function_outgoing_args_size)
7276 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7278 /* If we want to copy PV into FP, we need to find some register
7279 in which to save FP. */
7281 vms_save_fp_regno = -1;
7282 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7283 for (i = 0; i < 32; i++)
7284 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
7285 vms_save_fp_regno = i;
7287 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7288 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7289 else if (alpha_procedure_type == PT_NULL)
7290 vms_base_regno = REG_PV;
7292 /* Stack unwinding should be done via FP unless we use it for PV. */
7293 vms_unwind_regno = (vms_base_regno == REG_PV
7294 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7296 /* If this is a stack procedure, allow space for saving FP and RA. */
7297 if (alpha_procedure_type == PT_STACK)
7302 /* Our size must be an even number of registers, i.e. a multiple of 16 bytes. */
7310 /* Define the offset between two registers, one to be eliminated,
7311 and the other its replacement, at the start of a routine. */
7314 alpha_initial_elimination_offset (unsigned int from,
7315 unsigned int to ATTRIBUTE_UNUSED)
7319 ret = alpha_sa_size ();
7320 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7324 case FRAME_POINTER_REGNUM:
7325 break;
7327 case ARG_POINTER_REGNUM:
7328 ret += (ALPHA_ROUND (get_frame_size ()
7329 + current_function_pretend_args_size)
7330 - current_function_pretend_args_size);
7341 alpha_pv_save_size (void)
7344 return alpha_procedure_type == PT_STACK ? 8 : 0;
7348 alpha_using_fp (void)
7351 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7354 #if TARGET_ABI_OPEN_VMS
7356 const struct attribute_spec vms_attribute_table[] =
7358 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7359 { "overlaid", 0, 0, true, false, false, NULL },
7360 { "global", 0, 0, true, false, false, NULL },
7361 { "initialize", 0, 0, true, false, false, NULL },
7362 { NULL, 0, 0, false, false, false, NULL }
7368 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7370 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7374 alpha_find_lo_sum_using_gp (rtx insn)
7376 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7380 alpha_does_function_need_gp (void)
7384 /* The GP being variable is an OSF ABI thing. */
7385 if (! TARGET_ABI_OSF)
7388 /* We need the gp to load the address of __mcount. */
7389 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7392 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7393 if (current_function_is_thunk)
7396 /* The nonlocal receiver pattern assumes that the gp is valid for
7397 the nested function. Reasonable because it's almost always set
7398 correctly already. For the cases where that's wrong, make sure
7399 the nested function loads its gp on entry. */
7400 if (current_function_has_nonlocal_goto)
7403 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7404 Even if we are a static function, we still need to do this in case
7405 our address is taken and passed to something like qsort. */
7407 push_topmost_sequence ();
7408 insn = get_insns ();
7409 pop_topmost_sequence ();
7411 for (; insn; insn = NEXT_INSN (insn))
7413 && ! JUMP_TABLE_DATA_P (insn)
7414 && GET_CODE (PATTERN (insn)) != USE
7415 && GET_CODE (PATTERN (insn)) != CLOBBER
7416 && get_attr_usegp (insn))
7423 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7424 sequences. */
7426 static rtx
7427 set_frame_related_p (void)
7429 rtx seq = get_insns ();
7440 while (insn != NULL_RTX)
7442 RTX_FRAME_RELATED_P (insn) = 1;
7443 insn = NEXT_INSN (insn);
7445 seq = emit_insn (seq);
7449 seq = emit_insn (seq);
7450 RTX_FRAME_RELATED_P (seq) = 1;
7455 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
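/* For example (as used in alpha_expand_prologue below),

     FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
				 GEN_INT (-frame_size))));

   emits the stack adjustment inside a sequence and marks every insn
   generated for it with RTX_FRAME_RELATED_P.  */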
7457 /* Generates a store with the proper unwind info attached. VALUE is
7458 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7459 contains SP+FRAME_BIAS, and that is the unwind info that should be
7460 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7461 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7464 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7465 HOST_WIDE_INT base_ofs, rtx frame_reg)
7467 rtx addr, mem, insn;
7469 addr = plus_constant (base_reg, base_ofs);
7470 mem = gen_rtx_MEM (DImode, addr);
7471 set_mem_alias_set (mem, alpha_sr_alias_set);
7473 insn = emit_move_insn (mem, value);
7474 RTX_FRAME_RELATED_P (insn) = 1;
7476 if (frame_bias || value != frame_reg)
7480 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7481 mem = gen_rtx_MEM (DImode, addr);
7485 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7486 gen_rtx_SET (VOIDmode, mem, frame_reg),
7492 emit_frame_store (unsigned int regno, rtx base_reg,
7493 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7495 rtx reg = gen_rtx_REG (DImode, regno);
7496 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
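/* A usage sketch: the prologue below saves the return address with

     emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);

   which emits a DImode store of $26 at SA_REG + REG_OFFSET and, since
   SA_REG may be biased away from SP, records the unwind information as a
   store at SP + SA_BIAS + REG_OFFSET via REG_FRAME_RELATED_EXPR.  */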
7499 /* Write function prologue. */
7501 /* On vms we have two kinds of functions:
7503 - stack frame (PROC_STACK)
7504 these are 'normal' functions with local variables that
7505 call other functions
7506 - register frame (PROC_REGISTER)
7507 keeps all data in registers, needs no stack
7509 We must pass this to the assembler so it can generate the
7510 proper pdsc (procedure descriptor).
7511 This is done with the '.pdesc' command.
7513 On non-VMS targets, we don't really differentiate between the two, as we can
7514 simply allocate stack without saving registers. */
7517 alpha_expand_prologue (void)
7519 /* Registers to save. */
7520 unsigned long imask = 0;
7521 unsigned long fmask = 0;
7522 /* Stack space needed for pushing registers clobbered by us. */
7523 HOST_WIDE_INT sa_size;
7524 /* Complete stack size needed. */
7525 HOST_WIDE_INT frame_size;
7526 /* Offset from base reg to register save area. */
7527 HOST_WIDE_INT reg_offset;
7531 sa_size = alpha_sa_size ();
7533 frame_size = get_frame_size ();
7534 if (TARGET_ABI_OPEN_VMS)
7535 frame_size = ALPHA_ROUND (sa_size
7536 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7538 + current_function_pretend_args_size);
7539 else if (TARGET_ABI_UNICOSMK)
7540 /* We have to allocate space for the DSIB if we generate a frame. */
7541 frame_size = ALPHA_ROUND (sa_size
7542 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7543 + ALPHA_ROUND (frame_size
7544 + current_function_outgoing_args_size);
7546 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7548 + ALPHA_ROUND (frame_size
7549 + current_function_pretend_args_size));
7551 if (TARGET_ABI_OPEN_VMS)
7554 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7556 alpha_sa_mask (&imask, &fmask);
7558 /* Emit an insn to reload GP, if needed. */
7561 alpha_function_needs_gp = alpha_does_function_need_gp ();
7562 if (alpha_function_needs_gp)
7563 emit_insn (gen_prologue_ldgp ());
7566 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7567 the call to mcount ourselves, rather than having the linker do it
7568 magically in response to -pg. Since _mcount has special linkage,
7569 don't represent the call as a call. */
7570 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7571 emit_insn (gen_prologue_mcount ());
7573 if (TARGET_ABI_UNICOSMK)
7574 unicosmk_gen_dsib (&imask);
7576 /* Adjust the stack by the frame size. If the frame size is > 4096
7577 bytes, we need to be sure we probe somewhere in the first and last
7578 4096 bytes (we can probably get away without the latter test) and
7579 every 8192 bytes in between. If the frame size is > 32768, we
7580 do this in a loop. Otherwise, we generate the explicit probe
7581 instructions.
7583 Note that we are only allowed to adjust sp once in the prologue. */
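/* A worked example of the small-frame case on OSF: with sa_size == 0 and
   frame_size == 20000, the loop below probes at sp-4096 and sp-12288,
   leaving probed == 20480; since 20000 > 20480 - 4096, a final probe at
   sp-20000 is emitted as well, all before the single sp adjustment.  */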
7585 if (frame_size <= 32768)
7587 if (frame_size > 4096)
7591 for (probed = 4096; probed < frame_size; probed += 8192)
7592 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7596 /* We only have to do this probe if we aren't saving registers. */
7597 if (sa_size == 0 && frame_size > probed - 4096)
7598 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7601 if (frame_size != 0)
7602 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7603 GEN_INT (TARGET_ABI_UNICOSMK
7609 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7610 number of 8192 byte blocks to probe. We then probe each block
7611 in the loop and then set SP to the proper location. If the
7612 amount remaining is > 4096, we have to do one more probe if we
7613 are not saving any registers. */
7615 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7616 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
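/* E.g. for frame_size == 100000: blocks == 104096/8192 == 12 and
   leftover == 104096 - 12*8192 == 5792, so the loop probes twelve
   8192-byte blocks and, since leftover > 4096, one extra probe is
   needed below when no registers are being saved.  */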
7617 rtx ptr = gen_rtx_REG (DImode, 22);
7618 rtx count = gen_rtx_REG (DImode, 23);
7621 emit_move_insn (count, GEN_INT (blocks));
7622 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7623 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7625 /* Because of the difficulty in emitting a new basic block this
7626 late in the compilation, generate the loop as a single insn. */
7627 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7629 if (leftover > 4096 && sa_size == 0)
7631 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7632 MEM_VOLATILE_P (last) = 1;
7633 emit_move_insn (last, const0_rtx);
7636 if (TARGET_ABI_WINDOWS_NT)
7638 /* For NT stack unwind (done by 'reverse execution'), it's
7639 not OK to take the result of a loop, even though the value
7640 is already in ptr, so we reload it via a single operation
7641 and subtract it from sp.
7643 Yes, that's correct -- we have to reload the whole constant
7644 into a temporary via ldah+lda then subtract from sp. */
7646 HOST_WIDE_INT lo, hi;
7647 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7648 hi = frame_size - lo;
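/* The lda/ldah immediates are sign-extended 16-bit fields, hence the
   "^ 0x8000) - 0x8000" bias above.  E.g. frame_size == 0x18000 splits
   into lo == -0x8000 and hi == 0x20000: the move of HI becomes an ldah
   materializing 0x20000, and the lda then adds -0x8000, reaching
   0x18000.  */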
7650 emit_move_insn (ptr, GEN_INT (hi));
7651 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7652 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7657 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7658 GEN_INT (-leftover)));
7661 /* This alternative is special, because the DWARF code cannot
7662 possibly intuit through the loop above. So we invent this
7663 note for it to look at instead. */
7664 RTX_FRAME_RELATED_P (seq) = 1;
7666 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7667 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7668 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7669 GEN_INT (TARGET_ABI_UNICOSMK
7675 if (!TARGET_ABI_UNICOSMK)
7677 HOST_WIDE_INT sa_bias = 0;
7679 /* Cope with very large offsets to the register save area. */
7680 sa_reg = stack_pointer_rtx;
7681 if (reg_offset + sa_size > 0x8000)
7683 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7686 if (low + sa_size <= 0x8000)
7687 sa_bias = reg_offset - low, reg_offset = low;
7689 sa_bias = reg_offset, reg_offset = 0;
7691 sa_reg = gen_rtx_REG (DImode, 24);
7692 sa_bias_rtx = GEN_INT (sa_bias);
7694 if (add_operand (sa_bias_rtx, DImode))
7695 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7698 emit_move_insn (sa_reg, sa_bias_rtx);
7699 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7703 /* Save regs in stack order, beginning with the VMS PV. */
7704 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7705 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7707 /* Save register RA next. */
7708 if (imask & (1UL << REG_RA))
7710 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7711 imask &= ~(1UL << REG_RA);
7715 /* Now save any other registers required to be saved. */
7716 for (i = 0; i < 31; i++)
7717 if (imask & (1UL << i))
7719 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7723 for (i = 0; i < 31; i++)
7724 if (fmask & (1UL << i))
7726 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7730 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7732 /* The standard frame on the T3E includes space for saving registers.
7733 We just have to use it. We don't have to save the return address and
7734 the old frame pointer here - they are saved in the DSIB. */
7737 for (i = 9; i < 15; i++)
7738 if (imask & (1UL << i))
7740 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7743 for (i = 2; i < 10; i++)
7744 if (fmask & (1UL << i))
7746 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7751 if (TARGET_ABI_OPEN_VMS)
7753 if (alpha_procedure_type == PT_REGISTER)
7754 /* Register frame procedures save the fp.
7755 ?? Ought to have a dwarf2 save for this. */
7756 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7757 hard_frame_pointer_rtx);
7759 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7760 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7761 gen_rtx_REG (DImode, REG_PV)));
7763 if (alpha_procedure_type != PT_NULL
7764 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7765 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7767 /* If we have to allocate space for outgoing args, do it now. */
7768 if (current_function_outgoing_args_size != 0)
7771 = emit_move_insn (stack_pointer_rtx,
7773 (hard_frame_pointer_rtx,
7775 (current_function_outgoing_args_size))));
7777 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7778 if ! frame_pointer_needed. Setting the bit will change the CFA
7779 computation rule to use sp again, which would be wrong if we had
7780 frame_pointer_needed, as this means sp might move unpredictably
7781 later and thus the computed CFA could be wrong.
7783 Also, note that
7784 frame_pointer_needed
7785 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7786 and
7787 current_function_outgoing_args_size != 0
7788 => alpha_procedure_type != PT_NULL,
7790 so when we are not setting the bit here, we are guaranteed to
7791 have emitted an FRP frame pointer update just before. */
7792 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7795 else if (!TARGET_ABI_UNICOSMK)
7797 /* If we need a frame pointer, set it from the stack pointer. */
7798 if (frame_pointer_needed)
7800 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7801 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7803 /* This must always be the last instruction in the
7804 prologue, thus we emit a special move + clobber. */
7805 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7806 stack_pointer_rtx, sa_reg)));
7810 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7811 the prologue, for exception handling reasons, we cannot do this for
7812 any insn that might fault. We could prevent this for mems with a
7813 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7814 have to prevent all such scheduling with a blockage.
7816 Linux, on the other hand, never bothered to implement OSF/1's
7817 exception handling, and so doesn't care about such things. Anyone
7818 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7820 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7821 emit_insn (gen_blockage ());
7824 /* Count the number of .file directives, so that .loc is up to date. */
7825 int num_source_filenames = 0;
7827 /* Output the textual info surrounding the prologue. */
7830 alpha_start_function (FILE *file, const char *fnname,
7831 tree decl ATTRIBUTE_UNUSED)
7833 unsigned long imask = 0;
7834 unsigned long fmask = 0;
7835 /* Stack space needed for pushing registers clobbered by us. */
7836 HOST_WIDE_INT sa_size;
7837 /* Complete stack size needed. */
7838 unsigned HOST_WIDE_INT frame_size;
7839 /* Offset from base reg to register save area. */
7840 HOST_WIDE_INT reg_offset;
7841 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7844 /* Don't emit an extern directive for functions defined in the same file. */
7845 if (TARGET_ABI_UNICOSMK)
7848 name_tree = get_identifier (fnname);
7849 TREE_ASM_WRITTEN (name_tree) = 1;
7852 alpha_fnname = fnname;
7853 sa_size = alpha_sa_size ();
7855 frame_size = get_frame_size ();
7856 if (TARGET_ABI_OPEN_VMS)
7857 frame_size = ALPHA_ROUND (sa_size
7858 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7860 + current_function_pretend_args_size);
7861 else if (TARGET_ABI_UNICOSMK)
7862 frame_size = ALPHA_ROUND (sa_size
7863 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7864 + ALPHA_ROUND (frame_size
7865 + current_function_outgoing_args_size);
7867 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7869 + ALPHA_ROUND (frame_size
7870 + current_function_pretend_args_size));
7872 if (TARGET_ABI_OPEN_VMS)
7875 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7877 alpha_sa_mask (&imask, &fmask);
7879 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7880 We have to do that before the .ent directive as we cannot switch
7881 files within procedures with native ecoff because line numbers are
7882 linked to procedure descriptors.
7883 Outputting the lineno helps debugging of one line functions as they
7884 would otherwise get no line number at all. Please note that we would
7885 like to put out last_linenum from final.c, but it is not accessible. */
7887 if (write_symbols == SDB_DEBUG)
7889 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7890 ASM_OUTPUT_SOURCE_FILENAME (file,
7891 DECL_SOURCE_FILE (current_function_decl));
7893 #ifdef SDB_OUTPUT_SOURCE_LINE
7894 if (debug_info_level != DINFO_LEVEL_TERSE)
7895 SDB_OUTPUT_SOURCE_LINE (file,
7896 DECL_SOURCE_LINE (current_function_decl));
7900 /* Issue function start and label. */
7901 if (TARGET_ABI_OPEN_VMS
7902 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7904 fputs ("\t.ent ", file);
7905 assemble_name (file, fnname);
7908 /* If the function needs GP, we'll write the "..ng" label there.
7909 Otherwise, do it here. */
7910 if (TARGET_ABI_OSF
7911 && ! alpha_function_needs_gp
7912 && ! current_function_is_thunk)
7915 assemble_name (file, fnname);
7916 fputs ("..ng:\n", file);
7920 strcpy (entry_label, fnname);
7921 if (TARGET_ABI_OPEN_VMS)
7922 strcat (entry_label, "..en");
7924 /* For public functions, the label must be globalized by appending an
7925 additional colon. */
7926 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7927 strcat (entry_label, ":");
7929 ASM_OUTPUT_LABEL (file, entry_label);
7930 inside_function = TRUE;
7932 if (TARGET_ABI_OPEN_VMS)
7933 fprintf (file, "\t.base $%d\n", vms_base_regno);
7935 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7936 && !flag_inhibit_size_directive)
7938 /* Set flags in procedure descriptor to request IEEE-conformant
7939 math-library routines. The value we set it to is PDSC_EXC_IEEE
7940 (/usr/include/pdsc.h). */
7941 fputs ("\t.eflag 48\n", file);
7944 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7945 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7946 alpha_arg_offset = -frame_size + 48;
7948 /* Describe our frame. If the frame size doesn't fit in a 32-bit integer,
7949 print it as zero to avoid an assembler error. We won't be
7950 properly describing such a frame, but that's the best we can do. */
7951 if (TARGET_ABI_UNICOSMK)
7953 else if (TARGET_ABI_OPEN_VMS)
7954 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7955 HOST_WIDE_INT_PRINT_DEC "\n",
7957 frame_size >= (1UL << 31) ? 0 : frame_size,
7959 else if (!flag_inhibit_size_directive)
7960 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7961 (frame_pointer_needed
7962 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7963 frame_size >= (1UL << 31) ? 0 : frame_size,
7964 current_function_pretend_args_size);
7966 /* Describe which registers were spilled. */
7967 if (TARGET_ABI_UNICOSMK)
7969 else if (TARGET_ABI_OPEN_VMS)
7972 /* ??? Does VMS care if mask contains ra? The old code didn't
7973 set it, so I don't set it here. */
7974 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7976 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7977 if (alpha_procedure_type == PT_REGISTER)
7978 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7980 else if (!flag_inhibit_size_directive)
7984 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7985 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7987 for (i = 0; i < 32; ++i)
7988 if (imask & (1UL << i))
7993 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7994 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7997 #if TARGET_ABI_OPEN_VMS
7998 /* Ifdef'ed because link_section is only available then. */
7999 switch_to_section (readonly_data_section);
8000 fprintf (file, "\t.align 3\n");
8001 assemble_name (file, fnname); fputs ("..na:\n", file);
8002 fputs ("\t.ascii \"", file);
8003 assemble_name (file, fnname);
8004 fputs ("\\0\"\n", file);
8005 alpha_need_linkage (fnname, 1);
8006 switch_to_section (text_section);
8010 /* Emit the .prologue note at the scheduled end of the prologue. */
8013 alpha_output_function_end_prologue (FILE *file)
8015 if (TARGET_ABI_UNICOSMK)
8017 else if (TARGET_ABI_OPEN_VMS)
8018 fputs ("\t.prologue\n", file);
8019 else if (TARGET_ABI_WINDOWS_NT)
8020 fputs ("\t.prologue 0\n", file);
8021 else if (!flag_inhibit_size_directive)
8022 fprintf (file, "\t.prologue %d\n",
8023 alpha_function_needs_gp || current_function_is_thunk);
8026 /* Write function epilogue. */
8028 /* ??? At some point we will want to support full unwind, and so will
8029 need to mark the epilogue as well. At the moment, we just confuse
8030 dwarf2out. */
8031 #undef FRP
8032 #define FRP(exp) exp
8035 alpha_expand_epilogue (void)
8037 /* Registers to save. */
8038 unsigned long imask = 0;
8039 unsigned long fmask = 0;
8040 /* Stack space needed for pushing registers clobbered by us. */
8041 HOST_WIDE_INT sa_size;
8042 /* Complete stack size needed. */
8043 HOST_WIDE_INT frame_size;
8044 /* Offset from base reg to register save area. */
8045 HOST_WIDE_INT reg_offset;
8046 int fp_is_frame_pointer, fp_offset;
8047 rtx sa_reg, sa_reg_exp = NULL;
8048 rtx sp_adj1, sp_adj2, mem;
8052 sa_size = alpha_sa_size ();
8054 frame_size = get_frame_size ();
8055 if (TARGET_ABI_OPEN_VMS)
8056 frame_size = ALPHA_ROUND (sa_size
8057 + (alpha_procedure_type == PT_STACK ? 8 : 0)
8059 + current_function_pretend_args_size);
8060 else if (TARGET_ABI_UNICOSMK)
8061 frame_size = ALPHA_ROUND (sa_size
8062 + (alpha_procedure_type == PT_STACK ? 48 : 0))
8063 + ALPHA_ROUND (frame_size
8064 + current_function_outgoing_args_size);
8066 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
8068 + ALPHA_ROUND (frame_size
8069 + current_function_pretend_args_size));
8071 if (TARGET_ABI_OPEN_VMS)
8073 if (alpha_procedure_type == PT_STACK)
8079 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
8081 alpha_sa_mask (&imask, &fmask);
8084 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8085 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8087 sa_reg = stack_pointer_rtx;
8089 if (current_function_calls_eh_return)
8090 eh_ofs = EH_RETURN_STACKADJ_RTX;
8094 if (!TARGET_ABI_UNICOSMK && sa_size)
8096 /* If we have a frame pointer, restore SP from it. */
8097 if ((TARGET_ABI_OPEN_VMS
8098 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8099 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8100 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8102 /* Cope with very large offsets to the register save area. */
8103 if (reg_offset + sa_size > 0x8000)
8105 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8108 if (low + sa_size <= 0x8000)
8109 bias = reg_offset - low, reg_offset = low;
8111 bias = reg_offset, reg_offset = 0;
8113 sa_reg = gen_rtx_REG (DImode, 22);
8114 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8116 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8119 /* Restore registers in order, excepting a true frame pointer. */
8121 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8123 set_mem_alias_set (mem, alpha_sr_alias_set);
8124 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8127 imask &= ~(1UL << REG_RA);
8129 for (i = 0; i < 31; ++i)
8130 if (imask & (1UL << i))
8132 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8133 fp_offset = reg_offset;
8136 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8137 set_mem_alias_set (mem, alpha_sr_alias_set);
8138 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8143 for (i = 0; i < 31; ++i)
8144 if (fmask & (1UL << i))
8146 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8147 set_mem_alias_set (mem, alpha_sr_alias_set);
8148 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8152 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8154 /* Restore callee-saved general-purpose registers. */
8158 for (i = 9; i < 15; i++)
8159 if (imask & (1UL << i))
8161 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8163 set_mem_alias_set (mem, alpha_sr_alias_set);
8164 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8168 for (i = 2; i < 10; i++)
8169 if (fmask & (1UL << i))
8171 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8173 set_mem_alias_set (mem, alpha_sr_alias_set);
8174 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8178 /* Restore the return address from the DSIB. */
8180 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8181 set_mem_alias_set (mem, alpha_sr_alias_set);
8182 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8185 if (frame_size || eh_ofs)
8187 sp_adj1 = stack_pointer_rtx;
8191 sp_adj1 = gen_rtx_REG (DImode, 23);
8192 emit_move_insn (sp_adj1,
8193 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8196 /* If the stack size is large, begin computation into a temporary
8197 register so as not to interfere with a potential fp restore,
8198 which must be consecutive with an SP restore. */
8199 if (frame_size < 32768
8200 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
8201 sp_adj2 = GEN_INT (frame_size);
8202 else if (TARGET_ABI_UNICOSMK)
8204 sp_adj1 = gen_rtx_REG (DImode, 23);
8205 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8206 sp_adj2 = const0_rtx;
8208 else if (frame_size < 0x40007fffL)
8210 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8212 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8213 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8217 sp_adj1 = gen_rtx_REG (DImode, 23);
8218 FRP (emit_move_insn (sp_adj1, sp_adj2));
8220 sp_adj2 = GEN_INT (low);
8224 rtx tmp = gen_rtx_REG (DImode, 23);
8225 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8229 /* We can't drop new things to memory this late, as far as we know,
8230 so build it up by pieces. */
8231 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8232 -(frame_size < 0)));
8233 gcc_assert (sp_adj2);
8237 /* From now on, things must be in order. So emit blockages. */
8239 /* Restore the frame pointer. */
8240 if (TARGET_ABI_UNICOSMK)
8242 emit_insn (gen_blockage ());
8243 mem = gen_rtx_MEM (DImode,
8244 plus_constant (hard_frame_pointer_rtx, -16));
8245 set_mem_alias_set (mem, alpha_sr_alias_set);
8246 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8248 else if (fp_is_frame_pointer)
8250 emit_insn (gen_blockage ());
8251 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8252 set_mem_alias_set (mem, alpha_sr_alias_set);
8253 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8255 else if (TARGET_ABI_OPEN_VMS)
8257 emit_insn (gen_blockage ());
8258 FRP (emit_move_insn (hard_frame_pointer_rtx,
8259 gen_rtx_REG (DImode, vms_save_fp_regno)));
8262 /* Restore the stack pointer. */
8263 emit_insn (gen_blockage ());
8264 if (sp_adj2 == const0_rtx)
8265 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8267 FRP (emit_move_insn (stack_pointer_rtx,
8268 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8272 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8274 emit_insn (gen_blockage ());
8275 FRP (emit_move_insn (hard_frame_pointer_rtx,
8276 gen_rtx_REG (DImode, vms_save_fp_regno)));
8278 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8280 /* Decrement the frame pointer if the function does not have a
8281 frame of its own. */
8283 emit_insn (gen_blockage ());
8284 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8285 hard_frame_pointer_rtx, constm1_rtx)));
8290 /* Output the rest of the textual info surrounding the epilogue. */
8293 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8295 #if TARGET_ABI_OPEN_VMS
8296 alpha_write_linkage (file, fnname, decl);
8299 /* End the function. */
8300 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8302 fputs ("\t.end ", file);
8303 assemble_name (file, fnname);
8306 inside_function = FALSE;
8308 /* Output jump tables and the static subroutine information block. */
8309 if (TARGET_ABI_UNICOSMK)
8311 unicosmk_output_ssib (file, fnname);
8312 unicosmk_output_deferred_case_vectors (file);
8317 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8319 In order to avoid the hordes of differences between generated code
8320 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8321 lots of code loading up large constants, generate rtl and emit it
8322 instead of going straight to text.
8324 Not sure why this idea hasn't been explored before... */
8327 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8328 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8331 HOST_WIDE_INT hi, lo;
8332 rtx this, insn, funexp;
8334 reset_block_changes ();
8336 /* We always require a valid GP. */
8337 emit_insn (gen_prologue_ldgp ());
8338 emit_note (NOTE_INSN_PROLOGUE_END);
8340 /* Find the "this" pointer. If the function returns a structure,
8341 the structure return pointer is in $16. */
8342 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8343 this = gen_rtx_REG (Pmode, 17);
8345 this = gen_rtx_REG (Pmode, 16);
8347 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8348 entire constant for the add. */
8349 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8350 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8351 if (hi + lo == delta)
8354 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8356 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8360 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8361 delta, -(delta < 0));
8362 emit_insn (gen_adddi3 (this, this, tmp));
8365 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8370 tmp = gen_rtx_REG (Pmode, 0);
8371 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8373 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8374 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8375 if (hi + lo == vcall_offset)
8378 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8382 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8383 vcall_offset, -(vcall_offset < 0));
8384 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8388 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8391 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8393 emit_insn (gen_adddi3 (this, this, tmp));
8396 /* Generate a tail call to the target function. */
8397 if (! TREE_USED (function))
8399 assemble_external (function);
8400 TREE_USED (function) = 1;
8402 funexp = XEXP (DECL_RTL (function), 0);
8403 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8404 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8405 SIBLING_CALL_P (insn) = 1;
8407 /* Run just enough of rest_of_compilation to get the insns emitted.
8408 There's not really enough bulk here to make other passes such as
8409 instruction scheduling worthwhile. Note that use_thunk calls
8410 assemble_start_function and assemble_end_function. */
8411 insn = get_insns ();
8412 insn_locators_initialize ();
8413 shorten_branches (insn);
8414 final_start_function (insn, file, 1);
8415 final (insn, file, 1);
8416 final_end_function ();
8418 #endif /* TARGET_ABI_OSF */
8420 /* Debugging support. */
8424 /* Count the number of sdb-related labels that are generated (to find block
8425 start and end boundaries). */
8427 int sdb_label_count = 0;
8429 /* Name of the file containing the current function. */
8431 static const char *current_function_file = "";
8433 /* Offsets to alpha virtual arg/local debugging pointers. */
8435 long alpha_arg_offset;
8436 long alpha_auto_offset;
8438 /* Emit a new filename to a stream. */
8441 alpha_output_filename (FILE *stream, const char *name)
8443 static int first_time = TRUE;
8448 ++num_source_filenames;
8449 current_function_file = name;
8450 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8451 output_quoted_string (stream, name);
8452 fprintf (stream, "\n");
8453 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8454 fprintf (stream, "\t#@stabs\n");
8457 else if (write_symbols == DBX_DEBUG)
8458 /* dbxout.c will emit an appropriate .stabs directive. */
8461 else if (name != current_function_file
8462 && strcmp (name, current_function_file) != 0)
8464 if (inside_function && ! TARGET_GAS)
8465 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8468 ++num_source_filenames;
8469 current_function_file = name;
8470 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8473 output_quoted_string (stream, name);
8474 fprintf (stream, "\n");
8478 /* Structure to show the current status of registers and memory. */
8480 struct shadow_summary
8483 unsigned int i : 31; /* Mask of int regs */
8484 unsigned int fp : 31; /* Mask of fp regs */
8485 unsigned int mem : 1; /* mem == imem | fpmem */
8489 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8490 to the summary structure. SET is nonzero if the insn is setting the
8491 object, otherwise zero. */
8494 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8496 const char *format_ptr;
8502 switch (GET_CODE (x))
8504 /* ??? Note that this case would be incorrect if the Alpha had a
8505 ZERO_EXTRACT in SET_DEST. */
8507 summarize_insn (SET_SRC (x), sum, 0);
8508 summarize_insn (SET_DEST (x), sum, 1);
8512 summarize_insn (XEXP (x, 0), sum, 1);
8516 summarize_insn (XEXP (x, 0), sum, 0);
8520 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8521 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8525 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8526 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8530 summarize_insn (SUBREG_REG (x), sum, 0);
8535 int regno = REGNO (x);
8536 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8538 if (regno == 31 || regno == 63)
8544 sum->defd.i |= mask;
8546 sum->defd.fp |= mask;
8551 sum->used.i |= mask;
8553 sum->used.fp |= mask;
8564 /* Find the regs used in memory address computation: */
8565 summarize_insn (XEXP (x, 0), sum, 0);
8568 case CONST_INT: case CONST_DOUBLE:
8569 case SYMBOL_REF: case LABEL_REF: case CONST:
8570 case SCRATCH: case ASM_INPUT:
8573 /* Handle common unary and binary ops for efficiency. */
8574 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8575 case MOD: case UDIV: case UMOD: case AND: case IOR:
8576 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8577 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8578 case NE: case EQ: case GE: case GT: case LE:
8579 case LT: case GEU: case GTU: case LEU: case LTU:
8580 summarize_insn (XEXP (x, 0), sum, 0);
8581 summarize_insn (XEXP (x, 1), sum, 0);
8584 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8585 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8586 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8587 case SQRT: case FFS:
8588 summarize_insn (XEXP (x, 0), sum, 0);
8592 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8593 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8594 switch (format_ptr[i])
8597 summarize_insn (XEXP (x, i), sum, 0);
8601 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8602 summarize_insn (XVECEXP (x, i, j), sum, 0);
8614 /* Ensure a sufficient number of `trapb' insns are in the code when
8615 the user requests code with a trap precision of functions or
8618 In naive mode, when the user requests a trap-precision of
8619 "instruction", a trapb is needed after every instruction that may
8620 generate a trap. This ensures that the code is resumption safe but
8621 it is also slow.
8623 When optimizations are turned on, we delay issuing a trapb as long
8624 as possible. In this context, a trap shadow is the sequence of
8625 instructions that starts with a (potentially) trap generating
8626 instruction and extends to the next trapb or call_pal instruction
8627 (but GCC never generates call_pal by itself). We can delay (and
8628 therefore sometimes omit) a trapb subject to the following
8629 conditions:
8631 (a) On entry to the trap shadow, if any Alpha register or memory
8632 location contains a value that is used as an operand value by some
8633 instruction in the trap shadow (live on entry), then no instruction
8634 in the trap shadow may modify the register or memory location.
8636 (b) Within the trap shadow, the computation of the base register
8637 for a memory load or store instruction may not involve using the
8638 result of an instruction that might generate an UNPREDICTABLE
8641 (c) Within the trap shadow, no register may be used more than once
8642 as a destination register. (This is to make life easier for the
8643 trap-handler.)
8645 (d) The trap shadow may not include any branch instructions. */
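/* As a purely illustrative shadow (not generated from any particular
   source), consider

	addt $f1,$f2,$f3	# may trap; shadow begins
	cpys $f3,$f3,$f4	# ok: new destination, sources unmodified
	addt $f1,$f2,$f4	# violates (c): $f4 written twice

   where the third instruction would force a trapb to be emitted ahead
   of it.  */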
8648 alpha_handle_trap_shadows (void)
8650 struct shadow_summary shadow;
8651 int trap_pending, exception_nesting;
8655 exception_nesting = 0;
8658 shadow.used.mem = 0;
8659 shadow.defd = shadow.used;
8661 for (i = get_insns (); i ; i = NEXT_INSN (i))
8663 if (GET_CODE (i) == NOTE)
8665 switch (NOTE_LINE_NUMBER (i))
8667 case NOTE_INSN_EH_REGION_BEG:
8668 exception_nesting++;
8673 case NOTE_INSN_EH_REGION_END:
8674 exception_nesting--;
8679 case NOTE_INSN_EPILOGUE_BEG:
8680 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8685 else if (trap_pending)
8687 if (alpha_tp == ALPHA_TP_FUNC)
8689 if (GET_CODE (i) == JUMP_INSN
8690 && GET_CODE (PATTERN (i)) == RETURN)
8693 else if (alpha_tp == ALPHA_TP_INSN)
8697 struct shadow_summary sum;
8702 sum.defd = sum.used;
8704 switch (GET_CODE (i))
8707 /* Annoyingly, get_attr_trap will die on these. */
8708 if (GET_CODE (PATTERN (i)) == USE
8709 || GET_CODE (PATTERN (i)) == CLOBBER)
8712 summarize_insn (PATTERN (i), &sum, 0);
8714 if ((sum.defd.i & shadow.defd.i)
8715 || (sum.defd.fp & shadow.defd.fp))
8717 /* (c) would be violated */
8721 /* Combine shadow with summary of current insn: */
8722 shadow.used.i |= sum.used.i;
8723 shadow.used.fp |= sum.used.fp;
8724 shadow.used.mem |= sum.used.mem;
8725 shadow.defd.i |= sum.defd.i;
8726 shadow.defd.fp |= sum.defd.fp;
8727 shadow.defd.mem |= sum.defd.mem;
8729 if ((sum.defd.i & shadow.used.i)
8730 || (sum.defd.fp & shadow.used.fp)
8731 || (sum.defd.mem & shadow.used.mem))
8733 /* (a) would be violated (also takes care of (b)) */
8734 gcc_assert (get_attr_trap (i) != TRAP_YES
8735 || (!(sum.defd.i & sum.used.i)
8736 && !(sum.defd.fp & sum.used.fp)));
8754 n = emit_insn_before (gen_trapb (), i);
8755 PUT_MODE (n, TImode);
8756 PUT_MODE (i, TImode);
8760 shadow.used.mem = 0;
8761 shadow.defd = shadow.used;
8766 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8767 && GET_CODE (i) == INSN
8768 && GET_CODE (PATTERN (i)) != USE
8769 && GET_CODE (PATTERN (i)) != CLOBBER
8770 && get_attr_trap (i) == TRAP_YES)
8772 if (optimize && !trap_pending)
8773 summarize_insn (PATTERN (i), &shadow, 0);
8779 /* Alpha can only issue instruction groups simultaneously if they are
8780 suitably aligned. This is very processor-specific. */
8781 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8782 that are marked "fake". These instructions do not exist on that target,
8783 but it is possible to see these insns with deranged combinations of
8784 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8785 choose a result at random. */
8787 enum alphaev4_pipe {
8794 enum alphaev5_pipe {
8805 static enum alphaev4_pipe
8806 alphaev4_insn_pipe (rtx insn)
8808 if (recog_memoized (insn) < 0)
8810 if (get_attr_length (insn) != 4)
8813 switch (get_attr_type (insn))
8829 case TYPE_MVI: /* fake */
8844 case TYPE_FSQRT: /* fake */
8845 case TYPE_FTOI: /* fake */
8846 case TYPE_ITOF: /* fake */
8854 static enum alphaev5_pipe
8855 alphaev5_insn_pipe (rtx insn)
8857 if (recog_memoized (insn) < 0)
8859 if (get_attr_length (insn) != 4)
8862 switch (get_attr_type (insn))
8882 case TYPE_FTOI: /* fake */
8883 case TYPE_ITOF: /* fake */
8898 case TYPE_FSQRT: /* fake */
8909 /* IN_USE is a mask of the slots currently filled within the insn group.
8910 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8911 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8913 LEN is, of course, the length of the group in bytes. */
8916 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8923 || GET_CODE (PATTERN (insn)) == CLOBBER
8924 || GET_CODE (PATTERN (insn)) == USE)
8929 enum alphaev4_pipe pipe;
8931 pipe = alphaev4_insn_pipe (insn);
8935 /* Force complex instructions to start new groups. */
8939 /* If this is a completely unrecognized insn, it's an asm.
8940 We don't know how long it is, so record length as -1 to
8941 signal a needed realignment. */
8942 if (recog_memoized (insn) < 0)
8945 len = get_attr_length (insn);
8949 if (in_use & EV4_IB0)
8951 if (in_use & EV4_IB1)
8956 in_use |= EV4_IB0 | EV4_IBX;
8960 if (in_use & EV4_IB0)
8962 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8970 if (in_use & EV4_IB1)
8980 /* Haifa doesn't do well scheduling branches. */
8981 if (GET_CODE (insn) == JUMP_INSN)
8985 insn = next_nonnote_insn (insn);
8987 if (!insn || ! INSN_P (insn))
8990 /* Let Haifa tell us where it thinks insn group boundaries are. */
8991 if (GET_MODE (insn) == TImode)
8994 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8999 insn = next_nonnote_insn (insn);
9007 /* IN_USE is a mask of the slots currently filled within the insn group.
9008 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9009 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9011 LEN is, of course, the length of the group in bytes. */
9014 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
9021 || GET_CODE (PATTERN (insn)) == CLOBBER
9022 || GET_CODE (PATTERN (insn)) == USE)
9027 enum alphaev5_pipe pipe;
9029 pipe = alphaev5_insn_pipe (insn);
9033 /* Force complex instructions to start new groups. */
9037 /* If this is a completely unrecognized insn, it's an asm.
9038 We don't know how long it is, so record length as -1 to
9039 signal a needed realignment. */
9040 if (recog_memoized (insn) < 0)
9043 len = get_attr_length (insn);
9046 /* ??? In most of the places below, we would like to assert that these
9047 cases never happen, as that would indicate an error either in Haifa, or
9048 in the scheduling description. Unfortunately, Haifa never
9049 schedules the last instruction of the BB, so we don't have
9050 an accurate TI bit to go off. */
9052 if (in_use & EV5_E0)
9054 if (in_use & EV5_E1)
9059 in_use |= EV5_E0 | EV5_E01;
9063 if (in_use & EV5_E0)
9065 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9073 if (in_use & EV5_E1)
9079 if (in_use & EV5_FA)
9081 if (in_use & EV5_FM)
9086 in_use |= EV5_FA | EV5_FAM;
9090 if (in_use & EV5_FA)
9096 if (in_use & EV5_FM)
9109 /* Haifa doesn't do well scheduling branches. */
9110 /* ??? If this is predicted not-taken, slotting continues, except
9111 that no more IBR, FBR, or JSR insns may be slotted. */
9112 if (GET_CODE (insn) == JUMP_INSN)
9116 insn = next_nonnote_insn (insn);
9118 if (!insn || ! INSN_P (insn))
9121 /* Let Haifa tell us where it thinks insn group boundaries are. */
9122 if (GET_MODE (insn) == TImode)
9125 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9130 insn = next_nonnote_insn (insn);
9139 alphaev4_next_nop (int *pin_use)
9141 int in_use = *pin_use;
9144 if (!(in_use & EV4_IB0))
9149 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9154 else if (TARGET_FP && !(in_use & EV4_IB1))
9167 alphaev5_next_nop (int *pin_use)
9169 int in_use = *pin_use;
9172 if (!(in_use & EV5_E1))
9177 else if (TARGET_FP && !(in_use & EV5_FA))
9182 else if (TARGET_FP && !(in_use & EV5_FM))
9194 /* The instruction group alignment main loop. */
9197 alpha_align_insns (unsigned int max_align,
9198 rtx (*next_group) (rtx, int *, int *),
9199 rtx (*next_nop) (int *))
9201 /* ALIGN is the known alignment for the insn group. */
9203 /* OFS is the offset of the current insn in the insn group. */
9205 int prev_in_use, in_use, len, ldgp;
9208 /* Let shorten_branches take care of assigning alignments to code labels. */
9209 shorten_branches (get_insns ());
9211 if (align_functions < 4)
9213 else if ((unsigned int) align_functions < max_align)
9214 align = align_functions;
9218 ofs = prev_in_use = 0;
9220 if (GET_CODE (i) == NOTE)
9221 i = next_nonnote_insn (i);
9223 ldgp = alpha_function_needs_gp ? 8 : 0;
9227 next = (*next_group) (i, &in_use, &len);
9229 /* When we see a label, resync alignment etc. */
9230 if (GET_CODE (i) == CODE_LABEL)
9232 unsigned int new_align = 1 << label_to_alignment (i);
9234 if (new_align >= align)
9236 align = new_align < max_align ? new_align : max_align;
9240 else if (ofs & (new_align-1))
9241 ofs = (ofs | (new_align-1)) + 1;
9245 /* Handle complex instructions specially. */
9246 else if (in_use == 0)
9248 /* Asms will have length < 0. This is a signal that we have
9249 lost alignment knowledge. Assume, however, that the asm
9250 will not mis-align instructions. */
9259 /* If the known alignment is smaller than the recognized insn group,
9260 realign the output. */
9261 else if ((int) align < len)
9263 unsigned int new_log_align = len > 8 ? 4 : 3;
9266 where = prev = prev_nonnote_insn (i);
9267 if (!where || GET_CODE (where) != CODE_LABEL)
9270 /* Can't realign between a call and its gp reload. */
9271 if (! (TARGET_EXPLICIT_RELOCS
9272 && prev && GET_CODE (prev) == CALL_INSN))
9274 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9275 align = 1 << new_log_align;
9280 /* We may not insert padding inside the initial ldgp sequence. */
9284 /* If the group won't fit in the same INT16 as the previous,
9285 we need to add padding to keep the group together. Rather
9286 than simply leaving the insn filling to the assembler, we
9287 can make use of the knowledge of what sorts of instructions
9288 were issued in the previous group to make sure that all of
9289 the added nops are really free. */
9290 else if (ofs + len > (int) align)
9292 int nop_count = (align - ofs) / 4;
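/* E.g. with align == 16, ofs == 12 and len == 8, the group would
   straddle the 16-byte fetch block, so (16 - 12) / 4 == 1 nop pushes
   it into the next aligned block.  */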
9295 /* Insert nops before labels, branches, and calls to truly merge
9296 the execution of the nops with the previous instruction group. */
9297 where = prev_nonnote_insn (i);
9300 if (GET_CODE (where) == CODE_LABEL)
9302 rtx where2 = prev_nonnote_insn (where);
9303 if (where2 && GET_CODE (where2) == JUMP_INSN)
9306 else if (GET_CODE (where) == INSN)
9313 emit_insn_before ((*next_nop)(&prev_in_use), where);
9314 while (--nop_count);
9318 ofs = (ofs + len) & (align - 1);
9319 prev_in_use = in_use;
9324 /* Machine dependent reorg pass. */
9329 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9330 alpha_handle_trap_shadows ();
9332 /* Due to the number of extra trapb insns, don't bother fixing up
9333 alignment when trap precision is instruction. Moreover, we can
9334 only do our job when sched2 is run. */
9335 if (optimize && !optimize_size
9336 && alpha_tp != ALPHA_TP_INSN
9337 && flag_schedule_insns_after_reload)
9339 if (alpha_tune == PROCESSOR_EV4)
9340 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9341 else if (alpha_tune == PROCESSOR_EV5)
9342 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9346 #if !TARGET_ABI_UNICOSMK
9353 alpha_file_start (void)
9355 #ifdef OBJECT_FORMAT_ELF
9356 /* If emitting dwarf2 debug information, we cannot generate a .file
9357 directive to start the file, as it will conflict with dwarf2out
9358 file numbers. So it's only useful when emitting mdebug output. */
9359 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9362 default_file_start ();
9364 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9367 fputs ("\t.set noreorder\n", asm_out_file);
9368 fputs ("\t.set volatile\n", asm_out_file);
9369 if (!TARGET_ABI_OPEN_VMS)
9370 fputs ("\t.set noat\n", asm_out_file);
9371 if (TARGET_EXPLICIT_RELOCS)
9372 fputs ("\t.set nomacro\n", asm_out_file);
9373 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9377 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9379 else if (TARGET_MAX)
9381 else if (TARGET_BWX)
9383 else if (alpha_cpu == PROCESSOR_EV5)
9388 fprintf (asm_out_file, "\t.arch %s\n", arch);
9393 #ifdef OBJECT_FORMAT_ELF
9395 /* Return a section for X. The only special thing we do here is to
9396 honor small data. */
9399 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9400 unsigned HOST_WIDE_INT align)
9402 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9403 /* ??? Consider using mergeable sdata sections. */
9404 return sdata_section;
9406 return default_elf_select_rtx_section (mode, x, align);
9409 #endif /* OBJECT_FORMAT_ELF */
9411 /* Structure to collect function names for final output in link section. */
9412 /* Note that items marked with GTY can't be ifdef'ed out. */
9414 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9415 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9417 struct alpha_links GTY(())
9421 enum links_kind lkind;
9422 enum reloc_kind rkind;
9425 struct alpha_funcs GTY(())
9428 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9432 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9433 splay_tree alpha_links_tree;
9434 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9435 splay_tree alpha_funcs_tree;
9437 static GTY(()) int alpha_funcs_num;
9439 #if TARGET_ABI_OPEN_VMS
9441 /* Return the VMS argument type corresponding to MODE. */
9444 alpha_arg_type (enum machine_mode mode)
9449 return TARGET_FLOAT_VAX ? FF : FS;
9451 return TARGET_FLOAT_VAX ? FD : FT;
9457 /* Return an rtx for an integer representing the VMS Argument Information
9461 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9463 unsigned HOST_WIDE_INT regval = cum.num_args;
9466 for (i = 0; i < 6; i++)
9467 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9469 return GEN_INT (regval);
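/* For example, a call passing two quadword integer arguments (for which
   alpha_arg_type yields the default I64 code) has cum.num_args == 2, so

     regval == 2 | (I64 << 8) | (I64 << 11)

   i.e. the argument count in the low bits and one 3-bit type code per
   argument register above it.  */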
9472 /* Make (or fake) .linkage entry for function call.
9474 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9476 Return a SYMBOL_REF rtx for the linkage. */
9479 alpha_need_linkage (const char *name, int is_local)
9481 splay_tree_node node;
9482 struct alpha_links *al;
9489 struct alpha_funcs *cfaf;
9491 if (!alpha_funcs_tree)
9492 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9493 splay_tree_compare_pointers);
9495 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9498 cfaf->num = ++alpha_funcs_num;
9500 splay_tree_insert (alpha_funcs_tree,
9501 (splay_tree_key) current_function_decl,
9502 (splay_tree_value) cfaf);
9505 if (alpha_links_tree)
9507 /* Is this name already defined? */
9509 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9512 al = (struct alpha_links *) node->value;
9515 /* Defined here, but previously assumed external. */
9516 if (al->lkind == KIND_EXTERN)
9517 al->lkind = KIND_LOCAL;
9521 /* Used here, but previously assumed unused. */
9522 if (al->lkind == KIND_UNUSED)
9523 al->lkind = KIND_LOCAL;
9529 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9531 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9532 name = ggc_strdup (name);
9534 /* Assume external if no definition. */
9535 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9537 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9538 get_identifier (name);
9540 /* Construct a SYMBOL_REF for us to call. */
9542 size_t name_len = strlen (name);
9543 char *linksym = alloca (name_len + 6);
9544 linksym[0] = '@';
9545 memcpy (linksym + 1, name, name_len);
9546 memcpy (linksym + 1 + name_len, "..lk", 5);
9547 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9548 ggc_alloc_string (linksym, name_len + 5));
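/* So for NAME "foo" the linkage symbol is "@foo..lk": one byte for the
   '@' marker, the name itself, and the "..lk" suffix, matching the
   name_len + 5 characters handed to ggc_alloc_string above.  */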
9551 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9552 (splay_tree_value) al);
9558 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9560 splay_tree_node cfunnode;
9561 struct alpha_funcs *cfaf;
9562 struct alpha_links *al;
9563 const char *name = XSTR (linkage, 0);
9565 cfaf = (struct alpha_funcs *) 0;
9566 al = (struct alpha_links *) 0;
9568 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9569 cfaf = (struct alpha_funcs *) cfunnode->value;
9573 splay_tree_node lnode;
9575 /* Is this name already defined? */
9577 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9579 al = (struct alpha_links *) lnode->value;
9582 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9590 splay_tree_node node = 0;
9591 struct alpha_links *anl;
9596 name_len = strlen (name);
9598 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9599 al->num = cfaf->num;
9601 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9604 anl = (struct alpha_links *) node->value;
9605 al->lkind = anl->lkind;
9608 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9609 buflen = strlen (buf);
9610 linksym = alloca (buflen + 1);
9611 memcpy (linksym, buf, buflen + 1);
9613 al->linkage = gen_rtx_SYMBOL_REF
9614 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9616 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9617 (splay_tree_value) al);
9621 al->rkind = KIND_CODEADDR;
9623 al->rkind = KIND_LINKAGE;
9626 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9632 alpha_write_one_linkage (splay_tree_node node, void *data)
9634 const char *const name = (const char *) node->key;
9635 struct alpha_links *link = (struct alpha_links *) node->value;
9636 FILE *stream = (FILE *) data;
9638 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9639 if (link->rkind == KIND_CODEADDR)
9641 if (link->lkind == KIND_LOCAL)
9643 /* Local and used */
9644 fprintf (stream, "\t.quad %s..en\n", name);
9648 /* External and used, request code address. */
9649 fprintf (stream, "\t.code_address %s\n", name);
9654 if (link->lkind == KIND_LOCAL)
9656 /* Local and used, build linkage pair. */
9657 fprintf (stream, "\t.quad %s..en\n", name);
9658 fprintf (stream, "\t.quad %s\n", name);
9662 /* External and used, request linkage pair. */
9663 fprintf (stream, "\t.linkage %s\n", name);
9671 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9673 splay_tree_node node;
9674 struct alpha_funcs *func;
9676 fprintf (stream, "\t.link\n");
9677 fprintf (stream, "\t.align 3\n");
9680 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9681 func = (struct alpha_funcs *) node->value;
9683 fputs ("\t.name ", stream);
9684 assemble_name (stream, funname);
9685 fputs ("..na\n", stream);
9686 ASM_OUTPUT_LABEL (stream, funname);
9687 fprintf (stream, "\t.pdesc ");
9688 assemble_name (stream, funname);
9689 fprintf (stream, "..en,%s\n",
9690 alpha_procedure_type == PT_STACK ? "stack"
9691 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9695 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9696 /* splay_tree_delete (func->links); */
9700 /* Given a decl, a section name, and whether the decl initializer
9701 has relocs, choose attributes for the section. */
9703 #define SECTION_VMS_OVERLAY SECTION_FORGET
9704 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9705 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
static unsigned int
vms_section_type_flags (tree decl, const char *name, int reloc)
{
9710 unsigned int flags = default_section_type_flags (decl, name, reloc);
9712 if (decl && DECL_ATTRIBUTES (decl)
9713 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9714 flags |= SECTION_VMS_OVERLAY;
9715 if (decl && DECL_ATTRIBUTES (decl)
9716 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9717 flags |= SECTION_VMS_GLOBAL;
9718 if (decl && DECL_ATTRIBUTES (decl)
9719 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
    flags |= SECTION_VMS_INITIALIZE;

  return flags;
}
9725 /* Switch to an arbitrary section NAME with attributes as specified
9726 by FLAGS. ALIGN specifies any known alignment requirements for
9727 the section; 0 if the default should be used. */
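/* For illustration: a decl carrying both the "global" and
   "initialize" VMS attributes comes out as ".section NAME,GBL,NOMOD"
   (cf. the fprintf calls below).  */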
static void
vms_asm_named_section (const char *name, unsigned int flags,
		       tree decl ATTRIBUTE_UNUSED)
{
9733 fputc ('\n', asm_out_file);
9734 fprintf (asm_out_file, ".section\t%s", name);
9736 if (flags & SECTION_VMS_OVERLAY)
9737 fprintf (asm_out_file, ",OVR");
9738 if (flags & SECTION_VMS_GLOBAL)
9739 fprintf (asm_out_file, ",GBL");
9740 if (flags & SECTION_VMS_INITIALIZE)
9741 fprintf (asm_out_file, ",NOMOD");
9742 if (flags & SECTION_DEBUG)
9743 fprintf (asm_out_file, ",NOWRT");
  fputc ('\n', asm_out_file);
}
9748 /* Record an element in the table of global constructors. SYMBOL is
9749 a SYMBOL_REF of the function to be called; PRIORITY is a number
9750 between 0 and MAX_INIT_PRIORITY.
9752 Differs from default_ctors_section_asm_out_constructor in that the
9753 width of the .ctors entry is always 64 bits, rather than the 32 bits
9754 used by a normal pointer. */
static void
vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
9759 switch_to_section (ctors_section);
9760 assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

static void
vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (dtors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

#else /* !TARGET_ABI_OPEN_VMS */

rtx
alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
		    int is_local ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

rtx
alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
		   tree cfundecl ATTRIBUTE_UNUSED,
		   int lflag ATTRIBUTE_UNUSED,
		   int rflag ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}
9789 #endif /* TARGET_ABI_OPEN_VMS */
9791 #if TARGET_ABI_UNICOSMK
/* This evaluates to true if we do not know how to pass TYPE solely in
   registers.  This is the case for all arguments that do not fit in
   two 64-bit registers.  */

static bool
unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
{
  if (type == NULL)
    return false;

  if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    return true;
  if (TREE_ADDRESSABLE (type))
    return true;

  return ALPHA_ARG_SIZE (mode, type, 0) > 2;
}
9811 /* Define the offset between two registers, one to be eliminated, and the
9812 other its replacement, at the start of a routine. */
int
unicosmk_initial_elimination_offset (int from, int to)
{
  int fixed_size;

  fixed_size = alpha_sa_size ();
  if (fixed_size != 0)
    fixed_size += 48;

  if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return -fixed_size;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return 0;
9827 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9828 return (ALPHA_ROUND (current_function_outgoing_args_size)
9829 + ALPHA_ROUND (get_frame_size()));
9830 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9831 return (ALPHA_ROUND (fixed_size)
9832 + ALPHA_ROUND (get_frame_size()
		   + current_function_outgoing_args_size));
  else
    gcc_unreachable ();
}
/* Output the module name for .ident and .end directives.  We have to
   strip directories and make sure that the module name starts with a
   letter or '$'.  */
static void
unicosmk_output_module_name (FILE *file)
{
9845 const char *name = lbasename (main_input_filename);
9846 unsigned len = strlen (name);
9847 char *clean_name = alloca (len + 2);
9848 char *ptr = clean_name;
9850 /* CAM only accepts module names that start with a letter or '$'. We
9851 prefix the module name with a '$' if necessary. */
  if (!ISALPHA (*name))
    *ptr++ = '$';
  memcpy (ptr, name, len + 1);
9856 clean_symbol_name (clean_name);
  fputs (clean_name, file);
}
9860 /* Output the definition of a common variable. */
static void
unicosmk_output_common (FILE *file, const char *name, int size, int align)
{
  tree name_tree;
9866 printf ("T3E__: common %s\n", name);
  fputs ("\t.endp\n\n\t.psect ", file);
  assemble_name (file, name);
  fprintf (file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
  fprintf (file, "\t.byte\t0:%d\n", size);
9874 /* Mark the symbol as defined in this module. */
9875 name_tree = get_identifier (name);
  TREE_ASM_WRITTEN (name_tree) = 1;
}
9879 #define SECTION_PUBLIC SECTION_MACH_DEP
9880 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9881 static int current_section_align;
9883 /* A get_unnamed_section callback for switching to the text section. */
static void
unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
9888 static int count = 0;
  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
}
9892 /* A get_unnamed_section callback for switching to the data section. */
static void
unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
9897 static int count = 1;
  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
}
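/* With the counters above, each switch back into the text or data
   section opens a fresh psect: gcc@text___0, gcc@text___1, ... and
   gcc@data___1, gcc@data___2, ...  */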
9901 /* Implement TARGET_ASM_INIT_SECTIONS.
9903 The Cray assembler is really weird with respect to sections. It has only
9904 named sections and you can't reopen a section once it has been closed.
9905 This means that we have to generate unique names whenever we want to
9906 reenter the text or the data section. */
static void
unicosmk_init_sections (void)
{
  text_section = get_unnamed_section (SECTION_CODE,
				      unicosmk_output_text_section_asm_op,
				      NULL);
  data_section = get_unnamed_section (SECTION_WRITE,
				      unicosmk_output_data_section_asm_op,
				      NULL);
  readonly_data_section = data_section;
}
static unsigned int
unicosmk_section_type_flags (tree decl, const char *name,
			     int reloc ATTRIBUTE_UNUSED)
{
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  if (!decl)
    return flags;
  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      if (align_functions_log > current_section_align)
	current_section_align = align_functions_log;

      if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
	flags |= SECTION_MAIN;
    }
  else
    current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9941 if (TREE_PUBLIC (decl))
    flags |= SECTION_PUBLIC;

  return flags;
}
/* Generate a section name for decl and associate it with the
   section.  */

static void
unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;
  int len;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9959 name = default_strip_name_encoding (name);
9960 len = strlen (name);
  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      char *string;
9966 /* It is essential that we prefix the section name here because
9967 otherwise the section names generated for constructors and
9968 destructors confuse collect2. */
9970 string = alloca (len + 6);
9971 sprintf (string, "code@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
9974 else if (TREE_PUBLIC (decl))
9975 DECL_SECTION_NAME (decl) = build_string (len, name);
  else
    {
      char *string = alloca (len + 6);

      sprintf (string, "data@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
}
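/* Thus a function FOO ends up in section code@FOO and a non-public
   variable BAR in data@BAR, while a public variable keeps its plain
   name; the prefixes keep collect2 from mistaking these psects for
   constructor or destructor sections.  */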
9986 /* Switch to an arbitrary section NAME with attributes as specified
9987 by FLAGS. ALIGN specifies any known alignment requirements for
9988 the section; 0 if the default should be used. */
static void
unicosmk_asm_named_section (const char *name, unsigned int flags,
			    tree decl ATTRIBUTE_UNUSED)
{
  const char *kind;
9996 /* Close the previous section. */
9998 fputs ("\t.endp\n\n", asm_out_file);
10000 /* Find out what kind of section we are opening. */
10002 if (flags & SECTION_MAIN)
10003 fputs ("\t.start\tmain\n", asm_out_file);
  if (flags & SECTION_CODE)
    kind = "code";
  else if (flags & SECTION_PUBLIC)
    kind = "common";
  else
    kind = "data";
10012 if (current_section_align != 0)
10013 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
10014 current_section_align, kind);
  else
    fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
}
static void
unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
{
  if (DECL_P (decl)
      && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
    unicosmk_unique_section (decl, 0);
}
/* Output an alignment directive.  We have to use the macro 'gcc@code@align'
   in code sections because .align fills unused space with zeroes.  */
static void
unicosmk_output_align (FILE *file, int align)
{
10033 if (inside_function)
10034 fprintf (file, "\tgcc@code@align\t%d\n", align);
  else
    fprintf (file, "\t.align\t%d\n", align);
}
10039 /* Add a case vector to the current function's list of deferred case
10040 vectors. Case vectors have to be put into a separate section because CAM
10041 does not allow data definitions in code sections. */
static void
unicosmk_defer_case_vector (rtx lab, rtx vec)
{
10046 struct machine_function *machine = cfun->machine;
10048 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10049 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
					  machine->addr_list);
}
10053 /* Output a case vector. */
static void
unicosmk_output_addr_vec (FILE *file, rtx vec)
{
10058 rtx lab = XEXP (vec, 0);
10059 rtx body = XEXP (vec, 1);
  int vlen = XVECLEN (body, 0);
  int idx;
10063 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10065 for (idx = 0; idx < vlen; idx++)
10067 ASM_OUTPUT_ADDR_VEC_ELT
      (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
}
10072 /* Output current function's deferred case vectors. */
static void
unicosmk_output_deferred_case_vectors (FILE *file)
{
  struct machine_function *machine = cfun->machine;
  rtx t;

  if (machine->addr_list == NULL_RTX)
    return;
10083 switch_to_section (data_section);
10084 for (t = machine->addr_list; t; t = XEXP (t, 1))
    unicosmk_output_addr_vec (file, XEXP (t, 0));
}
10088 /* Generate the name of the SSIB section for the current function. */
10090 #define SSIB_PREFIX "__SSIB_"
10091 #define SSIB_PREFIX_LEN 7
10093 static const char *
unicosmk_ssib_name (void)
{
  /* This is ok since CAM won't be able to deal with names longer than
     that anyway.  */

  static char name[256];
  int len;
  const char *fnname;
  rtx x;
10105 x = DECL_RTL (cfun->decl);
  gcc_assert (GET_CODE (x) == MEM);
  x = XEXP (x, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF);
10109 fnname = XSTR (x, 0);
10111 len = strlen (fnname);
10112 if (len + SSIB_PREFIX_LEN > 255)
10113 len = 255 - SSIB_PREFIX_LEN;
10115 strcpy (name, SSIB_PREFIX);
10116 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
  name[len + SSIB_PREFIX_LEN] = 0;

  return name;
}
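/* For example, a function main gets the SSIB name __SSIB_main; longer
   names are truncated so that prefix plus name fit in 255
   characters.  */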
10122 /* Set up the dynamic subprogram information block (DSIB) and update the
10123 frame pointer register ($15) for subroutines which have a frame. If the
10124 subroutine doesn't have a frame, simply increment $15. */
static void
unicosmk_gen_dsib (unsigned long *imaskP)
{
  if (alpha_procedure_type == PT_STACK)
    {
      const char *ssib_name;
      rtx mem;

      /* Allocate 64 bytes for the DSIB.  */

      FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
				  GEN_INT (-64))));
10138 emit_insn (gen_blockage ());
10140 /* Save the return address. */
10142 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10143 set_mem_alias_set (mem, alpha_sr_alias_set);
10144 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10145 (*imaskP) &= ~(1UL << REG_RA);
10147 /* Save the old frame pointer. */
10149 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10150 set_mem_alias_set (mem, alpha_sr_alias_set);
10151 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10152 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10154 emit_insn (gen_blockage ());
10156 /* Store the SSIB pointer. */
10158 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10159 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10160 set_mem_alias_set (mem, alpha_sr_alias_set);
10162 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10163 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10164 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10166 /* Save the CIW index. */
10168 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10169 set_mem_alias_set (mem, alpha_sr_alias_set);
10170 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10172 emit_insn (gen_blockage ());
10174 /* Set the new frame pointer. */
10176 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
				  stack_pointer_rtx, GEN_INT (64))));
    }
  else
    {
      /* Increment the frame pointer register to indicate that we do not
	 have a frame.  */
      FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
				  hard_frame_pointer_rtx, const1_rtx)));
    }
}
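/* A sketch of the DSIB layout established above, inferred from the
   stores in unicosmk_gen_dsib (offsets are from the stack pointer
   after the 64-byte adjustment):

	56	return address ($26)
	48	previous frame pointer ($15)
	32	address of the SSIB
	24	CIW index ($25)

   The new frame pointer then points just past the DSIB.  */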
/* Output the static subroutine information block for the current
   function.  */

static void
unicosmk_output_ssib (FILE *file, const char *fnname)
{
  unsigned int len;
  int i;
  rtx x;
  struct machine_function *machine = cfun->machine;
10203 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10204 unicosmk_ssib_name ());
10206 /* Some required stuff and the function name length. */
10208 len = strlen (fnname);
10209 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
  /* Saved registers.
     ??? We don't do that yet.  */

  fputs ("\t.quad\t0\n", file);
10216 /* Function address. */
10218 fputs ("\t.quad\t", file);
  assemble_name (file, fnname);
  putc ('\n', file);

  fputs ("\t.quad\t0\n", file);
10223 fputs ("\t.quad\t0\n", file);
  /* Function name.
     ??? We do it the same way Cray CC does it but this could be
     simplified.  */
  for (i = 0; i < len; i++)
    fprintf (file, "\t.byte\t%d\n", (int) fnname[i]);
  if ((len % 8) == 0)
    fputs ("\t.quad\t0\n", file);
  else
    fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8)) * 8);
10236 /* All call information words used in the function. */
  for (x = machine->first_ciw; x; x = XEXP (x, 1))
    {
      rtx ciw = XEXP (x, 0);

#if HOST_BITS_PER_WIDE_INT == 32
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
	       CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
#else
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
#endif
    }
}
10250 /* Add a call information word (CIW) to the list of the current function's
10251 CIWs and return its index.
10253 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
rtx
unicosmk_add_call_info_word (rtx x)
{
  rtx node;
10259 struct machine_function *machine = cfun->machine;
10261 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
  if (machine->first_ciw == NULL_RTX)
    machine->first_ciw = node;
  else
    XEXP (machine->last_ciw, 1) = node;
10267 machine->last_ciw = node;
10268 ++machine->ciw_count;
10270 return GEN_INT (machine->ciw_count
		  + strlen (current_function_name ())/8 + 5);
}
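/* A note on the index computed above: in the SSIB, five fixed
   quadwords plus the padded function name (strlen/8 + 1 quadwords)
   precede the first CIW, so with the one-based ciw_count the
   expression yields the CIW's quadword offset within the block.  */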
10274 /* The Cray assembler doesn't accept extern declarations for symbols which
10275 are defined in the same file. We have to keep track of all global
10276 symbols which are referenced and/or defined in a source file and output
10277 extern declarations for those which are referenced but not defined at
10278 the end of file. */
/* List of identifiers for which an extern declaration might have to be
   emitted.  */
10282 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
struct unicosmk_extern_list
{
  struct unicosmk_extern_list *next;
  const char *name;
};
10290 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10292 /* Output extern declarations which are required for every asm file. */
static void
unicosmk_output_default_externs (FILE *file)
{
10297 static const char *const externs[] =
    { "__T3E_MISMATCH" };
  int i;
  int n;
10303 n = ARRAY_SIZE (externs);
10305 for (i = 0; i < n; i++)
    fprintf (file, "\t.extern\t%s\n", externs[i]);
}
/* Output extern declarations for global symbols which have been
   referenced but not defined.  */
static void
unicosmk_output_externs (FILE *file)
{
10315 struct unicosmk_extern_list *p;
  const char *real_name;
  int len;
  tree name_tree;

  len = strlen (user_label_prefix);
  for (p = unicosmk_extern_head; p != 0; p = p->next)
    {
10323 /* We have to strip the encoding and possibly remove user_label_prefix
10324 from the identifier in order to handle -fleading-underscore and
10325 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10326 real_name = default_strip_name_encoding (p->name);
      if (len && p->name[0] == '*'
	  && !memcmp (real_name, user_label_prefix, len))
	real_name += len;

      name_tree = get_identifier (real_name);
      if (! TREE_ASM_WRITTEN (name_tree))
	{
	  TREE_ASM_WRITTEN (name_tree) = 1;
	  fputs ("\t.extern\t", file);
	  assemble_name (file, p->name);
	  putc ('\n', file);
	}
    }
}
10342 /* Record an extern. */
static void
unicosmk_add_extern (const char *name)
{
10347 struct unicosmk_extern_list *p;
10349 p = (struct unicosmk_extern_list *)
10350 xmalloc (sizeof (struct unicosmk_extern_list));
  p->next = unicosmk_extern_head;
  p->name = name;
  unicosmk_extern_head = p;
}
10356 /* The Cray assembler generates incorrect code if identifiers which
10357 conflict with register names are used as instruction operands. We have
10358 to replace such identifiers with DEX expressions. */
/* Structure to collect identifiers which have been replaced by DEX
   expressions.  */
/* FIXME: needs to use GC, so it can be saved and restored for PCH.  */
struct unicosmk_dex {
  struct unicosmk_dex *next;
  const char *name;
};
10369 /* List of identifiers which have been replaced by DEX expressions. The DEX
10370 number is determined by the position in the list. */
10372 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10374 /* The number of elements in the DEX list. */
10376 static int unicosmk_dex_count = 0;
10378 /* Check if NAME must be replaced by a DEX expression. */
static int
unicosmk_special_name (const char *name)
{
  if (name[0] == '*')
    name++;

  if (name[0] == '$')
    return 1;

  if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
    return 0;

  switch (name[1])
    {
    case '1': case '2':
      return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
    case '3':
      return (name[2] == '\0'
	      || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
    default:
      return (ISDIGIT (name[1]) && name[2] == '\0');
    }
}
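/* In other words, the names treated as special are exactly those that
   CAM would parse as register operands: r0-r31 and f0-f31 (in either
   case), plus anything beginning with '$'.  */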
/* Return the DEX number if X must be replaced by a DEX expression and 0
   otherwise.  */
static int
unicosmk_need_dex (rtx x)
{
  struct unicosmk_dex *dex;
  const char *name;
  int i;

  if (GET_CODE (x) != SYMBOL_REF)
    return 0;

  name = XSTR (x, 0);
  if (! unicosmk_special_name (name))
    return 0;

  i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      if (! strcmp (name, dex->name))
	return i;
      --i;
    }

  dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
  dex->name = name;
  dex->next = unicosmk_dex_list;
  unicosmk_dex_list = dex;
10436 ++unicosmk_dex_count;
  return unicosmk_dex_count;
}
10440 /* Output the DEX definitions for this file. */
static void
unicosmk_output_dex (FILE *file)
{
  struct unicosmk_dex *dex;
  int i;
  if (unicosmk_dex_list == NULL)
    return;
10451 fprintf (file, "\t.dexstart\n");
10453 i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      fprintf (file, "\tDEX (%d) = ", i);
      assemble_name (file, dex->name);
      putc ('\n', file);
      --i;
    }
  fprintf (file, "\t.dexend\n");
}
/* Output text to appear at the beginning of an assembler file.  */
static void
unicosmk_file_start (void)
{
  int i;
10472 fputs ("\t.ident\t", asm_out_file);
10473 unicosmk_output_module_name (asm_out_file);
10474 fputs ("\n\n", asm_out_file);
10476 /* The Unicos/Mk assembler uses different register names. Instead of trying
10477 to support them, we simply use micro definitions. */
10479 /* CAM has different register names: rN for the integer register N and fN
10480 for the floating-point register N. Instead of trying to use these in
10481 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10484 for (i = 0; i < 32; ++i)
10485 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10487 for (i = 0; i < 32; ++i)
10488 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10490 putc ('\n', asm_out_file);
  /* The .align directive fills unused space with zeroes, which does not
     work in code sections.  We define the macro 'gcc@code@align' which
     uses nops instead.  Note that it assumes that code sections always
     have the biggest possible alignment since . refers to the current
     offset from the beginning of the section.  */
10498 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10499 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10500 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10501 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10502 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10503 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10504 fputs ("\t.endr\n", asm_out_file);
10505 fputs ("\t.endif\n", asm_out_file);
10506 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
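  /* For illustration: "gcc@code@align 3" measures the current offset
     modulo 8 and, if it is nonzero, emits enough "bis r31,r31,r31"
     no-ops (one per 4-byte instruction slot) to reach the next
     8-byte boundary.  */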
10508 /* Output extern declarations which should always be visible. */
10509 unicosmk_output_default_externs (asm_out_file);
10511 /* Open a dummy section. We always need to be inside a section for the
10512 section-switching code to work correctly.
10513 ??? This should be a module id or something like that. I still have to
10514 figure out what the rules for those are. */
  fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
}
10518 /* Output text to appear at the end of an assembler file. This includes all
10519 pending extern declarations and DEX expressions. */
static void
unicosmk_file_end (void)
{
10524 fputs ("\t.endp\n\n", asm_out_file);
10526 /* Output all pending externs. */
10528 unicosmk_output_externs (asm_out_file);
  /* Output DEX definitions used for functions whose names conflict
     with register names.  */
10533 unicosmk_output_dex (asm_out_file);
10535 fputs ("\t.end\t", asm_out_file);
10536 unicosmk_output_module_name (asm_out_file);
  putc ('\n', asm_out_file);
}
#else

static void
unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED) {}

static void
unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED) {}

static void
unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
		      const char * fnname ATTRIBUTE_UNUSED) {}

rtx
unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED) { return NULL_RTX; }

static int
unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED) { return 0; }
10567 #endif /* TARGET_ABI_UNICOSMK */
static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_UNICOSMK)
    {
      /* Prevent gcc from generating calls to __divsi3.  */
10575 set_optab_libfunc (sdiv_optab, SImode, 0);
10576 set_optab_libfunc (udiv_optab, SImode, 0);
10578 /* Use the functions provided by the system library
10579 for DImode integer division. */
10580 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
      set_optab_libfunc (udiv_optab, DImode, "$uldiv");
    }
  else if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
	 remainder.  */
10587 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10588 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10589 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10590 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10591 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10592 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10593 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
    }
}
10599 /* Initialize the GCC target structure. */
10600 #if TARGET_ABI_OPEN_VMS
10601 # undef TARGET_ATTRIBUTE_TABLE
10602 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10603 # undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
#endif
10607 #undef TARGET_IN_SMALL_DATA_P
10608 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10610 #if TARGET_ABI_UNICOSMK
10611 # undef TARGET_INSERT_ATTRIBUTES
10612 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10613 # undef TARGET_SECTION_TYPE_FLAGS
10614 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10615 # undef TARGET_ASM_UNIQUE_SECTION
10616 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
# undef TARGET_ASM_FUNCTION_RODATA_SECTION
# define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10619 # undef TARGET_ASM_GLOBALIZE_LABEL
10620 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10621 # undef TARGET_MUST_PASS_IN_STACK
# define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
#endif
10625 #undef TARGET_ASM_ALIGNED_HI_OP
10626 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10627 #undef TARGET_ASM_ALIGNED_DI_OP
10628 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10630 /* Default unaligned ops are provided for ELF systems. To get unaligned
10631 data for non-ELF systems, we have to turn off auto alignment. */
10632 #ifndef OBJECT_FORMAT_ELF
10633 #undef TARGET_ASM_UNALIGNED_HI_OP
10634 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10635 #undef TARGET_ASM_UNALIGNED_SI_OP
10636 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10637 #undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif
10641 #ifdef OBJECT_FORMAT_ELF
10642 #undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
#endif
10646 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10647 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10649 #undef TARGET_INIT_LIBFUNCS
10650 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10652 #if TARGET_ABI_UNICOSMK
10653 #undef TARGET_ASM_FILE_START
10654 #define TARGET_ASM_FILE_START unicosmk_file_start
10655 #undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END unicosmk_file_end
#else
10658 #undef TARGET_ASM_FILE_START
10659 #define TARGET_ASM_FILE_START alpha_file_start
10660 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
#endif
10664 #undef TARGET_SCHED_ADJUST_COST
10665 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10666 #undef TARGET_SCHED_ISSUE_RATE
10667 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10668 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10669 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10670 alpha_multipass_dfa_lookahead
10672 #undef TARGET_HAVE_TLS
10673 #define TARGET_HAVE_TLS HAVE_AS_TLS
10675 #undef TARGET_INIT_BUILTINS
10676 #define TARGET_INIT_BUILTINS alpha_init_builtins
10677 #undef TARGET_EXPAND_BUILTIN
10678 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10679 #undef TARGET_FOLD_BUILTIN
10680 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10682 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10683 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10684 #undef TARGET_CANNOT_COPY_INSN_P
10685 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10686 #undef TARGET_CANNOT_FORCE_CONST_MEM
10687 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10690 #undef TARGET_ASM_OUTPUT_MI_THUNK
10691 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10692 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10693 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
10694 #undef TARGET_STDARG_OPTIMIZE_HOOK
10695 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10698 #undef TARGET_RTX_COSTS
10699 #define TARGET_RTX_COSTS alpha_rtx_costs
10700 #undef TARGET_ADDRESS_COST
10701 #define TARGET_ADDRESS_COST hook_int_rtx_0
10703 #undef TARGET_MACHINE_DEPENDENT_REORG
10704 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10706 #undef TARGET_PROMOTE_FUNCTION_ARGS
10707 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
10708 #undef TARGET_PROMOTE_FUNCTION_RETURN
10709 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
10710 #undef TARGET_PROMOTE_PROTOTYPES
10711 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
10712 #undef TARGET_RETURN_IN_MEMORY
10713 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10714 #undef TARGET_PASS_BY_REFERENCE
10715 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10716 #undef TARGET_SETUP_INCOMING_VARARGS
10717 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10718 #undef TARGET_STRICT_ARGUMENT_NAMING
10719 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10720 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10721 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10722 #undef TARGET_SPLIT_COMPLEX_ARG
10723 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10724 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10725 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10726 #undef TARGET_ARG_PARTIAL_BYTES
10727 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10729 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10730 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10731 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10732 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10734 #undef TARGET_BUILD_BUILTIN_VA_LIST
10735 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10737 /* The Alpha architecture does not require sequential consistency. See
10738 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10739 for an example of how it can be violated in practice. */
10740 #undef TARGET_RELAXED_ORDERING
10741 #define TARGET_RELAXED_ORDERING true
10743 #undef TARGET_DEFAULT_TARGET_FLAGS
10744 #define TARGET_DEFAULT_TARGET_FLAGS \
10745 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10746 #undef TARGET_HANDLE_OPTION
10747 #define TARGET_HANDLE_OPTION alpha_handle_option
10749 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10750 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
#define TARGET_MANGLE_FUNDAMENTAL_TYPE alpha_mangle_fundamental_type
#endif
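/* All of the hooks defined above are collected by TARGET_INITIALIZER
   into the single global target vector below; any hook left undefined
   falls back to the default in target-def.h.  */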
10754 struct gcc_target targetm = TARGET_INITIALIZER;
10757 #include "gt-alpha.h"