/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "integrate.h"
#include "target-def.h"
#include "langhooks.h"
#include <splay-tree.h>
#include "cfglayout.h"
#include "tree-gimple.h"
#include "tree-flow.h"
#include "tree-stdarg.h"
/* Specify which cpu to schedule for.  */
enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */
enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =

/* Specify how accurate floating-point traps need to be.  */
enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */
enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */
enum alpha_fp_trap_mode alpha_fptm;
/* Save information from a "cmpxx" operation until the branch or scc is
   emitted.  */
struct alpha_compare alpha_compare;
/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */
static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */
int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */
static int alpha_function_needs_gp;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) int alpha_sr_alias_set;

/* The assembler name of the current function.  */
static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;

static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
    COSTS_N_INSNS (6),		/* fp_add */
    COSTS_N_INSNS (6),		/* fp_mult */
    COSTS_N_INSNS (34),		/* fp_div_sf */
    COSTS_N_INSNS (63),		/* fp_div_df */
    COSTS_N_INSNS (23),		/* int_mult_si */
    COSTS_N_INSNS (23),		/* int_mult_di */
    COSTS_N_INSNS (2),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (97),		/* int_div */

    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (15),		/* fp_div_sf */
    COSTS_N_INSNS (22),		/* fp_div_df */
    COSTS_N_INSNS (8),		/* int_mult_si */
    COSTS_N_INSNS (12),		/* int_mult_di */
    COSTS_N_INSNS (1) + 1,	/* int_shift */
    COSTS_N_INSNS (1),		/* int_cmov */
    COSTS_N_INSNS (83),		/* int_div */

    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (12),		/* fp_div_sf */
    COSTS_N_INSNS (15),		/* fp_div_df */
    COSTS_N_INSNS (7),		/* int_mult_si */
    COSTS_N_INSNS (7),		/* int_mult_di */
    COSTS_N_INSNS (1),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (86),		/* int_div */
/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one insn otherwise.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
  COSTS_N_INSNS (1),		/* fp_add */
  COSTS_N_INSNS (1),		/* fp_mult */
  COSTS_N_INSNS (1),		/* fp_div_sf */
  COSTS_N_INSNS (1) + 1,	/* fp_div_df */
  COSTS_N_INSNS (1) + 1,	/* int_mult_si */
  COSTS_N_INSNS (1) + 2,	/* int_mult_di */
  COSTS_N_INSNS (1),		/* int_shift */
  COSTS_N_INSNS (1),		/* int_cmov */
  COSTS_N_INSNS (6),		/* int_div */
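/* A worked example (illustrative note, not from the original source):
   since COSTS_N_INSNS (1) is 4, the int_mult_di entry above costs 6.
   When optimizing for size, a single shift or add (cost 4) therefore
   still beats a multiply, but a two-insn replacement (cost 8) does not,
   which is the "only if there's just one insn otherwise" behavior the
   comment above describes.  */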
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
#define NUM_ARGS current_function_args_info.num_args
#define NUM_ARGS current_function_args_info
/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *, tree);

static void unicosmk_output_deferred_case_vectors (FILE *);
static void unicosmk_gen_dsib (unsigned long *);
static void unicosmk_output_ssib (FILE *, const char *);
static int unicosmk_need_dex (rtx);
/* Implement TARGET_HANDLE_OPTION.  */

alpha_handle_option (size_t code, const char *arg, int value)
      target_flags |= MASK_SOFT_FP;

    case OPT_mieee_with_inexact:
      target_flags |= MASK_IEEE_CONFORMANT;

      if (value != 16 && value != 32 && value != 64)
        error ("bad value %qs for -mtls-size switch", arg);
/* Parse target option strings.  */

override_options (void)
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    { "ev4",    PROCESSOR_EV4, 0 },
    { "ev45",   PROCESSOR_EV4, 0 },
    { "21064",  PROCESSOR_EV4, 0 },
    { "ev5",    PROCESSOR_EV5, 0 },
    { "21164",  PROCESSOR_EV5, 0 },
    { "ev56",   PROCESSOR_EV5, MASK_BWX },
    { "21164a", PROCESSOR_EV5, MASK_BWX },
    { "pca56",  PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "ev6",    PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "21264",  PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "ev67",   PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
    { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
  /* Unicos/Mk doesn't have shared libraries.  */
  if (TARGET_ABI_UNICOSMK && flag_pic)
      warning (0, "-f%s ignored for Unicos/Mk (not supported)",
               (flag_pic > 1) ? "PIC" : "pic");
  /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
     floating-point instructions.  Make that the default for this target.  */
  if (TARGET_ABI_UNICOSMK)
    alpha_fprm = ALPHA_FPRM_DYN;
    alpha_fprm = ALPHA_FPRM_NORM;
  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  /* We cannot use su and sui qualifiers for conversion instructions on
     Unicos/Mk.  I'm not sure if this is due to assembler or hardware
     limitations.  Right now, we issue a warning if -mieee is specified
     and then ignore it; eventually, we should either get it right or
     disable the option altogether.  */

      if (TARGET_ABI_UNICOSMK)
        warning (0, "-mieee not supported on Unicos/Mk");
          alpha_tp = ALPHA_TP_INSN;
          alpha_fptm = ALPHA_FPTM_SU;

  if (TARGET_IEEE_WITH_INEXACT)
      if (TARGET_ABI_UNICOSMK)
        warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
          alpha_tp = ALPHA_TP_INSN;
          alpha_fptm = ALPHA_FPTM_SUI;

      if (! strcmp (alpha_tp_string, "p"))
        alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
        alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
        alpha_tp = ALPHA_TP_INSN;
        error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
  if (alpha_fprm_string)
      if (! strcmp (alpha_fprm_string, "n"))
        alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
        alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
        alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
        alpha_fprm = ALPHA_FPRM_DYN;
        error ("bad value %qs for -mfp-rounding-mode switch",

  if (alpha_fptm_string)
      if (strcmp (alpha_fptm_string, "n") == 0)
        alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
        alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
        alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
        alpha_fptm = ALPHA_FPTM_SUI;
        error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
  if (alpha_cpu_string)
      for (i = 0; cpu_table [i].name; i++)
        if (! strcmp (alpha_cpu_string, cpu_table [i].name))
            alpha_tune = alpha_cpu = cpu_table [i].processor;
            target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
            target_flags |= cpu_table [i].flags;

      if (! cpu_table [i].name)
        error ("bad value %qs for -mcpu switch", alpha_cpu_string);

  if (alpha_tune_string)
      for (i = 0; cpu_table [i].name; i++)
        if (! strcmp (alpha_tune_string, cpu_table [i].name))
            alpha_tune = cpu_table [i].processor;

      if (! cpu_table [i].name)
        error ("bad value %qs for -mcpu switch", alpha_tune_string);
  /* Do some sanity checks on the above options.  */

  if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
      warning (0, "trap mode not supported on Unicos/Mk");
      alpha_fptm = ALPHA_FPTM_N;

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
      warning (0, "fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;

  if (alpha_cpu == PROCESSOR_EV6)
      /* Except for EV6 pass 1 (not released), we always have precise
         arithmetic traps.  Which means we can do software completion
         without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;

  if (TARGET_FLOAT_VAX)
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
          warning (0, "rounding mode not supported for VAX floats");
          alpha_fprm = ALPHA_FPRM_NORM;
      if (alpha_fptm == ALPHA_FPTM_SUI)
          warning (0, "trap mode not supported for VAX floats");
          alpha_fptm = ALPHA_FPTM_SU;
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
        warning (0, "128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char) alpha_mlat_string[0])
        && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
             && ISDIGIT ((unsigned char) alpha_mlat_string[1])
             && alpha_mlat_string[2] == '\0')
        static int const cache_latency[][4] =
          { 3, 30, -1 },	/* ev4 -- Bcache is a guess */
          { 2, 12, 38 },	/* ev5 -- Bcache from PC164 LMbench numbers */
          { 3, 12, 30 },	/* ev6 -- Bcache from DS20 LMbench.  */

        lat = alpha_mlat_string[1] - '0';
        if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
            warning (0, "L%d cache latency unknown for %s",
                     lat, alpha_cpu_name[alpha_tune]);
          lat = cache_latency[alpha_tune][lat-1];
    else if (! strcmp (alpha_mlat_string, "main"))
        /* Most current memories have about 370ns latency.  This is
           a reasonable guess for a fast cpu.  */
        warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);

    alpha_memory_latency = lat;
  /* Default the definition of "small data" to 8 bytes.  */

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize and also if
     we are writing ECOFF symbols to work around a bug in DEC's assembler.  */
  if (optimize > 0 && write_symbols != SDB_DEBUG)
      if (align_loops <= 0)
      if (align_jumps <= 0)
      if (align_functions <= 0)
        align_functions = 16;

  /* Acquire a unique set number for our register saves and restores.  */
  alpha_sr_alias_set = new_alias_set ();

  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

zap_mask (HOST_WIDE_INT value)
  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
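/* Illustrative examples of zap_mask (not from the original source):
   0x00000000ffffffff and 0xff00ff00ff00ff00 qualify, since every byte
   is 0x00 or 0xff, while 0x00000000ffff00f0 does not, because its low
   byte 0xf0 is neither.  This predicate is what lets the ZAP/ZAPNOT
   insns, which clear whole bytes under an 8-bit byte mask, implement
   such AND operations directly.  */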
/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

tls_symbolic_operand_1 (rtx op, int size, int unspec)
  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)

  switch (SYMBOL_REF_TLS_MODEL (op))
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

resolve_reload_operand (rtx op)
  if (reload_in_progress)
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG
          && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
          op = reg_equiv_memory_loc[REGNO (tmp)];
/* Implements CONST_OK_FOR_LETTER_P.  Return true if the value matches
   the range defined for C in [I-P].  */

alpha_const_ok_for_letter_p (HOST_WIDE_INT value, int c)
      /* An unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) value < 0x100;

      /* The constant zero.  */

      /* A signed 16 bit constant.  */
      return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;

      /* A shifted signed 16 bit constant appropriate for LDAH.  */
      return ((value & 0xffff) == 0
              && ((value) >> 31 == -1 || value >> 31 == 0));
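      /* A worked example (illustrative, not in the original source):
         0x7fff0000 satisfies this constraint -- its low 16 bits are zero
         and 0x7fff0000 >> 31 == 0 -- so "ldah $r,0x7fff($31)" builds it
         in one insn, LDAH shifting its signed 16-bit immediate left by
         16.  0x80000000 fails: value >> 31 is 1, neither 0 nor -1.  */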
      /* A constant that can be AND'ed with using a ZAP insn.  */
      return zap_mask (value);

      /* A complemented unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) (~ value) < 0x100;

      /* A negated unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) (- value) < 0x100;

      /* The constant 1, 2 or 3.  */
      return value == 1 || value == 2 || value == 3;
/* Implements CONST_DOUBLE_OK_FOR_LETTER_P.  Return true if VALUE
   matches for C in [GH].  */

alpha_const_double_ok_for_letter_p (rtx value, int c)
      /* The floating point zero constant.  */
      return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
              && value == CONST0_RTX (GET_MODE (value)));

      /* A valid operand of a ZAP insn.  */
      return (GET_MODE (value) == VOIDmode
              && zap_mask (CONST_DOUBLE_LOW (value))
              && zap_mask (CONST_DOUBLE_HIGH (value)));
/* Implements EXTRA_CONSTRAINT.  Return true if VALUE
alpha_extra_constraint (rtx value, int c)
      return normal_memory_operand (value, VOIDmode);
      return direct_call_operand (value, Pmode);
      return (GET_CODE (value) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
      return GET_CODE (value) == HIGH;
      return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
      return (GET_CODE (value) == CONST_VECTOR
              && value == CONST0_RTX (GET_MODE (value)));
/* The set of scalar modes we support differs from the default
   check-what-c-supports version in that sometimes TFmode is available
   even when long double indicates only DFmode.  On unicosmk, we have
   the situation that HImode doesn't map to any C type, but of course
   we still support that.  */

alpha_scalar_mode_supported_p (enum machine_mode mode)
    case TImode: /* via optabs.c */

      return TARGET_HAS_XFLOATING_LIBS;
/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

alpha_vector_mode_supported_p (enum machine_mode mode)
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
/* Return 1 if this function can directly return via $26.  */

  return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
          && alpha_sa_size () == 0
          && get_frame_size () == 0
          && current_function_outgoing_args_size == 0
          && current_function_pretend_args_size == 0);
/* Return the ADDR_VEC associated with a tablejump insn.  */

alpha_tablejump_addr_vec (rtx insn)
  tmp = JUMP_LABEL (insn);
  tmp = NEXT_INSN (tmp);
  if (GET_CODE (tmp) == JUMP_INSN
      && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
    return PATTERN (tmp);
/* Return the label of the predicted edge, or CONST0_RTX if we don't know.  */

alpha_tablejump_best_label (rtx insn)
  rtx jump_table = alpha_tablejump_addr_vec (insn);
  rtx best_label = NULL_RTX;

  /* ??? Once the CFG doesn't keep getting completely rebuilt, look
     there for edge frequency counts from profile data.  */

      int n_labels = XVECLEN (jump_table, 1);

      for (i = 0; i < n_labels; i++)
          for (j = i + 1; j < n_labels; j++)
            if (XEXP (XVECEXP (jump_table, 1, i), 0)
                == XEXP (XVECEXP (jump_table, 1, j), 0))

          if (count > best_count)
            best_count = count, best_label = XVECEXP (jump_table, 1, i);

  return best_label ? best_label : const0_rtx;
/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;
/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

decl_has_samegp (tree decl)
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
/* Return true if EXP should be placed in the small data section.  */

alpha_in_small_data_p (tree exp)
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".sdata") == 0
          || strcmp (section, ".sbss") == 0)
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
         in sdata because it might be too big when completed.  */
      if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
#if TARGET_ABI_OPEN_VMS
alpha_linkage_symbol_p (const char *symname)
  int symlen = strlen (symname);

    return strcmp (&symname [symlen - 4], "..lk") == 0;

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF \
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST \
       && GET_CODE (XEXP (X, 0)) == PLUS \
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */
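/* Some illustrative address shapes (not from the original source):

     (reg $16)                                  plain base register
     (plus (reg $16) (const_int 4088))          base + 16-bit offset
     (and (plus (reg $16) (const_int 5)) (const_int -8))
                                                ldq_u-style unaligned

   The AND with -8 rounds the address down to the containing aligned
   quadword, which is exactly what the ldq_u/stq_u insns do in
   hardware.  */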
alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
  /* If this is an ldq_u type address, discard the outer AND.  */
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -8)

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
          < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))

  /* Unadorned general registers are valid.  */
          ? STRICT_REG_OK_FOR_BASE_P (x)
          : NONSTRICT_REG_OK_FOR_BASE_P (x)))

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
      rtx ofs = XEXP (x, 1);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
          && (GET_MODE_SIZE (GET_MODE (x))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))

          && NONSTRICT_REG_OK_FP_BASE_P (x)
          && GET_CODE (ofs) == CONST_INT)
           ? STRICT_REG_OK_FOR_BASE_P (x)
           : NONSTRICT_REG_OK_FOR_BASE_P (x))
          && CONSTANT_ADDRESS_P (ofs))

  /* If we're managing explicit relocations, LO_SUM is valid, as
     are small data symbols.  */
  else if (TARGET_EXPLICIT_RELOCS)
      if (small_symbolic_operand (x, Pmode))

      if (GET_CODE (x) == LO_SUM)
          rtx ofs = XEXP (x, 1);

          /* Discard non-paradoxical subregs.  */
          if (GET_CODE (x) == SUBREG
              && (GET_MODE_SIZE (GET_MODE (x))
                  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))

          /* Must have a valid base register.  */
               ? STRICT_REG_OK_FOR_BASE_P (x)
               : NONSTRICT_REG_OK_FOR_BASE_P (x))))

          /* The symbol must be local.  */
          if (local_symbolic_operand (ofs, Pmode)
              || dtp32_symbolic_operand (ofs, Pmode)
              || tp32_symbolic_operand (ofs, Pmode))
/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

get_tls_get_addr (void)
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

alpha_legitimize_address (rtx x, rtx scratch,
                          enum machine_mode mode ATTRIBUTE_UNUSED)
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
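  /* A worked example (illustrative, not in the original source): for
     addend 0x9000, the low part is ((0x9000 & 0xffff) ^ 0x8000) - 0x8000
     = -0x7000 and the high part is 0x10000, so the address becomes
     "ldah $t,1($reg)" plus a -0x7000 displacement -- both pieces fit
     the signed 16-bit fields that lda/ldah provide.  */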
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
      addend = INTVAL (XEXP (x, 1));
  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
                               XEXP (XEXP (XEXP (x, 1), 0), 0),
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.  */
  if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
      rtx r0, r16, eqv, tga, tp, insn, dest, seq;

      switch (tls_symbolic_operand_type (x))
        case TLS_MODEL_NONE:

        case TLS_MODEL_GLOBAL_DYNAMIC:
            r0 = gen_rtx_REG (Pmode, 0);
            r16 = gen_rtx_REG (Pmode, 16);
            tga = get_tls_get_addr ();
            dest = gen_reg_rtx (Pmode);
            seq = GEN_INT (alpha_next_sequence_number++);

            emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
            insn = gen_call_value_osf_tlsgd (r0, tga, seq);
            insn = emit_call_insn (insn);
            CONST_OR_PURE_CALL_P (insn) = 1;
            use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

            insn = get_insns ();

            emit_libcall_block (insn, dest, r0, x);

        case TLS_MODEL_LOCAL_DYNAMIC:
            r0 = gen_rtx_REG (Pmode, 0);
            r16 = gen_rtx_REG (Pmode, 16);
            tga = get_tls_get_addr ();
            scratch = gen_reg_rtx (Pmode);
            seq = GEN_INT (alpha_next_sequence_number++);

            emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
            insn = gen_call_value_osf_tlsldm (r0, tga, seq);
            insn = emit_call_insn (insn);
            CONST_OR_PURE_CALL_P (insn) = 1;
            use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

            insn = get_insns ();

          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                UNSPEC_TLSLDM_CALL);
          emit_libcall_block (insn, scratch, r0, eqv);

          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);

          if (alpha_tls_size == 64)
              dest = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
              emit_insn (gen_adddi3 (dest, dest, scratch));

          if (alpha_tls_size == 32)
              insn = gen_rtx_HIGH (Pmode, eqv);
              insn = gen_rtx_PLUS (Pmode, scratch, insn);
              scratch = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
          return gen_rtx_LO_SUM (Pmode, scratch, eqv);

        case TLS_MODEL_INITIAL_EXEC:
          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);
          tp = gen_reg_rtx (Pmode);
          scratch = gen_reg_rtx (Pmode);
          dest = gen_reg_rtx (Pmode);

          emit_insn (gen_load_tp (tp));
          emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
          emit_insn (gen_adddi3 (dest, tp, scratch));

        case TLS_MODEL_LOCAL_EXEC:
          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);
          tp = gen_reg_rtx (Pmode);

          emit_insn (gen_load_tp (tp));
          if (alpha_tls_size == 32)
              insn = gen_rtx_HIGH (Pmode, eqv);
              insn = gen_rtx_PLUS (Pmode, tp, insn);
              tp = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
          return gen_rtx_LO_SUM (Pmode, tp, eqv);
  if (local_symbolic_operand (x, Pmode))
      if (small_symbolic_operand (x, Pmode))

      if (!no_new_pseudos)
        scratch = gen_reg_rtx (Pmode);
      emit_insn (gen_rtx_SET (VOIDmode, scratch,
                              gen_rtx_HIGH (Pmode, x)));
      return gen_rtx_LO_SUM (Pmode, scratch, x);

    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;

    x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
                             (no_new_pseudos ? scratch : NULL_RTX),
                             1, OPTAB_LIB_WIDEN);
    x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
                             (no_new_pseudos ? scratch : NULL_RTX),
                             1, OPTAB_LIB_WIDEN);

    return plus_constant (x, low);
/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

alpha_cannot_force_const_mem (rtx x)
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;

/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibling call.  */

alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)

  return small_symbolic_operand (x, Pmode) != 0;

split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)

  if (small_symbolic_operand (x, Pmode))
      x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);

split_small_symbolic_operand (rtx x)
  for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

alpha_cannot_copy_insn_p (rtx insn)
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

alpha_legitimize_reload_address (rtx x,
                                 enum machine_mode mode ATTRIBUTE_UNUSED,
                                 int opnum, int type,
                                 int ind_levels ATTRIBUTE_UNUSED)
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts the number of extra insns needed from 3 to 1.  */
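  /* A worked example (illustrative, not in the original source): for
     (plus (reg $9) (const_int 0x12345678)), low = 0x5678 and
     high = 0x12340000, so reload emits one "ldah $t,0x1234($9)" and the
     memory insn itself carries the 0x5678 displacement, rather than
     first materializing the full 32-bit constant in three insns.  */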
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
        = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)

      /* Reload the high part into a base reg; leave the low part
         in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
                        gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

    cost_data = &alpha_rtx_cost_size;
    cost_data = &alpha_rtx_cost_data[alpha_tune];

      /* If this is an 8-bit constant, return zero since it can be used
         nearly anywhere with no cost.  If it is a valid operand for an
         ADD or AND, likewise return 0 if we know it will be used in that
         context.  Otherwise, return 2 since it might be used there later.
         All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)

      if (x == CONST0_RTX (mode))
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
               || (outer_code == AND && and_operand (x, VOIDmode)))
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
        *total = COSTS_N_INSNS (2);

      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
        *total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
        *total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
        /* Estimate of cost for call_pal rduniq.  */
        /* ??? How many insns do we emit here?  More than one...  */
        *total = COSTS_N_INSNS (15);
        /* Otherwise we do a load from the GOT.  */
        *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);

      /* This is effectively an add_operand.  */

        *total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
               && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
          *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
                    + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));

        *total = cost_data->fp_mult;
      else if (mode == DImode)
        *total = cost_data->int_mult_di;
        *total = cost_data->int_mult_si;

      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && INTVAL (XEXP (x, 1)) <= 3)
          *total = COSTS_N_INSNS (1);
        *total = cost_data->int_shift;

        *total = cost_data->fp_add;
        *total = cost_data->int_cmov;

        *total = cost_data->int_div;
      else if (mode == SFmode)
        *total = cost_data->fp_div_sf;
        *total = cost_data->fp_div_df;

      *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);

      *total = COSTS_N_INSNS (1);

      *total = COSTS_N_INSNS (1) + cost_data->int_cmov;

    case UNSIGNED_FLOAT:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;

      if (GET_CODE (XEXP (x, 0)) == MEM)
        *total = cost_data->fp_add;
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  SCRATCH is a free register for use in reloading out
   of range stack slots.  */

get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
  HOST_WIDE_INT disp, offset;

  gcc_assert (GET_CODE (ref) == MEM);

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
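  /* For example (illustrative, not in the original source): a QImode
     reference at base+5 whose alignment we cannot otherwise prove gives
     a byte offset of 1 within its word; the aligned SImode access then
     covers base+4..base+7, and *PBITNUM becomes 8, the little-endian
     bit position of the wanted byte within that word.  */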
  if (MEM_ALIGN (ref) >= 32)

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  if (WORDS_BIG_ENDIAN)
    offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
  *pbitnum = GEN_INT (offset);
/* Similar, but just get the address.  Handle the two reload cases.
   Add EXTRA_OFFSET to the address we return.  */

get_unaligned_address (rtx ref, int extra_offset)
  HOST_WIDE_INT offset = 0;

  gcc_assert (GET_CODE (ref) == MEM);

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset + extra_offset);
/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of CLASS, and that some
   symbolic constants cannot be dropped to memory.  */

alpha_preferred_reload_class (rtx x, enum reg_class class)
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))

  /* These sorts of constants we can easily drop to memory.  */
  if (GET_CODE (x) == CONST_INT
      || GET_CODE (x) == CONST_DOUBLE
      || GET_CODE (x) == CONST_VECTOR)
      if (class == FLOAT_REGS)
      if (class == ALL_REGS)
        return GENERAL_REGS;

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  return (class == ALL_REGS ? GENERAL_REGS : class);
/* Loading and storing HImode or QImode values to and from memory
   usually requires a scratch register.  The exceptions are loading
   QImode and HImode from an aligned address to a general register
   unless byte instructions are permitted.

   We also cannot load an unaligned address or a paradoxical SUBREG
   into an FP register.

   We also cannot do integral arithmetic into FP regs, as might result
   from register elimination into a DImode fp register.  */

alpha_secondary_reload_class (enum reg_class class, enum machine_mode mode,
  if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
      if (GET_CODE (x) == MEM
          || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
          || (GET_CODE (x) == SUBREG
              && (GET_CODE (SUBREG_REG (x)) == MEM
                  || (GET_CODE (SUBREG_REG (x)) == REG
                      && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
          if (!in || !aligned_memory_operand (x, mode))
            return GENERAL_REGS;

  if (class == FLOAT_REGS)
      if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
        return GENERAL_REGS;

      if (GET_CODE (x) == SUBREG
          && (GET_MODE_SIZE (GET_MODE (x))
              > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
        return GENERAL_REGS;

      if (in && INTEGRAL_MODE_P (mode)
          && ! (memory_operand (x, mode) || x == const0_rtx))
        return GENERAL_REGS;
/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

alpha_set_memflags_1 (rtx *xp, void *data)
  rtx x = *xp, orig = (rtx) data;

  if (GET_CODE (x) != MEM)

  MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
  MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  MEM_READONLY_P (x) = MEM_READONLY_P (orig);

  /* Sadly, we cannot use alias sets because the extra aliasing
     produced by the AND interferes.  Given that two-byte quantities
     are the only thing we would be able to differentiate anyway,
     there does not seem to be any point in convoluting the early
     out of the alias check.  */

/* Given INSN, which is an INSN list or the PATTERN of a single insn
   generated to perform a memory operation, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

alpha_set_memflags (rtx insn, rtx ref)
  if (GET_CODE (ref) != MEM)

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_IN_STRUCT_P (ref)
      && !MEM_SCALAR_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))

    base_ptr = &PATTERN (insn);
  for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,

/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */

alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
                        HOST_WIDE_INT c, int n, bool no_output)
  /* Use a pseudo if highly optimizing and still generating RTL.  */
    = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */
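  /* A worked example (illustrative, not in the original source): for
     c = 0x12345678 the code below computes low = 0x5678 and high = 0x1234,
     and emits "ldah $t,0x1234($31)" followed by "lda $r,0x5678($t)",
     i.e. (0x1234 << 16) + 0x5678 = 0x12345678 in two insns.  */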
  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
         positive, we must adjust it to do two ldah insns.  */
      if ((high & 0x8000) != 0 && c >= 0)
          high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
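      /* Illustrative (not in the original source): c = 0x7fff8000 gives
         low = -0x8000 and tmp1 = 0x80000000, whose 0x8000 high part would
         sign-extend negative even though c is positive.  The adjustment
         yields high = extra = 0x4000, so the constant is built as
         (0x4000 << 16) + (0x4000 << 16) + (-0x8000) = 0x7fff8000, i.e.
         two ldah insns and one lda.  */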
      if (c == low || (low == 0 && extra == 0))
          /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
             but that meant that we can't handle INT_MIN on 32-bit machines
             (like NT/Alpha), because we recurse indefinitely through
             emit_move_insn to gen_movdi.  So instead, since we know exactly
             what we want, create it explicitly.  */

            target = gen_reg_rtx (mode);
          emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
      else if (n >= 2 + (extra != 0))
            emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
            temp = copy_to_suggested_reg (GEN_INT (high << 16),

          /* As of 2002-02-23, addsi3 is only available when not optimizing.
             This means that if we go through expand_binop, we'll try to
             generate extensions, etc, which will require new pseudos, which
             will fail during some split phases.  The SImode add patterns
             still exist, but are not named.  So build the insns by hand.  */

              subtarget = gen_reg_rtx (mode);
              insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
              insn = gen_rtx_SET (VOIDmode, subtarget, insn);

            target = gen_reg_rtx (mode);
            insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
            insn = gen_rtx_SET (VOIDmode, target, insn);
  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && no_new_pseudos))

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
      /* First, see if minus some low bits, we've an easy load of
         high bits.  */
      new = ((c & 0xffff) ^ 0x8000) - 0x8000;
          temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
              return expand_binop (mode, add_optab, temp, GEN_INT (new),
                                   target, 0, OPTAB_WIDEN);

      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
          return expand_unop (mode, one_cmpl_optab, temp, target, 0);

      /* Next try to form a constant and do a left shift.  We can do this
         if some low-order bits are zero; the exact_log2 call below tells
         us that information.  The bits we are shifting out could be any
         value, but here we'll just try the 0- and sign-extended forms of
         the constant.  To try to increase the chance of having the same
         constant in more than one insn, start at the highest number of
         bits to shift, but try all possibilities in case a ZAPNOT will

      bits = exact_log2 (c & -c);
      for (; bits > 0; bits--)
          temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
              new = (unsigned HOST_WIDE_INT) c >> bits;
              temp = alpha_emit_set_const (subtarget, mode, new,
              return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
                                   target, 0, OPTAB_WIDEN);

      /* Now try high-order zero bits.  Here we try the shifted-in bits as
         all zero and all ones.  Be careful to avoid shifting outside the
         mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
         confuse the recursive call and set all of the high 32 bits.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
              - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
      for (; bits > 0; bits--)
          temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
              new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
              temp = alpha_emit_set_const (subtarget, mode, new,
              return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
                                   target, 1, OPTAB_WIDEN);

      /* Now try high-order 1 bits.  We get that with a sign-extension.
         But one bit isn't enough here.  Be careful to avoid shifting outside
         the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
              - floor_log2 (~ c) - 2);
      for (; bits > 0; bits--)
          temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
              new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
              temp = alpha_emit_set_const (subtarget, mode, new,
              return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
                                   target, 0, OPTAB_WIDEN);
#if HOST_BITS_PER_WIDE_INT == 64
  /* Finally, see if we can load a value into the target that is the same as
     the constant except that all bytes that are 0 are changed to be 0xff.
     If we can, then we can do a ZAPNOT to obtain the desired constant.  */
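  /* Byte-wise illustration (not in the original source): for
     c = 0x0000123400005678, new becomes 0xffff1234ffff5678.  The AND
     mask c | ~new then has 0xff exactly in the nonzero bytes of c and
     0x00 in its zero bytes -- a zap mask -- so "new AND (c | ~new)"
     recovers c.  */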
  for (i = 0; i < 64; i += 8)
    if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */
    new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;

      temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
          return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
                               target, 0, OPTAB_WIDEN);
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */
alpha_emit_set_const (rtx target, enum machine_mode mode,
                      HOST_WIDE_INT c, int n, bool no_output)
  enum machine_mode orig_mode = mode;
  rtx orig_target = target;

  /* If we can't make any pseudos, TARGET is an SImode hard register, and we
     can't load this constant in one insn, do this in DImode.  */
  if (no_new_pseudos && mode == SImode
      && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);

      target = no_output ? NULL : gen_lowpart (DImode, target);
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
      target = no_output ? NULL : gen_lowpart (DImode, target);

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);

          insn = get_last_insn ();
          set = single_set (insn);
          if (! CONSTANT_P (SET_SRC (set)))
            set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result == target)
    result = orig_target;
  else if (mode != orig_mode)
    result = gen_lowpart (orig_mode, result);
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */
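/* Sketch of the identity used below (illustrative note, not in the
   original source): the constant is decomposed into sign-extended
   pieces such that c == ((d4 + d3) << 32) + d2 + d1, where d1 and d3
   are signed 16-bit values (lda immediates) and d2 and d4 have zero
   low 16 bits (ldah immediates).  The code builds the high word from
   d4 and d3, shifts it left by 32, and adds in d2 and d1.  */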
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word.  */
#if HOST_BITS_PER_WIDE_INT >= 64
  gcc_assert (c2 == -(c1 < 0));
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d4);
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d2);
  d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
  d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c2 == d4);

  /* Construct the high word.  */
    emit_move_insn (target, GEN_INT (d4));
      emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place.  */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
   its value in *P0 (low word) and *P1 (high word).  */

alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
  HOST_WIDE_INT i0, i1;

  if (GET_CODE (x) == CONST_VECTOR)
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);

  if (GET_CODE (x) == CONST_INT
  else if (HOST_BITS_PER_WIDE_INT >= 64)
      i0 = CONST_DOUBLE_LOW (x);
      i0 = CONST_DOUBLE_LOW (x);
      i1 = CONST_DOUBLE_HIGH (x);
/* Implement LEGITIMATE_CONSTANT_P.  This is all constants for which we
   are willing to load the value into a register via a move pattern.
   Normally this is all symbolic constants, integral constants that
   take three or fewer instructions, and floating-point zero.  */

alpha_legitimate_constant_p (rtx x)
  enum machine_mode mode = GET_MODE (x);
  HOST_WIDE_INT i0, i1;

  switch (GET_CODE (x))
      if (x == CONST0_RTX (mode))
      if (FLOAT_MODE_P (mode))

      if (x == CONST0_RTX (mode))
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
      if (GET_MODE_SIZE (mode) != 8)

      if (TARGET_BUILD_CONSTANTS)
      alpha_extract_integer (x, &i0, &i1);
      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
        return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
/* Operand 1 is known to be a constant, and should require more than one
   instruction to load.  Emit that multi-part load.  */

alpha_split_const_mov (enum machine_mode mode, rtx *operands)
  HOST_WIDE_INT i0, i1;
  rtx temp = NULL_RTX;

  alpha_extract_integer (operands[1], &i0, &i1);

  if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
    temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);

  if (!temp && TARGET_BUILD_CONSTANTS)
    temp = alpha_emit_set_long_const (operands[0], i0, i1);

  if (!rtx_equal_p (operands[0], temp))
    emit_move_insn (operands[0], temp);
/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

alpha_expand_mov (enum machine_mode mode, rtx *operands)
  /* If the output is not a register, the input must be.  */
  if (GET_CODE (operands[0]) == MEM
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Allow legitimize_address to perform some simplifications.  */
  if (mode == Pmode && symbolic_operand (operands[1], mode))
      tmp = alpha_legitimize_address (operands[1], operands[0], mode);
      if (tmp == operands[0])

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))

  /* Split large integers.  */
  if (GET_CODE (operands[1]) == CONST_INT
      || GET_CODE (operands[1]) == CONST_DOUBLE
      || GET_CODE (operands[1]) == CONST_VECTOR)
      if (alpha_split_const_mov (mode, operands))

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  operands[1] = force_const_mem (mode, operands[1]);
  if (reload_in_progress)
      emit_move_insn (operands[0], XEXP (operands[1], 0));
      operands[1] = copy_rtx (operands[1]);
      XEXP (operands[1], 0) = operands[0];
    operands[1] = validize_mem (operands[1]);
/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
  /* If the output is not a register, the input must be.  */
  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (GET_CODE (operands[1]) == MEM
      || (GET_CODE (operands[1]) == SUBREG
          && GET_CODE (SUBREG_REG (operands[1])) == MEM)
      || (reload_in_progress && GET_CODE (operands[1]) == REG
          && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
      || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
          && GET_CODE (SUBREG_REG (operands[1])) == REG
          && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
      if (aligned_memory_operand (operands[1], mode))
          if (reload_in_progress)
              emit_insn ((mode == QImode
                          ? gen_reload_inqi_help
                          : gen_reload_inhi_help)
                         (operands[0], operands[1],
                          gen_rtx_REG (SImode, REGNO (operands[0]))));
              rtx aligned_mem, bitnum;
              rtx scratch = gen_reg_rtx (SImode);

              get_aligned_mem (operands[1], &aligned_mem, &bitnum);

              subtarget = operands[0];
              if (GET_CODE (subtarget) == REG)
                subtarget = gen_lowpart (DImode, subtarget), copyout = false;
                subtarget = gen_reg_rtx (DImode), copyout = true;

              emit_insn ((mode == QImode
                          ? gen_aligned_loadqi
                          : gen_aligned_loadhi)
                         (subtarget, aligned_mem, bitnum, scratch));

                emit_move_insn (operands[0], gen_lowpart (mode, subtarget));

          /* Don't pass these as parameters since that makes the generated
             code depend on parameter evaluation order which will cause
             bootstrap failures.  */

          rtx temp1, temp2, seq, subtarget;

          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);

          subtarget = operands[0];
          if (GET_CODE (subtarget) == REG)
            subtarget = gen_lowpart (DImode, subtarget), copyout = false;
            subtarget = gen_reg_rtx (DImode), copyout = true;

          seq = ((mode == QImode
                  ? gen_unaligned_loadqi
                  : gen_unaligned_loadhi)
                 (subtarget, get_unaligned_address (operands[1], 0),
          alpha_set_memflags (seq, operands[1]);

            emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
  if (GET_CODE (operands[0]) == MEM
      || (GET_CODE (operands[0]) == SUBREG
          && GET_CODE (SUBREG_REG (operands[0])) == MEM)
      || (reload_in_progress && GET_CODE (operands[0]) == REG
          && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
      || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
          && GET_CODE (SUBREG_REG (operands[0])) == REG
          && REGNO (SUBREG_REG (operands[0])) >= FIRST_PSEUDO_REGISTER))
      if (aligned_memory_operand (operands[0], mode))
          rtx aligned_mem, bitnum;
          rtx temp1 = gen_reg_rtx (SImode);
          rtx temp2 = gen_reg_rtx (SImode);

          get_aligned_mem (operands[0], &aligned_mem, &bitnum);

          emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,

          rtx temp1 = gen_reg_rtx (DImode);
          rtx temp2 = gen_reg_rtx (DImode);
          rtx temp3 = gen_reg_rtx (DImode);
          rtx seq = ((mode == QImode
                      ? gen_unaligned_storeqi
                      : gen_unaligned_storehi)
                     (get_unaligned_address (operands[0], 0),
                      operands[1], temp1, temp2, temp3));

          alpha_set_memflags (seq, operands[0]);
2349 /* Implement the movmisalign patterns. One of the operands is a memory
2350 that is not naturally aligned. Emit instructions to load it. */
2353 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2355 /* Honor misaligned loads, for those we promised to do so. */
2356 if (MEM_P (operands[1]))
2360 if (register_operand (operands[0], mode))
2363 tmp = gen_reg_rtx (mode);
2365 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2366 if (tmp != operands[0])
2367 emit_move_insn (operands[0], tmp);
2369 else if (MEM_P (operands[0]))
2371 if (!reg_or_0_operand (operands[1], mode))
2372 operands[1] = force_reg (mode, operands[1]);
2373 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2379 /* Generate an unsigned DImode to FP conversion. This is the same code
2380 optabs would emit if we didn't have TFmode patterns.
2382 For SFmode, this is the only construction I've found that can pass
2383 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2384 intermediates will work, because you'll get intermediate rounding
2385 that ruins the end result. Some of this could be fixed by turning
2386 on round-to-positive-infinity, but that requires diddling the fpsr,
2387 which kills performance. I tried turning this around and converting
2388 to a negative number, so that I could turn on /m, but either I did
2389 it wrong or there's something else, because I wound up with the exact
2390 same single-bit error. There is a branch-less form of this same code:
2401 fcmoveq $f10,$f11,$f0
2403 I'm not using it because it's the same number of instructions as
2404 this branch-full form, and it has more serialized long latency
2405 instructions on the critical path.
2407 For DFmode, we can avoid rounding errors by breaking up the word
2408 into two pieces, converting them separately, and adding them back:
2410 LC0: .long 0,0x5f800000
2415 cpyse $f11,$f31,$f10
2416 cpyse $f31,$f11,$f11
2424 This doesn't seem to be a clear-cut win over the optabs form.
2425 It probably all depends on the distribution of numbers being
2426 converted -- in the optabs form, all but high-bit-set has a
2427 much lower minimum execution time. */
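/* A sketch of the branch-full expansion emitted below, in C terms
   (illustrative only; the expander actually emits RTL):

	if (in >= 0)
	  out = (FP) in;		# convert directly
	else
	  {
	    i0 = (in >> 1) | (in & 1);	# halve, folding the lost bit back in
	    out = (FP) i0;
	    out = out + out;		# double to restore the magnitude
	  }
*/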
2430 alpha_emit_floatuns (rtx operands[2])
2432 rtx neglab, donelab, i0, i1, f0, in, out;
2433 enum machine_mode mode;
2436 in = force_reg (DImode, operands[1]);
2437 mode = GET_MODE (out);
2438 neglab = gen_label_rtx ();
2439 donelab = gen_label_rtx ();
2440 i0 = gen_reg_rtx (DImode);
2441 i1 = gen_reg_rtx (DImode);
2442 f0 = gen_reg_rtx (mode);
2444 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2446 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2447 emit_jump_insn (gen_jump (donelab));
2450 emit_label (neglab);
2452 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2453 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2454 emit_insn (gen_iordi3 (i0, i0, i1));
2455 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2456 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2458 emit_label (donelab);
2461 /* Generate the comparison for a conditional branch. */
2464 alpha_emit_conditional_branch (enum rtx_code code)
2466 enum rtx_code cmp_code, branch_code;
2467 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2468 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2471 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2473 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2475 alpha_compare.fp_p = 0;
2478 /* The general case: fold the comparison code to the types of compares
2479 that we have, choosing the branch as necessary. */
2482 case EQ: case LE: case LT: case LEU: case LTU:
2484 /* We have these compares: */
2485 cmp_code = code, branch_code = NE;
2490 /* These must be reversed. */
2491 cmp_code = reverse_condition (code), branch_code = EQ;
2494 case GE: case GT: case GEU: case GTU:
2495 /* For FP, we swap them, for INT, we reverse them. */
2496 if (alpha_compare.fp_p)
2498 cmp_code = swap_condition (code);
2500 tem = op0, op0 = op1, op1 = tem;
2504 cmp_code = reverse_condition (code);
2513 if (alpha_compare.fp_p)
2516 if (flag_unsafe_math_optimizations)
2518 /* When we are not as concerned about non-finite values, and we
2519 are comparing against zero, we can branch directly. */
2520 if (op1 == CONST0_RTX (DFmode))
2521 cmp_code = UNKNOWN, branch_code = code;
2522 else if (op0 == CONST0_RTX (DFmode))
2524 /* Undo the swap we probably did just above. */
2525 tem = op0, op0 = op1, op1 = tem;
2526 branch_code = swap_condition (cmp_code);
2532 /* ??? We mark the branch mode to be CCmode to prevent the
2533 compare and branch from being combined, since the compare
2534 insn follows IEEE rules that the branch does not. */
2535 branch_mode = CCmode;
2542 /* The following optimizations are only for signed compares. */
2543 if (code != LEU && code != LTU && code != GEU && code != GTU)
2545 /* Whee. Compare and branch against 0 directly. */
2546 if (op1 == const0_rtx)
2547 cmp_code = UNKNOWN, branch_code = code;
2549 /* If the constant doesn't fit into an immediate, but can
2550 be generated by lda/ldah, we adjust the argument and
2551 compare against zero, so we can use beq/bne directly. */
2552 /* ??? Don't do this when comparing against symbols, otherwise
2553 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2554 be declared false out of hand (at least for non-weak). */
2555 else if (GET_CODE (op1) == CONST_INT
2556 && (code == EQ || code == NE)
2557 && !(symbolic_operand (op0, VOIDmode)
2558 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2560 HOST_WIDE_INT v = INTVAL (op1), n = -v;
2562 if (! CONST_OK_FOR_LETTER_P (v, 'I')
2563 && (CONST_OK_FOR_LETTER_P (n, 'K')
2564 || CONST_OK_FOR_LETTER_P (n, 'L')))
2566 cmp_code = PLUS, branch_code = code;
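	      /* For example (illustrative): in "x == 0x12340000" the
		 constant is too wide for an 8-bit immediate, but its
		 negation is a single ldah, so "ldah t,-0x1234(x); beq t"
		 does the job without materializing the constant first.  */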
2572 if (!reg_or_0_operand (op0, DImode))
2573 op0 = force_reg (DImode, op0);
2574 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2575 op1 = force_reg (DImode, op1);
2578 /* Emit an initial compare instruction, if necessary. */
2580 if (cmp_code != UNKNOWN)
2582 tem = gen_reg_rtx (cmp_mode);
2583 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2586 /* Zero the operands. */
2587 memset (&alpha_compare, 0, sizeof (alpha_compare));
2589 /* Return the branch comparison. */
2590 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2593 /* Certain simplifications can be done to make invalid setcc operations
2594 valid. Return the final comparison, or NULL if nothing can be done. */
2597 alpha_emit_setcc (enum rtx_code code)
2599 enum rtx_code cmp_code;
2600 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2601 int fp_p = alpha_compare.fp_p;
2604 /* Zero the operands. */
2605 memset (&alpha_compare, 0, sizeof (alpha_compare));
2607 if (fp_p && GET_MODE (op0) == TFmode)
2609 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2614 if (fp_p && !TARGET_FIX)
2617 /* The general case: fold the comparison code to the types of compares
2618 that we have, choosing the branch as necessary. */
2623 case EQ: case LE: case LT: case LEU: case LTU:
2625 /* We have these compares. */
2627 cmp_code = code, code = NE;
2631 if (!fp_p && op1 == const0_rtx)
2636 cmp_code = reverse_condition (code);
2640 case GE: case GT: case GEU: case GTU:
2641 /* These normally need swapping, but for integer zero we have
2642 special patterns that recognize swapped operands. */
2643 if (!fp_p && op1 == const0_rtx)
2645 code = swap_condition (code);
2647 cmp_code = code, code = NE;
2648 tmp = op0, op0 = op1, op1 = tmp;
2657 if (!register_operand (op0, DImode))
2658 op0 = force_reg (DImode, op0);
2659 if (!reg_or_8bit_operand (op1, DImode))
2660 op1 = force_reg (DImode, op1);
2663 /* Emit an initial compare instruction, if necessary. */
2664 if (cmp_code != UNKNOWN)
2666 enum machine_mode mode = fp_p ? DFmode : DImode;
2668 tmp = gen_reg_rtx (mode);
2669 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2670 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2672 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2676 /* Return the setcc comparison. */
2677 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2681 /* Rewrite a comparison against zero CMP of the form
2682 (CODE (cc0) (const_int 0)) so it can be written validly in
2683 a conditional move (if_then_else CMP ...).
2684 If both of the operands that set cc0 are nonzero we must emit
2685 an insn to perform the compare (it can't be done within
2686 the conditional move). */
2689 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2691 enum rtx_code code = GET_CODE (cmp);
2692 enum rtx_code cmov_code = NE;
2693 rtx op0 = alpha_compare.op0;
2694 rtx op1 = alpha_compare.op1;
2695 int fp_p = alpha_compare.fp_p;
2696 enum machine_mode cmp_mode
2697 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2698 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2699 enum machine_mode cmov_mode = VOIDmode;
2700 int local_fast_math = flag_unsafe_math_optimizations;
2703 /* Zero the operands. */
2704 memset (&alpha_compare, 0, sizeof (alpha_compare));
2706 if (fp_p != FLOAT_MODE_P (mode))
2708 enum rtx_code cmp_code;
2713 /* If we have fp<->int register move instructions, do a cmov by
2714 performing the comparison in fp registers, and move the
2715 zero/nonzero value to integer registers, where we can then
2716 use a normal cmov, or vice-versa. */
2720 case EQ: case LE: case LT: case LEU: case LTU:
2721 /* We have these compares. */
2722 cmp_code = code, code = NE;
2726 /* This must be reversed. */
2727 cmp_code = EQ, code = EQ;
2730 case GE: case GT: case GEU: case GTU:
2731 /* These normally need swapping, but for integer zero we have
2732 special patterns that recognize swapped operands. */
2733 if (!fp_p && op1 == const0_rtx)
2734 cmp_code = code, code = NE;
2737 cmp_code = swap_condition (code);
2739 tem = op0, op0 = op1, op1 = tem;
2747 tem = gen_reg_rtx (cmp_op_mode);
2748 emit_insn (gen_rtx_SET (VOIDmode, tem,
2749 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2752 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2753 op0 = gen_lowpart (cmp_op_mode, tem);
2754 op1 = CONST0_RTX (cmp_op_mode);
2756 local_fast_math = 1;
2759 /* We may be able to use a conditional move directly.
2760 This avoids emitting spurious compares. */
2761 if (signed_comparison_operator (cmp, VOIDmode)
2762 && (!fp_p || local_fast_math)
2763 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2764 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2766 /* We can't put the comparison inside the conditional move;
2767 emit a compare instruction and put that inside the
2768 conditional move. Make sure we emit only comparisons we have;
2769 swap or reverse as necessary. */
2776 case EQ: case LE: case LT: case LEU: case LTU:
2777 /* We have these compares: */
2781 /* This must be reversed. */
2782 code = reverse_condition (code);
2786 case GE: case GT: case GEU: case GTU:
2787 /* These must be swapped. */
2788 if (op1 != CONST0_RTX (cmp_mode))
2790 code = swap_condition (code);
2791 tem = op0, op0 = op1, op1 = tem;
2801 if (!reg_or_0_operand (op0, DImode))
2802 op0 = force_reg (DImode, op0);
2803 if (!reg_or_8bit_operand (op1, DImode))
2804 op1 = force_reg (DImode, op1);
2807 /* ??? We mark the branch mode to be CCmode to prevent the compare
2808 and cmov from being combined, since the compare insn follows IEEE
2809 rules that the cmov does not. */
2810 if (fp_p && !local_fast_math)
2813 tem = gen_reg_rtx (cmp_op_mode);
2814 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2815 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2818 /* Simplify a conditional move of two constants into a setcc with
2819 arithmetic. This is done with a splitter since combine would
2820 just undo the work if done during code generation. It also catches
2821 cases we wouldn't have before cse. */
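/* For example (illustrative): "d = c ? 5 : 1" has t - f == 4, so it can
   be rewritten as "t = (c != 0); d = t*4 + 1", i.e. a setcc followed by
   a single s4addq.  */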
2824 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2825 rtx t_rtx, rtx f_rtx)
2827 HOST_WIDE_INT t, f, diff;
2828 enum machine_mode mode;
2829 rtx target, subtarget, tmp;
2831 mode = GET_MODE (dest);
2836 if (((code == NE || code == EQ) && diff < 0)
2837 || (code == GE || code == GT))
2839 code = reverse_condition (code);
2840 diff = t, t = f, f = diff;
2844 subtarget = target = dest;
2847 target = gen_lowpart (DImode, dest);
2848 if (! no_new_pseudos)
2849 subtarget = gen_reg_rtx (DImode);
2853 /* Below, we must be careful to use copy_rtx on target and subtarget
2854 in intermediate insns, as they may be a subreg rtx, which may not
   be shared.  */
2857 if (f == 0 && exact_log2 (diff) > 0
2858 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2859 viable over a longer latency cmove. On EV5, the E0 slot is a
2860 scarce resource, and on EV4 shift has the same latency as a cmove. */
2861 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2863 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2864 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2866 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2867 GEN_INT (exact_log2 (t)));
2868 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2870 else if (f == 0 && t == -1)
2872 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2873 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2875 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2877 else if (diff == 1 || diff == 4 || diff == 8)
2881 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2882 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2885 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2888 add_op = GEN_INT (f);
2889 if (sext_add_operand (add_op, mode))
2891 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2893 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2894 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2906 /* Look up the X_floating library function name for the given
   operation.  */
2909 struct xfloating_op GTY(())
2911 const enum rtx_code code;
2912 const char *const GTY((skip)) osf_func;
2913 const char *const GTY((skip)) vms_func;
2917 static GTY(()) struct xfloating_op xfloating_ops[] =
2919 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2920 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2921 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2922 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2923 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2924 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2925 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2926 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2927 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2928 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2929 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2930 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2931 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2932 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2933 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2936 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2938 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2939 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2943 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2945 struct xfloating_op *ops = xfloating_ops;
2946 long n = ARRAY_SIZE (xfloating_ops);
2949 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2951 /* How irritating. Nothing to key off for the main table. */
2952 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2955 n = ARRAY_SIZE (vax_cvt_ops);
2958 for (i = 0; i < n; ++i, ++ops)
2959 if (ops->code == code)
2961 rtx func = ops->libcall;
2964 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2965 ? ops->vms_func : ops->osf_func);
2966 ops->libcall = func;
2974 /* Most X_floating operations take the rounding mode as an argument.
2975 Compute that here. */
2978 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2979 enum alpha_fp_rounding_mode round)
2985 case ALPHA_FPRM_NORM:
2988 case ALPHA_FPRM_MINF:
2991 case ALPHA_FPRM_CHOP:
2994 case ALPHA_FPRM_DYN:
3000 /* XXX For reference, round to +inf is mode = 3. */
3003 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3009 /* Emit an X_floating library function call.
3011 Note that these functions do not follow normal calling conventions:
3012 TFmode arguments are passed in two integer registers (as opposed to
3013 indirect); TFmode return values appear in R16+R17.
3015 FUNC is the function to call.
3016 TARGET is where the output belongs.
3017 OPERANDS are the inputs.
3018 NOPERANDS is the count of inputs.
3019 EQUIV is the expression equivalent for the function.
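   For example (illustrative): a TFmode add becomes a call to _OtsAddX
   with the first operand in $16/$17, the second in $18/$19, and the
   integer rounding-mode argument in $20; the TFmode result comes back
   in $16/$17 as described above.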
3023 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3024 int noperands, rtx equiv)
3026 rtx usage = NULL_RTX, tmp, reg;
3031 for (i = 0; i < noperands; ++i)
3033 switch (GET_MODE (operands[i]))
3036 reg = gen_rtx_REG (TFmode, regno);
3041 reg = gen_rtx_REG (DFmode, regno + 32);
3046 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
3049 reg = gen_rtx_REG (DImode, regno);
3057 emit_move_insn (reg, operands[i]);
3058 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3061 switch (GET_MODE (target))
3064 reg = gen_rtx_REG (TFmode, 16);
3067 reg = gen_rtx_REG (DFmode, 32);
3070 reg = gen_rtx_REG (DImode, 0);
3076 tmp = gen_rtx_MEM (QImode, func);
3077 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3078 const0_rtx, const0_rtx));
3079 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3080 CONST_OR_PURE_CALL_P (tmp) = 1;
3085 emit_libcall_block (tmp, target, reg, equiv);
3088 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3091 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3095 rtx out_operands[3];
3097 func = alpha_lookup_xfloating_lib_func (code);
3098 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3100 out_operands[0] = operands[1];
3101 out_operands[1] = operands[2];
3102 out_operands[2] = GEN_INT (mode);
3103 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3104 gen_rtx_fmt_ee (code, TFmode, operands[1],
3108 /* Emit an X_floating library function call for a comparison. */
3111 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3113 enum rtx_code cmp_code, res_code;
3114 rtx func, out, operands[2];
3116 /* X_floating library comparison functions return
3120 Convert the compare against the raw return value. */
3148 func = alpha_lookup_xfloating_lib_func (cmp_code);
3152 out = gen_reg_rtx (DImode);
3154 /* ??? Strange mode for equiv because what's actually returned
3155 is -1,0,1, not a proper boolean value. */
3156 alpha_emit_xfloating_libcall (func, out, operands, 2,
3157 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3162 /* Emit an X_floating library function call for a conversion. */
3165 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3167 int noperands = 1, mode;
3168 rtx out_operands[2];
3170 enum rtx_code code = orig_code;
3172 if (code == UNSIGNED_FIX)
3175 func = alpha_lookup_xfloating_lib_func (code);
3177 out_operands[0] = operands[1];
3182 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3183 out_operands[1] = GEN_INT (mode);
3186 case FLOAT_TRUNCATE:
3187 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3188 out_operands[1] = GEN_INT (mode);
3195 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3196 gen_rtx_fmt_e (orig_code,
3197 GET_MODE (operands[0]),
3201 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3202 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3203 guarantee that the sequence
3206 is valid. Naturally, output operand ordering is little-endian.
3207 This is used by *movtf_internal and *movti_internal. */
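/* For example (illustrative): copying the TImode pair $4/$5 into $5/$6
   would clobber $5 (the source high word) before it is read; with
   FIXUP_OVERLAP the two DImode moves are swapped so the high word is
   copied first and both halves survive.  */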
3210 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3213 switch (GET_CODE (operands[1]))
3216 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3217 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3221 operands[3] = adjust_address (operands[1], DImode, 8);
3222 operands[2] = adjust_address (operands[1], DImode, 0);
3227 gcc_assert (operands[1] == CONST0_RTX (mode));
3228 operands[2] = operands[3] = const0_rtx;
3235 switch (GET_CODE (operands[0]))
3238 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3239 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3243 operands[1] = adjust_address (operands[0], DImode, 8);
3244 operands[0] = adjust_address (operands[0], DImode, 0);
3251 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3254 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3255 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3259 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3260 op2 is a register containing the sign bit, operation is the
3261 logical operation to be performed. */
3264 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3266 rtx high_bit = operands[2];
3270 alpha_split_tmode_pair (operands, TFmode, false);
3272 /* Detect three flavors of operand overlap. */
3274 if (rtx_equal_p (operands[0], operands[2]))
3276 else if (rtx_equal_p (operands[1], operands[2]))
3278 if (rtx_equal_p (operands[0], high_bit))
3285 emit_move_insn (operands[0], operands[2]);
3287 /* ??? If the destination overlaps both source tf and high_bit, then
3288 assume source tf is dead in its entirety and use the other half
3289 for a scratch register. Otherwise "scratch" is just the proper
3290 destination register. */
3291 scratch = operands[move < 2 ? 1 : 3];
3293 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3297 emit_move_insn (operands[0], operands[2]);
3299 emit_move_insn (operands[1], scratch);
3304 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
   unaligned data:

	unsigned:			signed:
3307 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3308 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3309 lda r3,X(r11) lda r3,X+2(r11)
3310 extwl r1,r3,r1 extql r1,r3,r1
3311 extwh r2,r3,r2 extqh r2,r3,r2
3312 or r1,r2,r1 or r1,r2,r1
3315 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3316 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3317 lda r3,X(r11) lda r3,X(r11)
3318 extll r1,r3,r1 extll r1,r3,r1
3319 extlh r2,r3,r2 extlh r2,r3,r2
3320 or r1,r2,r1 addl r1,r2,r1
3322 quad: ldq_u r1,X(r11)
3331 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3332 HOST_WIDE_INT ofs, int sign)
3334 rtx meml, memh, addr, extl, exth, tmp, mema;
3335 enum machine_mode mode;
3337 if (TARGET_BWX && size == 2)
3339 meml = adjust_address (mem, QImode, ofs);
3340 memh = adjust_address (mem, QImode, ofs+1);
3341 if (BYTES_BIG_ENDIAN)
3342 tmp = meml, meml = memh, memh = tmp;
3343 extl = gen_reg_rtx (DImode);
3344 exth = gen_reg_rtx (DImode);
3345 emit_insn (gen_zero_extendqidi2 (extl, meml));
3346 emit_insn (gen_zero_extendqidi2 (exth, memh));
3347 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3348 NULL, 1, OPTAB_LIB_WIDEN);
3349 addr = expand_simple_binop (DImode, IOR, extl, exth,
3350 NULL, 1, OPTAB_LIB_WIDEN);
3352 if (sign && GET_MODE (tgt) != HImode)
3354 addr = gen_lowpart (HImode, addr);
3355 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3359 if (GET_MODE (tgt) != DImode)
3360 addr = gen_lowpart (GET_MODE (tgt), addr);
3361 emit_move_insn (tgt, addr);
3366 meml = gen_reg_rtx (DImode);
3367 memh = gen_reg_rtx (DImode);
3368 addr = gen_reg_rtx (DImode);
3369 extl = gen_reg_rtx (DImode);
3370 exth = gen_reg_rtx (DImode);
3372 mema = XEXP (mem, 0);
3373 if (GET_CODE (mema) == LO_SUM)
3374 mema = force_reg (Pmode, mema);
3376 /* AND addresses cannot be in any alias set, since they may implicitly
3377 alias surrounding code. Ideally we'd have some alias set that
3378 covered all types except those with alignment 8 or higher. */
3380 tmp = change_address (mem, DImode,
3381 gen_rtx_AND (DImode,
3382 plus_constant (mema, ofs),
3384 set_mem_alias_set (tmp, 0);
3385 emit_move_insn (meml, tmp);
3387 tmp = change_address (mem, DImode,
3388 gen_rtx_AND (DImode,
3389 plus_constant (mema, ofs + size - 1),
3391 set_mem_alias_set (tmp, 0);
3392 emit_move_insn (memh, tmp);
3394 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3396 emit_move_insn (addr, plus_constant (mema, -1));
3398 emit_insn (gen_extqh_be (extl, meml, addr));
3399 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3401 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3402 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3403 addr, 1, OPTAB_WIDEN);
3405 else if (sign && size == 2)
3407 emit_move_insn (addr, plus_constant (mema, ofs+2));
3409 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3410 emit_insn (gen_extqh_le (exth, memh, addr));
3412 /* We must use tgt here for the target. The Alpha/VMS port fails if we use
3413 addr for the target, because addr is marked as a pointer and combine
3414 knows that pointers are always sign-extended 32 bit values. */
3415 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3416 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3417 addr, 1, OPTAB_WIDEN);
3421 if (WORDS_BIG_ENDIAN)
3423 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3427 emit_insn (gen_extwh_be (extl, meml, addr));
3432 emit_insn (gen_extlh_be (extl, meml, addr));
3437 emit_insn (gen_extqh_be (extl, meml, addr));
3444 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3448 emit_move_insn (addr, plus_constant (mema, ofs));
3449 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3453 emit_insn (gen_extwh_le (exth, memh, addr));
3458 emit_insn (gen_extlh_le (exth, memh, addr));
3463 emit_insn (gen_extqh_le (exth, memh, addr));
3472 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3473 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3478 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3481 /* Similarly, use ins and msk instructions to perform unaligned stores. */
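/* For reference, a little-endian unaligned halfword store of r4 to
   X(r11) looks roughly like this (an illustrative sketch in the style
   of the Handbook sequences above):

	ldq_u	r2,X+1(r11)	# load both containing quadwords
	ldq_u	r1,X(r11)
	lda	r3,X(r11)
	inswh	r4,r3,r6	# shift the new bytes into position
	inswl	r4,r3,r5
	mskwh	r2,r3,r2	# clear the bytes being replaced
	mskwl	r1,r3,r1
	or	r2,r6,r2
	or	r1,r5,r1
	stq_u	r2,X+1(r11)	# store high first for the aligned case
	stq_u	r1,X(r11)  */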
3484 alpha_expand_unaligned_store (rtx dst, rtx src,
3485 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3487 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3489 if (TARGET_BWX && size == 2)
3491 if (src != const0_rtx)
3493 dstl = gen_lowpart (QImode, src);
3494 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3495 NULL, 1, OPTAB_LIB_WIDEN);
3496 dsth = gen_lowpart (QImode, dsth);
3499 dstl = dsth = const0_rtx;
3501 meml = adjust_address (dst, QImode, ofs);
3502 memh = adjust_address (dst, QImode, ofs+1);
3503 if (BYTES_BIG_ENDIAN)
3504 addr = meml, meml = memh, memh = addr;
3506 emit_move_insn (meml, dstl);
3507 emit_move_insn (memh, dsth);
3511 dstl = gen_reg_rtx (DImode);
3512 dsth = gen_reg_rtx (DImode);
3513 insl = gen_reg_rtx (DImode);
3514 insh = gen_reg_rtx (DImode);
3516 dsta = XEXP (dst, 0);
3517 if (GET_CODE (dsta) == LO_SUM)
3518 dsta = force_reg (Pmode, dsta);
3520 /* AND addresses cannot be in any alias set, since they may implicitly
3521 alias surrounding code. Ideally we'd have some alias set that
3522 covered all types except those with alignment 8 or higher. */
3524 meml = change_address (dst, DImode,
3525 gen_rtx_AND (DImode,
3526 plus_constant (dsta, ofs),
3528 set_mem_alias_set (meml, 0);
3530 memh = change_address (dst, DImode,
3531 gen_rtx_AND (DImode,
3532 plus_constant (dsta, ofs + size - 1),
3534 set_mem_alias_set (memh, 0);
3536 emit_move_insn (dsth, memh);
3537 emit_move_insn (dstl, meml);
3538 if (WORDS_BIG_ENDIAN)
3540 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3542 if (src != const0_rtx)
3547 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode, src), addr));
3550 emit_insn (gen_insll_be (insh, gen_lowpart (SImode, src), addr));
3553 emit_insn (gen_insql_be (insh, gen_lowpart (DImode, src), addr));
3556 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3557 GEN_INT (size*8), addr));
3563 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3567 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3568 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3572 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3576 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3580 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3582 if (src != CONST0_RTX (GET_MODE (src)))
3584 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3585 GEN_INT (size*8), addr));
3590 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3593 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3596 emit_insn (gen_insql_le (insl, src, addr));
3601 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3606 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3610 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3611 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3615 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3620 if (src != CONST0_RTX (GET_MODE (src)))
3622 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3623 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3626 if (WORDS_BIG_ENDIAN)
3628 emit_move_insn (meml, dstl);
3629 emit_move_insn (memh, dsth);
3633 /* Must store high before low for degenerate case of aligned. */
3634 emit_move_insn (memh, dsth);
3635 emit_move_insn (meml, dstl);
3639 /* The block move code tries to maximize speed by separating loads and
3640 stores at the expense of register pressure: we load all of the data
3641 before we store it back out.  There are two secondary effects worth
3642 mentioning: it speeds copying to/from aligned and unaligned buffers,
3643 and it makes the code significantly easier to write.  */
3645 #define MAX_MOVE_WORDS 8
3647 /* Load an integral number of consecutive unaligned quadwords. */
3650 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3651 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3653 rtx const im8 = GEN_INT (-8);
3654 rtx const i64 = GEN_INT (64);
3655 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3656 rtx sreg, areg, tmp, smema;
3659 smema = XEXP (smem, 0);
3660 if (GET_CODE (smema) == LO_SUM)
3661 smema = force_reg (Pmode, smema);
3663 /* Generate all the tmp registers we need. */
3664 for (i = 0; i < words; ++i)
3666 data_regs[i] = out_regs[i];
3667 ext_tmps[i] = gen_reg_rtx (DImode);
3669 data_regs[words] = gen_reg_rtx (DImode);
3672 smem = adjust_address (smem, GET_MODE (smem), ofs);
3674 /* Load up all of the source data. */
3675 for (i = 0; i < words; ++i)
3677 tmp = change_address (smem, DImode,
3678 gen_rtx_AND (DImode,
3679 plus_constant (smema, 8*i),
3681 set_mem_alias_set (tmp, 0);
3682 emit_move_insn (data_regs[i], tmp);
3685 tmp = change_address (smem, DImode,
3686 gen_rtx_AND (DImode,
3687 plus_constant (smema, 8*words - 1),
3689 set_mem_alias_set (tmp, 0);
3690 emit_move_insn (data_regs[words], tmp);
3692 /* Extract the half-word fragments. Unfortunately DEC decided to make
3693 extxh with offset zero a noop instead of zeroing the register, so
3694 we must take care of that edge condition ourselves with cmov. */
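     I.e. (illustrative): when the source is already 8-byte aligned,
     (addr & 7) == 0 and extqh does not produce the zero we need; the
     cmov below forces ext_tmps[i] to zero in that case so the final
     OR leaves the assembled word intact.  */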
3696 sreg = copy_addr_to_reg (smema);
3697 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3699 if (WORDS_BIG_ENDIAN)
3700 emit_move_insn (sreg, plus_constant (sreg, 7));
3701 for (i = 0; i < words; ++i)
3703 if (WORDS_BIG_ENDIAN)
3705 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3706 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3710 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3711 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3713 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3714 gen_rtx_IF_THEN_ELSE (DImode,
3715 gen_rtx_EQ (DImode, areg,
3717 const0_rtx, ext_tmps[i])));
3720 /* Merge the half-words into whole words. */
3721 for (i = 0; i < words; ++i)
3723 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3724 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3728 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3729 may be NULL to store zeros. */
3732 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3733 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3735 rtx const im8 = GEN_INT (-8);
3736 rtx const i64 = GEN_INT (64);
3737 rtx ins_tmps[MAX_MOVE_WORDS];
3738 rtx st_tmp_1, st_tmp_2, dreg;
3739 rtx st_addr_1, st_addr_2, dmema;
3742 dmema = XEXP (dmem, 0);
3743 if (GET_CODE (dmema) == LO_SUM)
3744 dmema = force_reg (Pmode, dmema);
3746 /* Generate all the tmp registers we need. */
3747 if (data_regs != NULL)
3748 for (i = 0; i < words; ++i)
3749 ins_tmps[i] = gen_reg_rtx (DImode);
3750 st_tmp_1 = gen_reg_rtx (DImode);
3751 st_tmp_2 = gen_reg_rtx (DImode);
3754 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3756 st_addr_2 = change_address (dmem, DImode,
3757 gen_rtx_AND (DImode,
3758 plus_constant (dmema, words*8 - 1),
3760 set_mem_alias_set (st_addr_2, 0);
3762 st_addr_1 = change_address (dmem, DImode,
3763 gen_rtx_AND (DImode, dmema, im8));
3764 set_mem_alias_set (st_addr_1, 0);
3766 /* Load up the destination end bits. */
3767 emit_move_insn (st_tmp_2, st_addr_2);
3768 emit_move_insn (st_tmp_1, st_addr_1);
3770 /* Shift the input data into place. */
3771 dreg = copy_addr_to_reg (dmema);
3772 if (WORDS_BIG_ENDIAN)
3773 emit_move_insn (dreg, plus_constant (dreg, 7));
3774 if (data_regs != NULL)
3776 for (i = words-1; i >= 0; --i)
3778 if (WORDS_BIG_ENDIAN)
3780 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3781 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3785 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3786 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3789 for (i = words-1; i > 0; --i)
3791 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3792 ins_tmps[i-1], ins_tmps[i-1], 1,
3797 /* Split and merge the ends with the destination data. */
3798 if (WORDS_BIG_ENDIAN)
3800 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3801 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3805 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3806 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3809 if (data_regs != NULL)
3811 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3812 st_tmp_2, 1, OPTAB_WIDEN);
3813 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3814 st_tmp_1, 1, OPTAB_WIDEN);
3818 if (WORDS_BIG_ENDIAN)
3819 emit_move_insn (st_addr_1, st_tmp_1);
3821 emit_move_insn (st_addr_2, st_tmp_2);
3822 for (i = words-1; i > 0; --i)
3824 rtx tmp = change_address (dmem, DImode,
3825 gen_rtx_AND (DImode,
3826 plus_constant (dmema,
3827 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3829 set_mem_alias_set (tmp, 0);
3830 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3832 if (WORDS_BIG_ENDIAN)
3833 emit_move_insn (st_addr_2, st_tmp_2);
3835 emit_move_insn (st_addr_1, st_tmp_1);
3839 /* Expand string/block move operations.
3841 operands[0] is the pointer to the destination.
3842 operands[1] is the pointer to the source.
3843 operands[2] is the number of bytes to move.
3844 operands[3] is the alignment. */
3847 alpha_expand_block_move (rtx operands[])
3849 rtx bytes_rtx = operands[2];
3850 rtx align_rtx = operands[3];
3851 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3852 HOST_WIDE_INT bytes = orig_bytes;
3853 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3854 HOST_WIDE_INT dst_align = src_align;
3855 rtx orig_src = operands[1];
3856 rtx orig_dst = operands[0];
3857 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3859 unsigned int i, words, ofs, nregs = 0;
3861 if (orig_bytes <= 0)
3863 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3866 /* Look for additional alignment information from recorded register info. */
3868 tmp = XEXP (orig_src, 0);
3869 if (GET_CODE (tmp) == REG)
3870 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3871 else if (GET_CODE (tmp) == PLUS
3872 && GET_CODE (XEXP (tmp, 0)) == REG
3873 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3875 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3876 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3880 if (a >= 64 && c % 8 == 0)
3882 else if (a >= 32 && c % 4 == 0)
3884 else if (a >= 16 && c % 2 == 0)
3889 tmp = XEXP (orig_dst, 0);
3890 if (GET_CODE (tmp) == REG)
3891 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3892 else if (GET_CODE (tmp) == PLUS
3893 && GET_CODE (XEXP (tmp, 0)) == REG
3894 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3896 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3897 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3901 if (a >= 64 && c % 8 == 0)
3903 else if (a >= 32 && c % 4 == 0)
3905 else if (a >= 16 && c % 2 == 0)
3911 if (src_align >= 64 && bytes >= 8)
3915 for (i = 0; i < words; ++i)
3916 data_regs[nregs + i] = gen_reg_rtx (DImode);
3918 for (i = 0; i < words; ++i)
3919 emit_move_insn (data_regs[nregs + i],
3920 adjust_address (orig_src, DImode, ofs + i * 8));
3927 if (src_align >= 32 && bytes >= 4)
3931 for (i = 0; i < words; ++i)
3932 data_regs[nregs + i] = gen_reg_rtx (SImode);
3934 for (i = 0; i < words; ++i)
3935 emit_move_insn (data_regs[nregs + i],
3936 adjust_address (orig_src, SImode, ofs + i * 4));
3947 for (i = 0; i < words+1; ++i)
3948 data_regs[nregs + i] = gen_reg_rtx (DImode);
3950 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3958 if (! TARGET_BWX && bytes >= 4)
3960 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3961 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3968 if (src_align >= 16)
3971 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3972 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3975 } while (bytes >= 2);
3977 else if (! TARGET_BWX)
3979 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3980 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3988 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3989 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3994 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3996 /* Now save it back out again. */
4000 /* Write out the data in whatever chunks reading the source allowed. */
4001 if (dst_align >= 64)
4003 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4005 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
4012 if (dst_align >= 32)
4014 /* If the source has remaining DImode regs, write them out in
   two pieces.  */
4016 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4018 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4019 NULL_RTX, 1, OPTAB_WIDEN);
4021 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4022 gen_lowpart (SImode, data_regs[i]));
4023 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4024 gen_lowpart (SImode, tmp));
4029 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4031 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4038 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4040 /* Write out a remaining block of words using unaligned methods. */
4042 for (words = 1; i + words < nregs; words++)
4043 if (GET_MODE (data_regs[i + words]) != DImode)
4047 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4049 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4056 /* Due to the above, this won't be aligned. */
4057 /* ??? If we have more than one of these, consider constructing full
4058 words in registers and using alpha_expand_unaligned_store_words. */
4059 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4061 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4066 if (dst_align >= 16)
4067 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4069 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4074 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4076 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4081 /* The remainder must be byte copies. */
4084 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4085 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4094 alpha_expand_block_clear (rtx operands[])
4096 rtx bytes_rtx = operands[1];
4097 rtx align_rtx = operands[2];
4098 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4099 HOST_WIDE_INT bytes = orig_bytes;
4100 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4101 HOST_WIDE_INT alignofs = 0;
4102 rtx orig_dst = operands[0];
4104 int i, words, ofs = 0;
4106 if (orig_bytes <= 0)
4108 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4111 /* Look for stricter alignment. */
4112 tmp = XEXP (orig_dst, 0);
4113 if (GET_CODE (tmp) == REG)
4114 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4115 else if (GET_CODE (tmp) == PLUS
4116 && GET_CODE (XEXP (tmp, 0)) == REG
4117 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4119 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4120 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4125 align = a, alignofs = 8 - c % 8;
4127 align = a, alignofs = 4 - c % 4;
4129 align = a, alignofs = 2 - c % 2;
4133 /* Handle an unaligned prefix first. */
4137 #if HOST_BITS_PER_WIDE_INT >= 64
4138 /* Given that alignofs is bounded by align, the only time BWX could
4139 generate three stores is for a 7 byte fill. Prefer two individual
4140 stores over a load/mask/store sequence. */
4141 if ((!TARGET_BWX || alignofs == 7)
4143 && !(alignofs == 4 && bytes >= 4))
4145 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4146 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4150 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4151 set_mem_alias_set (mem, 0);
4153 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4154 if (bytes < alignofs)
4156 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4167 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4168 NULL_RTX, 1, OPTAB_WIDEN);
4170 emit_move_insn (mem, tmp);
4174 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4176 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4181 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4183 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4188 if (alignofs == 4 && bytes >= 4)
4190 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4196 /* If we've not used the extra lead alignment information by now,
4197 we won't be able to. Downgrade align to match what's left over. */
4200 alignofs = alignofs & -alignofs;
4201 align = MIN (align, alignofs * BITS_PER_UNIT);
4205 /* Handle a block of contiguous long-words. */
4207 if (align >= 64 && bytes >= 8)
4211 for (i = 0; i < words; ++i)
4212 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4219 /* If the block is large and appropriately aligned, emit a single
4220 store followed by a sequence of stq_u insns. */
4222 if (align >= 32 && bytes > 16)
4226 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4230 orig_dsta = XEXP (orig_dst, 0);
4231 if (GET_CODE (orig_dsta) == LO_SUM)
4232 orig_dsta = force_reg (Pmode, orig_dsta);
4235 for (i = 0; i < words; ++i)
4238 = change_address (orig_dst, DImode,
4239 gen_rtx_AND (DImode,
4240 plus_constant (orig_dsta, ofs + i*8),
4242 set_mem_alias_set (mem, 0);
4243 emit_move_insn (mem, const0_rtx);
4246 /* Depending on the alignment, the first stq_u may have overlapped
4247 with the initial stl, which means that the last stq_u didn't
4248 write as much as it would appear.  Leave those questionable bytes
   unaccounted for.  */
4250 bytes -= words * 8 - 4;
4251 ofs += words * 8 - 4;
4254 /* Handle a smaller block of aligned words. */
4256 if ((align >= 64 && bytes == 4)
4257 || (align == 32 && bytes >= 4))
4261 for (i = 0; i < words; ++i)
4262 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4269 /* An unaligned block uses stq_u stores for as many as possible. */
4275 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4281 /* Next clean up any trailing pieces. */
4283 #if HOST_BITS_PER_WIDE_INT >= 64
4284 /* Count the number of bits in BYTES for which aligned stores could
   be emitted.  */
4287 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4291 /* If we have appropriate alignment (and it wouldn't take too many
4292 instructions otherwise), mask out the bytes we need. */
4293 if (TARGET_BWX ? words > 2 : bytes > 0)
4300 mem = adjust_address (orig_dst, DImode, ofs);
4301 set_mem_alias_set (mem, 0);
4303 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4305 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4306 NULL_RTX, 1, OPTAB_WIDEN);
4308 emit_move_insn (mem, tmp);
4311 else if (align >= 32 && bytes < 4)
4316 mem = adjust_address (orig_dst, SImode, ofs);
4317 set_mem_alias_set (mem, 0);
4319 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4321 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4322 NULL_RTX, 1, OPTAB_WIDEN);
4324 emit_move_insn (mem, tmp);
4330 if (!TARGET_BWX && bytes >= 4)
4332 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4342 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4346 } while (bytes >= 2);
4348 else if (! TARGET_BWX)
4350 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4358 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4366 /* Returns a mask so that zap(x, value) == x & mask. */
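/* For example (illustrative): VALUE == 0x0f selects bytes 0-3 for
   zapping, so the mask returned is 0xffffffff00000000, and
   zap (x, 0x0f) == x & mask clears the low 32 bits.  */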
4369 alpha_expand_zap_mask (HOST_WIDE_INT value)
4374 if (HOST_BITS_PER_WIDE_INT >= 64)
4376 HOST_WIDE_INT mask = 0;
4378 for (i = 7; i >= 0; --i)
4381 if (!((value >> i) & 1))
4385 result = gen_int_mode (mask, DImode);
4389 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4391 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4393 for (i = 7; i >= 4; --i)
4396 if (!((value >> i) & 1))
4400 for (i = 3; i >= 0; --i)
4403 if (!((value >> i) & 1))
4407 result = immed_double_const (mask_lo, mask_hi, DImode);
4414 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4415 enum machine_mode mode,
4416 rtx op0, rtx op1, rtx op2)
4418 op0 = gen_lowpart (mode, op0);
4420 if (op1 == const0_rtx)
4421 op1 = CONST0_RTX (mode);
4423 op1 = gen_lowpart (mode, op1);
4425 if (op2 == const0_rtx)
4426 op2 = CONST0_RTX (mode);
4428 op2 = gen_lowpart (mode, op2);
4430 emit_insn ((*gen) (op0, op1, op2));
4433 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4434 COND is true. Mark the jump as unlikely to be taken. */
4437 emit_unlikely_jump (rtx cond, rtx label)
4439 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4442 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4443 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4444 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4447 /* A subroutine of the atomic operation splitters. Emit a load-locked
4448 instruction in MODE. */
4451 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4453 rtx (*fn) (rtx, rtx) = NULL;
4455 fn = gen_load_locked_si;
4456 else if (mode == DImode)
4457 fn = gen_load_locked_di;
4458 emit_insn (fn (reg, mem));
4461 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4462 instruction in MODE. */
4465 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4467 rtx (*fn) (rtx, rtx, rtx) = NULL;
4469 fn = gen_store_conditional_si;
4470 else if (mode == DImode)
4471 fn = gen_store_conditional_di;
4472 emit_insn (fn (res, mem, val));
4475 /* A subroutine of the atomic operation splitters. Emit an insxl
4476 instruction in MODE. */
4479 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4481 rtx ret = gen_reg_rtx (DImode);
4482 rtx (*fn) (rtx, rtx, rtx);
4484 if (WORDS_BIG_ENDIAN)
4498 emit_insn (fn (ret, op1, op2));
4503 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4504 to perform. MEM is the memory on which to operate. VAL is the second
4505 operand of the binary operator. BEFORE and AFTER are optional locations to
4506 return the value of MEM either before or after the operation. SCRATCH is
4507 a scratch register. */
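/* The emitted sequence is roughly (an illustrative DImode sketch;
   register names stand for the rtx operands above):

	mb
   retry:
	ldq_l	before,0(mem)
	<op>	scratch,before,val	# bic for the NOT/AND form
	stq_c	scratch,0(mem)
	beq	scratch,retry		# marked very unlikely
	mb  */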
4510 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4511 rtx before, rtx after, rtx scratch)
4513 enum machine_mode mode = GET_MODE (mem);
4514 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4516 emit_insn (gen_memory_barrier ());
4518 label = gen_label_rtx ();
4520 label = gen_rtx_LABEL_REF (DImode, label);
4524 emit_load_locked (mode, before, mem);
4527 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4529 x = gen_rtx_fmt_ee (code, mode, before, val);
4531 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4532 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4534 emit_store_conditional (mode, cond, mem, scratch);
4536 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4537 emit_unlikely_jump (x, label);
4539 emit_insn (gen_memory_barrier ());
4542 /* Expand a compare and swap operation. */
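/* The generated loop is roughly (an illustrative DImode sketch):

	mb
   1:	ldq_l	ret,0(mem)
	cmpeq	ret,oldval,cond
	beq	cond,2f			# unlikely
	mov	newval,scratch
	stq_c	scratch,0(mem)
	beq	scratch,1b		# unlikely
   2:	mb  */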
4545 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4548 enum machine_mode mode = GET_MODE (mem);
4549 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4551 emit_insn (gen_memory_barrier ());
4553 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4554 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4555 emit_label (XEXP (label1, 0));
4557 emit_load_locked (mode, retval, mem);
4559 x = gen_lowpart (DImode, retval);
4560 if (oldval == const0_rtx)
4561 x = gen_rtx_NE (DImode, x, const0_rtx);
4564 x = gen_rtx_EQ (DImode, x, oldval);
4565 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4566 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4568 emit_unlikely_jump (x, label2);
4570 emit_move_insn (scratch, newval);
4571 emit_store_conditional (mode, cond, mem, scratch);
4573 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4574 emit_unlikely_jump (x, label1);
4576 emit_insn (gen_memory_barrier ());
4577 emit_label (XEXP (label2, 0));
4581 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4583 enum machine_mode mode = GET_MODE (mem);
4584 rtx addr, align, wdst;
4585 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4587 addr = force_reg (DImode, XEXP (mem, 0));
4588 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4589 NULL_RTX, 1, OPTAB_DIRECT);
4591 oldval = convert_modes (DImode, mode, oldval, 1);
4592 newval = emit_insxl (mode, newval, addr);
4594 wdst = gen_reg_rtx (DImode);
4596 fn5 = gen_sync_compare_and_swapqi_1;
4598 fn5 = gen_sync_compare_and_swaphi_1;
4599 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4601 emit_move_insn (dst, gen_lowpart (mode, wdst));
4605 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4606 rtx oldval, rtx newval, rtx align,
4607 rtx scratch, rtx cond)
4609 rtx label1, label2, mem, width, mask, x;
4611 mem = gen_rtx_MEM (DImode, align);
4612 MEM_VOLATILE_P (mem) = 1;
4614 emit_insn (gen_memory_barrier ());
4615 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4616 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4617 emit_label (XEXP (label1, 0));
4619 emit_load_locked (DImode, scratch, mem);
4621 width = GEN_INT (GET_MODE_BITSIZE (mode));
4622 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4623 if (WORDS_BIG_ENDIAN)
4624 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4626 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4628 if (oldval == const0_rtx)
4629 x = gen_rtx_NE (DImode, dest, const0_rtx);
4632 x = gen_rtx_EQ (DImode, dest, oldval);
4633 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4634 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4636 emit_unlikely_jump (x, label2);
4638 if (WORDS_BIG_ENDIAN)
4639 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4641 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4642 emit_insn (gen_iordi3 (scratch, scratch, newval));
4644 emit_store_conditional (DImode, scratch, mem, scratch);
4646 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4647 emit_unlikely_jump (x, label1);
4649 emit_insn (gen_memory_barrier ());
4650 emit_label (XEXP (label2, 0));
4653 /* Expand an atomic exchange operation. */
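/* Roughly (illustrative):  mb; 1: ldq_l ret,0(mem); mov val,scratch;
   stq_c scratch,0(mem); beq scratch,1b.  */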
4656 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4658 enum machine_mode mode = GET_MODE (mem);
4659 rtx label, x, cond = gen_lowpart (DImode, scratch);
4661 emit_insn (gen_memory_barrier ());
4663 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4664 emit_label (XEXP (label, 0));
4666 emit_load_locked (mode, retval, mem);
4667 emit_move_insn (scratch, val);
4668 emit_store_conditional (mode, cond, mem, scratch);
4670 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4671 emit_unlikely_jump (x, label);
4675 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4677 enum machine_mode mode = GET_MODE (mem);
4678 rtx addr, align, wdst;
4679 rtx (*fn4) (rtx, rtx, rtx, rtx);
4681 /* Force the address into a register. */
4682 addr = force_reg (DImode, XEXP (mem, 0));
4684 /* Align it to a multiple of 8. */
4685 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4686 NULL_RTX, 1, OPTAB_DIRECT);
4688 /* Insert val into the correct byte location within the word. */
4689 val = emit_insxl (mode, val, addr);
4691 wdst = gen_reg_rtx (DImode);
4693 fn4 = gen_sync_lock_test_and_setqi_1;
4695 fn4 = gen_sync_lock_test_and_sethi_1;
4696 emit_insn (fn4 (wdst, addr, val, align));
4698 emit_move_insn (dst, gen_lowpart (mode, wdst));
4702 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4703 rtx val, rtx align, rtx scratch)
4705 rtx label, mem, width, mask, x;
4707 mem = gen_rtx_MEM (DImode, align);
4708 MEM_VOLATILE_P (mem) = 1;
4710 emit_insn (gen_memory_barrier ());
4711 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4712 emit_label (XEXP (label, 0));
4714 emit_load_locked (DImode, scratch, mem);
4716 width = GEN_INT (GET_MODE_BITSIZE (mode));
4717 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4718 if (WORDS_BIG_ENDIAN)
4720 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4721 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4725 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4726 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4728 emit_insn (gen_iordi3 (scratch, scratch, val));
4730 emit_store_conditional (DImode, scratch, mem, scratch);
4732 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4733 emit_unlikely_jump (x, label);
4736 /* Adjust the cost of a scheduling dependency. Return the new cost of
4737 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4740 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4742 enum attr_type insn_type, dep_insn_type;
4744 /* If the dependence is an anti-dependence, there is no cost. For an
4745 output dependence, there is sometimes a cost, but it doesn't seem
4746 worth handling those few cases. */
4747 if (REG_NOTE_KIND (link) != 0)
4750 /* If we can't recognize the insns, we can't really do anything. */
4751 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4754 insn_type = get_attr_type (insn);
4755 dep_insn_type = get_attr_type (dep_insn);
4757 /* Bring in the user-defined memory latency. */
4758 if (dep_insn_type == TYPE_ILD
4759 || dep_insn_type == TYPE_FLD
4760 || dep_insn_type == TYPE_LDSYM)
4761 cost += alpha_memory_latency - 1;
4763 /* Everything else handled in DFA bypasses now. */
4768 /* The number of instructions that can be issued per cycle. */
4771 alpha_issue_rate (void)
4773 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4776 /* How many alternative schedules to try. This should be as wide as the
4777 scheduling freedom in the DFA, but no wider. Making this value too
4778 large results in extra work for the scheduler.
4780 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4781 alternative schedules. For EV5, we can choose between E0/E1 and
4782 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4785 alpha_multipass_dfa_lookahead (void)
4787 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4790 /* Machine-specific function data. */
4792 struct machine_function GTY(())
4795 /* List of call information words for calls from this function. */
4796 struct rtx_def *first_ciw;
4797 struct rtx_def *last_ciw;
4800 /* List of deferred case vectors. */
4801 struct rtx_def *addr_list;
4804 const char *some_ld_name;
4806 /* For TARGET_LD_BUGGY_LDGP. */
4807 struct rtx_def *gp_save_rtx;
4810 /* How to allocate a 'struct machine_function'. */
4812 static struct machine_function *
4813 alpha_init_machine_status (void)
4815 return ((struct machine_function *)
4816 ggc_alloc_cleared (sizeof (struct machine_function)));
4819 /* Functions to save and restore alpha_return_addr_rtx. */
4821 /* Start the ball rolling with RETURN_ADDR_RTX. */
4824 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4829 return get_hard_reg_initial_val (Pmode, REG_RA);
4832 /* Return or create a memory slot containing the gp value for the current
4833 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4836 alpha_gp_save_rtx (void)
4838 rtx seq, m = cfun->machine->gp_save_rtx;
4844 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4845 m = validize_mem (m);
4846 emit_move_insn (m, pic_offset_table_rtx);
4850 emit_insn_after (seq, entry_of_function ());
4852 cfun->machine->gp_save_rtx = m;
4859 alpha_ra_ever_killed (void)
4863 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4864 return regs_ever_live[REG_RA];
4866 push_topmost_sequence ();
top = get_insns ();
4868 pop_topmost_sequence ();
4870 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4874 /* Return the trap mode suffix applicable to the current
4875 instruction, or NULL. */
4878 get_trap_mode_suffix (void)
4880 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4884 case TRAP_SUFFIX_NONE:
4887 case TRAP_SUFFIX_SU:
4888 if (alpha_fptm >= ALPHA_FPTM_SU)
4892 case TRAP_SUFFIX_SUI:
4893 if (alpha_fptm >= ALPHA_FPTM_SUI)
4897 case TRAP_SUFFIX_V_SV:
4905 case ALPHA_FPTM_SUI:
4911 case TRAP_SUFFIX_V_SV_SVI:
4920 case ALPHA_FPTM_SUI:
4927 case TRAP_SUFFIX_U_SU_SUI:
4936 case ALPHA_FPTM_SUI:
4949 /* Return the rounding mode suffix applicable to the current
4950 instruction, or NULL. */
4953 get_round_mode_suffix (void)
4955 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4959 case ROUND_SUFFIX_NONE:
4961 case ROUND_SUFFIX_NORMAL:
4964 case ALPHA_FPRM_NORM:
4966 case ALPHA_FPRM_MINF:
4968 case ALPHA_FPRM_CHOP:
4970 case ALPHA_FPRM_DYN:
4977 case ROUND_SUFFIX_C:
4986 /* Locate some local-dynamic symbol still in use by this function
4987 so that we can print its name in some movdi_er_tlsldm pattern. */
4990 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4994 if (GET_CODE (x) == SYMBOL_REF
4995 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4997 cfun->machine->some_ld_name = XSTR (x, 0);
5005 get_some_local_dynamic_name (void)
5009 if (cfun->machine->some_ld_name)
5010 return cfun->machine->some_ld_name;
5012 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5014 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5015 return cfun->machine->some_ld_name;
5020 /* Print an operand. Recognize special options, documented below. */
5023 print_operand (FILE *file, rtx x, int code)
5030 /* Print the assembler name of the current function. */
5031 assemble_name (file, alpha_fnname);
5035 assemble_name (file, get_some_local_dynamic_name ());
5040 const char *trap = get_trap_mode_suffix ();
5041 const char *round = get_round_mode_suffix ();
5044 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5045 (trap ? trap : ""), (round ? round : ""));
5050 /* Generates the single-precision instruction suffix. */
5051 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5055 /* Generates the double-precision instruction suffix. */
5056 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5060 /* Generates a nop after a noreturn call at the very end of the function. */
5062 if (next_real_insn (current_output_insn) == 0)
5063 fprintf (file, "\n\tnop");
5067 if (alpha_this_literal_sequence_number == 0)
5068 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5069 fprintf (file, "%d", alpha_this_literal_sequence_number);
5073 if (alpha_this_gpdisp_sequence_number == 0)
5074 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5075 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5079 if (GET_CODE (x) == HIGH)
5080 output_addr_const (file, XEXP (x, 0));
5082 output_operand_lossage ("invalid %%H value");
5089 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5091 x = XVECEXP (x, 0, 0);
5092 lituse = "lituse_tlsgd";
5094 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5096 x = XVECEXP (x, 0, 0);
5097 lituse = "lituse_tlsldm";
5099 else if (GET_CODE (x) == CONST_INT)
5100 lituse = "lituse_jsr";
5103 output_operand_lossage ("invalid %%J value");
5107 if (x != const0_rtx)
5108 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5116 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5117 lituse = "lituse_jsrdirect";
5119 lituse = "lituse_jsr";
5122 gcc_assert (INTVAL (x) != 0);
5123 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
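/* Illustrative only: the annotation emitted here shows up in the
   assembly as, e.g.,

	jsr	$26,($27),foo		!lituse_jsr!12

   pairing the call with the literal load that set up $27, so the
   linker can relax the pair into a direct branch.  */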
5127 /* If this operand is the constant zero, write it as "$31". */
5128 if (GET_CODE (x) == REG)
5129 fprintf (file, "%s", reg_names[REGNO (x)]);
5130 else if (x == CONST0_RTX (GET_MODE (x)))
5131 fprintf (file, "$31");
5133 output_operand_lossage ("invalid %%r value");
5137 /* Similar, but for floating-point. */
5138 if (GET_CODE (x) == REG)
5139 fprintf (file, "%s", reg_names[REGNO (x)]);
5140 else if (x == CONST0_RTX (GET_MODE (x)))
5141 fprintf (file, "$f31");
5143 output_operand_lossage ("invalid %%R value");
5147 /* Write the 1's complement of a constant. */
5148 if (GET_CODE (x) != CONST_INT)
5149 output_operand_lossage ("invalid %%N value");
5151 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5155 /* Write 1 << C, for a constant C. */
5156 if (GET_CODE (x) != CONST_INT)
5157 output_operand_lossage ("invalid %%P value");
5159 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5163 /* Write the high-order 16 bits of a constant, sign-extended. */
5164 if (GET_CODE (x) != CONST_INT)
5165 output_operand_lossage ("invalid %%h value");
5167 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5171 /* Write the low-order 16 bits of a constant, sign-extended. */
5172 if (GET_CODE (x) != CONST_INT)
5173 output_operand_lossage ("invalid %%L value");
5175 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5176 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
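/* Worked example (illustrative): for INTVAL (x) == 0x1abcd the low 16
   bits are 0xabcd; bit 15 is set, so 2 * 0x8000 is subtracted and %L
   prints 0xabcd - 0x10000 = -21555, the sign-extended immediate an
   lda expects.  */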
5180 /* Write mask for ZAP insn. */
5181 if (GET_CODE (x) == CONST_DOUBLE)
5183 HOST_WIDE_INT mask = 0;
5184 HOST_WIDE_INT value;
5186 value = CONST_DOUBLE_LOW (x);
5187 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5192 value = CONST_DOUBLE_HIGH (x);
5193 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5196 mask |= (1 << (i + sizeof (int)));
5198 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5201 else if (GET_CODE (x) == CONST_INT)
5203 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5205 for (i = 0; i < 8; i++, value >>= 8)
if (value & 0xff)
mask |= (1 << i);
5209 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5212 output_operand_lossage ("invalid %%m value");
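/* Illustrative: for the constant 0x1200000034, bytes 0 and 4 are
   nonzero, so the zap mask is 0x11, printed in decimal as 17.  */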
5216 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5217 if (GET_CODE (x) != CONST_INT
5218 || (INTVAL (x) != 8 && INTVAL (x) != 16
5219 && INTVAL (x) != 32 && INTVAL (x) != 64))
5220 output_operand_lossage ("invalid %%M value");
5222 fprintf (file, "%s",
5223 (INTVAL (x) == 8 ? "b"
5224 : INTVAL (x) == 16 ? "w"
5225 : INTVAL (x) == 32 ? "l"
5230 /* Similar, except do it from the mask. */
5231 if (GET_CODE (x) == CONST_INT)
5233 HOST_WIDE_INT value = INTVAL (x);
5240 if (value == 0xffff)
5245 if (value == 0xffffffff)
5256 else if (HOST_BITS_PER_WIDE_INT == 32
5257 && GET_CODE (x) == CONST_DOUBLE
5258 && CONST_DOUBLE_LOW (x) == 0xffffffff
5259 && CONST_DOUBLE_HIGH (x) == 0)
5264 output_operand_lossage ("invalid %%U value");
5268 /* Write the constant value divided by 8 for little-endian mode or
5269 (56 - value) / 8 for big-endian mode. */
5271 if (GET_CODE (x) != CONST_INT
5272 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5275 || (INTVAL (x) & 7) != 0)
5276 output_operand_lossage ("invalid %%s value");
5278 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5280 ? (56 - INTVAL (x)) / 8
5285 /* Same, except compute (64 - c) / 8.  */
5287 if (GET_CODE (x) != CONST_INT
5288 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5289 || (INTVAL (x) & 7) != 0)
5290 output_operand_lossage ("invalid %%S value");
5292 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
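/* Illustrative: for a shift count of 48, %S prints (64 - 48) / 8 = 2,
   the complementary byte offset the unaligned-access patterns want.  */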
5297 /* On Unicos/Mk systems: use a DEX expression if the symbol
5298 clashes with a register name. */
5299 int dex = unicosmk_need_dex (x);
5301 fprintf (file, "DEX(%d)", dex);
5303 output_addr_const (file, x);
5307 case 'C': case 'D': case 'c': case 'd':
5308 /* Write out comparison name. */
5310 enum rtx_code c = GET_CODE (x);
5312 if (!COMPARISON_P (x))
5313 output_operand_lossage ("invalid %%C value");
5315 else if (code == 'D')
5316 c = reverse_condition (c);
5317 else if (code == 'c')
5318 c = swap_condition (c);
5319 else if (code == 'd')
5320 c = swap_condition (reverse_condition (c));
5323 fprintf (file, "ule");
5325 fprintf (file, "ult");
5326 else if (c == UNORDERED)
5327 fprintf (file, "un");
5329 fprintf (file, "%s", GET_RTX_NAME (c));
5334 /* Write the divide or modulus operator. */
5335 switch (GET_CODE (x))
5338 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5341 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5344 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5347 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5350 output_operand_lossage ("invalid %%E value");
5356 /* Write "_u" for unaligned access. */
5357 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5358 fprintf (file, "_u");
5362 if (GET_CODE (x) == REG)
5363 fprintf (file, "%s", reg_names[REGNO (x)]);
5364 else if (GET_CODE (x) == MEM)
5365 output_address (XEXP (x, 0));
5366 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5368 switch (XINT (XEXP (x, 0), 1))
5372 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5375 output_operand_lossage ("unknown relocation unspec");
5380 output_addr_const (file, x);
5384 output_operand_lossage ("invalid %%xn code");
5389 print_operand_address (FILE *file, rtx addr)
5392 HOST_WIDE_INT offset = 0;
5394 if (GET_CODE (addr) == AND)
5395 addr = XEXP (addr, 0);
5397 if (GET_CODE (addr) == PLUS
5398 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5400 offset = INTVAL (XEXP (addr, 1));
5401 addr = XEXP (addr, 0);
5404 if (GET_CODE (addr) == LO_SUM)
5406 const char *reloc16, *reloclo;
5407 rtx op1 = XEXP (addr, 1);
5409 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5411 op1 = XEXP (op1, 0);
5412 switch (XINT (op1, 1))
5416 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5420 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5423 output_operand_lossage ("unknown relocation unspec");
5427 output_addr_const (file, XVECEXP (op1, 0, 0));
5432 reloclo = "gprellow";
5433 output_addr_const (file, op1);
5437 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5439 addr = XEXP (addr, 0);
5440 switch (GET_CODE (addr))
5443 basereg = REGNO (addr);
5447 basereg = subreg_regno (addr);
5454 fprintf (file, "($%d)\t\t!%s", basereg,
5455 (basereg == 29 ? reloc16 : reloclo));
5459 switch (GET_CODE (addr))
5462 basereg = REGNO (addr);
5466 basereg = subreg_regno (addr);
5470 offset = INTVAL (addr);
5473 #if TARGET_ABI_OPEN_VMS
5475 fprintf (file, "%s", XSTR (addr, 0));
5479 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5480 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5481 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5482 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5483 INTVAL (XEXP (XEXP (addr, 0), 1)));
5491 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5494 /* Emit RTL insns to initialize the variable parts of a trampoline at
5495 TRAMP. FNADDR is an RTX for the address of the function's pure
5496 code. CXT is an RTX for the static chain value for the function.
5498 The three offset parameters are for the individual template's
5499 layout. A JMPOFS < 0 indicates that the trampoline does not
5500 contain instructions at all.
5502 We assume here that a function will be called many more times than
5503 its address is taken (e.g., it might be passed to qsort), so we
5504 take the trouble to initialize the "hint" field in the JMP insn.
5505 Note that the hint field is PC (new) + 4 * bits 13:0. */
5508 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5509 int fnofs, int cxtofs, int jmpofs)
5511 rtx temp, temp1, addr;
5512 /* VMS really uses DImode pointers in memory at this point. */
5513 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5515 #ifdef POINTERS_EXTEND_UNSIGNED
5516 fnaddr = convert_memory_address (mode, fnaddr);
5517 cxt = convert_memory_address (mode, cxt);
5520 /* Store function address and CXT. */
5521 addr = memory_address (mode, plus_constant (tramp, fnofs));
5522 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5523 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5524 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5526 /* This has been disabled since the hint only has a 32k range, and in
5527 no existing OS is the stack within 32k of the text segment. */
5528 if (0 && jmpofs >= 0)
5530 /* Compute hint value. */
5531 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5532 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5534 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5535 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5536 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5537 GEN_INT (0x3fff), 0);
5539 /* Merge in the hint. */
5540 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5541 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5542 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5543 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5545 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5548 #ifdef ENABLE_EXECUTE_STACK
5549 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5550 0, VOIDmode, 1, tramp, Pmode);
5554 emit_insn (gen_imb ());
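/* Rough sketch (offsets illustrative; the real template and offsets
   come from the ABI headers) of an OSF/1-style trampoline this
   function fills in:

	ldq $1,24($27)		# cxtofs: load static chain
	ldq $27,16($27)		# fnofs: load target address
	jmp $31,($27),0		# jmpofs: 14-bit hint, patching disabled
	nop
	.quad <fnaddr>, <cxt>

   The imb above flushes the instruction cache over the freshly
   written words.  */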
5557 /* Determine where to put an argument to a function.
5558 Value is zero to push the argument on the stack,
5559 or a hard register in which to store the argument.
5561 MODE is the argument's machine mode.
5562 TYPE is the data type of the argument (as a tree).
5563 This is null for libcalls where that information may not be available.
5565 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5566 the preceding args and about the function being called.
5567 NAMED is nonzero if this argument is a named parameter
5568 (otherwise it is an extra parameter matching an ellipsis).
5570 On Alpha the first 6 words of args are normally in registers
5571 and the rest are pushed. */
5574 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5575 int named ATTRIBUTE_UNUSED)
5580 /* Don't get confused and pass small structures in FP registers. */
5581 if (type && AGGREGATE_TYPE_P (type))
5585 #ifdef ENABLE_CHECKING
5586 /* With alpha_split_complex_arg, we shouldn't see any raw complex arguments.  */
5588 gcc_assert (!COMPLEX_MODE_P (mode));
5591 /* Set up defaults for FP operands passed in FP registers, and
5592 integral operands passed in integer registers. */
5593 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5599 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5600 the three platforms, so we can't avoid conditional compilation. */
5601 #if TARGET_ABI_OPEN_VMS
5603 if (mode == VOIDmode)
5604 return alpha_arg_info_reg_val (cum);
5606 num_args = cum.num_args;
5608 || targetm.calls.must_pass_in_stack (mode, type))
5611 #elif TARGET_ABI_UNICOSMK
5615 /* If this is the last argument, generate the call info word (CIW). */
5616 /* ??? We don't include the caller's line number in the CIW because
5617 I don't know how to determine it if debug info is turned off. */
5618 if (mode == VOIDmode)
5627 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5628 if (cum.reg_args_type[i])
5629 lo |= (1 << (7 - i));
5631 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5634 lo |= cum.num_reg_words;
5636 #if HOST_BITS_PER_WIDE_INT == 32
5637 hi = (cum.num_args << 20) | cum.num_arg_words;
5639 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5640 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5643 ciw = immed_double_const (lo, hi, DImode);
5645 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5646 UNSPEC_UMK_LOAD_CIW);
5649 size = ALPHA_ARG_SIZE (mode, type, named);
5650 num_args = cum.num_reg_words;
5652 || cum.num_reg_words + size > 6
5653 || targetm.calls.must_pass_in_stack (mode, type))
5655 else if (type && TYPE_MODE (type) == BLKmode)
5659 reg1 = gen_rtx_REG (DImode, num_args + 16);
5660 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5662 /* The argument fits in two registers. Note that we still need to
5663 reserve a register for empty structures. */
5667 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5670 reg2 = gen_rtx_REG (DImode, num_args + 17);
5671 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5672 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5676 #elif TARGET_ABI_OSF
5682 /* VOID is passed as a special flag for "last argument". */
5683 if (type == void_type_node)
5685 else if (targetm.calls.must_pass_in_stack (mode, type))
5689 #error Unhandled ABI
5692 return gen_rtx_REG (mode, num_args + basereg);
5696 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5697 enum machine_mode mode ATTRIBUTE_UNUSED,
5698 tree type ATTRIBUTE_UNUSED,
5699 bool named ATTRIBUTE_UNUSED)
5703 #if TARGET_ABI_OPEN_VMS
5704 if (cum->num_args < 6
5705 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5706 words = 6 - cum->num_args;
5707 #elif TARGET_ABI_UNICOSMK
5708 /* Never any split arguments. */
5709 #elif TARGET_ABI_OSF
5710 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
words = 6 - *cum;
5713 #error Unhandled ABI
5716 return words * UNITS_PER_WORD;
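/* Illustrative: on OSF with five argument words already assigned, a
   16-byte argument straddles the register/stack boundary, so this
   returns (6 - 5) * UNITS_PER_WORD = 8 -- one word passed in a
   register, the remainder on the stack.  */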
5720 /* Return true if TYPE must be returned in memory, instead of in registers. */
5723 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
5725 enum machine_mode mode = VOIDmode;
5730 mode = TYPE_MODE (type);
5732 /* All aggregates are returned in memory. */
5733 if (AGGREGATE_TYPE_P (type))
5737 size = GET_MODE_SIZE (mode);
5738 switch (GET_MODE_CLASS (mode))
5740 case MODE_VECTOR_FLOAT:
5741 /* Pass all float vectors in memory, like an aggregate. */
5744 case MODE_COMPLEX_FLOAT:
5745 /* We judge complex floats on the size of their element,
5746 not the size of the whole type. */
5747 size = GET_MODE_UNIT_SIZE (mode);
5752 case MODE_COMPLEX_INT:
5753 case MODE_VECTOR_INT:
5757 /* ??? We get called on all sorts of random stuff from
5758 aggregate_value_p. We must return something, but it's not
5759 clear what's safe to return.  Pretend it's a struct I know nothing about.  */
5764 /* Otherwise types must fit in one register. */
5765 return size > UNITS_PER_WORD;
5768 /* Return true if TYPE should be passed by invisible reference. */
5771 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5772 enum machine_mode mode,
5773 tree type ATTRIBUTE_UNUSED,
5774 bool named ATTRIBUTE_UNUSED)
5776 return mode == TFmode || mode == TCmode;
5779 /* Define how to find the value returned by a function. VALTYPE is the
5780 data type of the value (as a tree). If the precise function being
5781 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5782 MODE is set instead of VALTYPE for libcalls.
5784 On Alpha the value is found in $0 for integer functions and
5785 $f0 for floating-point functions. */
5788 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5789 enum machine_mode mode)
5791 unsigned int regnum, dummy;
5792 enum mode_class class;
5794 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5797 mode = TYPE_MODE (valtype);
5799 class = GET_MODE_CLASS (mode);
5803 PROMOTE_MODE (mode, dummy, valtype);
5806 case MODE_COMPLEX_INT:
5807 case MODE_VECTOR_INT:
5815 case MODE_COMPLEX_FLOAT:
5817 enum machine_mode cmode = GET_MODE_INNER (mode);
5819 return gen_rtx_PARALLEL
5822 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5824 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5825 GEN_INT (GET_MODE_SIZE (cmode)))));
5832 return gen_rtx_REG (mode, regnum);
5835 /* TCmode complex values are passed by invisible reference. We
5836 should not split these values. */
5839 alpha_split_complex_arg (tree type)
5841 return TYPE_MODE (type) != TCmode;
5845 alpha_build_builtin_va_list (void)
5847 tree base, ofs, space, record, type_decl;
5849 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5850 return ptr_type_node;
5852 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5853 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5854 TREE_CHAIN (record) = type_decl;
5855 TYPE_NAME (record) = type_decl;
5857 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5859 /* Dummy field to prevent alignment warnings. */
5860 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5861 DECL_FIELD_CONTEXT (space) = record;
5862 DECL_ARTIFICIAL (space) = 1;
5863 DECL_IGNORED_P (space) = 1;
5865 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
integer_type_node);
5867 DECL_FIELD_CONTEXT (ofs) = record;
5868 TREE_CHAIN (ofs) = space;
5870 base = build_decl (FIELD_DECL, get_identifier ("__base"),
ptr_type_node);
5872 DECL_FIELD_CONTEXT (base) = record;
5873 TREE_CHAIN (base) = ofs;
5875 TYPE_FIELDS (record) = base;
5876 layout_type (record);
5878 va_list_gpr_counter_field = ofs;
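/* The record built above corresponds roughly to this C declaration
   (illustrative rendering only; the real type is constructed as trees,
   and the pad field is unnamed):

	struct __va_list_tag {
	  void *__base;		-- start of the register save area
	  int __offset;		-- argument bytes consumed so far
	  int pad;		-- the dummy alignment field above
	};  */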
5883 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5884 and constant additions. */
5887 va_list_skip_additions (tree lhs)
5891 if (TREE_CODE (lhs) != SSA_NAME)
5896 stmt = SSA_NAME_DEF_STMT (lhs);
5898 if (TREE_CODE (stmt) == PHI_NODE)
5901 if (TREE_CODE (stmt) != MODIFY_EXPR
5902 || TREE_OPERAND (stmt, 0) != lhs)
5905 rhs = TREE_OPERAND (stmt, 1);
5906 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5907 rhs = TREE_OPERAND (rhs, 0);
5909 if ((TREE_CODE (rhs) != NOP_EXPR
5910 && TREE_CODE (rhs) != CONVERT_EXPR
5911 && (TREE_CODE (rhs) != PLUS_EXPR
5912 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5913 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5914 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5917 lhs = TREE_OPERAND (rhs, 0);
5921 /* Check if LHS = RHS statement is
5922 LHS = *(ap.__base + ap.__offset + cst)
or
LHS = *(ap.__base
5925 + ((ap.__offset + cst <= 47)
5926 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5927 If the former, indicate that GPR registers are needed,
5928 if the latter, indicate that FPR registers are needed.
5929 On alpha, cfun->va_list_gpr_size is used as size of the needed
5930 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if
5931 GPR registers are needed and bit 1 set if FPR registers are needed.
5932 Return true if va_list references should not be scanned for the current statement.  */
5936 alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
5938 tree base, offset, arg1, arg2;
5941 if (TREE_CODE (rhs) != INDIRECT_REF
5942 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5945 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5946 if (lhs == NULL_TREE
5947 || TREE_CODE (lhs) != PLUS_EXPR)
5950 base = TREE_OPERAND (lhs, 0);
5951 if (TREE_CODE (base) == SSA_NAME)
5952 base = va_list_skip_additions (base);
5954 if (TREE_CODE (base) != COMPONENT_REF
5955 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5957 base = TREE_OPERAND (lhs, 0);
5958 if (TREE_CODE (base) == SSA_NAME)
5959 base = va_list_skip_additions (base);
5961 if (TREE_CODE (base) != COMPONENT_REF
5962 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5968 base = get_base_address (base);
5969 if (TREE_CODE (base) != VAR_DECL
5970 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5973 offset = TREE_OPERAND (lhs, offset_arg);
5974 if (TREE_CODE (offset) == SSA_NAME)
5975 offset = va_list_skip_additions (offset);
5977 if (TREE_CODE (offset) == PHI_NODE)
5981 if (PHI_NUM_ARGS (offset) != 2)
5984 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
5985 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
5986 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5992 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5995 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
5998 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
5999 if (TREE_CODE (arg2) == MINUS_EXPR)
6001 if (sub < -48 || sub > -32)
6004 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
6008 if (TREE_CODE (arg1) == SSA_NAME)
6009 arg1 = va_list_skip_additions (arg1);
6011 if (TREE_CODE (arg1) != COMPONENT_REF
6012 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6013 || get_base_address (arg1) != base)
6016 /* Need floating point regs. */
6017 cfun->va_list_fpr_size |= 2;
6019 else if (TREE_CODE (offset) != COMPONENT_REF
6020 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6021 || get_base_address (offset) != base)
6024 /* Need general regs. */
6025 cfun->va_list_fpr_size |= 1;
6029 si->va_list_escapes = true;
6034 /* Perform any actions needed for a function that is receiving a
6035 variable number of arguments. */
6038 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6039 tree type, int *pretend_size, int no_rtl)
6041 CUMULATIVE_ARGS cum = *pcum;
6043 /* Skip the current argument. */
6044 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
6046 #if TARGET_ABI_UNICOSMK
6047 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6048 arguments on the stack. Unfortunately, it doesn't always store the first
6049 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6050 with stdargs as we always have at least one named argument there. */
6051 if (cum.num_reg_words < 6)
6055 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6056 emit_insn (gen_arg_home_umk ());
6060 #elif TARGET_ABI_OPEN_VMS
6061 /* For VMS, we allocate space for all 6 arg registers plus a count.
6063 However, if NO registers need to be saved, don't allocate any space.
6064 This is not only because we won't need the space, but because AP
6065 includes the current_pretend_args_size and we don't want to mess up
6066 any ap-relative addresses already made. */
6067 if (cum.num_args < 6)
6071 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6072 emit_insn (gen_arg_home ());
6074 *pretend_size = 7 * UNITS_PER_WORD;
6077 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6078 only push those that are remaining. However, if NO registers need to
6079 be saved, don't allocate any space. This is not only because we won't
6080 need the space, but because AP includes the current_pretend_args_size
6081 and we don't want to mess up any ap-relative addresses already made.
6083 If we are not to use the floating-point registers, save the integer
6084 registers where we would put the floating-point registers. This is
6085 not the most efficient way to implement varargs with just one register
6086 class, but it isn't worth doing anything more efficient in this rare case.  */
6093 int count, set = get_varargs_alias_set ();
6096 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6097 if (count > 6 - cum)
count = 6 - cum;
6100 /* Detect whether integer registers or floating-point registers
6101 are needed by the detected va_arg statements. See above for
6102 how these values are computed. Note that the "escape" value
6103 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of these bits set.  */
6105 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6107 if (cfun->va_list_fpr_size & 1)
6109 tmp = gen_rtx_MEM (BLKmode,
6110 plus_constant (virtual_incoming_args_rtx,
6111 (cum + 6) * UNITS_PER_WORD));
6112 MEM_NOTRAP_P (tmp) = 1;
6113 set_mem_alias_set (tmp, set);
6114 move_block_from_reg (16 + cum, tmp, count);
6117 if (cfun->va_list_fpr_size & 2)
6119 tmp = gen_rtx_MEM (BLKmode,
6120 plus_constant (virtual_incoming_args_rtx,
6121 cum * UNITS_PER_WORD));
6122 MEM_NOTRAP_P (tmp) = 1;
6123 set_mem_alias_set (tmp, set);
6124 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6127 *pretend_size = 12 * UNITS_PER_WORD;
6132 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6134 HOST_WIDE_INT offset;
6135 tree t, offset_field, base_field;
6137 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6140 if (TARGET_ABI_UNICOSMK)
6141 std_expand_builtin_va_start (valist, nextarg);
6143 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6144 up by 48, storing fp arg registers in the first 48 bytes, and the
6145 integer arg registers in the next 48 bytes. This is only done,
6146 however, if any integer registers need to be stored.
6148 If no integer registers need be stored, then we must subtract 48
6149 in order to account for the integer arg registers which are counted
6150 in argsize above, but which are not actually stored on the stack.
6151 Must further be careful here about structures straddling the last
6152 integer argument register; that futzes with pretend_args_size,
6153 which changes the meaning of AP. */
6156 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6158 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6160 if (TARGET_ABI_OPEN_VMS)
6162 nextarg = plus_constant (nextarg, offset);
6163 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6164 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
6165 make_tree (ptr_type_node, nextarg));
6166 TREE_SIDE_EFFECTS (t) = 1;
6168 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6172 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6173 offset_field = TREE_CHAIN (base_field);
6175 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6176 valist, base_field, NULL_TREE);
6177 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6178 valist, offset_field, NULL_TREE);
6180 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6181 t = build2 (PLUS_EXPR, ptr_type_node, t,
6182 build_int_cst (NULL_TREE, offset));
6183 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6184 TREE_SIDE_EFFECTS (t) = 1;
6185 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6187 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6188 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6189 TREE_SIDE_EFFECTS (t) = 1;
6190 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
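/* Net effect on OSF (illustrative):

	ap.__base   = virtual_incoming_args + offset
	ap.__offset = NUM_ARGS * UNITS_PER_WORD

   where offset is +48 when the integer arg registers were stored, and
   -48 (plus pretend_args_size) when they were not, as explained in the
   comment above.  */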
6195 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6197 tree type_size, ptr_type, addend, t, addr, internal_post;
6199 /* If the type could not be passed in registers, skip the block
6200 reserved for the registers. */
6201 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6203 t = build_int_cst (TREE_TYPE (offset), 6*8);
6204 t = build2 (MODIFY_EXPR, TREE_TYPE (offset), offset,
6205 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
6206 gimplify_and_add (t, pre_p);
6210 ptr_type = build_pointer_type (type);
6212 if (TREE_CODE (type) == COMPLEX_TYPE)
6214 tree real_part, imag_part, real_temp;
6216 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6219 /* Copy the value into a new temporary, lest the formal temporary
6220 be reused out from under us. */
6221 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6223 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6226 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6228 else if (TREE_CODE (type) == REAL_TYPE)
6230 tree fpaddend, cond, fourtyeight;
6232 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6233 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6234 addend, fourtyeight);
6235 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6236 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6240 /* Build the final address and force that value into a temporary. */
6241 addr = build2 (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6242 fold_convert (ptr_type, addend));
6243 internal_post = NULL;
6244 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6245 append_to_statement_list (internal_post, pre_p);
6247 /* Update the offset field. */
6248 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6249 if (type_size == NULL || TREE_OVERFLOW (type_size))
6253 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6254 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6255 t = size_binop (MULT_EXPR, t, size_int (8));
6257 t = fold_convert (TREE_TYPE (offset), t);
6258 t = build2 (MODIFY_EXPR, void_type_node, offset,
6259 build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6260 gimplify_and_add (t, pre_p);
6262 return build_va_arg_indirect_ref (addr);
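/* Illustrative: a 12-byte argument advances __offset by
   (12 + 7) / 8 * 8 = 16 bytes, keeping the cursor quadword
   aligned.  */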
6266 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6268 tree offset_field, base_field, offset, base, t, r;
6271 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6272 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6274 base_field = TYPE_FIELDS (va_list_type_node);
6275 offset_field = TREE_CHAIN (base_field);
6276 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6277 valist, base_field, NULL_TREE);
6278 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6279 valist, offset_field, NULL_TREE);
6281 /* Pull the fields of the structure out into temporaries. Since we never
6282 modify the base field, we can use a formal temporary. Sign-extend the
6283 offset field so that it's the proper width for pointer arithmetic. */
6284 base = get_formal_tmp_var (base_field, pre_p);
6286 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6287 offset = get_initialized_tmp_var (t, pre_p, NULL);
6289 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6291 type = build_pointer_type (type);
6293 /* Find the value. Note that this will be a stable indirection, or
6294 a composite of stable indirections in the case of complex. */
6295 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6297 /* Stuff the offset temporary back into its field. */
6298 t = build2 (MODIFY_EXPR, void_type_node, offset_field,
6299 fold_convert (TREE_TYPE (offset_field), offset));
6300 gimplify_and_add (t, pre_p);
6303 r = build_va_arg_indirect_ref (r);
6312 ALPHA_BUILTIN_CMPBGE,
6313 ALPHA_BUILTIN_EXTBL,
6314 ALPHA_BUILTIN_EXTWL,
6315 ALPHA_BUILTIN_EXTLL,
6316 ALPHA_BUILTIN_EXTQL,
6317 ALPHA_BUILTIN_EXTWH,
6318 ALPHA_BUILTIN_EXTLH,
6319 ALPHA_BUILTIN_EXTQH,
6320 ALPHA_BUILTIN_INSBL,
6321 ALPHA_BUILTIN_INSWL,
6322 ALPHA_BUILTIN_INSLL,
6323 ALPHA_BUILTIN_INSQL,
6324 ALPHA_BUILTIN_INSWH,
6325 ALPHA_BUILTIN_INSLH,
6326 ALPHA_BUILTIN_INSQH,
6327 ALPHA_BUILTIN_MSKBL,
6328 ALPHA_BUILTIN_MSKWL,
6329 ALPHA_BUILTIN_MSKLL,
6330 ALPHA_BUILTIN_MSKQL,
6331 ALPHA_BUILTIN_MSKWH,
6332 ALPHA_BUILTIN_MSKLH,
6333 ALPHA_BUILTIN_MSKQH,
6334 ALPHA_BUILTIN_UMULH,
6336 ALPHA_BUILTIN_ZAPNOT,
6337 ALPHA_BUILTIN_AMASK,
6338 ALPHA_BUILTIN_IMPLVER,
6340 ALPHA_BUILTIN_THREAD_POINTER,
6341 ALPHA_BUILTIN_SET_THREAD_POINTER,
6344 ALPHA_BUILTIN_MINUB8,
6345 ALPHA_BUILTIN_MINSB8,
6346 ALPHA_BUILTIN_MINUW4,
6347 ALPHA_BUILTIN_MINSW4,
6348 ALPHA_BUILTIN_MAXUB8,
6349 ALPHA_BUILTIN_MAXSB8,
6350 ALPHA_BUILTIN_MAXUW4,
6351 ALPHA_BUILTIN_MAXSW4,
6355 ALPHA_BUILTIN_UNPKBL,
6356 ALPHA_BUILTIN_UNPKBW,
6361 ALPHA_BUILTIN_CTPOP,
6366 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6367 CODE_FOR_builtin_cmpbge,
6368 CODE_FOR_builtin_extbl,
6369 CODE_FOR_builtin_extwl,
6370 CODE_FOR_builtin_extll,
6371 CODE_FOR_builtin_extql,
6372 CODE_FOR_builtin_extwh,
6373 CODE_FOR_builtin_extlh,
6374 CODE_FOR_builtin_extqh,
6375 CODE_FOR_builtin_insbl,
6376 CODE_FOR_builtin_inswl,
6377 CODE_FOR_builtin_insll,
6378 CODE_FOR_builtin_insql,
6379 CODE_FOR_builtin_inswh,
6380 CODE_FOR_builtin_inslh,
6381 CODE_FOR_builtin_insqh,
6382 CODE_FOR_builtin_mskbl,
6383 CODE_FOR_builtin_mskwl,
6384 CODE_FOR_builtin_mskll,
6385 CODE_FOR_builtin_mskql,
6386 CODE_FOR_builtin_mskwh,
6387 CODE_FOR_builtin_msklh,
6388 CODE_FOR_builtin_mskqh,
6389 CODE_FOR_umuldi3_highpart,
6390 CODE_FOR_builtin_zap,
6391 CODE_FOR_builtin_zapnot,
6392 CODE_FOR_builtin_amask,
6393 CODE_FOR_builtin_implver,
6394 CODE_FOR_builtin_rpcc,
6399 CODE_FOR_builtin_minub8,
6400 CODE_FOR_builtin_minsb8,
6401 CODE_FOR_builtin_minuw4,
6402 CODE_FOR_builtin_minsw4,
6403 CODE_FOR_builtin_maxub8,
6404 CODE_FOR_builtin_maxsb8,
6405 CODE_FOR_builtin_maxuw4,
6406 CODE_FOR_builtin_maxsw4,
6407 CODE_FOR_builtin_perr,
6408 CODE_FOR_builtin_pklb,
6409 CODE_FOR_builtin_pkwb,
6410 CODE_FOR_builtin_unpkbl,
6411 CODE_FOR_builtin_unpkbw,
6416 CODE_FOR_popcountdi2
6419 struct alpha_builtin_def
{
const char *name;
6422 enum alpha_builtin code;
6423 unsigned int target_mask;
bool is_const;
};
6427 static struct alpha_builtin_def const zero_arg_builtins[] = {
6428 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6429 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6432 static struct alpha_builtin_def const one_arg_builtins[] = {
6433 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6434 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6435 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6436 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6437 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6438 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6439 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6440 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6443 static struct alpha_builtin_def const two_arg_builtins[] = {
6444 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6445 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6446 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6447 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6448 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6449 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6450 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6451 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6452 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6453 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6454 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6455 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6456 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6457 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6458 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6459 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6460 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6461 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6462 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6463 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6464 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6465 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6466 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6467 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6468 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6469 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6470 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6471 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6472 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6473 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6474 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6475 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6476 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6477 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6480 static GTY(()) tree alpha_v8qi_u;
6481 static GTY(()) tree alpha_v8qi_s;
6482 static GTY(()) tree alpha_v4hi_u;
6483 static GTY(()) tree alpha_v4hi_s;
6486 alpha_init_builtins (void)
6488 const struct alpha_builtin_def *p;
6489 tree dimode_integer_type_node;
6490 tree ftype, attrs[2];
6493 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6495 attrs[0] = tree_cons (get_identifier ("nothrow"), NULL, NULL);
6496 attrs[1] = tree_cons (get_identifier ("const"), NULL, attrs[0]);
6498 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6500 p = zero_arg_builtins;
6501 for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
6502 if ((target_flags & p->target_mask) == p->target_mask)
6503 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6504 NULL, attrs[p->is_const]);
6506 ftype = build_function_type_list (dimode_integer_type_node,
6507 dimode_integer_type_node, NULL_TREE);
6509 p = one_arg_builtins;
6510 for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
6511 if ((target_flags & p->target_mask) == p->target_mask)
6512 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6513 NULL, attrs[p->is_const]);
6515 ftype = build_function_type_list (dimode_integer_type_node,
6516 dimode_integer_type_node,
6517 dimode_integer_type_node, NULL_TREE);
6519 p = two_arg_builtins;
6520 for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
6521 if ((target_flags & p->target_mask) == p->target_mask)
6522 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6523 NULL, attrs[p->is_const]);
6525 ftype = build_function_type (ptr_type_node, void_list_node);
6526 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
6527 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6530 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6531 lang_hooks.builtin_function ("__builtin_set_thread_pointer", ftype,
6532 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6535 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6536 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6537 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6538 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6541 /* Expand an expression EXP that calls a built-in function,
6542 with result going to TARGET if that's convenient
6543 (and in mode MODE if that's convenient).
6544 SUBTARGET may be used as the target for computing one of EXP's operands.
6545 IGNORE is nonzero if the value is to be ignored. */
6548 alpha_expand_builtin (tree exp, rtx target,
6549 rtx subtarget ATTRIBUTE_UNUSED,
6550 enum machine_mode mode ATTRIBUTE_UNUSED,
6551 int ignore ATTRIBUTE_UNUSED)
6555 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
6556 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6557 tree arglist = TREE_OPERAND (exp, 1);
6558 enum insn_code icode;
6559 rtx op[MAX_ARGS], pat;
6563 if (fcode >= ALPHA_BUILTIN_max)
6564 internal_error ("bad builtin fcode");
6565 icode = code_for_builtin[fcode];
if (icode == 0)
6567 internal_error ("bad builtin fcode");
6569 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6571 for (arglist = TREE_OPERAND (exp, 1), arity = 0;
6573 arglist = TREE_CHAIN (arglist), arity++)
6575 const struct insn_operand_data *insn_op;
6577 tree arg = TREE_VALUE (arglist);
6578 if (arg == error_mark_node)
return NULL_RTX;
6580 if (arity > MAX_ARGS)
return NULL_RTX;
6583 insn_op = &insn_data[icode].operand[arity + nonvoid];
6585 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6587 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6588 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6593 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6595 || GET_MODE (target) != tmode
6596 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6597 target = gen_reg_rtx (tmode);
6603 pat = GEN_FCN (icode) (target);
6607 pat = GEN_FCN (icode) (target, op[0]);
6609 pat = GEN_FCN (icode) (op[0]);
6612 pat = GEN_FCN (icode) (target, op[0], op[1]);
6628 /* Several bits below assume HWI >= 64 bits.  This is enforced by the #error check below.  */
6630 #if HOST_BITS_PER_WIDE_INT < 64
6631 # error "HOST_WIDE_INT too small"
6634 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6635 with an 8 bit output vector. OPINT contains the integer operands; bit N
6636 of OP_CONST is set if OPINT[N] is valid. */
6639 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6644 for (i = 0, val = 0; i < 8; ++i)
6646 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6647 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
if (c0 >= c1)
val |= 1 << i;
6651 return build_int_cst (long_integer_type_node, val);
6653 else if (op_const == 2 && opint[1] == 0)
6654 return build_int_cst (long_integer_type_node, 0xff);
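/* Illustrative: __builtin_alpha_cmpbge (x, 0) folds to 0xff even for
   non-constant X, since every unsigned byte compares >= 0; that is the
   op_const == 2 special case above.  */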
6658 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6659 specialized form of an AND operation. Other byte manipulation instructions
6660 are defined in terms of this instruction, so this is also used as a
6661 subroutine for other builtins.
6663 OP contains the tree operands; OPINT contains the extracted integer values.
6664 Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
6665 OPINT may be considered. */
6668 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6673 unsigned HOST_WIDE_INT mask = 0;
6676 for (i = 0; i < 8; ++i)
6677 if ((opint[1] >> i) & 1)
6678 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6681 return build_int_cst (long_integer_type_node, opint[0] & mask);
6684 return fold (build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6685 build_int_cst (long_integer_type_node, mask)));
6687 else if ((op_const & 1) && opint[0] == 0)
6688 return build_int_cst (long_integer_type_node, 0);
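/* Illustrative: __builtin_alpha_zapnot (x, 0x0f) folds to
   x & 0xffffffff, mask bits 0-3 selecting the four low bytes.  */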
6692 /* Fold the builtins for the EXT family of instructions. */
6695 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6696 long op_const, unsigned HOST_WIDE_INT bytemask,
6700 tree *zap_op = NULL;
6704 unsigned HOST_WIDE_INT loc;
6707 if (BYTES_BIG_ENDIAN)
6715 unsigned HOST_WIDE_INT temp = opint[0];
6728 opint[1] = bytemask;
6729 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6732 /* Fold the builtins for the INS family of instructions. */
6735 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6736 long op_const, unsigned HOST_WIDE_INT bytemask,
6739 if ((op_const & 1) && opint[0] == 0)
6740 return build_int_cst (long_integer_type_node, 0);
6744 unsigned HOST_WIDE_INT temp, loc, byteloc;
6745 tree *zap_op = NULL;
6748 if (BYTES_BIG_ENDIAN)
6755 byteloc = (64 - (loc * 8)) & 0x3f;
6772 opint[1] = bytemask;
6773 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6780 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6781 long op_const, unsigned HOST_WIDE_INT bytemask,
6786 unsigned HOST_WIDE_INT loc;
6789 if (BYTES_BIG_ENDIAN)
6796 opint[1] = bytemask ^ 0xff;
6799 return alpha_fold_builtin_zapnot (op, opint, op_const);
6803 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6809 unsigned HOST_WIDE_INT l;
HOST_WIDE_INT h;
6812 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6814 #if HOST_BITS_PER_WIDE_INT > 64
6818 return build_int_cst (long_integer_type_node, h);
6822 opint[1] = opint[0];
6825 /* Note that (X*1) >> 64 == 0. */
6826 if (opint[1] == 0 || opint[1] == 1)
6827 return build_int_cst (long_integer_type_node, 0);
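/* Illustrative: __builtin_alpha_umulh (1UL << 32, 1UL << 32) folds to
   1, the high quadword of the 128-bit product 2^64.  */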
6834 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6836 tree op0 = fold_convert (vtype, op[0]);
6837 tree op1 = fold_convert (vtype, op[1]);
6838 tree val = fold (build2 (code, vtype, op0, op1));
6839 return fold_convert (long_integer_type_node, val);
6843 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6845 unsigned HOST_WIDE_INT temp = 0;
6851 for (i = 0; i < 8; ++i)
6853 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6854 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6861 return build_int_cst (long_integer_type_node, temp);
6865 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6867 unsigned HOST_WIDE_INT temp;
6872 temp = opint[0] & 0xff;
6873 temp |= (opint[0] >> 24) & 0xff00;
6875 return build_int_cst (long_integer_type_node, temp);
6879 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6881 unsigned HOST_WIDE_INT temp;
6886 temp = opint[0] & 0xff;
6887 temp |= (opint[0] >> 8) & 0xff00;
6888 temp |= (opint[0] >> 16) & 0xff0000;
6889 temp |= (opint[0] >> 24) & 0xff000000;
6891 return build_int_cst (long_integer_type_node, temp);
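/* Illustrative: pkwb of 0x0044003300220011 yields 0x44332211 -- the
   low byte of each 16-bit lane packed into the low longword.  */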
6895 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6897 unsigned HOST_WIDE_INT temp;
6902 temp = opint[0] & 0xff;
6903 temp |= (opint[0] & 0xff00) << 24;
6905 return build_int_cst (long_integer_type_node, temp);
6909 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6911 unsigned HOST_WIDE_INT temp;
6916 temp = opint[0] & 0xff;
6917 temp |= (opint[0] & 0x0000ff00) << 8;
6918 temp |= (opint[0] & 0x00ff0000) << 16;
6919 temp |= (opint[0] & 0xff000000) << 24;
6921 return build_int_cst (long_integer_type_node, temp);
6925 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6927 unsigned HOST_WIDE_INT temp;
6935 temp = exact_log2 (opint[0] & -opint[0]);
6937 return build_int_cst (long_integer_type_node, temp);
6941 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6943 unsigned HOST_WIDE_INT temp;
6951 temp = 64 - floor_log2 (opint[0]) - 1;
6953 return build_int_cst (long_integer_type_node, temp);
6957 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6959 unsigned HOST_WIDE_INT temp, op;
op = opint[0], temp = 0;
while (op)
6967 temp++, op &= op - 1;
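/* The loop above uses Kernighan's trick: each OP &= OP - 1 clears the
   lowest set bit, so it iterates once per set bit.  */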
6969 return build_int_cst (long_integer_type_node, temp);
6972 /* Fold one of our builtin functions. */
6975 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6977 tree op[MAX_ARGS], t;
6978 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6979 long op_const = 0, arity = 0;
6981 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6983 tree arg = TREE_VALUE (t);
6984 if (arg == error_mark_node)
return NULL;
6986 if (arity >= MAX_ARGS)
return NULL;
6991 if (TREE_CODE (arg) == INTEGER_CST)
6993 op_const |= 1L << arity;
6994 opint[arity] = int_cst_value (arg);
6998 switch (DECL_FUNCTION_CODE (fndecl))
7000 case ALPHA_BUILTIN_CMPBGE:
7001 return alpha_fold_builtin_cmpbge (opint, op_const);
7003 case ALPHA_BUILTIN_EXTBL:
7004 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7005 case ALPHA_BUILTIN_EXTWL:
7006 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7007 case ALPHA_BUILTIN_EXTLL:
7008 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7009 case ALPHA_BUILTIN_EXTQL:
7010 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7011 case ALPHA_BUILTIN_EXTWH:
7012 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7013 case ALPHA_BUILTIN_EXTLH:
7014 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7015 case ALPHA_BUILTIN_EXTQH:
7016 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7018 case ALPHA_BUILTIN_INSBL:
7019 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7020 case ALPHA_BUILTIN_INSWL:
7021 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7022 case ALPHA_BUILTIN_INSLL:
7023 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7024 case ALPHA_BUILTIN_INSQL:
7025 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7026 case ALPHA_BUILTIN_INSWH:
7027 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7028 case ALPHA_BUILTIN_INSLH:
7029 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7030 case ALPHA_BUILTIN_INSQH:
7031 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7033 case ALPHA_BUILTIN_MSKBL:
7034 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7035 case ALPHA_BUILTIN_MSKWL:
7036 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7037 case ALPHA_BUILTIN_MSKLL:
7038 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7039 case ALPHA_BUILTIN_MSKQL:
7040 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7041 case ALPHA_BUILTIN_MSKWH:
7042 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7043 case ALPHA_BUILTIN_MSKLH:
7044 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7045 case ALPHA_BUILTIN_MSKQH:
7046 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7048 case ALPHA_BUILTIN_UMULH:
7049 return alpha_fold_builtin_umulh (opint, op_const);
7051 case ALPHA_BUILTIN_ZAP:
7054 case ALPHA_BUILTIN_ZAPNOT:
7055 return alpha_fold_builtin_zapnot (op, opint, op_const);
7057 case ALPHA_BUILTIN_MINUB8:
7058 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7059 case ALPHA_BUILTIN_MINSB8:
7060 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7061 case ALPHA_BUILTIN_MINUW4:
7062 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7063 case ALPHA_BUILTIN_MINSW4:
7064 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7065 case ALPHA_BUILTIN_MAXUB8:
7066 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7067 case ALPHA_BUILTIN_MAXSB8:
7068 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7069 case ALPHA_BUILTIN_MAXUW4:
7070 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7071 case ALPHA_BUILTIN_MAXSW4:
7072 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7074 case ALPHA_BUILTIN_PERR:
7075 return alpha_fold_builtin_perr (opint, op_const);
7076 case ALPHA_BUILTIN_PKLB:
7077 return alpha_fold_builtin_pklb (opint, op_const);
7078 case ALPHA_BUILTIN_PKWB:
7079 return alpha_fold_builtin_pkwb (opint, op_const);
7080 case ALPHA_BUILTIN_UNPKBL:
7081 return alpha_fold_builtin_unpkbl (opint, op_const);
7082 case ALPHA_BUILTIN_UNPKBW:
7083 return alpha_fold_builtin_unpkbw (opint, op_const);
7085 case ALPHA_BUILTIN_CTTZ:
7086 return alpha_fold_builtin_cttz (opint, op_const);
7087 case ALPHA_BUILTIN_CTLZ:
7088 return alpha_fold_builtin_ctlz (opint, op_const);
7089 case ALPHA_BUILTIN_CTPOP:
7090 return alpha_fold_builtin_ctpop (opint, op_const);
7092 case ALPHA_BUILTIN_AMASK:
7093 case ALPHA_BUILTIN_IMPLVER:
7094 case ALPHA_BUILTIN_RPCC:
7095 case ALPHA_BUILTIN_THREAD_POINTER:
7096 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7097 /* None of these are foldable at compile-time. */
7103 /* This page contains routines that are used to determine what the function
7104 prologue and epilogue code will do and write them out. */
7106 /* Compute the size of the save area in the stack. */
7108 /* These variables are used for communication between the following functions.
7109 They indicate various things about the current function being compiled
7110 that are used to tell what kind of prologue, epilogue and procedure
7111 descriptor to generate. */
7113 /* The kind of procedure (null, register, or stack frame) we need.  */
7114 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7115 static enum alpha_procedure_types alpha_procedure_type;
7117 /* Register number (either FP or SP) that is used to unwind the frame. */
7118 static int vms_unwind_regno;
7120 /* Register number used to save FP. We need not have one for RA since
7121 we don't modify it for register procedures. This is only defined
7122 for register frame procedures. */
7123 static int vms_save_fp_regno;
7125 /* Register number used to reference objects off our PV. */
7126 static int vms_base_regno;
7128 /* Compute register masks for saved registers. */
7131 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7133 unsigned long imask = 0;
7134 unsigned long fmask = 0;
7137 /* When outputting a thunk, we don't have valid register life info,
7138 but assemble_start_function wants to output .frame and .mask directives.  */
7140 if (current_function_is_thunk)
7147 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7148 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7150 /* One for every register we have to save. */
7151 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7152 if (! fixed_regs[i] && ! call_used_regs[i]
7153 && regs_ever_live[i] && i != REG_RA
7154 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
if (i < 32)
7157 imask |= (1UL << i);
else
7159 fmask |= (1UL << (i - 32));
7162 /* We need to restore these for the handler. */
7163 if (current_function_calls_eh_return)
7167 unsigned regno = EH_RETURN_DATA_REGNO (i);
7168 if (regno == INVALID_REGNUM)
7170 imask |= 1UL << regno;
7174 /* If any register spilled, then spill the return address also. */
7175 /* ??? This is required by the Digital stack unwind specification
7176 and isn't needed if we're doing Dwarf2 unwinding. */
7177 if (imask || fmask || alpha_ra_ever_killed ())
7178 imask |= (1UL << REG_RA);
7185 alpha_sa_size (void)
7187 unsigned long mask[2];
7191 alpha_sa_mask (&mask[0], &mask[1]);
7193 if (TARGET_ABI_UNICOSMK)
7195 if (mask[0] || mask[1])
7200 for (j = 0; j < 2; ++j)
7201 for (i = 0; i < 32; ++i)
7202 if ((mask[j] >> i) & 1)
7206 if (TARGET_ABI_UNICOSMK)
7208 /* We might not need to generate a frame if we don't make any calls
7209 (including calls to __T3E_MISMATCH if this is a vararg function),
7210 don't have any local variables which require stack slots, don't
7211 use alloca and have not determined that we need a frame for other reasons.  */
7214 alpha_procedure_type
7215 = (sa_size || get_frame_size() != 0
7216 || current_function_outgoing_args_size
7217 || current_function_stdarg || current_function_calls_alloca
7218 || frame_pointer_needed)
7219 ? PT_STACK : PT_REGISTER;
7221 /* Always reserve space for saving callee-saved registers if we
7222 need a frame as required by the calling convention. */
7223 if (alpha_procedure_type == PT_STACK)
7226 else if (TARGET_ABI_OPEN_VMS)
7228 /* Start by assuming we can use a register procedure if we don't
7229 make any calls (REG_RA not used) or need to save any
7230 registers and a stack procedure if we do. */
7231 if ((mask[0] >> REG_RA) & 1)
7232 alpha_procedure_type = PT_STACK;
7233 else if (get_frame_size() != 0)
7234 alpha_procedure_type = PT_REGISTER;
7236 alpha_procedure_type = PT_NULL;
7238 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7239 made the final decision on stack procedure vs register procedure. */
7240 if (alpha_procedure_type == PT_STACK)
7243 /* Decide whether to refer to objects off our PV via FP or PV.
7244 If we need FP for something else or if we receive a nonlocal
7245 goto (which expects PV to contain the value), we must use PV.
7246 Otherwise, start by assuming we can use FP. */
7249 = (frame_pointer_needed
7250 || current_function_has_nonlocal_label
7251 || alpha_procedure_type == PT_STACK
7252 || current_function_outgoing_args_size)
7253 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7255 /* If we want to copy PV into FP, we need to find some register
7256 in which to save FP. */
7258 vms_save_fp_regno = -1;
7259 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7260 for (i = 0; i < 32; i++)
7261 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
7262 vms_save_fp_regno = i;
7264 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7265 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7266 else if (alpha_procedure_type == PT_NULL)
7267 vms_base_regno = REG_PV;
7269 /* Stack unwinding should be done via FP unless we use it for PV. */
7270 vms_unwind_regno = (vms_base_regno == REG_PV
7271 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7273 /* If this is a stack procedure, allow space for saving FP and RA. */
7274 if (alpha_procedure_type == PT_STACK)
7279 /* Our size must be even (multiple of 16 bytes). */
/* Define the offset between two registers, one to be eliminated,
   and the other its replacement, at the start of a routine.  */

HOST_WIDE_INT
alpha_initial_elimination_offset (unsigned int from,
				  unsigned int to ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT ret;

  ret = alpha_sa_size ();
  ret += ALPHA_ROUND (current_function_outgoing_args_size);

  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      break;

    case ARG_POINTER_REGNUM:
      ret += (ALPHA_ROUND (get_frame_size ()
			   + current_function_pretend_args_size)
	      - current_function_pretend_args_size);
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
int
alpha_pv_save_size (void)
{
  alpha_sa_size ();
  return alpha_procedure_type == PT_STACK ? 8 : 0;
}

int
alpha_using_fp (void)
{
  alpha_sa_size ();
  return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
}
#if TARGET_ABI_OPEN_VMS

const struct attribute_spec vms_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "overlaid",   0, 0, true,  false, false, NULL },
  { "global",     0, 0, true,  false, false, NULL },
  { "initialize", 0, 0, true,  false, false, NULL },
  { NULL,         0, 0, false, false, false, NULL }
};

#endif
static int
find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
}

int
alpha_find_lo_sum_using_gp (rtx insn)
{
  return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
}

static int
alpha_does_function_need_gp (void)
{
  rtx insn;

  /* The GP being variable is an OSF abi thing.  */
  if (! TARGET_ABI_OSF)
    return 0;

  /* We need the gp to load the address of __mcount.  */
  if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
    return 1;

  /* The code emitted by alpha_output_mi_thunk_osf uses the gp.  */
  if (current_function_is_thunk)
    return 1;

  /* The nonlocal receiver pattern assumes that the gp is valid for
     the nested function.  Reasonable because it's almost always set
     correctly already.  For the cases where that's wrong, make sure
     the nested function loads its gp on entry.  */
  if (current_function_has_nonlocal_goto)
    return 1;

  /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
     Even if we are a static function, we still need to do this in case
     our address is taken and passed to something like qsort.  */

  push_topmost_sequence ();
  insn = get_insns ();
  pop_topmost_sequence ();

  for (; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& GET_CODE (PATTERN (insn)) != USE
	&& GET_CODE (PATTERN (insn)) != CLOBBER
	&& get_attr_usegp (insn))
      return 1;

  return 0;
}
/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
   sequences.  */

static rtx
set_frame_related_p (void)
{
  rtx seq = get_insns ();
  rtx insn;

  end_sequence ();

  if (!seq)
    return NULL_RTX;

  if (INSN_P (seq))
    {
      insn = seq;
      while (insn != NULL_RTX)
	{
	  RTX_FRAME_RELATED_P (insn) = 1;
	  insn = NEXT_INSN (insn);
	}
      seq = emit_insn (seq);
    }
  else
    {
      seq = emit_insn (seq);
      RTX_FRAME_RELATED_P (seq) = 1;
    }
  return seq;
}

#define FRP(exp)  (start_sequence (), exp, set_frame_related_p ())
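/* A usage sketch (illustrative only): wrapping an emitter as in
   FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx))
   collects everything EXP emits into a sequence and marks each insn
   RTX_FRAME_RELATED_P, so the unwind info generators see the frame
   effect.  */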
/* Generates a store with the proper unwind info attached.  VALUE is
   stored at BASE_REG+BASE_OFS.  If FRAME_BIAS is nonzero, then BASE_REG
   contains SP+FRAME_BIAS, and that is the unwind info that should be
   generated.  If FRAME_REG != VALUE, then VALUE is being stored on
   behalf of FRAME_REG, and FRAME_REG should be present in the unwind.  */

static void
emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
		    HOST_WIDE_INT base_ofs, rtx frame_reg)
{
  rtx addr, mem, insn;

  addr = plus_constant (base_reg, base_ofs);
  mem = gen_rtx_MEM (DImode, addr);
  set_mem_alias_set (mem, alpha_sr_alias_set);

  insn = emit_move_insn (mem, value);
  RTX_FRAME_RELATED_P (insn) = 1;

  if (frame_bias || value != frame_reg)
    {
      if (frame_bias)
	{
	  addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
	  mem = gen_rtx_MEM (DImode, addr);
	}

      REG_NOTES (insn)
	= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			     gen_rtx_SET (VOIDmode, mem, frame_reg),
			     REG_NOTES (insn));
    }
}

static void
emit_frame_store (unsigned int regno, rtx base_reg,
		  HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
{
  rtx reg = gen_rtx_REG (DImode, regno);
  emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
}
/* Write function prologue.  */

/* On vms we have two kinds of functions:

   - stack frame (PROC_STACK)
	these are 'normal' functions with local vars and which are
	calling other functions
   - register frame (PROC_REGISTER)
	keeps all data in registers, needs no stack

   We must pass this to the assembler so it can generate the
   proper pdsc (procedure descriptor).
   This is done with the '.pdesc' command.

   On non-VMS targets, we don't really differentiate between the two,
   as we can simply allocate stack without saving registers.  */
void
alpha_expand_prologue (void)
{
  /* Registers to save.  */
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  rtx sa_reg;
  int i;

  sa_size = alpha_sa_size ();

  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 8 : 0)
			      + frame_size
			      + current_function_pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    /* We have to allocate space for the DSIB if we generate a frame.  */
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 48 : 0))
		 + ALPHA_ROUND (frame_size
				+ current_function_outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
		  + sa_size
		  + ALPHA_ROUND (frame_size
				 + current_function_pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8;
  else
    reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);
  /* Emit an insn to reload GP, if needed.  */
  if (TARGET_ABI_OSF)
    {
      alpha_function_needs_gp = alpha_does_function_need_gp ();
      if (alpha_function_needs_gp)
	emit_insn (gen_prologue_ldgp ());
    }

  /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
     the call to mcount ourselves, rather than having the linker do it
     magically in response to -pg.  Since _mcount has special linkage,
     don't represent the call as a call.  */
  if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
    emit_insn (gen_prologue_mcount ());

  if (TARGET_ABI_UNICOSMK)
    unicosmk_gen_dsib (&imask);
  /* Adjust the stack by the frame size.  If the frame size is > 4096
     bytes, we need to be sure we probe somewhere in the first and last
     4096 bytes (we can probably get away without the latter test) and
     every 8192 bytes in between.  If the frame size is > 32768, we
     do this in a loop.  Otherwise, we generate the explicit probe
     instructions.

     Note that we are only allowed to adjust sp once in the prologue.  */
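  /* Worked example (assuming the plain, non-UNICOSMK offsets): for
     frame_size == 20000 the loop below probes at sp-4096 and sp-12288;
     after the loop probed == 20480 and 20000 > probed - 4096 == 16384,
     so a function that saves no registers gets one last probe at
     sp-20000 before the single sp adjustment.  */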
  if (frame_size <= 32768)
    {
      if (frame_size > 4096)
	{
	  int probed;

	  for (probed = 4096; probed < frame_size; probed += 8192)
	    emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
						 ? -probed + 64
						 : -probed)));

	  /* We only have to do this probe if we aren't saving registers.  */
	  if (sa_size == 0 && frame_size > probed - 4096)
	    emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
	}
      if (frame_size != 0)
	FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
				    GEN_INT (TARGET_ABI_UNICOSMK
					     ? -frame_size + 64
					     : -frame_size))));
    }
  else
    {
      /* Here we generate code to set R22 to SP + 4096 and set R23 to the
	 number of 8192 byte blocks to probe.  We then probe each block
	 in the loop and then set SP to the proper location.  If the
	 amount remaining is > 4096, we have to do one more probe if we
	 are not saving any registers.  */

      HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
      HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
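      /* For instance, frame_size == 100000 gives
	 blocks == (100000 + 4096) / 8192 == 12 and
	 leftover == 104096 - 12*8192 == 5792; the loop probes twelve
	 8192-byte blocks and, as leftover > 4096, the tail still needs
	 the extra probe emitted below when no registers are saved.  */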
      rtx ptr = gen_rtx_REG (DImode, 22);
      rtx count = gen_rtx_REG (DImode, 23);
      rtx seq;

      emit_move_insn (count, GEN_INT (blocks));
      emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
			     GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));

      /* Because of the difficulty in emitting a new basic block this
	 late in the compilation, generate the loop as a single insn.  */
      emit_insn (gen_prologue_stack_probe_loop (count, ptr));

      if (leftover > 4096 && sa_size == 0)
	{
	  rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
	  MEM_VOLATILE_P (last) = 1;
	  emit_move_insn (last, const0_rtx);
	}
      if (TARGET_ABI_WINDOWS_NT)
	{
	  /* For NT stack unwind (done by 'reverse execution'), it's
	     not OK to take the result of a loop, even though the value
	     is already in ptr, so we reload it via a single operation
	     and subtract it from sp.

	     Yes, that's correct -- we have to reload the whole constant
	     into a temporary via ldah+lda then subtract from sp.  */

	  HOST_WIDE_INT lo, hi;
	  lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
	  hi = frame_size - lo;
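	  /* This is the standard ldah+lda split: LO is the sign-extended
	     low 16 bits.  E.g., frame_size == 0x1A000 gives lo == -0x6000
	     and hi == 0x20000, and ldah+lda rebuild
	     0x20000 - 0x6000 == 0x1A000 exactly.  */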
	  emit_move_insn (ptr, GEN_INT (hi));
	  emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
	  seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
				       ptr));
	}
      else
	{
	  seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
				       GEN_INT (-leftover)));
	}

      /* This alternative is special, because the DWARF code cannot
	 possibly intuit through the loop above.  So we invent this
	 note for it to look at instead.  */
      RTX_FRAME_RELATED_P (seq) = 1;
      REG_NOTES (seq)
	= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			     gen_rtx_SET (VOIDmode, stack_pointer_rtx,
					  gen_rtx_PLUS (Pmode, stack_pointer_rtx,
							GEN_INT (TARGET_ABI_UNICOSMK
								 ? -frame_size + 64
								 : -frame_size))),
			     REG_NOTES (seq));
    }
  if (!TARGET_ABI_UNICOSMK)
    {
      HOST_WIDE_INT sa_bias = 0;

      /* Cope with very large offsets to the register save area.  */
      sa_reg = stack_pointer_rtx;
      if (reg_offset + sa_size > 0x8000)
	{
	  int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
	  rtx sa_bias_rtx;

	  if (low + sa_size <= 0x8000)
	    sa_bias = reg_offset - low, reg_offset = low;
	  else
	    sa_bias = reg_offset, reg_offset = 0;

	  sa_reg = gen_rtx_REG (DImode, 24);
	  sa_bias_rtx = GEN_INT (sa_bias);

	  if (add_operand (sa_bias_rtx, DImode))
	    emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
	  else
	    {
	      emit_move_insn (sa_reg, sa_bias_rtx);
	      emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
	    }
	}
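      /* Worked example: reg_offset == 0x9000 with sa_size == 0x200 gives
	 low == -0x7000; that plus sa_size fits in 16 signed bits, so we
	 pick sa_bias == 0x10000 and reg_offset == -0x7000, i.e.
	 $24 = sp + 0x10000 with the saves starting at $24 - 0x7000 and
	 growing upward.  */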
      /* Save regs in stack order.  Beginning with VMS PV.  */
      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
	emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);

      /* Save register RA next.  */
      if (imask & (1UL << REG_RA))
	{
	  emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
	  imask &= ~(1UL << REG_RA);
	  reg_offset += 8;
	}

      /* Now save any other registers required to be saved.  */
      for (i = 0; i < 31; i++)
	if (imask & (1UL << i))
	  {
	    emit_frame_store (i, sa_reg, sa_bias, reg_offset);
	    reg_offset += 8;
	  }

      for (i = 0; i < 31; i++)
	if (fmask & (1UL << i))
	  {
	    emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
	    reg_offset += 8;
	  }
    }
  else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
    {
      /* The standard frame on the T3E includes space for saving registers.
	 We just have to use it.  We don't have to save the return address and
	 the old frame pointer here - they are saved in the DSIB.  */

      reg_offset = -56;
      for (i = 9; i < 15; i++)
	if (imask & (1UL << i))
	  {
	    emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
	    reg_offset -= 8;
	  }
      for (i = 2; i < 10; i++)
	if (fmask & (1UL << i))
	  {
	    emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
	    reg_offset -= 8;
	  }
    }
  if (TARGET_ABI_OPEN_VMS)
    {
      if (alpha_procedure_type == PT_REGISTER)
	/* Register frame procedures save the fp.
	   ?? Ought to have a dwarf2 save for this.  */
	emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
			hard_frame_pointer_rtx);

      if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
	emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
				    gen_rtx_REG (DImode, REG_PV)));

      if (alpha_procedure_type != PT_NULL
	  && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
	FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));

      /* If we have to allocate space for outgoing args, do it now.  */
      if (current_function_outgoing_args_size != 0)
	{
	  rtx seq
	    = emit_move_insn (stack_pointer_rtx,
			      plus_constant
			      (hard_frame_pointer_rtx,
			       - (ALPHA_ROUND
				  (current_function_outgoing_args_size))));

	  /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
	     if ! frame_pointer_needed.  Setting the bit will change the CFA
	     computation rule to use sp again, which would be wrong if we had
	     frame_pointer_needed, as this means sp might move unpredictably
	     later on.

	     Also, note that
	       frame_pointer_needed
	       => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
	     and
	       current_function_outgoing_args_size != 0
	       => alpha_procedure_type != PT_NULL,

	     so when we are not setting the bit here, we are guaranteed to
	     have emitted an FRP frame pointer update just before.  */
	  RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
	}
    }
  else if (!TARGET_ABI_UNICOSMK)
    {
      /* If we need a frame pointer, set it from the stack pointer.  */
      if (frame_pointer_needed)
	{
	  if (TARGET_CAN_FAULT_IN_PROLOGUE)
	    FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
	  else
	    /* This must always be the last instruction in the
	       prologue, thus we emit a special move + clobber.  */
	    FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
					 stack_pointer_rtx, sa_reg)));
	}
    }

  /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
     the prologue, for exception handling reasons, we cannot do this for
     any insn that might fault.  We could prevent this for mems with a
     (clobber:BLK (scratch)), but this doesn't work for fp insns.  So we
     have to prevent all such scheduling with a blockage.

     Linux, on the other hand, never bothered to implement OSF/1's
     exception handling, and so doesn't care about such things.  Anyone
     planning to use dwarf2 frame-unwind info can also omit the blockage.  */

  if (! TARGET_CAN_FAULT_IN_PROLOGUE)
    emit_insn (gen_blockage ());
}
/* Count the number of .file directives, so that .loc is up to date.  */
int num_source_filenames = 0;
/* Output the textual info surrounding the prologue.  */

void
alpha_start_function (FILE *file, const char *fnname,
		      tree decl ATTRIBUTE_UNUSED)
{
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  unsigned HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  char *entry_label = (char *) alloca (strlen (fnname) + 6);
  int i;

  /* Don't emit an extern directive for functions defined in the same file.  */
  if (TARGET_ABI_UNICOSMK)
    {
      tree name_tree;
      name_tree = get_identifier (fnname);
      TREE_ASM_WRITTEN (name_tree) = 1;
    }

  alpha_fnname = fnname;
  sa_size = alpha_sa_size ();

  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 8 : 0)
			      + frame_size
			      + current_function_pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 48 : 0))
		 + ALPHA_ROUND (frame_size
				+ current_function_outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
		  + sa_size
		  + ALPHA_ROUND (frame_size
				 + current_function_pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8;
  else
    reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);
  /* Ecoff can handle multiple .file directives, so put out file and lineno.
     We have to do that before the .ent directive as we cannot switch
     files within procedures with native ecoff because line numbers are
     linked to procedure descriptors.
     Outputting the lineno helps debugging of one-line functions as they
     would otherwise get no line number at all.  Please note that we would
     like to put out last_linenum from final.c, but it is not accessible.  */

  if (write_symbols == SDB_DEBUG)
    {
#ifdef ASM_OUTPUT_SOURCE_FILENAME
      ASM_OUTPUT_SOURCE_FILENAME (file,
				  DECL_SOURCE_FILE (current_function_decl));
#endif
#ifdef SDB_OUTPUT_SOURCE_LINE
      if (debug_info_level != DINFO_LEVEL_TERSE)
	SDB_OUTPUT_SOURCE_LINE (file,
				DECL_SOURCE_LINE (current_function_decl));
#endif
    }
  /* Issue function start and label.  */
  if (TARGET_ABI_OPEN_VMS
      || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
    {
      fputs ("\t.ent ", file);
      assemble_name (file, fnname);
      putc ('\n', file);

      /* If the function needs GP, we'll write the "..ng" label there.
	 Otherwise, do it here.  */
      if (TARGET_ABI_OSF
	  && ! alpha_function_needs_gp
	  && ! current_function_is_thunk)
	{
	  putc ('$', file);
	  assemble_name (file, fnname);
	  fputs ("..ng:\n", file);
	}
    }

  strcpy (entry_label, fnname);
  if (TARGET_ABI_OPEN_VMS)
    strcat (entry_label, "..en");

  /* For public functions, the label must be globalized by appending an
     additional colon.  */
  if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
    strcat (entry_label, ":");

  ASM_OUTPUT_LABEL (file, entry_label);
  inside_function = TRUE;
  if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.base $%d\n", vms_base_regno);

  if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
      && !flag_inhibit_size_directive)
    {
      /* Set flags in procedure descriptor to request IEEE-conformant
	 math-library routines.  The value we set it to is PDSC_EXC_IEEE
	 (/usr/include/pdsc.h).  */
      fputs ("\t.eflag 48\n", file);
    }

  /* Set up offsets to alpha virtual arg/local debugging pointer.  */
  alpha_auto_offset = -frame_size + current_function_pretend_args_size;
  alpha_arg_offset = -frame_size + 48;
  /* Describe our frame.  If the frame size is larger than an integer,
     print it as zero to avoid an assembler error.  We won't be
     properly describing such a frame, but that's the best we can do.  */
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
	     HOST_WIDE_INT_PRINT_DEC "\n",
	     vms_unwind_regno,
	     frame_size >= (1UL << 31) ? 0 : frame_size,
	     reg_offset);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
	     (frame_pointer_needed
	      ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
	     frame_size >= (1UL << 31) ? 0 : frame_size,
	     current_function_pretend_args_size);
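  /* Purely as an illustration, a small OSF function with a 96-byte
     frame, no frame pointer and no pretend args would get
     ".frame $30,96,$26,0" from the directive above.  */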
  /* Describe which registers were spilled.  */
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    {
      if (imask)
	/* ??? Does VMS care if mask contains ra?  The old code didn't
	   set it, so I don't here.  */
	fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
      if (fmask)
	fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
      if (alpha_procedure_type == PT_REGISTER)
	fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
    }
  else if (!flag_inhibit_size_directive)
    {
      if (imask)
	{
	  fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
		   frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);

	  for (i = 0; i < 32; ++i)
	    if (imask & (1UL << i))
	      reg_offset += 8;
	}

      if (fmask)
	fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
		 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
    }
#if TARGET_ABI_OPEN_VMS
  /* Ifdef'ed because link_section is only available then.  */
  switch_to_section (readonly_data_section);
  fprintf (file, "\t.align 3\n");
  assemble_name (file, fnname); fputs ("..na:\n", file);
  fputs ("\t.ascii \"", file);
  assemble_name (file, fnname);
  fputs ("\\0\"\n", file);
  alpha_need_linkage (fnname, 1);
  switch_to_section (text_section);
#endif
}
/* Emit the .prologue note at the scheduled end of the prologue.  */

static void
alpha_output_function_end_prologue (FILE *file)
{
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    fputs ("\t.prologue\n", file);
  else if (TARGET_ABI_WINDOWS_NT)
    fputs ("\t.prologue 0\n", file);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.prologue %d\n",
	     alpha_function_needs_gp || current_function_is_thunk);
}
/* Write function epilogue.  */

/* ??? At some point we will want to support full unwind, and so will
   need to mark the epilogue as well.  At the moment, we just confuse
   dwarf2out.  */
#undef FRP
#define FRP(exp) exp
void
alpha_expand_epilogue (void)
{
  /* Registers to save.  */
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  int fp_is_frame_pointer, fp_offset;
  rtx sa_reg, sa_reg_exp = NULL;
  rtx sp_adj1, sp_adj2, mem;
  rtx eh_ofs;
  int i;

  sa_size = alpha_sa_size ();

  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 8 : 0)
			      + frame_size
			      + current_function_pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 48 : 0))
		 + ALPHA_ROUND (frame_size
				+ current_function_outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
		  + sa_size
		  + ALPHA_ROUND (frame_size
				 + current_function_pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    {
      if (alpha_procedure_type == PT_STACK)
	reg_offset = 8;
      else
	reg_offset = 0;
    }
  else
    reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  fp_is_frame_pointer
    = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
       || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
  fp_offset = 0;
  sa_reg = stack_pointer_rtx;

  if (current_function_calls_eh_return)
    eh_ofs = EH_RETURN_STACKADJ_RTX;
  else
    eh_ofs = NULL;
  if (!TARGET_ABI_UNICOSMK && sa_size)
    {
      /* If we have a frame pointer, restore SP from it.  */
      if ((TARGET_ABI_OPEN_VMS
	   && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
	  || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
	FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));

      /* Cope with very large offsets to the register save area.  */
      if (reg_offset + sa_size > 0x8000)
	{
	  int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
	  HOST_WIDE_INT bias;

	  if (low + sa_size <= 0x8000)
	    bias = reg_offset - low, reg_offset = low;
	  else
	    bias = reg_offset, reg_offset = 0;

	  sa_reg = gen_rtx_REG (DImode, 22);
	  sa_reg_exp = plus_constant (stack_pointer_rtx, bias);

	  FRP (emit_move_insn (sa_reg, sa_reg_exp));
	}

      /* Restore registers in order, excepting a true frame pointer.  */

      mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
      if (! eh_ofs)
	set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));

      reg_offset += 8;
      imask &= ~(1UL << REG_RA);

      for (i = 0; i < 31; ++i)
	if (imask & (1UL << i))
	  {
	    if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
	      fp_offset = reg_offset;
	    else
	      {
		mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
		set_mem_alias_set (mem, alpha_sr_alias_set);
		FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
	      }
	    reg_offset += 8;
	  }

      for (i = 0; i < 31; ++i)
	if (fmask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
	    reg_offset += 8;
	  }
    }
  else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
    {
      /* Restore callee-saved general-purpose registers.  */

      reg_offset = -56;

      for (i = 9; i < 15; i++)
	if (imask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
						     reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
	    reg_offset -= 8;
	  }

      for (i = 2; i < 10; i++)
	if (fmask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
						     reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
	    reg_offset -= 8;
	  }

      /* Restore the return address from the DSIB.  */

      mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
    }
  if (frame_size || eh_ofs)
    {
      sp_adj1 = stack_pointer_rtx;

      if (eh_ofs)
	{
	  sp_adj1 = gen_rtx_REG (DImode, 23);
	  emit_move_insn (sp_adj1,
			  gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
	}

      /* If the stack size is large, begin computation into a temporary
	 register so as not to interfere with a potential fp restore,
	 which must be consecutive with an SP restore.  */
      if (frame_size < 32768
	  && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
	sp_adj2 = GEN_INT (frame_size);
      else if (TARGET_ABI_UNICOSMK)
	{
	  sp_adj1 = gen_rtx_REG (DImode, 23);
	  FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
	  sp_adj2 = const0_rtx;
	}
      else if (frame_size < 0x40007fffL)
	{
	  int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;

	  sp_adj2 = plus_constant (sp_adj1, frame_size - low);
	  if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
	    sp_adj1 = sa_reg;
	  else
	    {
	      sp_adj1 = gen_rtx_REG (DImode, 23);
	      FRP (emit_move_insn (sp_adj1, sp_adj2));
	    }
	  sp_adj2 = GEN_INT (low);
	}
      else
	{
	  rtx tmp = gen_rtx_REG (DImode, 23);
	  FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
					       3, false));
	  if (!sp_adj2)
	    {
	      /* We can't drop new things to memory this late, afaik,
		 so build it up by pieces.  */
	      FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
							-(frame_size < 0)));
	      gcc_assert (sp_adj2);
	    }
	}
      /* From now on, things must be in order.  So emit blockages.  */

      /* Restore the frame pointer.  */
      if (TARGET_ABI_UNICOSMK)
	{
	  emit_insn (gen_blockage ());
	  mem = gen_rtx_MEM (DImode,
			     plus_constant (hard_frame_pointer_rtx, -16));
	  set_mem_alias_set (mem, alpha_sr_alias_set);
	  FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
	}
      else if (fp_is_frame_pointer)
	{
	  emit_insn (gen_blockage ());
	  mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
	  set_mem_alias_set (mem, alpha_sr_alias_set);
	  FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
	}
      else if (TARGET_ABI_OPEN_VMS)
	{
	  emit_insn (gen_blockage ());
	  FRP (emit_move_insn (hard_frame_pointer_rtx,
			       gen_rtx_REG (DImode, vms_save_fp_regno)));
	}

      /* Restore the stack pointer.  */
      emit_insn (gen_blockage ());
      if (sp_adj2 == const0_rtx)
	FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
      else
	FRP (emit_move_insn (stack_pointer_rtx,
			     gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
    }
  else
    {
      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
	{
	  emit_insn (gen_blockage ());
	  FRP (emit_move_insn (hard_frame_pointer_rtx,
			       gen_rtx_REG (DImode, vms_save_fp_regno)));
	}
      else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
	{
	  /* Decrement the frame pointer if the function does not have a
	     frame.  */

	  emit_insn (gen_blockage ());
	  FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
				      hard_frame_pointer_rtx, constm1_rtx)));
	}
    }
}
/* Output the rest of the textual info surrounding the epilogue.  */

void
alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
{
#if TARGET_ABI_OPEN_VMS
  alpha_write_linkage (file, fnname, decl);
#endif

  /* End the function.  */
  if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
    {
      fputs ("\t.end ", file);
      assemble_name (file, fnname);
      putc ('\n', file);
    }
  inside_function = FALSE;

  /* Output jump tables and the static subroutine information block.  */
  if (TARGET_ABI_UNICOSMK)
    {
      unicosmk_output_ssib (file, fnname);
      unicosmk_output_deferred_case_vectors (file);
    }
}
/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.

   In order to avoid the hordes of differences between generated code
   with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
   lots of code loading up large constants, generate rtl and emit it
   instead of going straight to text.

   Not sure why this idea hasn't been explored before...  */

#if TARGET_ABI_OSF
static void
alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			   HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			   tree function)
{
  HOST_WIDE_INT hi, lo;
  rtx this, insn, funexp;

  reset_block_changes ();

  /* We always require a valid GP.  */
  emit_insn (gen_prologue_ldgp ());
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in $16.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this = gen_rtx_REG (Pmode, 17);
  else
    this = gen_rtx_REG (Pmode, 16);

  /* Add DELTA.  When possible we use ldah+lda.  Otherwise load the
     entire constant for the add.  */
  lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
  hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  if (hi + lo == delta)
    {
      if (hi)
	emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
      if (lo)
	emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
    }
  else
    {
      rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
					   delta, -(delta < 0));
      emit_insn (gen_adddi3 (this, this, tmp));
    }

  /* Add a delta stored in the vtable at VCALL_OFFSET.  */
  if (vcall_offset)
    {
      rtx tmp, tmp2;

      tmp = gen_rtx_REG (Pmode, 0);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));

      lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
      hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
      if (hi + lo == vcall_offset)
	{
	  if (hi)
	    emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
	}
      else
	{
	  tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
					    vcall_offset, -(vcall_offset < 0));
	  emit_insn (gen_adddi3 (tmp, tmp, tmp2));
	  lo = 0;
	}
      if (lo)
	tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
      else
	tmp2 = tmp;
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));

      emit_insn (gen_adddi3 (this, this, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_initialize ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();
}
#endif /* TARGET_ABI_OSF */
/* Debugging support.  */

/* Count the number of sdb-related labels that are generated (to find
   block start and end boundaries).  */

int sdb_label_count = 0;

/* Name of the file containing the current function.  */

static const char *current_function_file = "";

/* Offsets to alpha virtual arg/local debugging pointers.  */

long alpha_arg_offset;
long alpha_auto_offset;
/* Emit a new filename to a stream.  */

void
alpha_output_filename (FILE *stream, const char *name)
{
  static int first_time = TRUE;

  if (first_time)
    {
      first_time = FALSE;
      ++num_source_filenames;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      fprintf (stream, "\n");
      if (!TARGET_GAS && write_symbols == DBX_DEBUG)
	fprintf (stream, "\t#@stabs\n");
    }

  else if (write_symbols == DBX_DEBUG)
    /* dbxout.c will emit an appropriate .stabs directive.  */
    ;

  else if (name != current_function_file
	   && strcmp (name, current_function_file) != 0)
    {
      if (inside_function && ! TARGET_GAS)
	fprintf (stream, "\t#.file\t%d ", num_source_filenames);
      else
	{
	  ++num_source_filenames;
	  current_function_file = name;
	  fprintf (stream, "\t.file\t%d ", num_source_filenames);
	}

      output_quoted_string (stream, name);
      fprintf (stream, "\n");
    }
}
/* Structure to show the current status of registers and memory.  */

struct shadow_summary
{
  struct {
    unsigned int i     : 31;	/* Mask of int regs */
    unsigned int fp    : 31;	/* Mask of fp regs */
    unsigned int mem   : 1;	/* mem == imem | fpmem */
  } used, defd;
};

/* Summarize the effects of expression X on the machine.  Update SUM, a
   pointer to the summary structure.  SET is nonzero if the insn is setting
   the object, otherwise zero.  */
static void
summarize_insn (rtx x, struct shadow_summary *sum, int set)
{
  const char *format_ptr;
  int i, j;

  if (x == 0)
    return;

  switch (GET_CODE (x))
    {
      /* ??? Note that this case would be incorrect if the Alpha had a
	 ZERO_EXTRACT in SET_DEST.  */
    case SET:
      summarize_insn (SET_SRC (x), sum, 0);
      summarize_insn (SET_DEST (x), sum, 1);
      break;

    case CLOBBER:
      summarize_insn (XEXP (x, 0), sum, 1);
      break;

    case USE:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
	summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
      break;

    case PARALLEL:
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	summarize_insn (XVECEXP (x, 0, i), sum, 0);
      break;

    case SUBREG:
      summarize_insn (SUBREG_REG (x), sum, 0);
      break;

    case REG:
      {
	int regno = REGNO (x);
	unsigned long mask = ((unsigned long) 1) << (regno % 32);

	if (regno == 31 || regno == 63)
	  break;

	if (set)
	  {
	    if (regno < 32)
	      sum->defd.i |= mask;
	    else
	      sum->defd.fp |= mask;
	  }
	else
	  {
	    if (regno < 32)
	      sum->used.i |= mask;
	    else
	      sum->used.fp |= mask;
	  }
      }
      break;

    case MEM:
      if (set)
	sum->defd.mem = 1;
      else
	sum->used.mem = 1;

      /* Find the regs used in memory address computation:  */
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case CONST_INT:   case CONST_DOUBLE:
    case SYMBOL_REF:  case LABEL_REF:     case CONST:
    case SCRATCH:     case ASM_INPUT:
      break;

      /* Handle common unary and binary ops for efficiency.  */
    case COMPARE:  case PLUS:    case MINUS:   case MULT:      case DIV:
    case MOD:      case UDIV:    case UMOD:    case AND:       case IOR:
    case XOR:      case ASHIFT:  case ROTATE:  case ASHIFTRT:  case LSHIFTRT:
    case ROTATERT: case SMIN:    case SMAX:    case UMIN:      case UMAX:
    case NE:       case EQ:      case GE:      case GT:        case LE:
    case LT:       case GEU:     case GTU:     case LEU:       case LTU:
      summarize_insn (XEXP (x, 0), sum, 0);
      summarize_insn (XEXP (x, 1), sum, 0);
      break;

    case NEG:  case NOT:  case SIGN_EXTEND:  case ZERO_EXTEND:
    case TRUNCATE:  case FLOAT_EXTEND:  case FLOAT_TRUNCATE:  case FLOAT:
    case FIX:  case UNSIGNED_FLOAT:  case UNSIGNED_FIX:  case ABS:
    case SQRT:  case FFS:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    default:
      format_ptr = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
	switch (format_ptr[i])
	  {
	  case 'e':
	    summarize_insn (XEXP (x, i), sum, 0);
	    break;

	  case 'E':
	    for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	      summarize_insn (XVECEXP (x, i, j), sum, 0);
	    break;

	  case 'i':
	    break;

	  default:
	    gcc_unreachable ();
	  }
    }
}
/* Ensure a sufficient number of `trapb' insns are in the code when
   the user requests code with a trap precision of functions or
   instructions.

   In naive mode, when the user requests a trap-precision of
   "instruction", a trapb is needed after every instruction that may
   generate a trap.  This ensures that the code is resumption safe but
   it is also slow.

   When optimizations are turned on, we delay issuing a trapb as long
   as possible.  In this context, a trap shadow is the sequence of
   instructions that starts with a (potentially) trap generating
   instruction and extends to the next trapb or call_pal instruction
   (but GCC never generates call_pal by itself).  We can delay (and
   therefore sometimes omit) a trapb subject to the following
   conditions:

   (a) On entry to the trap shadow, if any Alpha register or memory
   location contains a value that is used as an operand value by some
   instruction in the trap shadow (live on entry), then no instruction
   in the trap shadow may modify the register or memory location.

   (b) Within the trap shadow, the computation of the base register
   for a memory load or store instruction may not involve using the
   result of an instruction that might generate an UNPREDICTABLE
   result.

   (c) Within the trap shadow, no register may be used more than once
   as a destination register.  (This is to make life easier for the
   trap-handler.)

   (d) The trap shadow may not include any branch instructions.  */
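/* An illustrative shadow satisfying (a)-(d):

	addt/sv $f1,$f2,$f3	# potentially trapping insn opens the shadow
	mult/sv $f4,$f5,$f6	# ok: fresh destination, inputs unmodified
	trapb			# closes the shadow

   Writing $f3 again before the trapb would violate (c); a branch
   inside the shadow would violate (d).  */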
static void
alpha_handle_trap_shadows (void)
{
  struct shadow_summary shadow;
  int trap_pending, exception_nesting;
  rtx i, n;

  trap_pending = 0;
  exception_nesting = 0;
  shadow.used.i = 0;
  shadow.used.fp = 0;
  shadow.used.mem = 0;
  shadow.defd = shadow.used;

  for (i = get_insns (); i ; i = NEXT_INSN (i))
    {
      if (GET_CODE (i) == NOTE)
	{
	  switch (NOTE_LINE_NUMBER (i))
	    {
	    case NOTE_INSN_EH_REGION_BEG:
	      exception_nesting++;
	      if (trap_pending)
		goto close_shadow;
	      break;

	    case NOTE_INSN_EH_REGION_END:
	      exception_nesting--;
	      if (trap_pending)
		goto close_shadow;
	      break;

	    case NOTE_INSN_EPILOGUE_BEG:
	      if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
		goto close_shadow;
	      break;
	    }
	}
      else if (trap_pending)
	{
	  if (alpha_tp == ALPHA_TP_FUNC)
	    {
	      if (GET_CODE (i) == JUMP_INSN
		  && GET_CODE (PATTERN (i)) == RETURN)
		goto close_shadow;
	    }
	  else if (alpha_tp == ALPHA_TP_INSN)
	    {
	      if (optimize > 0)
		{
		  struct shadow_summary sum;

		  sum.used.i = 0;
		  sum.used.fp = 0;
		  sum.used.mem = 0;
		  sum.defd = sum.used;

		  switch (GET_CODE (i))
		    {
		    case INSN:
		      /* Annoyingly, get_attr_trap will die on these.  */
		      if (GET_CODE (PATTERN (i)) == USE
			  || GET_CODE (PATTERN (i)) == CLOBBER)
			break;

		      summarize_insn (PATTERN (i), &sum, 0);

		      if ((sum.defd.i & shadow.defd.i)
			  || (sum.defd.fp & shadow.defd.fp))
			{
			  /* (c) would be violated */
			  goto close_shadow;
			}

		      /* Combine shadow with summary of current insn:  */
		      shadow.used.i   |= sum.used.i;
		      shadow.used.fp  |= sum.used.fp;
		      shadow.used.mem |= sum.used.mem;
		      shadow.defd.i   |= sum.defd.i;
		      shadow.defd.fp  |= sum.defd.fp;
		      shadow.defd.mem |= sum.defd.mem;

		      if ((sum.defd.i & shadow.used.i)
			  || (sum.defd.fp & shadow.used.fp)
			  || (sum.defd.mem & shadow.used.mem))
			{
			  /* (a) would be violated (also takes care of (b))  */
			  gcc_assert (get_attr_trap (i) != TRAP_YES
				      || (!(sum.defd.i & sum.used.i)
					  && !(sum.defd.fp & sum.used.fp)));

			  goto close_shadow;
			}
		      break;

		    case JUMP_INSN:
		    case CALL_INSN:
		    case CODE_LABEL:
		      goto close_shadow;

		    default:
		      gcc_unreachable ();
		    }
		}
	      else
		{
		close_shadow:
		  n = emit_insn_before (gen_trapb (), i);
		  PUT_MODE (n, TImode);
		  PUT_MODE (i, TImode);
		  trap_pending = 0;
		  shadow.used.i = 0;
		  shadow.used.fp = 0;
		  shadow.used.mem = 0;
		  shadow.defd = shadow.used;
		}
	    }
	}

      if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
	  && GET_CODE (i) == INSN
	  && GET_CODE (PATTERN (i)) != USE
	  && GET_CODE (PATTERN (i)) != CLOBBER
	  && get_attr_trap (i) == TRAP_YES)
	{
	  if (optimize && !trap_pending)
	    summarize_insn (PATTERN (i), &shadow, 0);
	  trap_pending = 1;
	}
    }
}
/* Alpha can only issue instruction groups simultaneously if they are
   suitably aligned.  This is very processor-specific.  */
/* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
   that are marked "fake".  These instructions do not exist on that target,
   but it is possible to see these insns with deranged combinations of
   command-line options, such as "-mtune=ev4 -mmax".  Instead of aborting,
   choose a result at random.  */

enum alphaev4_pipe {
  EV4_STOP = 0,
  EV4_IB0 = 1,
  EV4_IB1 = 2,
  EV4_IBX = 4
};

enum alphaev5_pipe {
  EV5_STOP = 0,
  EV5_NONE = 1,
  EV5_E01 = 2,
  EV5_E0 = 4,
  EV5_E1 = 8,
  EV5_FAM = 16,
  EV5_FA = 32,
  EV5_FM = 64
};
static enum alphaev4_pipe
alphaev4_insn_pipe (rtx insn)
{
  if (recog_memoized (insn) < 0)
    return EV4_STOP;
  if (get_attr_length (insn) != 4)
    return EV4_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_LDSYM:
    case TYPE_FLD:
    case TYPE_LD_L:
      return EV4_IBX;

    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMOV:
    case TYPE_ICMP:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_FBR:
    case TYPE_MVI:		/* fake */
      return EV4_IB0;

    case TYPE_IST:
    case TYPE_MISC:
    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
    case TYPE_FCPYS:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FMUL:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FSQRT:		/* fake */
    case TYPE_FTOI:		/* fake */
    case TYPE_ITOF:		/* fake */
      return EV4_IB1;

    default:
      gcc_unreachable ();
    }
}
static enum alphaev5_pipe
alphaev5_insn_pipe (rtx insn)
{
  if (recog_memoized (insn) < 0)
    return EV5_STOP;
  if (get_attr_length (insn) != 4)
    return EV5_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_FLD:
    case TYPE_LDSYM:
    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMOV:
    case TYPE_ICMP:
      return EV5_E01;

    case TYPE_IST:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_MISC:
    case TYPE_MVI:
    case TYPE_LD_L:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FTOI:		/* fake */
    case TYPE_ITOF:		/* fake */
      return EV5_E0;

    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
      return EV5_E1;

    case TYPE_FCPYS:
      return EV5_FAM;

    case TYPE_FBR:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FSQRT:		/* fake */
      return EV5_FA;

    case TYPE_FMUL:
      return EV5_FM;

    default:
      gcc_unreachable ();
    }
}
/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev4_pipe above.  If EV4_IBX is set, then
   the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.

   LEN is, of course, the length of the group in bytes.  */
static rtx
alphaev4_next_group (rtx insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev4_pipe pipe;

      pipe = alphaev4_insn_pipe (insn);
      switch (pipe)
	{
	case EV4_STOP:
	  /* Force complex instructions to start new groups.  */
	  if (in_use)
	    goto done;

	  /* If this is a completely unrecognized insn, it's an asm.
	     We don't know how long it is, so record length as -1 to
	     signal a needed realignment.  */
	  if (recog_memoized (insn) < 0)
	    len = -1;
	  else
	    len = get_attr_length (insn);
	  goto next_and_done;

	case EV4_IBX:
	  if (in_use & EV4_IB0)
	    {
	      if (in_use & EV4_IB1)
		goto done;
	      in_use |= EV4_IB1;
	    }
	  else
	    in_use |= EV4_IB0 | EV4_IBX;
	  break;

	case EV4_IB0:
	  if (in_use & EV4_IB0)
	    {
	      if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
		goto done;
	      in_use |= EV4_IB1;
	    }
	  in_use |= EV4_IB0;
	  break;

	case EV4_IB1:
	  if (in_use & EV4_IB1)
	    goto done;
	  in_use |= EV4_IB1;
	  break;

	default:
	  gcc_unreachable ();
	}
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      if (GET_CODE (insn) == JUMP_INSN)
	goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
	goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
	goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
	goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}
8983 /* IN_USE is a mask of the slots currently filled within the insn group.
8984 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8985 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8987 LEN is, of course, the length of the group in bytes. */
8990 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8997 || GET_CODE (PATTERN (insn)) == CLOBBER
8998 || GET_CODE (PATTERN (insn)) == USE)
9003 enum alphaev5_pipe pipe;
9005 pipe = alphaev5_insn_pipe (insn);
9009 /* Force complex instructions to start new groups. */
9013 /* If this is a completely unrecognized insn, it's an asm.
9014 We don't know how long it is, so record length as -1 to
9015 signal a needed realignment. */
9016 if (recog_memoized (insn) < 0)
9019 len = get_attr_length (insn);
9022 /* ??? Most of the places below, we would like to assert never
9023 happen, as it would indicate an error either in Haifa, or
9024 in the scheduling description. Unfortunately, Haifa never
9025 schedules the last instruction of the BB, so we don't have
9026 an accurate TI bit to go off. */
9028 if (in_use & EV5_E0)
9030 if (in_use & EV5_E1)
9035 in_use |= EV5_E0 | EV5_E01;
9039 if (in_use & EV5_E0)
9041 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9049 if (in_use & EV5_E1)
9055 if (in_use & EV5_FA)
9057 if (in_use & EV5_FM)
9062 in_use |= EV5_FA | EV5_FAM;
9066 if (in_use & EV5_FA)
9072 if (in_use & EV5_FM)
9085 /* Haifa doesn't do well scheduling branches. */
9086 /* ??? If this is predicted not-taken, slotting continues, except
9087 that no more IBR, FBR, or JSR insns may be slotted. */
9088 if (GET_CODE (insn) == JUMP_INSN)
9092 insn = next_nonnote_insn (insn);
9094 if (!insn || ! INSN_P (insn))
9097 /* Let Haifa tell us where it thinks insn group boundaries are. */
9098 if (GET_MODE (insn) == TImode)
9101 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9106 insn = next_nonnote_insn (insn);
static rtx
alphaev4_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV4_IB0))
    {
      in_use |= EV4_IB0;
      nop = gen_nop ();
    }
  else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
    {
      in_use |= EV4_IB1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV4_IB1))
    {
      in_use |= EV4_IB1;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}

static rtx
alphaev5_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV5_E1))
    {
      in_use |= EV5_E1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FA))
    {
      in_use |= EV5_FA;
      nop = gen_fnop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FM))
    {
      in_use |= EV5_FM;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}
/* The instruction group alignment main loop.  */

static void
alpha_align_insns (unsigned int max_align,
		   rtx (*next_group) (rtx, int *, int *),
		   rtx (*next_nop) (int *))
{
  /* ALIGN is the known alignment for the insn group.  */
  unsigned int align;
  /* OFS is the offset of the current insn in the insn group.  */
  int ofs;
  int prev_in_use, in_use, len, ldgp;
  rtx i, next;

  /* Let shorten branches care for assigning alignments to code labels.  */
  shorten_branches (get_insns ());

  if (align_functions < 4)
    align = 4;
  else if ((unsigned int) align_functions < max_align)
    align = align_functions;
  else
    align = max_align;

  ofs = prev_in_use = 0;
  i = get_insns ();
  if (GET_CODE (i) == NOTE)
    i = next_nonnote_insn (i);

  ldgp = alpha_function_needs_gp ? 8 : 0;
  while (i)
    {
      next = (*next_group) (i, &in_use, &len);

      /* When we see a label, resync alignment etc.  */
      if (GET_CODE (i) == CODE_LABEL)
	{
	  unsigned int new_align = 1 << label_to_alignment (i);

	  if (new_align >= align)
	    {
	      align = new_align < max_align ? new_align : max_align;
	      ofs = 0;
	    }

	  else if (ofs & (new_align-1))
	    ofs = (ofs | (new_align-1)) + 1;
	  gcc_assert (!len);
	}

      /* Handle complex instructions special.  */
      else if (in_use == 0)
	{
	  /* Asms will have length < 0.  This is a signal that we have
	     lost alignment knowledge.  Assume, however, that the asm
	     will not mis-align instructions.  */
	  if (len < 0)
	    {
	      ofs = 0;
	      align = 4;
	      len = 0;
	    }
	}

      /* If the known alignment is smaller than the recognized insn group,
	 realign the output.  */
      else if ((int) align < len)
	{
	  unsigned int new_log_align = len > 8 ? 4 : 3;
	  rtx prev, where;

	  where = prev = prev_nonnote_insn (i);
	  if (!where || GET_CODE (where) != CODE_LABEL)
	    where = i;

	  /* Can't realign between a call and its gp reload.  */
	  if (! (TARGET_EXPLICIT_RELOCS
		 && prev && GET_CODE (prev) == CALL_INSN))
	    {
	      emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
	      align = 1 << new_log_align;
	      ofs = 0;
	    }
	}

      /* We may not insert padding inside the initial ldgp sequence.  */
      else if (ldgp > 0)
	ldgp -= len;

      /* If the group won't fit in the same INT16 as the previous,
	 we need to add padding to keep the group together.  Rather
	 than simply leaving the insn filling to the assembler, we
	 can make use of the knowledge of what sorts of instructions
	 were issued in the previous group to make sure that all of
	 the added nops are really free.  */
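      /* Example: with align == 16, ofs == 8 and an incoming group of
	 len == 12, the group would straddle the 16-byte window, so the
	 code below emits (16 - 8) / 4 == 2 nops, each chosen by
	 *next_nop to fill a pipe the previous group left idle.  */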
      else if (ofs + len > (int) align)
	{
	  int nop_count = (align - ofs) / 4;
	  rtx where;

	  /* Insert nops before labels, branches, and calls to truly merge
	     the execution of the nops with the previous instruction group.  */
	  where = prev_nonnote_insn (i);
	  if (where)
	    {
	      if (GET_CODE (where) == CODE_LABEL)
		{
		  rtx where2 = prev_nonnote_insn (where);
		  if (where2 && GET_CODE (where2) == JUMP_INSN)
		    where = where2;
		}
	      else if (GET_CODE (where) == INSN)
		where = i;
	    }
	  else
	    where = i;

	  do
	    emit_insn_before ((*next_nop)(&prev_in_use), where);
	  while (--nop_count);
	  ofs = 0;
	}

      ofs = (ofs + len) & (align - 1);
      prev_in_use = in_use;
      i = next;
    }
}
/* Machine dependent reorg pass.  */

static void
alpha_reorg (void)
{
  if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
    alpha_handle_trap_shadows ();

  /* Due to the number of extra trapb insns, don't bother fixing up
     alignment when trap precision is instruction.  Moreover, we can
     only do our job when sched2 is run.  */
  if (optimize && !optimize_size
      && alpha_tp != ALPHA_TP_INSN
      && flag_schedule_insns_after_reload)
    {
      if (alpha_tune == PROCESSOR_EV4)
	alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
      else if (alpha_tune == PROCESSOR_EV5)
	alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
    }
}
#if !TARGET_ABI_UNICOSMK

#ifdef HAVE_STAMP_H
#include <stamp.h>
#endif

static void
alpha_file_start (void)
{
#ifdef OBJECT_FORMAT_ELF
  /* If emitting dwarf2 debug information, we cannot generate a .file
     directive to start the file, as it will conflict with dwarf2out
     file numbers.  So it's only useful when emitting mdebug output.  */
  targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
#endif

  default_file_start ();
#ifdef MS_STAMP
  fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
#endif

  fputs ("\t.set noreorder\n", asm_out_file);
  fputs ("\t.set volatile\n", asm_out_file);
  if (!TARGET_ABI_OPEN_VMS)
    fputs ("\t.set noat\n", asm_out_file);
  if (TARGET_EXPLICIT_RELOCS)
    fputs ("\t.set nomacro\n", asm_out_file);
  if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
    {
      const char *arch;

      if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
	arch = "ev6";
      else if (TARGET_MAX)
	arch = "pca56";
      else if (TARGET_BWX)
	arch = "ev56";
      else if (alpha_cpu == PROCESSOR_EV5)
	arch = "ev5";
      else
	arch = "ev4";

      fprintf (asm_out_file, "\t.arch %s\n", arch);
    }
}
#endif
#ifdef OBJECT_FORMAT_ELF

/* Return a section for X.  The only special thing we do here is to
   honor small data.  */

static section *
alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
			      unsigned HOST_WIDE_INT align)
{
  if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
    /* ??? Consider using mergeable sdata sections.  */
    return sdata_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}

#endif /* OBJECT_FORMAT_ELF */
/* Structure to collect function names for final output in link section.  */
/* Note that items marked with GTY can't be ifdef'ed out.  */

enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};

struct alpha_links GTY(())
{
  int num;
  rtx linkage;
  enum links_kind lkind;
  enum reloc_kind rkind;
};

struct alpha_funcs GTY(())
{
  int num;
  splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
    links;
};

static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
  splay_tree alpha_links_tree;
static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
  splay_tree alpha_funcs_tree;

static GTY(()) int alpha_funcs_num;
#if TARGET_ABI_OPEN_VMS

/* Return the VMS argument type corresponding to MODE.  */

enum avms_arg_type
alpha_arg_type (enum machine_mode mode)
{
  switch (mode)
    {
    case SFmode:
      return TARGET_FLOAT_VAX ? FF : FS;
    case DFmode:
      return TARGET_FLOAT_VAX ? FD : FT;
    default:
      return I64;
    }
}

/* Return an rtx for an integer representing the VMS Argument Information
   register value.  */

rtx
alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
{
  unsigned HOST_WIDE_INT regval = cum.num_args;
  int i;

  for (i = 0; i < 6; i++)
    regval |= ((int) cum.atypes[i]) << (i * 3 + 8);

  return GEN_INT (regval);
}
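/* For example (schematically), a call with two register arguments whose
   VMS types encode as t0 and t1 yields
   regval == 2 | (t0 << 8) | (t1 << 11), the remaining four 3-bit fields
   being filled the same way from atypes[2..5].  */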
/* Make (or fake) .linkage entry for function call.

   IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.

   Return an SYMBOL_REF rtx for the linkage.  */

rtx
alpha_need_linkage (const char *name, int is_local)
{
  splay_tree_node node;
  struct alpha_links *al;

  if (name[0] == '*')
    name++;

  if (is_local)
    {
      struct alpha_funcs *cfaf;

      if (!alpha_funcs_tree)
	alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
					       splay_tree_compare_pointers);

      cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));

      cfaf->links = 0;
      cfaf->num = ++alpha_funcs_num;

      splay_tree_insert (alpha_funcs_tree,
			 (splay_tree_key) current_function_decl,
			 (splay_tree_value) cfaf);
    }

  if (alpha_links_tree)
    {
      /* Is this name already defined?  */

      node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
      if (node)
	{
	  al = (struct alpha_links *) node->value;
	  if (is_local)
	    {
	      /* Defined here but external assumed.  */
	      if (al->lkind == KIND_EXTERN)
		al->lkind = KIND_LOCAL;
	    }
	  else
	    {
	      /* Used here but unused assumed.  */
	      if (al->lkind == KIND_UNUSED)
		al->lkind = KIND_LOCAL;
	    }
	  return al->linkage;
	}
    }
  else
    alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);

  al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
  name = ggc_strdup (name);

  /* Assume external if no definition.  */
  al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);

  /* Ensure we have an IDENTIFIER so assemble_name can mark it used.  */
  get_identifier (name);

  /* Construct a SYMBOL_REF for us to call.  */
  {
    size_t name_len = strlen (name);
    char *linksym = alloca (name_len + 6);
    linksym[0] = '$';
    memcpy (linksym + 1, name, name_len);
    memcpy (linksym + 1 + name_len, "..lk", 5);
    al->linkage = gen_rtx_SYMBOL_REF (Pmode,
				      ggc_alloc_string (linksym, name_len + 5));
  }
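  /* E.g., NAME "foo" yields the linkage symbol "$foo..lk".  */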
  splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
		     (splay_tree_value) al);

  return al->linkage;
}
rtx
alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
{
  splay_tree_node cfunnode;
  struct alpha_funcs *cfaf;
  struct alpha_links *al;
  const char *name = XSTR (linkage, 0);

  cfaf = (struct alpha_funcs *) 0;
  al = (struct alpha_links *) 0;

  cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
  cfaf = (struct alpha_funcs *) cfunnode->value;

  if (cfaf->links)
    {
      splay_tree_node lnode;

      /* Is this name already defined?  */

      lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
      if (lnode)
	al = (struct alpha_links *) lnode->value;
    }
  else
    cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);

  if (!al)
    {
      size_t name_len;
      size_t buflen;
      char buf[512];
      char *linksym;
      splay_tree_node node = 0;
      struct alpha_links *anl;

      if (name[0] == '*')
	name++;

      name_len = strlen (name);

      al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
      al->num = cfaf->num;

      node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
      if (node)
	{
	  anl = (struct alpha_links *) node->value;
	  al->lkind = anl->lkind;
	}

      sprintf (buf, "$%d..%s..lk", cfaf->num, name);
      buflen = strlen (buf);
      linksym = alloca (buflen + 1);
      memcpy (linksym, buf, buflen + 1);

      al->linkage = gen_rtx_SYMBOL_REF
	(Pmode, ggc_alloc_string (linksym, buflen + 1));

      splay_tree_insert (cfaf->links, (splay_tree_key) name,
			 (splay_tree_value) al);
    }

  if (rflag)
    al->rkind = KIND_CODEADDR;
  else
    al->rkind = KIND_LINKAGE;

  if (lflag)
    return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
  else
    return al->linkage;
}
static int
alpha_write_one_linkage (splay_tree_node node, void *data)
{
  const char *const name = (const char *) node->key;
  struct alpha_links *link = (struct alpha_links *) node->value;
  FILE *stream = (FILE *) data;

  fprintf (stream, "$%d..%s..lk:\n", link->num, name);
  if (link->rkind == KIND_CODEADDR)
    {
      if (link->lkind == KIND_LOCAL)
	{
	  /* Local and used */
	  fprintf (stream, "\t.quad %s..en\n", name);
	}
      else
	{
	  /* External and used, request code address.  */
	  fprintf (stream, "\t.code_address %s\n", name);
	}
    }
  else
    {
      if (link->lkind == KIND_LOCAL)
	{
	  /* Local and used, build linkage pair.  */
	  fprintf (stream, "\t.quad %s..en\n", name);
	  fprintf (stream, "\t.quad %s\n", name);
	}
      else
	{
	  /* External and used, request linkage pair.  */
	  fprintf (stream, "\t.linkage %s\n", name);
	}
    }

  return 0;
}
static void
alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
{
  splay_tree_node node;
  struct alpha_funcs *func;

  fprintf (stream, "\t.link\n");
  fprintf (stream, "\t.align 3\n");
  in_section = NULL;

  node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
  func = (struct alpha_funcs *) node->value;

  fputs ("\t.name ", stream);
  assemble_name (stream, funname);
  fputs ("..na\n", stream);
  ASM_OUTPUT_LABEL (stream, funname);
  fprintf (stream, "\t.pdesc ");
  assemble_name (stream, funname);
  fprintf (stream, "..en,%s\n",
	   alpha_procedure_type == PT_STACK ? "stack"
	   : alpha_procedure_type == PT_REGISTER ? "reg" : "null");

  if (func->links)
    {
      splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
      /* splay_tree_delete (func->links); */
    }
}
/* Given a decl, a section name, and whether the decl initializer
   has relocs, choose attributes for the section.  */

#define SECTION_VMS_OVERLAY	SECTION_FORGET
#define SECTION_VMS_GLOBAL SECTION_MACH_DEP
#define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)

static unsigned int
vms_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  if (decl && DECL_ATTRIBUTES (decl)
      && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
    flags |= SECTION_VMS_OVERLAY;
  if (decl && DECL_ATTRIBUTES (decl)
      && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
    flags |= SECTION_VMS_GLOBAL;
  if (decl && DECL_ATTRIBUTES (decl)
      && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
    flags |= SECTION_VMS_INITIALIZE;

  return flags;
}
/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
vms_asm_named_section (const char *name, unsigned int flags,
		       tree decl ATTRIBUTE_UNUSED)
{
  fputc ('\n', asm_out_file);
  fprintf (asm_out_file, ".section\t%s", name);

  if (flags & SECTION_VMS_OVERLAY)
    fprintf (asm_out_file, ",OVR");
  if (flags & SECTION_VMS_GLOBAL)
    fprintf (asm_out_file, ",GBL");
  if (flags & SECTION_VMS_INITIALIZE)
    fprintf (asm_out_file, ",NOMOD");
  if (flags & SECTION_DEBUG)
    fprintf (asm_out_file, ",NOWRT");

  fputc ('\n', asm_out_file);
}

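/* For example, a decl carrying both the "overlaid" and "global"
   attributes ends up with a directive of the form (name hypothetical):

	.section	MYSEC,OVR,GBL
*/
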
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   Differs from default_ctors_section_asm_out_constructor in that the
   width of the .ctors entry is always 64 bits, rather than the 32 bits
   used by a normal pointer.  */

static void
vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (ctors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

static void
vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (dtors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

#else

rtx
alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
		    int is_local ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

rtx
alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
		   tree cfundecl ATTRIBUTE_UNUSED,
		   int lflag ATTRIBUTE_UNUSED,
		   int rflag ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

#endif /* TARGET_ABI_OPEN_VMS */

#if TARGET_ABI_UNICOSMK

/* This evaluates to true if we do not know how to pass TYPE solely in
   registers.  This is the case for all arguments that do not fit in two
   registers.  */

static bool
unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
{
  if (type == NULL)
    return false;

  if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    return true;
  if (TREE_ADDRESSABLE (type))
    return true;

  return ALPHA_ARG_SIZE (mode, type, 0) > 2;
}

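/* Worked example (assuming ALPHA_ARG_SIZE counts 8-byte argument words):
   a 24-byte struct needs three words, so it is forced onto the stack,
   while a 16-byte struct still travels in two registers.  */
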
/* Define the offset between two registers, one to be eliminated, and the
   other its replacement, at the start of a routine.  */

int
unicosmk_initial_elimination_offset (int from, int to)
{
  int fixed_size;

  fixed_size = alpha_sa_size ();
  if (fixed_size != 0)
    fixed_size += 48;

  if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return -fixed_size;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return 0;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return (ALPHA_ROUND (current_function_outgoing_args_size)
	    + ALPHA_ROUND (get_frame_size ()));
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return (ALPHA_ROUND (fixed_size)
	    + ALPHA_ROUND (get_frame_size ()
			   + current_function_outgoing_args_size));
  else
    gcc_unreachable ();
}

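/* Worked example (assuming ALPHA_ROUND rounds up to a 16-byte multiple):
   with a 16-byte save area (fixed_size = 16 + 48 = 64), a 32-byte frame
   and 16 bytes of outgoing arguments, eliminating the arg pointer to the
   stack pointer yields ALPHA_ROUND (64) + ALPHA_ROUND (32 + 16) = 112.  */
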
/* Output the module name for .ident and .end directives.  We have to strip
   directories and make sure that the module name starts with a letter
   or '$'.  */

static void
unicosmk_output_module_name (FILE *file)
{
  const char *name = lbasename (main_input_filename);
  unsigned len = strlen (name);
  char *clean_name = alloca (len + 2);
  char *ptr = clean_name;

  /* CAM only accepts module names that start with a letter or '$'.  We
     prefix the module name with a '$' if necessary.  */

  if (!ISALPHA (*name))
    *ptr++ = '$';
  memcpy (ptr, name, len + 1);
  clean_symbol_name (clean_name);
  fputs (clean_name, file);
}

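/* E.g. a main input file named "99bottles.c" would be announced as
   something like "$99bottles_c": the '$' is prefixed because the name
   does not start with a letter, and clean_symbol_name is assumed to
   rewrite the characters CAM cannot digest (here the '.').  */
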
/* Output the definition of a common variable.  */

void
unicosmk_output_common (FILE *file, const char *name, int size, int align)
{
  tree name_tree;
  printf ("T3E__: common %s\n", name);

  in_section = NULL;
  fputs ("\t.endp\n\n\t.psect ", file);
  assemble_name (file, name);
  fprintf (file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
  fprintf (file, "\t.byte\t0:%d\n", size);

  /* Mark the symbol as defined in this module.  */
  name_tree = get_identifier (name);
  TREE_ASM_WRITTEN (name_tree) = 1;
}

#define SECTION_PUBLIC SECTION_MACH_DEP
#define SECTION_MAIN (SECTION_PUBLIC << 1)
static int current_section_align;

/* A get_unnamed_section callback for switching to the text section.  */

static void
unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  static int count = 0;
  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
}

/* A get_unnamed_section callback for switching to the data section.  */

static void
unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  static int count = 1;
  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
}

/* Implement TARGET_ASM_INIT_SECTIONS.

   The Cray assembler is really weird with respect to sections.  It has only
   named sections and you can't reopen a section once it has been closed.
   This means that we have to generate unique names whenever we want to
   reenter the text or the data section.  */

static void
unicosmk_init_sections (void)
{
  text_section = get_unnamed_section (SECTION_CODE,
				      unicosmk_output_text_section_asm_op,
				      NULL);
  data_section = get_unnamed_section (SECTION_WRITE,
				      unicosmk_output_data_section_asm_op,
				      NULL);
  readonly_data_section = data_section;
}

static unsigned int
unicosmk_section_type_flags (tree decl, const char *name,
			     int reloc ATTRIBUTE_UNUSED)
{
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  if (!decl)
    return flags;

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      if (align_functions_log > current_section_align)
	current_section_align = align_functions_log;

      if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
	flags |= SECTION_MAIN;
    }
  else
    current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);

  if (TREE_PUBLIC (decl))
    flags |= SECTION_PUBLIC;

  return flags;
}

/* Generate a section name for decl and associate it with the
   declaration.  */

static void
unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;
  int len;

  gcc_assert (decl);

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = default_strip_name_encoding (name);
  len = strlen (name);

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      char *string;

      /* It is essential that we prefix the section name here because
	 otherwise the section names generated for constructors and
	 destructors confuse collect2.  */
      string = alloca (len + 6);
      sprintf (string, "code@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
  else if (TREE_PUBLIC (decl))
    DECL_SECTION_NAME (decl) = build_string (len, name);
  else
    {
      char *string;

      string = alloca (len + 6);
      sprintf (string, "data@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
}

/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
unicosmk_asm_named_section (const char *name, unsigned int flags,
			    tree decl ATTRIBUTE_UNUSED)
{
  const char *kind;

  /* Close the previous section.  */
  fputs ("\t.endp\n\n", asm_out_file);

  /* Find out what kind of section we are opening.  */
  if (flags & SECTION_MAIN)
    fputs ("\t.start\tmain\n", asm_out_file);

  if (flags & SECTION_CODE)
    kind = "code";
  else if (flags & SECTION_PUBLIC)
    kind = "common";
  else
    kind = "data";

  if (current_section_align != 0)
    fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
	     current_section_align, kind);
  else
    fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
}

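/* E.g. entering a public data section named data@foo (hypothetical)
   with current_section_align == 3 emits:

	.endp

	.psect	data@foo,3,common
*/
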
static void
unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
{
  if (DECL_P (decl)
      && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
    unicosmk_unique_section (decl, 0);
}

/* Output an alignment directive.  We have to use the macro 'gcc@code@align'
   in code sections because .align fills unused space with zeroes.  */

void
unicosmk_output_align (FILE *file, int align)
{
  if (inside_function)
    fprintf (file, "\tgcc@code@align\t%d\n", align);
  else
    fprintf (file, "\t.align\t%d\n", align);
}

/* Add a case vector to the current function's list of deferred case
   vectors.  Case vectors have to be put into a separate section because CAM
   does not allow data definitions in code sections.  */

void
unicosmk_defer_case_vector (rtx lab, rtx vec)
{
  struct machine_function *machine = cfun->machine;

  vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
  machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
					  machine->addr_list);
}

/* Output a case vector.  */

static void
unicosmk_output_addr_vec (FILE *file, rtx vec)
{
  rtx lab = XEXP (vec, 0);
  rtx body = XEXP (vec, 1);
  int vlen = XVECLEN (body, 0);
  int idx;

  (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));

  for (idx = 0; idx < vlen; idx++)
    ASM_OUTPUT_ADDR_VEC_ELT
      (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
}

/* Output current function's deferred case vectors.  */

static void
unicosmk_output_deferred_case_vectors (FILE *file)
{
  struct machine_function *machine = cfun->machine;
  rtx t;

  if (machine->addr_list == NULL_RTX)
    return;

  switch_to_section (data_section);
  for (t = machine->addr_list; t; t = XEXP (t, 1))
    unicosmk_output_addr_vec (file, XEXP (t, 0));
}

/* Generate the name of the SSIB section for the current function.  */

#define SSIB_PREFIX "__SSIB_"
#define SSIB_PREFIX_LEN 7

static const char *
unicosmk_ssib_name (void)
{
  /* This is ok since CAM won't be able to deal with names longer than that
     anyway.  */
  static char name[256];

  rtx x;
  const char *fnname;
  int len;

  x = DECL_RTL (cfun->decl);
  gcc_assert (GET_CODE (x) == MEM);
  x = XEXP (x, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF);
  fnname = XSTR (x, 0);

  len = strlen (fnname);
  if (len + SSIB_PREFIX_LEN > 255)
    len = 255 - SSIB_PREFIX_LEN;

  strcpy (name, SSIB_PREFIX);
  strncpy (name + SSIB_PREFIX_LEN, fnname, len);
  name[len + SSIB_PREFIX_LEN] = 0;

  return name;
}

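/* E.g. for a function named "main" this returns "__SSIB_main".  */
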
/* Set up the dynamic subprogram information block (DSIB) and update the
   frame pointer register ($15) for subroutines which have a frame.  If the
   subroutine doesn't have a frame, simply increment $15.  */

static void
unicosmk_gen_dsib (unsigned long *imaskP)
{
  if (alpha_procedure_type == PT_STACK)
    {
      const char *ssib_name;
      rtx mem;

      /* Allocate 64 bytes for the DSIB.  */
      FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
				  GEN_INT (-64))));
      emit_insn (gen_blockage ());

      /* Save the return address.  */
      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
      (*imaskP) &= ~(1UL << REG_RA);

      /* Save the old frame pointer.  */
      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
      (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);

      emit_insn (gen_blockage ());

      /* Store the SSIB pointer.  */
      ssib_name = ggc_strdup (unicosmk_ssib_name ());
      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
      set_mem_alias_set (mem, alpha_sr_alias_set);

      FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
			   gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));

      /* Save the CIW index.  */
      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));

      emit_insn (gen_blockage ());

      /* Set the new frame pointer.  */
      FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
				  stack_pointer_rtx, GEN_INT (64))));
    }
  else
    {
      /* Increment the frame pointer register to indicate that we do not
	 have a frame.  */
      FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
				  hard_frame_pointer_rtx, const1_rtx)));
    }
}

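/* Resulting layout of the 64-byte DSIB, as offsets from the new $sp:
   56 return address, 48 old frame pointer, 32 SSIB pointer, 24 CIW
   index (taken from $25); the remaining slots are left untouched and
   the new frame pointer is $sp + 64.  */
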
/* Output the static subroutine information block for the current
   function.  */

static void
unicosmk_output_ssib (FILE *file, const char *fnname)
{
  int len;
  int i;
  rtx x;
  rtx ciw;
  struct machine_function *machine = cfun->machine;

  in_section = NULL;
  fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
	   unicosmk_ssib_name ());

  /* Some required stuff and the function name length.  */
  len = strlen (fnname);
  fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);

  /* Saved registers
     ??? We don't do that yet.  */
  fputs ("\t.quad\t0\n", file);

  /* Function address.  */
  fputs ("\t.quad\t", file);
  assemble_name (file, fnname);
  putc ('\n', file);

  fputs ("\t.quad\t0\n", file);
  fputs ("\t.quad\t0\n", file);

  /* Function name.
     ??? We do it the same way Cray CC does it but this could be
     simplified.  */
  for (i = 0; i < len; i++)
    fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
  if ((len % 8) == 0)
    fputs ("\t.quad\t0\n", file);
  else
    fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);

  /* All call information words used in the function.  */
  for (x = machine->first_ciw; x; x = XEXP (x, 1))
    {
      ciw = XEXP (x, 0);
#if HOST_BITS_PER_WIDE_INT == 32
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
	       CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
#else
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
#endif
    }
}

/* Add a call information word (CIW) to the list of the current function's
   CIWs and return its index.

   X is a CONST_INT or CONST_DOUBLE representing the CIW.  */

rtx
unicosmk_add_call_info_word (rtx x)
{
  rtx node;
  struct machine_function *machine = cfun->machine;

  node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
  if (machine->first_ciw == NULL_RTX)
    machine->first_ciw = node;
  else
    XEXP (machine->last_ciw, 1) = node;

  machine->last_ciw = node;
  ++machine->ciw_count;

  return GEN_INT (machine->ciw_count
		  + strlen (current_function_name ())/8 + 5);
}

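/* Worked example: the returned value is the quadword index of the CIW
   within the SSIB emitted above.  For the third CIW of a function named
   "foo", that is 3 + strlen ("foo") / 8 + 5 = 8: five fixed quadwords,
   one quadword of (padded) name, then the CIWs themselves.  */
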
/* The Cray assembler doesn't accept extern declarations for symbols which
   are defined in the same file.  We have to keep track of all global
   symbols which are referenced and/or defined in a source file and output
   extern declarations for those which are referenced but not defined at
   the end of file.  */

/* List of identifiers for which an extern declaration might have to be
   emitted.  */
/* FIXME: needs to use GC, so it can be saved and restored for PCH.  */

struct unicosmk_extern_list
{
  struct unicosmk_extern_list *next;
  const char *name;
};

static struct unicosmk_extern_list *unicosmk_extern_head = 0;

/* Output extern declarations which are required for every asm file.  */

static void
unicosmk_output_default_externs (FILE *file)
{
  static const char *const externs[] =
    { "__T3E_MISMATCH" };

  int i;
  int n;

  n = ARRAY_SIZE (externs);

  for (i = 0; i < n; i++)
    fprintf (file, "\t.extern\t%s\n", externs[i]);
}

/* Output extern declarations for global symbols which have been
   referenced but not defined.  */

static void
unicosmk_output_externs (FILE *file)
{
  struct unicosmk_extern_list *p;
  const char *real_name;
  int len;
  tree name_tree;

  len = strlen (user_label_prefix);
  for (p = unicosmk_extern_head; p != 0; p = p->next)
    {
      /* We have to strip the encoding and possibly remove user_label_prefix
	 from the identifier in order to handle -fleading-underscore and
	 explicit asm names correctly (cf. gcc.dg/asm-names-1.c).  */
      real_name = default_strip_name_encoding (p->name);
      if (len && p->name[0] == '*'
	  && !memcmp (real_name, user_label_prefix, len))
	real_name += len;

      name_tree = get_identifier (real_name);
      if (! TREE_ASM_WRITTEN (name_tree))
	{
	  TREE_ASM_WRITTEN (name_tree) = 1;
	  fputs ("\t.extern\t", file);
	  assemble_name (file, p->name);
	  putc ('\n', file);
	}
    }
}

/* Record an extern.  */

void
unicosmk_add_extern (const char *name)
{
  struct unicosmk_extern_list *p;

  p = (struct unicosmk_extern_list *)
       xmalloc (sizeof (struct unicosmk_extern_list));
  p->next = unicosmk_extern_head;
  p->name = name;
  unicosmk_extern_head = p;
}

/* The Cray assembler generates incorrect code if identifiers which
   conflict with register names are used as instruction operands.  We have
   to replace such identifiers with DEX expressions.  */

/* Structure to collect identifiers which have been replaced by DEX
   expressions.  */
/* FIXME: needs to use GC, so it can be saved and restored for PCH.  */

struct unicosmk_dex {
  struct unicosmk_dex *next;
  const char *name;
};

/* List of identifiers which have been replaced by DEX expressions.  The DEX
   number is determined by the position in the list.  */

static struct unicosmk_dex *unicosmk_dex_list = NULL;

/* The number of elements in the DEX list.  */

static int unicosmk_dex_count = 0;

/* Check if NAME must be replaced by a DEX expression.  */

static int
unicosmk_special_name (const char *name)
{
  if (name[0] == '*')
    ++name;
  if (name[0] == '$')
    ++name;

  if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
    return 0;

  switch (name[1])
    {
    case '1':  case '2':
      return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));

    case '3':
      return (name[2] == '\0'
	      || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));

    default:
      return (ISDIGIT (name[1]) && name[2] == '\0');
    }
}

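/* E.g. "r31", "$f5" and "R10" all qualify because they clash with CAM
   register names, whereas "r32", "f310" and "foo" do not.  */
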
/* Return the DEX number if X must be replaced by a DEX expression and 0
   otherwise.  */

static int
unicosmk_need_dex (rtx x)
{
  struct unicosmk_dex *dex;
  const char *name;
  int i;

  if (GET_CODE (x) != SYMBOL_REF)
    return 0;

  name = XSTR (x, 0);
  if (! unicosmk_special_name (name))
    return 0;

  i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      if (! strcmp (name, dex->name))
	return i;
      --i;
    }

  dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
  dex->name = name;
  dex->next = unicosmk_dex_list;
  unicosmk_dex_list = dex;

  ++unicosmk_dex_count;
  return unicosmk_dex_count;
}

/* Output the DEX definitions for this file.  */

static void
unicosmk_output_dex (FILE *file)
{
  struct unicosmk_dex *dex;
  int i;

  if (unicosmk_dex_list == NULL)
    return;

  fprintf (file, "\t.dexstart\n");

  i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      fprintf (file, "\tDEX (%d) = ", i);
      assemble_name (file, dex->name);
      putc ('\n', file);
      --i;
    }

  fprintf (file, "\t.dexend\n");
}

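/* With two replaced identifiers (names hypothetical) the output looks
   like:

	.dexstart
	DEX (2) = r31
	DEX (1) = f1
	.dexend

   The most recently recorded name receives the highest number.  */
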
/* Output text to appear at the beginning of an assembler file.  */

static void
unicosmk_file_start (void)
{
  int i;

  fputs ("\t.ident\t", asm_out_file);
  unicosmk_output_module_name (asm_out_file);
  fputs ("\n\n", asm_out_file);

  /* The Unicos/Mk assembler uses different register names.  Instead of trying
     to support them, we simply use micro definitions.  */

  /* CAM has different register names: rN for the integer register N and fN
     for the floating-point register N.  Instead of trying to use these in
     alpha.md, we define the symbols $N and $fN to refer to the appropriate
     register.  */

  for (i = 0; i < 32; ++i)
    fprintf (asm_out_file, "$%d <- r%d\n", i, i);

  for (i = 0; i < 32; ++i)
    fprintf (asm_out_file, "$f%d <- f%d\n", i, i);

  putc ('\n', asm_out_file);

  /* The .align directive fills unused space with zeroes which does not work
     in code sections.  We define the macro 'gcc@code@align' which uses nops
     instead.  Note that it assumes that code sections always have the
     biggest possible alignment since . refers to the current offset from
     the beginning of the section.  */

  fputs ("\t.macro gcc@code@align n\n", asm_out_file);
  fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
  fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
  fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
  fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
  fputs ("\tbis r31,r31,r31\n", asm_out_file);
  fputs ("\t.endr\n", asm_out_file);
  fputs ("\t.endif\n", asm_out_file);
  fputs ("\t.endm gcc@code@align\n\n", asm_out_file);

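  /* E.g. "gcc@code@align 3" pads to an 8-byte boundary: if the current
     offset in the section is not a multiple of 8 (gcc@n@bytes), the
     .repeat loop emits one "bis r31,r31,r31" no-op per remaining 4-byte
     instruction slot.  */
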
  /* Output extern declarations which should always be visible.  */
  unicosmk_output_default_externs (asm_out_file);

  /* Open a dummy section.  We always need to be inside a section for the
     section-switching code to work correctly.
     ??? This should be a module id or something like that.  I still have to
     figure out what the rules for those are.  */
  fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
}

/* Output text to appear at the end of an assembler file.  This includes all
   pending extern declarations and DEX expressions.  */

static void
unicosmk_file_end (void)
{
  fputs ("\t.endp\n\n", asm_out_file);

  /* Output all pending externs.  */
  unicosmk_output_externs (asm_out_file);

  /* Output dex definitions used for functions whose names conflict with
     register names.  */
  unicosmk_output_dex (asm_out_file);

  fputs ("\t.end\t", asm_out_file);
  unicosmk_output_module_name (asm_out_file);
  putc ('\n', asm_out_file);
}

#else

static void
unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
{}

static void
unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
{}

static void
unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
		      const char * fnname ATTRIBUTE_UNUSED)
{}

rtx
unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

static int
unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
{
  return 0;
}

#endif /* TARGET_ABI_UNICOSMK */

static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_UNICOSMK)
    {
      /* Prevent gcc from generating calls to __divsi3.  */
      set_optab_libfunc (sdiv_optab, SImode, 0);
      set_optab_libfunc (udiv_optab, SImode, 0);

      /* Use the functions provided by the system library
	 for DImode integer division.  */
      set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
      set_optab_libfunc (udiv_optab, DImode, "$uldiv");
    }
  else if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
	 remainder.  */
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
    }
}

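/* Thus a 32-bit signed division on VMS, for example, becomes a call to
   OTS$DIV_I rather than to the libgcc routine __divsi3.  */
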
/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
#endif

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p

#if TARGET_ABI_UNICOSMK
# undef TARGET_INSERT_ATTRIBUTES
# define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
# undef TARGET_ASM_UNIQUE_SECTION
# define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
# undef TARGET_ASM_FUNCTION_RODATA_SECTION
# define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
# undef TARGET_ASM_GLOBALIZE_LABEL
# define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
# undef TARGET_MUST_PASS_IN_STACK
# define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
#endif

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif

#ifdef OBJECT_FORMAT_ELF
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
#endif

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#if TARGET_ABI_UNICOSMK
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START unicosmk_file_start
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END unicosmk_file_end
#else
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
#endif

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem

#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

/* The Alpha architecture does not require sequential consistency.  See
   http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
   for an example of how it can be violated in practice.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION alpha_handle_option

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-alpha.h"