/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "function.h"
#include "ggc.h"
#include "integrate.h"
#include "target.h"
#include "target-def.h"
#include "langhooks.h"
#include <splay-tree.h>
#include "cfglayout.h"
#include "tree-gimple.h"
#include "tree-flow.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"
/* Specify which cpu to schedule for.  */
enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */
enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};
/* Specify how accurate floating-point traps need to be.  */
enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */
enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */
enum alpha_fp_trap_mode alpha_fptm;

/* Save information from a "cmpxx" operation until the branch or scc is
   emitted.  */
struct alpha_compare alpha_compare;

/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */
static int inside_function = FALSE;
/* The number of cycles of latency we should assume on memory reads.  */
int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */
static int alpha_function_needs_gp;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) int alpha_sr_alias_set;

/* The assembler name of the current function.  */
static const char *alpha_fnname;
/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};

static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),		/* fp_add */
    COSTS_N_INSNS (6),		/* fp_mult */
    COSTS_N_INSNS (34),		/* fp_div_sf */
    COSTS_N_INSNS (63),		/* fp_div_df */
    COSTS_N_INSNS (23),		/* int_mult_si */
    COSTS_N_INSNS (23),		/* int_mult_di */
    COSTS_N_INSNS (2),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (97),		/* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (15),		/* fp_div_sf */
    COSTS_N_INSNS (22),		/* fp_div_df */
    COSTS_N_INSNS (8),		/* int_mult_si */
    COSTS_N_INSNS (12),		/* int_mult_di */
    COSTS_N_INSNS (1) + 1,	/* int_shift */
    COSTS_N_INSNS (1),		/* int_cmov */
    COSTS_N_INSNS (83),		/* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (12),		/* fp_div_sf */
    COSTS_N_INSNS (15),		/* fp_div_df */
    COSTS_N_INSNS (7),		/* int_mult_si */
    COSTS_N_INSNS (7),		/* int_mult_di */
    COSTS_N_INSNS (1),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (86),		/* int_div */
  },
};

/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one of them.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),		/* fp_add */
  COSTS_N_INSNS (1),		/* fp_mult */
  COSTS_N_INSNS (1),		/* fp_div_sf */
  COSTS_N_INSNS (1) + 1,	/* fp_div_df */
  COSTS_N_INSNS (1) + 1,	/* int_mult_si */
  COSTS_N_INSNS (1) + 2,	/* int_mult_di */
  COSTS_N_INSNS (1),		/* int_shift */
  COSTS_N_INSNS (1),		/* int_cmov */
  COSTS_N_INSNS (6),		/* int_div */
};
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
#define NUM_ARGS current_function_args_info.num_args
#else
#define NUM_ARGS current_function_args_info
#endif

/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *, tree);
#endif

static void unicosmk_output_deferred_case_vectors (FILE *);
static void unicosmk_gen_dsib (unsigned long *);
static void unicosmk_output_ssib (FILE *, const char *);
static int unicosmk_need_dex (rtx);
/* Implement TARGET_HANDLE_OPTION.  */

static bool
alpha_handle_option (size_t code, const char *arg, int value)
{
  switch (code)
    {
    case OPT_mfp_regs:
      if (value == 0)
	target_flags |= MASK_SOFT_FP;
      break;

    case OPT_mieee:
    case OPT_mieee_with_inexact:
      target_flags |= MASK_IEEE_CONFORMANT;
      break;

    case OPT_mtls_size_:
      if (value != 16 && value != 32 && value != 64)
	error ("bad value %qs for -mtls-size switch", arg);
      break;
    }

  return true;
}
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
alpha_mangle_type (tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* Parse target option strings.  */

void
override_options (void)
{
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
  } cpu_table[] = {
    { "ev4",	PROCESSOR_EV4, 0 },
    { "ev45",	PROCESSOR_EV4, 0 },
    { "21064",	PROCESSOR_EV4, 0 },
    { "ev5",	PROCESSOR_EV5, 0 },
    { "21164",	PROCESSOR_EV5, 0 },
    { "ev56",	PROCESSOR_EV5, MASK_BWX },
    { "21164a",	PROCESSOR_EV5, MASK_BWX },
    { "pca56",	PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "ev6",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "21264",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "ev67",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
    { "21264a",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
    { 0, PROCESSOR_MAX, 0 }
  };

  int i;
  /* Unicos/Mk doesn't have shared libraries.  */
  if (TARGET_ABI_UNICOSMK && flag_pic)
    {
      warning (0, "-f%s ignored for Unicos/Mk (not supported)",
	       (flag_pic > 1) ? "PIC" : "pic");
      flag_pic = 0;
    }

  /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
     floating-point instructions.  Make that the default for this target.  */
  if (TARGET_ABI_UNICOSMK)
    alpha_fprm = ALPHA_FPRM_DYN;
  else
    alpha_fprm = ALPHA_FPRM_NORM;

  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  /* We cannot use su and sui qualifiers for conversion instructions on
     Unicos/Mk.  I'm not sure if this is due to assembler or hardware
     limitations.  Right now, we issue a warning if -mieee is specified
     and then ignore it; eventually, we should either get it right or
     disable the option altogether.  */

  if (TARGET_IEEE)
    {
      if (TARGET_ABI_UNICOSMK)
	warning (0, "-mieee not supported on Unicos/Mk");
      else
	{
	  alpha_tp = ALPHA_TP_INSN;
	  alpha_fptm = ALPHA_FPTM_SU;
	}
    }

  if (TARGET_IEEE_WITH_INEXACT)
    {
      if (TARGET_ABI_UNICOSMK)
	warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
      else
	{
	  alpha_tp = ALPHA_TP_INSN;
	  alpha_fptm = ALPHA_FPTM_SUI;
	}
    }
  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
	alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
	alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
	alpha_tp = ALPHA_TP_INSN;
      else
	error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
    }

  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
	alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
	alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
	alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
	alpha_fprm = ALPHA_FPRM_DYN;
      else
	error ("bad value %qs for -mfp-rounding-mode switch",
	       alpha_fprm_string);
    }

  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
	alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
	alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
	alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
	alpha_fptm = ALPHA_FPTM_SUI;
      else
	error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
    }
  if (alpha_cpu_string)
    {
      for (i = 0; cpu_table [i].name; i++)
	if (! strcmp (alpha_cpu_string, cpu_table [i].name))
	  {
	    alpha_tune = alpha_cpu = cpu_table [i].processor;
	    target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
	    target_flags |= cpu_table [i].flags;
	    break;
	  }
      if (! cpu_table [i].name)
	error ("bad value %qs for -mcpu switch", alpha_cpu_string);
    }

  if (alpha_tune_string)
    {
      for (i = 0; cpu_table [i].name; i++)
	if (! strcmp (alpha_tune_string, cpu_table [i].name))
	  {
	    alpha_tune = cpu_table [i].processor;
	    break;
	  }
      if (! cpu_table [i].name)
	error ("bad value %qs for -mcpu switch", alpha_tune_string);
    }
  /* Do some sanity checks on the above options.  */

  if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
    {
      warning (0, "trap mode not supported on Unicos/Mk");
      alpha_fptm = ALPHA_FPTM_N;
    }

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
    {
      warning (0, "fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (alpha_cpu == PROCESSOR_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
	 arithmetic traps.  Which means we can do software completion
	 without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }

  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
	{
	  warning (0, "rounding mode not supported for VAX floats");
	  alpha_fprm = ALPHA_FPRM_NORM;
	}
      if (alpha_fptm == ALPHA_FPTM_SUI)
	{
	  warning (0, "trap mode not supported for VAX floats");
	  alpha_fptm = ALPHA_FPTM_SU;
	}
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
	warning (0, "128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }
  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
	&& (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
	     && ISDIGIT ((unsigned char)alpha_mlat_string[1])
	     && alpha_mlat_string[2] == '\0')
      {
	static int const cache_latency[][4] =
	{
	  { 3, 30, -1 },	/* ev4 -- Bcache is a guess */
	  { 2, 12, 38 },	/* ev5 -- Bcache from PC164 LMbench numbers */
	  { 3, 12, 30 },	/* ev6 -- Bcache from DS20 LMbench.  */
	};

	lat = alpha_mlat_string[1] - '0';
	if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
	  {
	    warning (0, "L%d cache latency unknown for %s",
		     lat, alpha_cpu_name[alpha_tune]);
	    lat = 3;
	  }
	else
	  lat = cache_latency[alpha_tune][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
	/* Most current memories have about 370ns latency.  This is
	   a reasonable guess for a fast cpu.  */
	lat = 150;
      }
    else
      {
	warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
	lat = 3;
      }

    alpha_memory_latency = lat;
  }

  /* Default the definition of "small data" to 8 bytes.  */
  if (!g_switch_set)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize and also if
     we are writing ECOFF symbols to work around a bug in DEC's assembler.  */
  if (optimize > 0 && write_symbols != SDB_DEBUG)
    {
      if (align_loops <= 0)
	align_loops = 16;
      if (align_jumps <= 0)
	align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;
  /* Acquire a unique set number for our register saves and restores.  */
  alpha_sr_alias_set = new_alias_set ();

  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
}
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
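/* For illustration (an editor's sketch; the values are made up):
   0x00ff00ff00ff00ff is a zap mask, since every byte is 0x00 or 0xff,
   while 0x00000000ffff0100 is not, because of the 0x01 byte.  */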
/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
    default:
      gcc_unreachable ();
    }
}
/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG
	  && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
	{
	  op = reg_equiv_memory_loc[REGNO (tmp)];
	  if (op == 0)
	    return 0;
	}
    }
  return op;
}
/* The scalar modes supported differ from the default check-what-c-supports
   version in that sometimes TFmode is available even when long double
   indicates only DFmode.  On unicosmk, we have the situation that HImode
   doesn't map to any C type, but of course we still support that.  */

static bool
alpha_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode: /* via optabs.c */
      return true;

    case SFmode:
    case DFmode:
      return true;

    case TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}
/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (enum machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}
/* Return 1 if this function can directly return via $26.  */

int
direct_return (void)
{
  return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
	  && reload_completed
	  && alpha_sa_size () == 0
	  && get_frame_size () == 0
	  && current_function_outgoing_args_size == 0
	  && current_function_pretend_args_size == 0);
}
/* Return the ADDR_VEC associated with a tablejump insn.  */

rtx
alpha_tablejump_addr_vec (rtx insn)
{
  rtx tmp;

  tmp = JUMP_LABEL (insn);
  if (!tmp)
    return NULL_RTX;
  tmp = NEXT_INSN (tmp);
  if (!tmp)
    return NULL_RTX;
  if (GET_CODE (tmp) == JUMP_INSN
      && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
    return PATTERN (tmp);
  return NULL_RTX;
}
/* Return the label of the predicted edge, or CONST0_RTX if we don't know.  */

rtx
alpha_tablejump_best_label (rtx insn)
{
  rtx jump_table = alpha_tablejump_addr_vec (insn);
  rtx best_label = NULL_RTX;

  /* ??? Once the CFG doesn't keep getting completely rebuilt, look
     there for edge frequency counts from profile data.  */

  if (jump_table)
    {
      int n_labels = XVECLEN (jump_table, 1);
      int best_count = -1;
      int i, j;

      for (i = 0; i < n_labels; i++)
	{
	  int count = 1;

	  for (j = i + 1; j < n_labels; j++)
	    if (XEXP (XVECEXP (jump_table, 1, i), 0)
		== XEXP (XVECEXP (jump_table, 1, j), 0))
	      count++;

	  if (count > best_count)
	    best_count = count, best_label = XVECEXP (jump_table, 1, i);
	}
    }

  return best_label ? best_label : const0_rtx;
}
/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return TLS_MODEL_NONE;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}
/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}
/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".sdata") == 0
	  || strcmp (section, ".sbss") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
	 in sdata because it might be too big when completed.  */
      if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
	return true;
    }

  return false;
}
#if TARGET_ABI_OPEN_VMS
static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname [symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF		\
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST		\
       && GET_CODE (XEXP (X, 0)) == PLUS	\
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif
/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */

bool
alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x)
      && (strict
	  ? STRICT_REG_OK_FOR_BASE_P (x)
	  : NONSTRICT_REG_OK_FOR_BASE_P (x)))
    return true;

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (x))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	x = SUBREG_REG (x);

      if (REG_P (x))
	{
	  if (! strict
	      && NONSTRICT_REG_OK_FP_BASE_P (x)
	      && GET_CODE (ofs) == CONST_INT)
	    return true;
	  if ((strict
	       ? STRICT_REG_OK_FOR_BASE_P (x)
	       : NONSTRICT_REG_OK_FOR_BASE_P (x))
	      && CONSTANT_ADDRESS_P (ofs))
	    return true;
	}
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as
     are small data symbols.  */
  else if (TARGET_EXPLICIT_RELOCS)
    {
      if (small_symbolic_operand (x, Pmode))
	return true;

      if (GET_CODE (x) == LO_SUM)
	{
	  rtx ofs = XEXP (x, 1);
	  x = XEXP (x, 0);

	  /* Discard non-paradoxical subregs.  */
	  if (GET_CODE (x) == SUBREG
	      && (GET_MODE_SIZE (GET_MODE (x))
		  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	    x = SUBREG_REG (x);

	  /* Must have a valid base register.  */
	  if (! (REG_P (x)
		 && (strict
		     ? STRICT_REG_OK_FOR_BASE_P (x)
		     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
	    return false;

	  /* The symbol must be local.  */
	  if (local_symbolic_operand (ofs, Pmode)
	      || dtp32_symbolic_operand (ofs, Pmode)
	      || tp32_symbolic_operand (ofs, Pmode))
	    return true;
	}
    }

  return false;
}
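/* As an illustrative sketch (not from the original source), the DImode
   address forms accepted above include:

	(reg $16)				base register
	(plus (reg $16) (const_int 64))		register plus +/- 32k offset
	(and (plus (reg $16) (const_int 5))
	     (const_int -8))			ldq_u-style unaligned access

   plus LO_SUM and small-data symbols when explicit relocations are in
   use.  */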
/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

rtx
alpha_legitimize_address (rtx x, rtx scratch,
			  enum machine_mode mode ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
			       XEXP (XEXP (XEXP (x, 1), 0), 0),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }

  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.  */
  if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, insn, dest, seq;

      switch (tls_symbolic_operand_type (x))
	{
	case TLS_MODEL_NONE:
	  break;

	case TLS_MODEL_GLOBAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  dest = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
	  insn = gen_call_value_osf_tlsgd (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  CONST_OR_PURE_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  emit_libcall_block (insn, dest, r0, x);
	  return dest;

	case TLS_MODEL_LOCAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  scratch = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
	  insn = gen_call_value_osf_tlsldm (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  CONST_OR_PURE_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				UNSPEC_TLSLDM_CALL);
	  emit_libcall_block (insn, scratch, r0, eqv);

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);

	  if (alpha_tls_size == 64)
	    {
	      dest = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
	      emit_insn (gen_adddi3 (dest, dest, scratch));
	      return dest;
	    }
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, scratch, insn);
	      scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, scratch, eqv);

	case TLS_MODEL_INITIAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);
	  scratch = gen_reg_rtx (Pmode);
	  dest = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
	  emit_insn (gen_adddi3 (dest, tp, scratch));
	  return dest;

	case TLS_MODEL_LOCAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, tp, insn);
	      tp = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, tp, eqv);

	default:
	  gcc_unreachable ();
	}

      if (local_symbolic_operand (x, Pmode))
	{
	  if (small_symbolic_operand (x, Pmode))
	    return x;
	  else
	    {
	      if (can_create_pseudo_p ())
		scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch,
				      gen_rtx_HIGH (Pmode, x)));
	      return gen_rtx_LO_SUM (Pmode, scratch, x);
	    }
	}
    }

  return NULL;

 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
			       (!can_create_pseudo_p () ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
			       (!can_create_pseudo_p () ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);

    return plus_constant (x, low);
  }
}
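/* A worked example of split_addend above (values chosen for illustration
   only): addend = 0x1234ffff gives
   low  = ((0xffff) ^ 0x8000) - 0x8000 = -1 and high = 0x12350000,
   so the address is rebuilt as ((x + 0x12350000) + -1), each
   displacement now emittable via ldah/lda.  */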
/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}
/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}
int
some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  return small_symbolic_operand (x, Pmode) != 0;
}

static int
split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  if (small_symbolic_operand (x, Pmode))
    {
      x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
      *px = x;
      return -1;
    }

  return 0;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
  return x;
}
/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}
/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
				 enum machine_mode mode ATTRIBUTE_UNUSED,
				 int opnum, int type,
				 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
	return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
	 in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
				      GEN_INT (high)),
			GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, type);
      return x;
    }

  return NULL_RTX;
}
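/* Sketch of the overflow check above, with illustrative numbers:
   val = 0x7fff8000 splits into low = -0x8000 and high = -0x80000000,
   so high + low != val and we refuse the split; a displacement such
   as val = 0x12348000 (low = -0x8000, high = 0x12350000) passes and
   becomes an ldah plus a 16-bit memory displacement.  */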
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (optimize_size)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_tune];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
	 nearly anywhere with no cost.  If it is a valid operand for an
	 ADD or AND, likewise return 0 if we know it will be used in that
	 context.  Otherwise, return 2 since it might be used there later.
	 All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
	{
	  *total = 0;
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	*total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
	       || (outer_code == AND && and_operand (x, VOIDmode)))
	*total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
	*total = 2;
      else
	*total = COSTS_N_INSNS (2);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
	/* Estimate of cost for call_pal rduniq.  */
	/* ??? How many insns do we emit here?  More than one...  */
	*total = COSTS_N_INSNS (15);
      else
	/* Otherwise we do a load from the GOT.  */
	*total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
      return true;

    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
	       && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
	{
	  *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
		    + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
	  return true;
	}
      return false;

    case MULT:
      if (float_mode_p)
	*total = cost_data->fp_mult;
      else if (mode == DImode)
	*total = cost_data->int_mult_di;
      else
	*total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) <= 3)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else
	*total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
	*total = cost_data->int_div;
      else if (mode == SFmode)
	*total = cost_data->fp_div_sf;
      else
	*total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    case FLOAT_EXTEND:
      if (GET_CODE (XEXP (x, 0)) == MEM)
	*total = 0;
      else
	*total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  SCRATCH is a free register for use in reloading out
   of range stack slots.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT disp, offset;

  gcc_assert (GET_CODE (ref) == MEM);

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  else
    disp = 0;

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
  if (MEM_ALIGN (ref) >= 32)
    offset = 0;
  else
    offset = disp & 3;

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  if (WORDS_BIG_ENDIAN)
    offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
  else
    offset *= 8;
  *pbitnum = GEN_INT (offset);
}
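/* Example (an illustrative sketch): for a HImode REF at (plus $9 6)
   whose alignment is not known to be 32 bits, disp = 6 gives offset = 2,
   so *PALIGNED_MEM is the SImode word at (plus $9 4) and *PBITNUM is 16,
   the bit position of the halfword within that word (little-endian).  */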
/* Similar, but just get the address.  Handle the two reload cases.  */

rtx
get_unaligned_address (rtx ref)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  gcc_assert (GET_CODE (ref) == MEM);

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset);
}

/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
   X is always returned in a register.  */

rtx
get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
{
  if (GET_CODE (addr) == PLUS)
    {
      ofs += INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
}
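/* Example (illustrative values): for ADDR == (plus $9 (const_int 6))
   and OFS == 3, we return a register holding $9 + ((6 + 3) & 7),
   i.e. $9 + 1, which has the same low three bits as ADDR + OFS.  */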
/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of CLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class(rtx x, enum reg_class class)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return class;

  /* These sorts of constants we can easily drop to memory.  */
  if (GET_CODE (x) == CONST_INT
      || GET_CODE (x) == CONST_DOUBLE
      || GET_CODE (x) == CONST_VECTOR)
    {
      if (class == FLOAT_REGS)
	return NO_REGS;
      if (class == ALL_REGS)
	return GENERAL_REGS;
      return class;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (class == ALL_REGS ? GENERAL_REGS : class);

  return class;
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   CLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.  */

static enum reg_class
alpha_secondary_reload (bool in_p, rtx x, enum reg_class class,
			enum machine_mode mode, secondary_reload_info *sri)
{
  /* Loading and storing HImode or QImode values to and from memory
     usually requires a scratch register.  */
  if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
    {
      if (any_memory_operand (x, mode))
	{
	  if (in_p)
	    {
	      if (!aligned_memory_operand (x, mode))
		sri->icode = reload_in_optab[mode];
	    }
	  else
	    sri->icode = reload_out_optab[mode];
	  return NO_REGS;
	}
    }

  /* We also cannot do integral arithmetic into FP regs, as might result
     from register elimination into a DImode fp register.  */
  if (class == FLOAT_REGS)
    {
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
	return GENERAL_REGS;
      if (in_p && INTEGRAL_MODE_P (mode)
	  && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
	return GENERAL_REGS;
    }

  return NO_REGS;
}
/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static int
alpha_set_memflags_1 (rtx *xp, void *data)
{
  rtx x = *xp, orig = (rtx) data;

  if (GET_CODE (x) != MEM)
    return 0;

  MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
  MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  MEM_READONLY_P (x) = MEM_READONLY_P (orig);

  /* Sadly, we cannot use alias sets because the extra aliasing
     produced by the AND interferes.  Given that two-byte quantities
     are the only thing we would be able to differentiate anyway,
     there does not seem to be any point in convoluting the early
     out of the alias check.  */

  return -1;
}
/* Given INSN, which is an INSN list or the PATTERN of a single insn
   generated to perform a memory operation, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx insn, rtx ref)
{
  rtx *base_ptr;

  if (GET_CODE (ref) != MEM)
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_IN_STRUCT_P (ref)
      && !MEM_SCALAR_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  if (INSN_P (insn))
    base_ptr = &PATTERN (insn);
  else
    base_ptr = &insn;
  for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
}
static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
				 int, bool);

/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */

static rtx
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
			HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
  rtx temp, insn;

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */

  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
	 positive, we must adjust it to do two ldha insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
	{
	  extra = 0x4000;
	  tmp1 -= 0x40000000;
	  high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
	}
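      /* Illustration (an editor's example, not from the original source):
	 c = 0x7fff8000 initially gives low = -0x8000 and high = -0x8000,
	 which would rebuild a negative value; with extra = 0x4000 the
	 recomputed high is 0x4000, so two ldah's of 0x4000 each rebuild
	 the positive constant exactly:
	 (0x4000 << 16) + (0x4000 << 16) - 0x8000 = 0x7fff8000.  */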
      if (c == low || (low == 0 && extra == 0))
	{
	  /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
	     but that meant that we can't handle INT_MIN on 32-bit machines
	     (like NT/Alpha), because we recurse indefinitely through
	     emit_move_insn to gen_movdi.  So instead, since we know exactly
	     what we want, create it explicitly.  */

	  if (no_output)
	    return pc_rtx;
	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
	  return target;
	}
      else if (n >= 2 + (extra != 0))
	{
	  if (no_output)
	    return pc_rtx;
	  if (!can_create_pseudo_p ())
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
	      temp = target;
	    }
	  else
	    temp = copy_to_suggested_reg (GEN_INT (high << 16),
					  subtarget, mode);

	  /* As of 2002-02-23, addsi3 is only available when not optimizing.
	     This means that if we go through expand_binop, we'll try to
	     generate extensions, etc, which will require new pseudos, which
	     will fail during some split phases.  The SImode add patterns
	     still exist, but are not named.  So build the insns by hand.  */

	  if (extra != 0)
	    {
	      if (! subtarget)
		subtarget = gen_reg_rtx (mode);
	      insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
	      insn = gen_rtx_SET (VOIDmode, subtarget, insn);
	      emit_insn (insn);
	      temp = subtarget;
	    }

	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	  insn = gen_rtx_SET (VOIDmode, target, insn);
	  emit_insn (insn);
	  return target;
	}
    }

  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
    return 0;
  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if minus some low bits, we've an easy load of
	 high bits.  */

      new = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new != 0)
	{
	  temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
	  if (temp)
	    {
	      if (no_output)
		return temp;
	      return expand_binop (mode, add_optab, temp, GEN_INT (new),
				   target, 0, OPTAB_WIDEN);
	    }
	}

      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_unop (mode, one_cmpl_optab, temp, target, 0);
	}

      /* Next try to form a constant and do a left shift.  We can do this
	 if some low-order bits are zero; the exact_log2 call below tells
	 us that information.  The bits we are shifting out could be any
	 value, but here we'll just try the 0- and sign-extended forms of
	 the constant.  To try to increase the chance of having the same
	 constant in more than one insn, start at the highest number of
	 bits to shift, but try all possibilities in case a ZAPNOT will
	 be useful.  */

      bits = exact_log2 (c & -c);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c >> bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp && c < 0)
	      {
		new = (unsigned HOST_WIDE_INT)c >> bits;
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }

      /* Now try high-order zero bits.  Here we try the shifted-in bits as
	 all zero and all ones.  Be careful to avoid shifting outside the
	 mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
	 confuse the recursive call and set all of the high 32 bits.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp)
	      {
		new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
				     target, 1, OPTAB_WIDEN);
	      }
	  }

      /* Now try high-order 1 bits.  We get that with a sign-extension.
	 But one bit isn't enough here.  Be careful to avoid shifting outside
	 the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (~ c) - 2);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp)
	      {
		new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }
    }

#if HOST_BITS_PER_WIDE_INT == 64
  /* Finally, see if can load a value into the target that is the same as the
     constant except that all bytes that are 0 are changed to be 0xff.  If we
     can, then we can do a ZAPNOT to obtain the desired constant.  */

  new = c;
  for (i = 0; i < 64; i += 8)
    if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new != c)
    {
      temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
			       target, 0, OPTAB_WIDEN);
	}
    }
#endif

  return 0;
}
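/* Illustration of the ZAPNOT fallback above (an editor's sketch):
   c = 0x1200000034000000 has zero bytes, so new = 0x12ffffff34ffffff;
   the AND mask c | ~new = 0xff000000ff000000 contains only 0x00 or
   0xff bytes, i.e. zap_mask holds, and the AND becomes a single
   zapnot.  */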
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */

static rtx
alpha_emit_set_const (rtx target, enum machine_mode mode,
		      HOST_WIDE_INT c, int n, bool no_output)
{
  enum machine_mode orig_mode = mode;
  rtx orig_target = target;
  rtx result = 0;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, we
     can't load this constant in one insn, do this in DImode.  */
  if (!can_create_pseudo_p () && mode == SImode
      && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
    {
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
      if (result)
	return result;

      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
    {
      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
      if (result)
	{
	  rtx insn, set;

	  if (no_output)
	    return result;

	  insn = get_last_insn ();
	  set = single_set (insn);
	  if (! CONSTANT_P (SET_SRC (set)))
	    set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
	  break;
	}
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result)
    {
      if (result == target)
	result = orig_target;
      else if (mode != orig_mode)
	result = gen_lowpart (orig_mode, result);
    }

  return result;
}
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

static rtx
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word */
#if HOST_BITS_PER_WIDE_INT >= 64
  gcc_assert (c2 == -(c1 < 0));
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d4);
#else
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d2);
  c2 += (d2 < 0);
  d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
  c2 -= d3;
  d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c2 == d4);
#endif

  /* Construct the high word */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
	emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}
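/* Worked example on a 64-bit host (values chosen for illustration):
   c1 = 0x1234567887654321 decomposes into d1 = 0x4321,
   d2 = 0x87650000 - 0x100000000 (sign-extended), d3 = 0x5679 and
   d4 = 0x12340000; the word is then rebuilt as
   (((d4 + d3) << 32) + d2) + d1.  */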
/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
   the low 64 bits.  */

static void
alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
{
  HOST_WIDE_INT i0, i1;

  if (GET_CODE (x) == CONST_VECTOR)
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);

  if (GET_CODE (x) == CONST_INT)
    {
      i0 = INTVAL (x);
      i1 = -(i0 < 0);
    }
  else if (HOST_BITS_PER_WIDE_INT >= 64)
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = -(i0 < 0);
    }
  else
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = CONST_DOUBLE_HIGH (x);
    }

  *p0 = i0;
  *p1 = i1;
}
/* Implement LEGITIMATE_CONSTANT_P.  This is all constants for which we
   are willing to load the value into a register via a move pattern.
   Normally this is all symbolic constants, integral constants that
   take three or fewer instructions, and floating-point zero.  */

bool
alpha_legitimate_constant_p (rtx x)
{
  enum machine_mode mode = GET_MODE (x);
  HOST_WIDE_INT i0, i1;

  switch (GET_CODE (x))
    {
    case CONST:
    case LABEL_REF:
    case HIGH:
      return true;

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      return SYMBOL_REF_TLS_MODEL (x) == 0;

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	return true;
      if (FLOAT_MODE_P (mode))
	return false;
      goto do_integer;

    case CONST_VECTOR:
      if (x == CONST0_RTX (mode))
	return true;
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
	return false;
      if (GET_MODE_SIZE (mode) != 8)
	return false;
      goto do_integer;

    case CONST_INT:
    do_integer:
      if (TARGET_BUILD_CONSTANTS)
	return true;
      alpha_extract_integer (x, &i0, &i1);
      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
	return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
      return false;

    default:
      return false;
    }
}
/* Operand 1 is known to be a constant, and should require more than one
   instruction to load.  Emit that multi-part load.  */

bool
alpha_split_const_mov (enum machine_mode mode, rtx *operands)
{
  HOST_WIDE_INT i0, i1;
  rtx temp = NULL_RTX;

  alpha_extract_integer (operands[1], &i0, &i1);

  if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
    temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);

  if (!temp && TARGET_BUILD_CONSTANTS)
    temp = alpha_emit_set_long_const (operands[0], i0, i1);

  if (temp)
    {
      if (!rtx_equal_p (operands[0], temp))
	emit_move_insn (operands[0], temp);
      return true;
    }

  return false;
}
/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

bool
alpha_expand_mov (enum machine_mode mode, rtx *operands)
{
  /* If the output is not a register, the input must be.  */
  if (GET_CODE (operands[0]) == MEM
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Allow legitimize_address to perform some simplifications.  */
  if (mode == Pmode && symbolic_operand (operands[1], mode))
    {
      rtx tmp;

      tmp = alpha_legitimize_address (operands[1], operands[0], mode);
      if (tmp)
	{
	  if (tmp == operands[0])
	    return true;
	  operands[1] = tmp;
	  return false;
	}
    }

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
    return false;

  /* Split large integers.  */
  if (GET_CODE (operands[1]) == CONST_INT
      || GET_CODE (operands[1]) == CONST_DOUBLE
      || GET_CODE (operands[1]) == CONST_VECTOR)
    {
      if (alpha_split_const_mov (mode, operands))
	return true;
    }

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  operands[1] = force_const_mem (mode, operands[1]);
  if (reload_in_progress)
    {
      emit_move_insn (operands[0], XEXP (operands[1], 0));
      operands[1] = replace_equiv_address (operands[1], operands[0]);
    }
  else
    operands[1] = validize_mem (operands[1]);
  return false;
}
/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

bool
alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
{
  rtx seq;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0]))
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (any_memory_operand (operands[1], mode))
    {
      if (aligned_memory_operand (operands[1], mode))
	{
	  if (reload_in_progress)
	    {
	      if (mode == QImode)
		seq = gen_reload_inqi_aligned (operands[0], operands[1]);
	      else
		seq = gen_reload_inhi_aligned (operands[0], operands[1]);
	      emit_insn (seq);
	    }
	  else
	    {
	      rtx aligned_mem, bitnum;
	      rtx scratch = gen_reg_rtx (SImode);
	      rtx subtarget;
	      bool copyout;

	      get_aligned_mem (operands[1], &aligned_mem, &bitnum);

	      subtarget = operands[0];
	      if (GET_CODE (subtarget) == REG)
		subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	      else
		subtarget = gen_reg_rtx (DImode), copyout = true;

	      if (mode == QImode)
		seq = gen_aligned_loadqi (subtarget, aligned_mem,
					  bitnum, scratch);
	      else
		seq = gen_aligned_loadhi (subtarget, aligned_mem,
					  bitnum, scratch);
	      emit_insn (seq);

	      if (copyout)
		emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	    }
	}
      else
	{
	  /* Don't pass these as parameters since that makes the generated
	     code depend on parameter evaluation order which will cause
	     bootstrap failures.  */

	  rtx temp1, temp2, subtarget, ua;
	  bool copyout;

	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);

	  subtarget = operands[0];
	  if (GET_CODE (subtarget) == REG)
	    subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	  else
	    subtarget = gen_reg_rtx (DImode), copyout = true;

	  ua = get_unaligned_address (operands[1]);
	  if (mode == QImode)
	    seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
	  else
	    seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);

	  alpha_set_memflags (seq, operands[1]);
	  emit_insn (seq);

	  if (copyout)
	    emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	}
      return true;
    }

  if (any_memory_operand (operands[0], mode))
    {
      if (aligned_memory_operand (operands[0], mode))
	{
	  rtx aligned_mem, bitnum;
	  rtx temp1 = gen_reg_rtx (SImode);
	  rtx temp2 = gen_reg_rtx (SImode);

	  get_aligned_mem (operands[0], &aligned_mem, &bitnum);

	  emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
					temp1, temp2));
	}
      else
	{
	  rtx temp1 = gen_reg_rtx (DImode);
	  rtx temp2 = gen_reg_rtx (DImode);
	  rtx temp3 = gen_reg_rtx (DImode);
	  rtx ua = get_unaligned_address (operands[0]);

	  if (mode == QImode)
	    seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
	  else
	    seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);

	  alpha_set_memflags (seq, operands[0]);
	  emit_insn (seq);
	}
      return true;
    }

  return false;
}
/* Implement the movmisalign patterns.  One of the operands is a memory
   that is not naturally aligned.  Emit instructions to load it.  */

void
alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
{
  /* Honor misaligned loads, for those we promised to do so.  */
  if (MEM_P (operands[1]))
    {
      rtx tmp;

      if (register_operand (operands[0], mode))
	tmp = operands[0];
      else
	tmp = gen_reg_rtx (mode);

      alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
      if (tmp != operands[0])
	emit_move_insn (operands[0], tmp);
    }
  else if (MEM_P (operands[0]))
    {
      if (!reg_or_0_operand (operands[1], mode))
	operands[1] = force_reg (mode, operands[1]);
      alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
    }
  else
    gcc_unreachable ();
}
2313 /* Generate an unsigned DImode to FP conversion. This is the same code
2314 optabs would emit if we didn't have TFmode patterns.
2316 For SFmode, this is the only construction I've found that can pass
2317 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2318 intermediates will work, because you'll get intermediate rounding
2319 that ruins the end result. Some of this could be fixed by turning
2320 on round-to-positive-infinity, but that requires diddling the fpsr,
2321 which kills performance. I tried turning this around and converting
2322 to a negative number, so that I could turn on /m, but either I did
2323 it wrong or there's something else cause I wound up with the exact
2324 same single-bit error. There is a branch-less form of this same code:
2335 fcmoveq $f10,$f11,$f0
2337 I'm not using it because it's the same number of instructions as
2338 this branch-full form, and it has more serialized long latency
2339 instructions on the critical path.
2341 For DFmode, we can avoid rounding errors by breaking up the word
2342 into two pieces, converting them separately, and adding them back:
2344 LC0: .long 0,0x5f800000
2349 cpyse $f11,$f31,$f10
2350 cpyse $f31,$f11,$f11
2358 This doesn't seem to be a clear-cut win over the optabs form.
2359 It probably all depends on the distribution of numbers being
2360 converted -- in the optabs form, all but high-bit-set has a
2361 much lower minimum execution time. */
2364 alpha_emit_floatuns (rtx operands[2])
2366 rtx neglab, donelab, i0, i1, f0, in, out;
2367 enum machine_mode mode;
2370 in = force_reg (DImode, operands[1]);
2371 mode = GET_MODE (out);
2372 neglab = gen_label_rtx ();
2373 donelab = gen_label_rtx ();
2374 i0 = gen_reg_rtx (DImode);
2375 i1 = gen_reg_rtx (DImode);
2376 f0 = gen_reg_rtx (mode);
2378 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2380 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2381 emit_jump_insn (gen_jump (donelab));
2384 emit_label (neglab);
2386 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2387 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2388 emit_insn (gen_iordi3 (i0, i0, i1));
2389 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2390 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2392 emit_label (donelab);
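
/* A minimal C-level sketch (an illustration, not code from this file) of
   what the expansion above computes; the halved value keeps its low bit
   so that the final doubling cannot introduce a second rounding step.  */
#if 0
static double
u64_to_double (unsigned long x)		/* hypothetical helper */
{
  if ((long) x >= 0)
    return (double) (long) x;		/* fits a signed convert */
  /* Halve with the low bit folded back in (so round-to-nearest still
     sees the sticky bit), convert, then double exactly.  */
  return 2.0 * (double) (long) ((x >> 1) | (x & 1));
}
#endif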
2395 /* Generate the comparison for a conditional branch. */
2398 alpha_emit_conditional_branch (enum rtx_code code)
2400 enum rtx_code cmp_code, branch_code;
2401 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2402 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2405 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2407 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2409 alpha_compare.fp_p = 0;
2412 /* The general case: fold the comparison code to the types of compares
2413 that we have, choosing the branch as necessary. */
2416 case EQ: case LE: case LT: case LEU: case LTU:
2418 /* We have these compares: */
2419 cmp_code = code, branch_code = NE;
2424 /* These must be reversed. */
2425 cmp_code = reverse_condition (code), branch_code = EQ;
2428 case GE: case GT: case GEU: case GTU:
2429       /* For FP, we swap them; for INT, we reverse them.  */
2430 if (alpha_compare.fp_p)
2432 cmp_code = swap_condition (code);
2434 tem = op0, op0 = op1, op1 = tem;
2438 cmp_code = reverse_condition (code);
2447 if (alpha_compare.fp_p)
2450 if (flag_unsafe_math_optimizations)
2452 /* When we are not as concerned about non-finite values, and we
2453 are comparing against zero, we can branch directly. */
2454 if (op1 == CONST0_RTX (DFmode))
2455 cmp_code = UNKNOWN, branch_code = code;
2456 else if (op0 == CONST0_RTX (DFmode))
2458 /* Undo the swap we probably did just above. */
2459 tem = op0, op0 = op1, op1 = tem;
2460 branch_code = swap_condition (cmp_code);
2466 /* ??? We mark the branch mode to be CCmode to prevent the
2467 compare and branch from being combined, since the compare
2468 insn follows IEEE rules that the branch does not. */
2469 branch_mode = CCmode;
2476 /* The following optimizations are only for signed compares. */
2477 if (code != LEU && code != LTU && code != GEU && code != GTU)
2479 /* Whee. Compare and branch against 0 directly. */
2480 if (op1 == const0_rtx)
2481 cmp_code = UNKNOWN, branch_code = code;
2483 	  /* If the constant doesn't fit into an immediate, but can
2484 be generated by lda/ldah, we adjust the argument and
2485 compare against zero, so we can use beq/bne directly. */
2486 /* ??? Don't do this when comparing against symbols, otherwise
2487 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2488 be declared false out of hand (at least for non-weak). */
2489 else if (GET_CODE (op1) == CONST_INT
2490 && (code == EQ || code == NE)
2491 && !(symbolic_operand (op0, VOIDmode)
2492 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2494 rtx n_op1 = GEN_INT (-INTVAL (op1));
2496 if (! satisfies_constraint_I (op1)
2497 && (satisfies_constraint_K (n_op1)
2498 || satisfies_constraint_L (n_op1)))
2499 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2503 if (!reg_or_0_operand (op0, DImode))
2504 op0 = force_reg (DImode, op0);
2505 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2506 op1 = force_reg (DImode, op1);
2509 /* Emit an initial compare instruction, if necessary. */
2511 if (cmp_code != UNKNOWN)
2513 tem = gen_reg_rtx (cmp_mode);
2514 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2517 /* Zero the operands. */
2518 memset (&alpha_compare, 0, sizeof (alpha_compare));
2520 /* Return the branch comparison. */
2521 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
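
/* Worked example of the lda/ldah adjustment above, with hypothetical
   values: for "x == 0x4000" the constant is too wide for cmpeq's 8-bit
   immediate, but -0x4000 fits lda's signed 16-bit field, so we can emit
   "lda t,-0x4000(x)" followed by "beq t" instead of first loading
   0x4000 into a register.  */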
2524 /* Certain simplifications can be done to make invalid setcc operations
2525 valid. Return the final comparison, or NULL if we can't work. */
2528 alpha_emit_setcc (enum rtx_code code)
2530 enum rtx_code cmp_code;
2531 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2532 int fp_p = alpha_compare.fp_p;
2535 /* Zero the operands. */
2536 memset (&alpha_compare, 0, sizeof (alpha_compare));
2538 if (fp_p && GET_MODE (op0) == TFmode)
2540 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2545 if (fp_p && !TARGET_FIX)
2548 /* The general case: fold the comparison code to the types of compares
2549 that we have, choosing the branch as necessary. */
2554 case EQ: case LE: case LT: case LEU: case LTU:
2556 /* We have these compares. */
2558 cmp_code = code, code = NE;
2562 if (!fp_p && op1 == const0_rtx)
2567 cmp_code = reverse_condition (code);
2571 case GE: case GT: case GEU: case GTU:
2572 /* These normally need swapping, but for integer zero we have
2573 special patterns that recognize swapped operands. */
2574 if (!fp_p && op1 == const0_rtx)
2576 code = swap_condition (code);
2578 cmp_code = code, code = NE;
2579 tmp = op0, op0 = op1, op1 = tmp;
2588 if (!register_operand (op0, DImode))
2589 op0 = force_reg (DImode, op0);
2590 if (!reg_or_8bit_operand (op1, DImode))
2591 op1 = force_reg (DImode, op1);
2594 /* Emit an initial compare instruction, if necessary. */
2595 if (cmp_code != UNKNOWN)
2597 enum machine_mode mode = fp_p ? DFmode : DImode;
2599 tmp = gen_reg_rtx (mode);
2600 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2601 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2603 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2607 /* Return the setcc comparison. */
2608 return gen_rtx_fmt_ee (code, DImode, op0, op1);
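
/* For instance (illustrative only): an integer "r = (x > y)" becomes a
   single "cmplt y,x,r" via the swap above, while "r = (x != y)" takes
   the reversal and two instructions, "cmpeq x,y,t; cmpeq t,0,r".  */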
2612 /* Rewrite a comparison against zero CMP of the form
2613 (CODE (cc0) (const_int 0)) so it can be written validly in
2614 a conditional move (if_then_else CMP ...).
2615 If both of the operands that set cc0 are nonzero we must emit
2616 an insn to perform the compare (it can't be done within
2617 the conditional move). */
2620 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2622 enum rtx_code code = GET_CODE (cmp);
2623 enum rtx_code cmov_code = NE;
2624 rtx op0 = alpha_compare.op0;
2625 rtx op1 = alpha_compare.op1;
2626 int fp_p = alpha_compare.fp_p;
2627 enum machine_mode cmp_mode
2628 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2629 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2630 enum machine_mode cmov_mode = VOIDmode;
2631 int local_fast_math = flag_unsafe_math_optimizations;
2634 /* Zero the operands. */
2635 memset (&alpha_compare, 0, sizeof (alpha_compare));
2637 if (fp_p != FLOAT_MODE_P (mode))
2639 enum rtx_code cmp_code;
2644 /* If we have fp<->int register move instructions, do a cmov by
2645 performing the comparison in fp registers, and move the
2646 zero/nonzero value to integer registers, where we can then
2647 use a normal cmov, or vice-versa. */
2651 case EQ: case LE: case LT: case LEU: case LTU:
2652 /* We have these compares. */
2653 cmp_code = code, code = NE;
2657 /* This must be reversed. */
2658 cmp_code = EQ, code = EQ;
2661 case GE: case GT: case GEU: case GTU:
2662 /* These normally need swapping, but for integer zero we have
2663 special patterns that recognize swapped operands. */
2664 if (!fp_p && op1 == const0_rtx)
2665 cmp_code = code, code = NE;
2668 cmp_code = swap_condition (code);
2670 tem = op0, op0 = op1, op1 = tem;
2678 tem = gen_reg_rtx (cmp_op_mode);
2679 emit_insn (gen_rtx_SET (VOIDmode, tem,
2680 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2683 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2684 op0 = gen_lowpart (cmp_op_mode, tem);
2685 op1 = CONST0_RTX (cmp_op_mode);
2687 local_fast_math = 1;
2690 /* We may be able to use a conditional move directly.
2691 This avoids emitting spurious compares. */
2692 if (signed_comparison_operator (cmp, VOIDmode)
2693 && (!fp_p || local_fast_math)
2694 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2695 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2697 /* We can't put the comparison inside the conditional move;
2698 emit a compare instruction and put that inside the
2699 conditional move. Make sure we emit only comparisons we have;
2700 swap or reverse as necessary. */
2702 if (!can_create_pseudo_p ())
2707 case EQ: case LE: case LT: case LEU: case LTU:
2708 /* We have these compares: */
2712 /* This must be reversed. */
2713 code = reverse_condition (code);
2717 case GE: case GT: case GEU: case GTU:
2718 /* These must be swapped. */
2719 if (op1 != CONST0_RTX (cmp_mode))
2721 code = swap_condition (code);
2722 tem = op0, op0 = op1, op1 = tem;
2732 if (!reg_or_0_operand (op0, DImode))
2733 op0 = force_reg (DImode, op0);
2734 if (!reg_or_8bit_operand (op1, DImode))
2735 op1 = force_reg (DImode, op1);
2738 /* ??? We mark the branch mode to be CCmode to prevent the compare
2739 and cmov from being combined, since the compare insn follows IEEE
2740 rules that the cmov does not. */
2741 if (fp_p && !local_fast_math)
2744 tem = gen_reg_rtx (cmp_op_mode);
2745 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2746 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2749 /* Simplify a conditional move of two constants into a setcc with
2750 arithmetic. This is done with a splitter since combine would
2751 just undo the work if done during code generation. It also catches
2752 cases we wouldn't have before cse. */
2755 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2756 rtx t_rtx, rtx f_rtx)
2758 HOST_WIDE_INT t, f, diff;
2759 enum machine_mode mode;
2760 rtx target, subtarget, tmp;
2762 mode = GET_MODE (dest);
2767 if (((code == NE || code == EQ) && diff < 0)
2768 || (code == GE || code == GT))
2770 code = reverse_condition (code);
2771 diff = t, t = f, f = diff;
2775 subtarget = target = dest;
2778 target = gen_lowpart (DImode, dest);
2779 if (can_create_pseudo_p ())
2780 subtarget = gen_reg_rtx (DImode);
2784 /* Below, we must be careful to use copy_rtx on target and subtarget
2785      in intermediate insns, as they may be a subreg rtx, which may not
	  be shared.  */
2788 if (f == 0 && exact_log2 (diff) > 0
2789 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2790 viable over a longer latency cmove. On EV5, the E0 slot is a
2791 scarce resource, and on EV4 shift has the same latency as a cmove. */
2792 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2794 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2795 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2797 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2798 GEN_INT (exact_log2 (t)));
2799 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2801 else if (f == 0 && t == -1)
2803 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2804 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2806 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2808 else if (diff == 1 || diff == 4 || diff == 8)
2812 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2813 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2816 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2819 add_op = GEN_INT (f);
2820 if (sext_add_operand (add_op, mode))
2822 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2824 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2825 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
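
/* Illustrative instances of the splits above (sketches, assuming the
   comparison has already been reduced to a 0/1 value t):

     x = c ?  8 : 0	->	sll	t,3,x		(shift case)
     x = c ? -1 : 0	->	negq	t,x		(negate case)
     x = c ?  5 : 1	->	s4addq	t,1,x		(diff == 4 case)  */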
2837 /* Look up the X_floating library function name for the
2840 struct xfloating_op GTY(())
2842 const enum rtx_code code;
2843 const char *const GTY((skip)) osf_func;
2844 const char *const GTY((skip)) vms_func;
2848 static GTY(()) struct xfloating_op xfloating_ops[] =
2850 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2851 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2852 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2853 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2854 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2855 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2856 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2857 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2858 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2859 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2860 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2861 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2862 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2863 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2864 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2867 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2869 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2870 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2874 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2876 struct xfloating_op *ops = xfloating_ops;
2877 long n = ARRAY_SIZE (xfloating_ops);
2880 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2882 /* How irritating. Nothing to key off for the main table. */
2883 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2886 n = ARRAY_SIZE (vax_cvt_ops);
2889 for (i = 0; i < n; ++i, ++ops)
2890 if (ops->code == code)
2892 rtx func = ops->libcall;
2895 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2896 ? ops->vms_func : ops->osf_func);
2897 ops->libcall = func;
2905 /* Most X_floating operations take the rounding mode as an argument.
2906 Compute that here. */
2909 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2910 enum alpha_fp_rounding_mode round)
2916 case ALPHA_FPRM_NORM:
2919 case ALPHA_FPRM_MINF:
2922 case ALPHA_FPRM_CHOP:
2925 case ALPHA_FPRM_DYN:
2931 /* XXX For reference, round to +inf is mode = 3. */
2934 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2940 /* Emit an X_floating library function call.
2942 Note that these functions do not follow normal calling conventions:
2943 TFmode arguments are passed in two integer registers (as opposed to
2944 indirect); TFmode return values appear in R16+R17.
2946 FUNC is the function to call.
2947 TARGET is where the output belongs.
2948 OPERANDS are the inputs.
2949 NOPERANDS is the count of inputs.
2950 EQUIV is the expression equivalent for the function.
2954 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2955 int noperands, rtx equiv)
2957 rtx usage = NULL_RTX, tmp, reg;
2962 for (i = 0; i < noperands; ++i)
2964 switch (GET_MODE (operands[i]))
2967 reg = gen_rtx_REG (TFmode, regno);
2972 reg = gen_rtx_REG (DFmode, regno + 32);
2977 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
2980 reg = gen_rtx_REG (DImode, regno);
2988 emit_move_insn (reg, operands[i]);
2989 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
2992 switch (GET_MODE (target))
2995 reg = gen_rtx_REG (TFmode, 16);
2998 reg = gen_rtx_REG (DFmode, 32);
3001 reg = gen_rtx_REG (DImode, 0);
3007 tmp = gen_rtx_MEM (QImode, func);
3008 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3009 const0_rtx, const0_rtx));
3010 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3011 CONST_OR_PURE_CALL_P (tmp) = 1;
3016 emit_libcall_block (tmp, target, reg, equiv);
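
/* So, for example (a sketch of the convention, not text from this file):
   a call to _OtsAddX receives its first TFmode operand in $16/$17, its
   second in $18/$19, and the rounding-mode argument in $20, and returns
   the TFmode result in $16/$17.  */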
3019 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3022 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3026 rtx out_operands[3];
3028 func = alpha_lookup_xfloating_lib_func (code);
3029 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3031 out_operands[0] = operands[1];
3032 out_operands[1] = operands[2];
3033 out_operands[2] = GEN_INT (mode);
3034 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3035 gen_rtx_fmt_ee (code, TFmode, operands[1],
3039 /* Emit an X_floating library function call for a comparison. */
3042 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3044 enum rtx_code cmp_code, res_code;
3045 rtx func, out, operands[2];
3047   /* X_floating library comparison functions return -1, 0, or 1, not a
	  condition flag.
3051 Convert the compare against the raw return value. */
3079 func = alpha_lookup_xfloating_lib_func (cmp_code);
3083 out = gen_reg_rtx (DImode);
3085 /* ??? Strange mode for equiv because what's actually returned
3086 is -1,0,1, not a proper boolean value. */
3087 alpha_emit_xfloating_libcall (func, out, operands, 2,
3088 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3093 /* Emit an X_floating library function call for a conversion. */
3096 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3098 int noperands = 1, mode;
3099 rtx out_operands[2];
3101 enum rtx_code code = orig_code;
3103 if (code == UNSIGNED_FIX)
3106 func = alpha_lookup_xfloating_lib_func (code);
3108 out_operands[0] = operands[1];
3113 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3114 out_operands[1] = GEN_INT (mode);
3117 case FLOAT_TRUNCATE:
3118 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3119 out_operands[1] = GEN_INT (mode);
3126 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3127 gen_rtx_fmt_e (orig_code,
3128 GET_MODE (operands[0]),
3132 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3133 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3134 guarantee that the sequence
3135 	set (OP[0] OP[2])
3136 	set (OP[1] OP[3])
3137    is valid.  Naturally, output operand ordering is little-endian.
3138 This is used by *movtf_internal and *movti_internal. */
3141 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3144 switch (GET_CODE (operands[1]))
3147 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3148 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3152 operands[3] = adjust_address (operands[1], DImode, 8);
3153 operands[2] = adjust_address (operands[1], DImode, 0);
3158 gcc_assert (operands[1] == CONST0_RTX (mode));
3159 operands[2] = operands[3] = const0_rtx;
3166 switch (GET_CODE (operands[0]))
3169 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3170 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3174 operands[1] = adjust_address (operands[0], DImode, 8);
3175 operands[0] = adjust_address (operands[0], DImode, 0);
3182 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3185 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3186 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3190 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3191 op2 is a register containing the sign bit, operation is the
3192 logical operation to be performed. */
3195 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3197 rtx high_bit = operands[2];
3201 alpha_split_tmode_pair (operands, TFmode, false);
3203 /* Detect three flavors of operand overlap. */
3205 if (rtx_equal_p (operands[0], operands[2]))
3207 else if (rtx_equal_p (operands[1], operands[2]))
3209 if (rtx_equal_p (operands[0], high_bit))
3216 emit_move_insn (operands[0], operands[2]);
3218 /* ??? If the destination overlaps both source tf and high_bit, then
3219 assume source tf is dead in its entirety and use the other half
3220 for a scratch register. Otherwise "scratch" is just the proper
3221 destination register. */
3222 scratch = operands[move < 2 ? 1 : 3];
3224 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3228 emit_move_insn (operands[0], operands[2]);
3230 emit_move_insn (operands[1], scratch);
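
/* In effect (an illustrative sketch): with SIGN = 1ul << 63 already in a
   register, the two users of this split reduce to an operation on the
   high double-word HI of the TFmode value,

     negtf2:  hi ^= SIGN;		(xor)
     abstf2:  hi &= ~SIGN;		(andnot)

   while the low double-word passes through unchanged.  */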
3234 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3235    unaligned quantities from unaligned addresses; where two columns are
3236    shown below, the unsigned form is on the left, the signed on the right:
3238 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3239 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3240 lda r3,X(r11) lda r3,X+2(r11)
3241 extwl r1,r3,r1 extql r1,r3,r1
3242 extwh r2,r3,r2 extqh r2,r3,r2
3243 	or	r1,r2,r1	or	r1,r2,r1
3246 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3247 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3248 lda r3,X(r11) lda r3,X(r11)
3249 extll r1,r3,r1 extll r1,r3,r1
3250 extlh r2,r3,r2 extlh r2,r3,r2
3251 	or	r1,r2,r1	addl	r1,r2,r1
3253 quad: ldq_u r1,X(r11)
3262 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3263 HOST_WIDE_INT ofs, int sign)
3265 rtx meml, memh, addr, extl, exth, tmp, mema;
3266 enum machine_mode mode;
3268 if (TARGET_BWX && size == 2)
3270 meml = adjust_address (mem, QImode, ofs);
3271 memh = adjust_address (mem, QImode, ofs+1);
3272 if (BYTES_BIG_ENDIAN)
3273 tmp = meml, meml = memh, memh = tmp;
3274 extl = gen_reg_rtx (DImode);
3275 exth = gen_reg_rtx (DImode);
3276 emit_insn (gen_zero_extendqidi2 (extl, meml));
3277 emit_insn (gen_zero_extendqidi2 (exth, memh));
3278 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3279 NULL, 1, OPTAB_LIB_WIDEN);
3280 addr = expand_simple_binop (DImode, IOR, extl, exth,
3281 NULL, 1, OPTAB_LIB_WIDEN);
3283 if (sign && GET_MODE (tgt) != HImode)
3285 addr = gen_lowpart (HImode, addr);
3286 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3290 if (GET_MODE (tgt) != DImode)
3291 addr = gen_lowpart (GET_MODE (tgt), addr);
3292 emit_move_insn (tgt, addr);
3297 meml = gen_reg_rtx (DImode);
3298 memh = gen_reg_rtx (DImode);
3299 addr = gen_reg_rtx (DImode);
3300 extl = gen_reg_rtx (DImode);
3301 exth = gen_reg_rtx (DImode);
3303 mema = XEXP (mem, 0);
3304 if (GET_CODE (mema) == LO_SUM)
3305 mema = force_reg (Pmode, mema);
3307 /* AND addresses cannot be in any alias set, since they may implicitly
3308 alias surrounding code. Ideally we'd have some alias set that
3309 covered all types except those with alignment 8 or higher. */
3311 tmp = change_address (mem, DImode,
3312 gen_rtx_AND (DImode,
3313 plus_constant (mema, ofs),
3315 set_mem_alias_set (tmp, 0);
3316 emit_move_insn (meml, tmp);
3318 tmp = change_address (mem, DImode,
3319 gen_rtx_AND (DImode,
3320 plus_constant (mema, ofs + size - 1),
3322 set_mem_alias_set (tmp, 0);
3323 emit_move_insn (memh, tmp);
3325 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3327 emit_move_insn (addr, plus_constant (mema, -1));
3329 emit_insn (gen_extqh_be (extl, meml, addr));
3330 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3332 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3333 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3334 addr, 1, OPTAB_WIDEN);
3336 else if (sign && size == 2)
3338 emit_move_insn (addr, plus_constant (mema, ofs+2));
3340 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3341 emit_insn (gen_extqh_le (exth, memh, addr));
3343 /* We must use tgt here for the target. Alpha-vms port fails if we use
3344 addr for the target, because addr is marked as a pointer and combine
3345 knows that pointers are always sign-extended 32-bit values. */
3346 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3347 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3348 addr, 1, OPTAB_WIDEN);
3352 if (WORDS_BIG_ENDIAN)
3354 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3358 emit_insn (gen_extwh_be (extl, meml, addr));
3363 emit_insn (gen_extlh_be (extl, meml, addr));
3368 emit_insn (gen_extqh_be (extl, meml, addr));
3375 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3379 emit_move_insn (addr, plus_constant (mema, ofs));
3380 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3384 emit_insn (gen_extwh_le (exth, memh, addr));
3389 emit_insn (gen_extlh_le (exth, memh, addr));
3394 emit_insn (gen_extqh_le (exth, memh, addr));
3403 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3404 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3409 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3412 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3415 alpha_expand_unaligned_store (rtx dst, rtx src,
3416 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3418 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3420 if (TARGET_BWX && size == 2)
3422 if (src != const0_rtx)
3424 dstl = gen_lowpart (QImode, src);
3425 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3426 NULL, 1, OPTAB_LIB_WIDEN);
3427 dsth = gen_lowpart (QImode, dsth);
3430 dstl = dsth = const0_rtx;
3432 meml = adjust_address (dst, QImode, ofs);
3433 memh = adjust_address (dst, QImode, ofs+1);
3434 if (BYTES_BIG_ENDIAN)
3435 addr = meml, meml = memh, memh = addr;
3437 emit_move_insn (meml, dstl);
3438 emit_move_insn (memh, dsth);
3442 dstl = gen_reg_rtx (DImode);
3443 dsth = gen_reg_rtx (DImode);
3444 insl = gen_reg_rtx (DImode);
3445 insh = gen_reg_rtx (DImode);
3447 dsta = XEXP (dst, 0);
3448 if (GET_CODE (dsta) == LO_SUM)
3449 dsta = force_reg (Pmode, dsta);
3451 /* AND addresses cannot be in any alias set, since they may implicitly
3452 alias surrounding code. Ideally we'd have some alias set that
3453 covered all types except those with alignment 8 or higher. */
3455 meml = change_address (dst, DImode,
3456 gen_rtx_AND (DImode,
3457 plus_constant (dsta, ofs),
3459 set_mem_alias_set (meml, 0);
3461 memh = change_address (dst, DImode,
3462 gen_rtx_AND (DImode,
3463 plus_constant (dsta, ofs + size - 1),
3465 set_mem_alias_set (memh, 0);
3467 emit_move_insn (dsth, memh);
3468 emit_move_insn (dstl, meml);
3469 if (WORDS_BIG_ENDIAN)
3471 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3473 if (src != const0_rtx)
3478 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3481 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3484 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3487 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3488 GEN_INT (size*8), addr));
3494 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3498 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3499 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3503 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3507 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3511 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3513 if (src != CONST0_RTX (GET_MODE (src)))
3515 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3516 GEN_INT (size*8), addr));
3521 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3524 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3527 emit_insn (gen_insql_le (insl, src, addr));
3532 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3537 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3541 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3542 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3546 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3551 if (src != CONST0_RTX (GET_MODE (src)))
3553 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3554 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3557 if (WORDS_BIG_ENDIAN)
3559 emit_move_insn (meml, dstl);
3560 emit_move_insn (memh, dsth);
3564       /* Must store high before low for the degenerate aligned case.  */
3565 emit_move_insn (memh, dsth);
3566 emit_move_insn (meml, dstl);
3570 /* The block move code tries to maximize speed by separating loads and
3571 stores at the expense of register pressure: we load all of the data
3572 before we store it back out. There are two secondary effects worth
3573 mentioning, that this speeds copying to/from aligned and unaligned
3574 buffers, and that it makes the code significantly easier to write. */
3576 #define MAX_MOVE_WORDS 8
3578 /* Load an integral number of consecutive unaligned quadwords. */
3581 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3582 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3584 rtx const im8 = GEN_INT (-8);
3585 rtx const i64 = GEN_INT (64);
3586 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3587 rtx sreg, areg, tmp, smema;
3590 smema = XEXP (smem, 0);
3591 if (GET_CODE (smema) == LO_SUM)
3592 smema = force_reg (Pmode, smema);
3594 /* Generate all the tmp registers we need. */
3595 for (i = 0; i < words; ++i)
3597 data_regs[i] = out_regs[i];
3598 ext_tmps[i] = gen_reg_rtx (DImode);
3600 data_regs[words] = gen_reg_rtx (DImode);
3603 smem = adjust_address (smem, GET_MODE (smem), ofs);
3605 /* Load up all of the source data. */
3606 for (i = 0; i < words; ++i)
3608 tmp = change_address (smem, DImode,
3609 gen_rtx_AND (DImode,
3610 plus_constant (smema, 8*i),
3612 set_mem_alias_set (tmp, 0);
3613 emit_move_insn (data_regs[i], tmp);
3616 tmp = change_address (smem, DImode,
3617 gen_rtx_AND (DImode,
3618 plus_constant (smema, 8*words - 1),
3620 set_mem_alias_set (tmp, 0);
3621 emit_move_insn (data_regs[words], tmp);
3623 /* Extract the half-word fragments. Unfortunately DEC decided to make
3624 extxh with offset zero a noop instead of zeroing the register, so
3625 we must take care of that edge condition ourselves with cmov. */
3627 sreg = copy_addr_to_reg (smema);
3628 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3630 if (WORDS_BIG_ENDIAN)
3631 emit_move_insn (sreg, plus_constant (sreg, 7));
3632 for (i = 0; i < words; ++i)
3634 if (WORDS_BIG_ENDIAN)
3636 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3637 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3641 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3642 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3644 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3645 gen_rtx_IF_THEN_ELSE (DImode,
3646 gen_rtx_EQ (DImode, areg,
3648 const0_rtx, ext_tmps[i])));
3651 /* Merge the half-words into whole words. */
3652 for (i = 0; i < words; ++i)
3654 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3655 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3659 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3660 may be NULL to store zeros. */
3663 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3664 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3666 rtx const im8 = GEN_INT (-8);
3667 rtx const i64 = GEN_INT (64);
3668 rtx ins_tmps[MAX_MOVE_WORDS];
3669 rtx st_tmp_1, st_tmp_2, dreg;
3670 rtx st_addr_1, st_addr_2, dmema;
3673 dmema = XEXP (dmem, 0);
3674 if (GET_CODE (dmema) == LO_SUM)
3675 dmema = force_reg (Pmode, dmema);
3677 /* Generate all the tmp registers we need. */
3678 if (data_regs != NULL)
3679 for (i = 0; i < words; ++i)
3680 ins_tmps[i] = gen_reg_rtx(DImode);
3681 st_tmp_1 = gen_reg_rtx(DImode);
3682 st_tmp_2 = gen_reg_rtx(DImode);
3685 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3687 st_addr_2 = change_address (dmem, DImode,
3688 gen_rtx_AND (DImode,
3689 plus_constant (dmema, words*8 - 1),
3691 set_mem_alias_set (st_addr_2, 0);
3693 st_addr_1 = change_address (dmem, DImode,
3694 gen_rtx_AND (DImode, dmema, im8));
3695 set_mem_alias_set (st_addr_1, 0);
3697 /* Load up the destination end bits. */
3698 emit_move_insn (st_tmp_2, st_addr_2);
3699 emit_move_insn (st_tmp_1, st_addr_1);
3701 /* Shift the input data into place. */
3702 dreg = copy_addr_to_reg (dmema);
3703 if (WORDS_BIG_ENDIAN)
3704 emit_move_insn (dreg, plus_constant (dreg, 7));
3705 if (data_regs != NULL)
3707 for (i = words-1; i >= 0; --i)
3709 if (WORDS_BIG_ENDIAN)
3711 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3712 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3716 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3717 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3720 for (i = words-1; i > 0; --i)
3722 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3723 ins_tmps[i-1], ins_tmps[i-1], 1,
3728 /* Split and merge the ends with the destination data. */
3729 if (WORDS_BIG_ENDIAN)
3731 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3732 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3736 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3737 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3740 if (data_regs != NULL)
3742 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3743 st_tmp_2, 1, OPTAB_WIDEN);
3744 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3745 st_tmp_1, 1, OPTAB_WIDEN);
3749 if (WORDS_BIG_ENDIAN)
3750 emit_move_insn (st_addr_1, st_tmp_1);
3752 emit_move_insn (st_addr_2, st_tmp_2);
3753 for (i = words-1; i > 0; --i)
3755 rtx tmp = change_address (dmem, DImode,
3756 gen_rtx_AND (DImode,
3757 plus_constant(dmema,
3758 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3760 set_mem_alias_set (tmp, 0);
3761 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3763 if (WORDS_BIG_ENDIAN)
3764 emit_move_insn (st_addr_2, st_tmp_2);
3766 emit_move_insn (st_addr_1, st_tmp_1);
3770 /* Expand string/block move operations.
3772 operands[0] is the pointer to the destination.
3773 operands[1] is the pointer to the source.
3774 operands[2] is the number of bytes to move.
3775 operands[3] is the alignment. */
3778 alpha_expand_block_move (rtx operands[])
3780 rtx bytes_rtx = operands[2];
3781 rtx align_rtx = operands[3];
3782 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3783 HOST_WIDE_INT bytes = orig_bytes;
3784 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3785 HOST_WIDE_INT dst_align = src_align;
3786 rtx orig_src = operands[1];
3787 rtx orig_dst = operands[0];
3788 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3790 unsigned int i, words, ofs, nregs = 0;
3792 if (orig_bytes <= 0)
3794 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3797 /* Look for additional alignment information from recorded register info. */
3799 tmp = XEXP (orig_src, 0);
3800 if (GET_CODE (tmp) == REG)
3801 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3802 else if (GET_CODE (tmp) == PLUS
3803 && GET_CODE (XEXP (tmp, 0)) == REG
3804 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3806 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3807 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3811 if (a >= 64 && c % 8 == 0)
3813 else if (a >= 32 && c % 4 == 0)
3815 else if (a >= 16 && c % 2 == 0)
3820 tmp = XEXP (orig_dst, 0);
3821 if (GET_CODE (tmp) == REG)
3822 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3823 else if (GET_CODE (tmp) == PLUS
3824 && GET_CODE (XEXP (tmp, 0)) == REG
3825 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3827 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3828 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3832 if (a >= 64 && c % 8 == 0)
3834 else if (a >= 32 && c % 4 == 0)
3836 else if (a >= 16 && c % 2 == 0)
3842 if (src_align >= 64 && bytes >= 8)
3846 for (i = 0; i < words; ++i)
3847 data_regs[nregs + i] = gen_reg_rtx (DImode);
3849 for (i = 0; i < words; ++i)
3850 emit_move_insn (data_regs[nregs + i],
3851 adjust_address (orig_src, DImode, ofs + i * 8));
3858 if (src_align >= 32 && bytes >= 4)
3862 for (i = 0; i < words; ++i)
3863 data_regs[nregs + i] = gen_reg_rtx (SImode);
3865 for (i = 0; i < words; ++i)
3866 emit_move_insn (data_regs[nregs + i],
3867 adjust_address (orig_src, SImode, ofs + i * 4));
3878 for (i = 0; i < words+1; ++i)
3879 data_regs[nregs + i] = gen_reg_rtx (DImode);
3881 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3889 if (! TARGET_BWX && bytes >= 4)
3891 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3892 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3899 if (src_align >= 16)
3902 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3903 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3906 } while (bytes >= 2);
3908 else if (! TARGET_BWX)
3910 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3911 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3919 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3920 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3925 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3927 /* Now save it back out again. */
3931 /* Write out the data in whatever chunks reading the source allowed. */
3932 if (dst_align >= 64)
3934 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3936 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3943 if (dst_align >= 32)
3945 /* If the source has remaining DImode regs, write them out in
3947 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3949 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3950 NULL_RTX, 1, OPTAB_WIDEN);
3952 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3953 gen_lowpart (SImode, data_regs[i]));
3954 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3955 gen_lowpart (SImode, tmp));
3960 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3962 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3969 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3971 /* Write out a remaining block of words using unaligned methods. */
3973 for (words = 1; i + words < nregs; words++)
3974 if (GET_MODE (data_regs[i + words]) != DImode)
3978 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3980 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3987 /* Due to the above, this won't be aligned. */
3988 /* ??? If we have more than one of these, consider constructing full
3989 words in registers and using alpha_expand_unaligned_store_words. */
3990 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3992 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3997 if (dst_align >= 16)
3998 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4000 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4005 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4007 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4012 /* The remainder must be byte copies. */
4015 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4016 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4025 alpha_expand_block_clear (rtx operands[])
4027 rtx bytes_rtx = operands[1];
4028 rtx align_rtx = operands[3];
4029 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4030 HOST_WIDE_INT bytes = orig_bytes;
4031 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4032 HOST_WIDE_INT alignofs = 0;
4033 rtx orig_dst = operands[0];
4035 int i, words, ofs = 0;
4037 if (orig_bytes <= 0)
4039 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4042 /* Look for stricter alignment. */
4043 tmp = XEXP (orig_dst, 0);
4044 if (GET_CODE (tmp) == REG)
4045 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4046 else if (GET_CODE (tmp) == PLUS
4047 && GET_CODE (XEXP (tmp, 0)) == REG
4048 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4050 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4051 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4056 align = a, alignofs = 8 - c % 8;
4058 align = a, alignofs = 4 - c % 4;
4060 align = a, alignofs = 2 - c % 2;
4064 /* Handle an unaligned prefix first. */
4068 #if HOST_BITS_PER_WIDE_INT >= 64
4069 /* Given that alignofs is bounded by align, the only time BWX could
4070      generate three stores is for a 7-byte fill.  Prefer two individual
4071 stores over a load/mask/store sequence. */
4072 if ((!TARGET_BWX || alignofs == 7)
4074 && !(alignofs == 4 && bytes >= 4))
4076 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4077 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4081 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4082 set_mem_alias_set (mem, 0);
4084 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4085 if (bytes < alignofs)
4087 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4098 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4099 NULL_RTX, 1, OPTAB_WIDEN);
4101 emit_move_insn (mem, tmp);
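
	  /* Worked example (hypothetical values): for align == 32 and a
	     pointer one byte past a word boundary, alignofs == 3 and
	     inv_alignofs == 1, so MEM is the enclosing word and MASK is
	     0x000000ff -- the AND keeps the byte below the start and
	     clears the three bytes being zeroed.  */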
4105 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4107 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4112 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4114 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4119 if (alignofs == 4 && bytes >= 4)
4121 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4127 /* If we've not used the extra lead alignment information by now,
4128 we won't be able to. Downgrade align to match what's left over. */
4131 alignofs = alignofs & -alignofs;
4132 align = MIN (align, alignofs * BITS_PER_UNIT);
4136 /* Handle a block of contiguous long-words. */
4138 if (align >= 64 && bytes >= 8)
4142 for (i = 0; i < words; ++i)
4143 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4150 /* If the block is large and appropriately aligned, emit a single
4151 store followed by a sequence of stq_u insns. */
4153 if (align >= 32 && bytes > 16)
4157 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4161 orig_dsta = XEXP (orig_dst, 0);
4162 if (GET_CODE (orig_dsta) == LO_SUM)
4163 orig_dsta = force_reg (Pmode, orig_dsta);
4166 for (i = 0; i < words; ++i)
4169 = change_address (orig_dst, DImode,
4170 gen_rtx_AND (DImode,
4171 plus_constant (orig_dsta, ofs + i*8),
4173 set_mem_alias_set (mem, 0);
4174 emit_move_insn (mem, const0_rtx);
4177 /* Depending on the alignment, the first stq_u may have overlapped
4178 with the initial stl, which means that the last stq_u didn't
4179      write as much as it would appear.  Leave those questionable bytes
	  unaccounted for.  */
4181 bytes -= words * 8 - 4;
4182 ofs += words * 8 - 4;
4185 /* Handle a smaller block of aligned words. */
4187 if ((align >= 64 && bytes == 4)
4188 || (align == 32 && bytes >= 4))
4192 for (i = 0; i < words; ++i)
4193 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4200 /* An unaligned block uses stq_u stores for as many as possible. */
4206 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4212 /* Next clean up any trailing pieces. */
4214 #if HOST_BITS_PER_WIDE_INT >= 64
4215   /* Count the number of bits in BYTES for which aligned stores could
	  be emitted.  */
4218 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4222 /* If we have appropriate alignment (and it wouldn't take too many
4223 instructions otherwise), mask out the bytes we need. */
4224 if (TARGET_BWX ? words > 2 : bytes > 0)
4231 mem = adjust_address (orig_dst, DImode, ofs);
4232 set_mem_alias_set (mem, 0);
4234 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4236 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4237 NULL_RTX, 1, OPTAB_WIDEN);
4239 emit_move_insn (mem, tmp);
4242 else if (align >= 32 && bytes < 4)
4247 mem = adjust_address (orig_dst, SImode, ofs);
4248 set_mem_alias_set (mem, 0);
4250 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4252 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4253 NULL_RTX, 1, OPTAB_WIDEN);
4255 emit_move_insn (mem, tmp);
4261 if (!TARGET_BWX && bytes >= 4)
4263 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4273 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4277 } while (bytes >= 2);
4279 else if (! TARGET_BWX)
4281 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4289 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4297 /* Returns a mask so that zap(x, value) == x & mask. */
4300 alpha_expand_zap_mask (HOST_WIDE_INT value)
4305 if (HOST_BITS_PER_WIDE_INT >= 64)
4307 HOST_WIDE_INT mask = 0;
4309 for (i = 7; i >= 0; --i)
4312 if (!((value >> i) & 1))
4316 result = gen_int_mode (mask, DImode);
4320 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4322 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4324 for (i = 7; i >= 4; --i)
4327 if (!((value >> i) & 1))
4331 for (i = 3; i >= 0; --i)
4334 if (!((value >> i) & 1))
4338 result = immed_double_const (mask_lo, mask_hi, DImode);
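
/* Worked example (illustrative): VALUE 0x01 selects only byte 0, so
   zap(x, 0x01) clears the low byte and the mask built here is
   0xffffffffffffff00; VALUE 0x0f likewise yields 0xffffffff00000000.  */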
4345 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4346 enum machine_mode mode,
4347 rtx op0, rtx op1, rtx op2)
4349 op0 = gen_lowpart (mode, op0);
4351 if (op1 == const0_rtx)
4352 op1 = CONST0_RTX (mode);
4354 op1 = gen_lowpart (mode, op1);
4356 if (op2 == const0_rtx)
4357 op2 = CONST0_RTX (mode);
4359 op2 = gen_lowpart (mode, op2);
4361 emit_insn ((*gen) (op0, op1, op2));
4364 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4365 COND is true. Mark the jump as unlikely to be taken. */
4368 emit_unlikely_jump (rtx cond, rtx label)
4370 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4373 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4374 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4375 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4378 /* A subroutine of the atomic operation splitters. Emit a load-locked
4379 instruction in MODE. */
4382 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4384 rtx (*fn) (rtx, rtx) = NULL;
4385   if (mode == SImode)
4386     fn = gen_load_locked_si;
4387 else if (mode == DImode)
4388 fn = gen_load_locked_di;
4389 emit_insn (fn (reg, mem));
4392 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4393 instruction in MODE. */
4396 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4398 rtx (*fn) (rtx, rtx, rtx) = NULL;
4399   if (mode == SImode)
4400     fn = gen_store_conditional_si;
4401 else if (mode == DImode)
4402 fn = gen_store_conditional_di;
4403 emit_insn (fn (res, mem, val));
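
/* (The two helpers above correspond to the Alpha ldl_l/ldq_l and
   stl_c/stq_c machine instructions; the _si/_di generators select
   the width.)  */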
4406 /* A subroutine of the atomic operation splitters. Emit an insxl
4407 instruction in MODE. */
4410 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4412 rtx ret = gen_reg_rtx (DImode);
4413 rtx (*fn) (rtx, rtx, rtx);
4415 if (WORDS_BIG_ENDIAN)
4429 /* The insbl and inswl patterns require a register operand. */
4430 op1 = force_reg (mode, op1);
4431 emit_insn (fn (ret, op1, op2));
4436 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4437 to perform. MEM is the memory on which to operate. VAL is the second
4438 operand of the binary operator. BEFORE and AFTER are optional locations to
4439    return the value of MEM either before or after the operation.  SCRATCH is
4440 a scratch register. */
4443 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4444 rtx before, rtx after, rtx scratch)
4446 enum machine_mode mode = GET_MODE (mem);
4447 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4449 emit_insn (gen_memory_barrier ());
4451 label = gen_label_rtx ();
4452   emit_label (label);
4453   label = gen_rtx_LABEL_REF (DImode, label);
4457 emit_load_locked (mode, before, mem);
4460 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4462 x = gen_rtx_fmt_ee (code, mode, before, val);
4464 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4465 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4467 emit_store_conditional (mode, cond, mem, scratch);
4469 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4470 emit_unlikely_jump (x, label);
4472 emit_insn (gen_memory_barrier ());
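
/* Roughly, for DImode (an illustrative sketch, not emitted text), the
   split above yields the classic load-locked/store-conditional loop:

	mb
     1:	ldq_l	old,0(mem)
	<op>	old,val,new
	stq_c	new,0(mem)
	beq	new,1b
	mb  */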
4475 /* Expand a compare and swap operation. */
4478 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4481 enum machine_mode mode = GET_MODE (mem);
4482 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4484 emit_insn (gen_memory_barrier ());
4486 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4487 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4488 emit_label (XEXP (label1, 0));
4490 emit_load_locked (mode, retval, mem);
4492 x = gen_lowpart (DImode, retval);
4493 if (oldval == const0_rtx)
4494 x = gen_rtx_NE (DImode, x, const0_rtx);
4497 x = gen_rtx_EQ (DImode, x, oldval);
4498 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4499 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4501 emit_unlikely_jump (x, label2);
4503 emit_move_insn (scratch, newval);
4504 emit_store_conditional (mode, cond, mem, scratch);
4506 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4507 emit_unlikely_jump (x, label1);
4509 emit_insn (gen_memory_barrier ());
4510 emit_label (XEXP (label2, 0));
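
/* Again as an illustration only, the DImode compare-and-swap split
   comes out to:

	mb
     1:	ldq_l	ret,0(mem)
	cmpeq	ret,old,cond
	beq	cond,2f			; mismatch: give up
	mov	new,scratch
	stq_c	scratch,0(mem)
	beq	scratch,1b		; reservation lost: retry
	mb
     2:  */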
4514 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4516 enum machine_mode mode = GET_MODE (mem);
4517 rtx addr, align, wdst;
4518 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4520 addr = force_reg (DImode, XEXP (mem, 0));
4521 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4522 NULL_RTX, 1, OPTAB_DIRECT);
4524 oldval = convert_modes (DImode, mode, oldval, 1);
4525 newval = emit_insxl (mode, newval, addr);
4527 wdst = gen_reg_rtx (DImode);
4529 fn5 = gen_sync_compare_and_swapqi_1;
4531 fn5 = gen_sync_compare_and_swaphi_1;
4532 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4534 emit_move_insn (dst, gen_lowpart (mode, wdst));
4538 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4539 rtx oldval, rtx newval, rtx align,
4540 rtx scratch, rtx cond)
4542 rtx label1, label2, mem, width, mask, x;
4544 mem = gen_rtx_MEM (DImode, align);
4545 MEM_VOLATILE_P (mem) = 1;
4547 emit_insn (gen_memory_barrier ());
4548 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4549 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4550 emit_label (XEXP (label1, 0));
4552 emit_load_locked (DImode, scratch, mem);
4554 width = GEN_INT (GET_MODE_BITSIZE (mode));
4555 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4556 if (WORDS_BIG_ENDIAN)
4557 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4559 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4561 if (oldval == const0_rtx)
4562 x = gen_rtx_NE (DImode, dest, const0_rtx);
4565 x = gen_rtx_EQ (DImode, dest, oldval);
4566 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4567 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4569 emit_unlikely_jump (x, label2);
4571 if (WORDS_BIG_ENDIAN)
4572 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4574 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4575 emit_insn (gen_iordi3 (scratch, scratch, newval));
4577 emit_store_conditional (DImode, scratch, mem, scratch);
4579 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4580 emit_unlikely_jump (x, label1);
4582 emit_insn (gen_memory_barrier ());
4583 emit_label (XEXP (label2, 0));
4586 /* Expand an atomic exchange operation. */
4589 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4591 enum machine_mode mode = GET_MODE (mem);
4592 rtx label, x, cond = gen_lowpart (DImode, scratch);
4594 emit_insn (gen_memory_barrier ());
4596 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4597 emit_label (XEXP (label, 0));
4599 emit_load_locked (mode, retval, mem);
4600 emit_move_insn (scratch, val);
4601 emit_store_conditional (mode, cond, mem, scratch);
4603 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4604 emit_unlikely_jump (x, label);
4608 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4610 enum machine_mode mode = GET_MODE (mem);
4611 rtx addr, align, wdst;
4612 rtx (*fn4) (rtx, rtx, rtx, rtx);
4614 /* Force the address into a register. */
4615 addr = force_reg (DImode, XEXP (mem, 0));
4617 /* Align it to a multiple of 8. */
4618 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4619 NULL_RTX, 1, OPTAB_DIRECT);
4621 /* Insert val into the correct byte location within the word. */
4622 val = emit_insxl (mode, val, addr);
4624 wdst = gen_reg_rtx (DImode);
4626 fn4 = gen_sync_lock_test_and_setqi_1;
4628 fn4 = gen_sync_lock_test_and_sethi_1;
4629 emit_insn (fn4 (wdst, addr, val, align));
4631 emit_move_insn (dst, gen_lowpart (mode, wdst));
4635 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4636 rtx val, rtx align, rtx scratch)
4638 rtx label, mem, width, mask, x;
4640 mem = gen_rtx_MEM (DImode, align);
4641 MEM_VOLATILE_P (mem) = 1;
4643 emit_insn (gen_memory_barrier ());
4644 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4645 emit_label (XEXP (label, 0));
4647 emit_load_locked (DImode, scratch, mem);
4649 width = GEN_INT (GET_MODE_BITSIZE (mode));
4650 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4651 if (WORDS_BIG_ENDIAN)
4653 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4654 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4658 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4659 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4661 emit_insn (gen_iordi3 (scratch, scratch, val));
4663 emit_store_conditional (DImode, scratch, mem, scratch);
4665 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4666 emit_unlikely_jump (x, label);
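
/* A sketch of the little-endian QImode case of the split above, with
   ALIGN = ADDR & -8 and VAL already positioned by insbl:

	mb
     1:	ldq_l	scratch,0(align)
	extbl	scratch,addr,dest	; fetch the old byte
	mskbl	scratch,addr,scratch	; clear that byte's slot
	or	scratch,val,scratch	; merge in the new byte
	stq_c	scratch,0(align)
	beq	scratch,1b  */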
4669 /* Adjust the cost of a scheduling dependency. Return the new cost of
4670 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4673 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4675 enum attr_type insn_type, dep_insn_type;
4677 /* If the dependence is an anti-dependence, there is no cost. For an
4678 output dependence, there is sometimes a cost, but it doesn't seem
4679 worth handling those few cases. */
4680 if (REG_NOTE_KIND (link) != 0)
4683 /* If we can't recognize the insns, we can't really do anything. */
4684 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4687 insn_type = get_attr_type (insn);
4688 dep_insn_type = get_attr_type (dep_insn);
4690 /* Bring in the user-defined memory latency. */
4691 if (dep_insn_type == TYPE_ILD
4692 || dep_insn_type == TYPE_FLD
4693 || dep_insn_type == TYPE_LDSYM)
4694 cost += alpha_memory_latency-1;
4696 /* Everything else handled in DFA bypasses now. */
4701 /* The number of instructions that can be issued per cycle. */
4704 alpha_issue_rate (void)
4706 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4709 /* How many alternative schedules to try. This should be as wide as the
4710 scheduling freedom in the DFA, but no wider. Making this value too
4711    large results in extra work for the scheduler.
4713 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4714 alternative schedules. For EV5, we can choose between E0/E1 and
4715 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4718 alpha_multipass_dfa_lookahead (void)
4720 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4723 /* Machine-specific function data. */
4725 struct machine_function GTY(())
4728 /* List of call information words for calls from this function. */
4729 struct rtx_def *first_ciw;
4730 struct rtx_def *last_ciw;
4733 /* List of deferred case vectors. */
4734 struct rtx_def *addr_list;
4737 const char *some_ld_name;
4739 /* For TARGET_LD_BUGGY_LDGP. */
4740 struct rtx_def *gp_save_rtx;
4743 /* How to allocate a 'struct machine_function'. */
4745 static struct machine_function *
4746 alpha_init_machine_status (void)
4748 return ((struct machine_function *)
4749 ggc_alloc_cleared (sizeof (struct machine_function)));
4752 /* Functions to save and restore alpha_return_addr_rtx. */
4754 /* Start the ball rolling with RETURN_ADDR_RTX. */
4757 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4762 return get_hard_reg_initial_val (Pmode, REG_RA);
4765 /* Return or create a memory slot containing the gp value for the current
4766 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4769 alpha_gp_save_rtx (void)
4771 rtx seq, m = cfun->machine->gp_save_rtx;
4777 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4778 m = validize_mem (m);
4779 emit_move_insn (m, pic_offset_table_rtx);
4783 emit_insn_at_entry (seq);
4785 cfun->machine->gp_save_rtx = m;
4792 alpha_ra_ever_killed (void)
4796 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4797 return (int)df_regs_ever_live_p (REG_RA);
4799 push_topmost_sequence ();
4801 pop_topmost_sequence ();
4803 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4807 /* Return the trap mode suffix applicable to the current
4808 instruction, or NULL. */
4811 get_trap_mode_suffix (void)
4813 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4817 case TRAP_SUFFIX_NONE:
4820 case TRAP_SUFFIX_SU:
4821 if (alpha_fptm >= ALPHA_FPTM_SU)
4825 case TRAP_SUFFIX_SUI:
4826 if (alpha_fptm >= ALPHA_FPTM_SUI)
4830 case TRAP_SUFFIX_V_SV:
4838 case ALPHA_FPTM_SUI:
4844 case TRAP_SUFFIX_V_SV_SVI:
4853 case ALPHA_FPTM_SUI:
4860 case TRAP_SUFFIX_U_SU_SUI:
4869 case ALPHA_FPTM_SUI:
4882 /* Return the rounding mode suffix applicable to the current
4883 instruction, or NULL. */
4886 get_round_mode_suffix (void)
4888 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4892 case ROUND_SUFFIX_NONE:
4894 case ROUND_SUFFIX_NORMAL:
4897 case ALPHA_FPRM_NORM:
4899 case ALPHA_FPRM_MINF:
4901 case ALPHA_FPRM_CHOP:
4903 case ALPHA_FPRM_DYN:
4910 case ROUND_SUFFIX_C:
4919 /* Locate some local-dynamic symbol still in use by this function
4920 so that we can print its name in some movdi_er_tlsldm pattern. */
4923 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4927 if (GET_CODE (x) == SYMBOL_REF
4928 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4930 cfun->machine->some_ld_name = XSTR (x, 0);
4938 get_some_local_dynamic_name (void)
4942 if (cfun->machine->some_ld_name)
4943 return cfun->machine->some_ld_name;
4945 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4947 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4948 return cfun->machine->some_ld_name;
4953 /* Print an operand. Recognize special options, documented below. */
4956 print_operand (FILE *file, rtx x, int code)
4963 /* Print the assembler name of the current function. */
4964 assemble_name (file, alpha_fnname);
4968 assemble_name (file, get_some_local_dynamic_name ());
4973 const char *trap = get_trap_mode_suffix ();
4974 const char *round = get_round_mode_suffix ();
4977 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
4978 (trap ? trap : ""), (round ? round : ""));
4983 /* Generate the single-precision instruction suffix. */
4984 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
4988 /* Generate the double-precision instruction suffix. */
4989 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
4993 if (alpha_this_literal_sequence_number == 0)
4994 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
4995 fprintf (file, "%d", alpha_this_literal_sequence_number);
4999 if (alpha_this_gpdisp_sequence_number == 0)
5000 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5001 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5005 if (GET_CODE (x) == HIGH)
5006 output_addr_const (file, XEXP (x, 0));
5008 output_operand_lossage ("invalid %%H value");
5015 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5017 x = XVECEXP (x, 0, 0);
5018 lituse = "lituse_tlsgd";
5020 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5022 x = XVECEXP (x, 0, 0);
5023 lituse = "lituse_tlsldm";
5025 else if (GET_CODE (x) == CONST_INT)
5026 lituse = "lituse_jsr";
5029 output_operand_lossage ("invalid %%J value");
5033 if (x != const0_rtx)
5034 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5042 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5043 lituse = "lituse_jsrdirect";
5045 lituse = "lituse_jsr";
5048 gcc_assert (INTVAL (x) != 0);
5049 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
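/* Illustrative note, not from the original source: for a sequence
   number of 3, the cases above append a suffix such as
   "!lituse_tlsgd!3", "!lituse_tlsldm!3", "!lituse_jsr!3" or
   "!lituse_jsrdirect!3" to the call insn, tying it to the literal
   relocation that loaded the function address.  */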
5053 /* If this operand is the constant zero, write it as "$31". */
5054 if (GET_CODE (x) == REG)
5055 fprintf (file, "%s", reg_names[REGNO (x)]);
5056 else if (x == CONST0_RTX (GET_MODE (x)))
5057 fprintf (file, "$31");
5059 output_operand_lossage ("invalid %%r value");
5063 /* Similar, but for floating-point. */
5064 if (GET_CODE (x) == REG)
5065 fprintf (file, "%s", reg_names[REGNO (x)]);
5066 else if (x == CONST0_RTX (GET_MODE (x)))
5067 fprintf (file, "$f31");
5069 output_operand_lossage ("invalid %%R value");
5073 /* Write the 1's complement of a constant. */
5074 if (GET_CODE (x) != CONST_INT)
5075 output_operand_lossage ("invalid %%N value");
5077 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5081 /* Write 1 << C, for a constant C. */
5082 if (GET_CODE (x) != CONST_INT)
5083 output_operand_lossage ("invalid %%P value");
5085 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5089 /* Write the high-order 16 bits of a constant, sign-extended. */
5090 if (GET_CODE (x) != CONST_INT)
5091 output_operand_lossage ("invalid %%h value");
5093 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5097 /* Write the low-order 16 bits of a constant, sign-extended. */
5098 if (GET_CODE (x) != CONST_INT)
5099 output_operand_lossage ("invalid %%L value");
5101 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5102 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
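/* Worked example, not part of the original source: for
   INTVAL (x) == 0x18765 the low 16 bits are 0x8765; bit 15 is set,
   so subtracting 2 * 0x8000 == 0x10000 yields the sign-extended
   value -30875, which is what gets printed.  */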
5106 /* Write mask for ZAP insn. */
5107 if (GET_CODE (x) == CONST_DOUBLE)
5109 HOST_WIDE_INT mask = 0;
5110 HOST_WIDE_INT value;
5112 value = CONST_DOUBLE_LOW (x);
5113 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5118 value = CONST_DOUBLE_HIGH (x);
5119 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5122 mask |= (1 << (i + sizeof (int)));
5124 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5127 else if (GET_CODE (x) == CONST_INT)
5129 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5131 for (i = 0; i < 8; i++, value >>= 8)
5135 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5138 output_operand_lossage ("invalid %%m value");
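/* Worked example, not part of the original source: for the CONST_INT
   0x00ff00000000ff00 only bytes 1 and 6 are nonzero, so the ZAP mask
   has bits 1 and 6 set: 0x42, printed in decimal as 66.  */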
5142 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5143 if (GET_CODE (x) != CONST_INT
5144 || (INTVAL (x) != 8 && INTVAL (x) != 16
5145 && INTVAL (x) != 32 && INTVAL (x) != 64))
5146 output_operand_lossage ("invalid %%M value");
5148 fprintf (file, "%s",
5149 (INTVAL (x) == 8 ? "b"
5150 : INTVAL (x) == 16 ? "w"
5151 : INTVAL (x) == 32 ? "l"
5156 /* Similar, except do it from the mask. */
5157 if (GET_CODE (x) == CONST_INT)
5159 HOST_WIDE_INT value = INTVAL (x);
5166 if (value == 0xffff)
5171 if (value == 0xffffffff)
5182 else if (HOST_BITS_PER_WIDE_INT == 32
5183 && GET_CODE (x) == CONST_DOUBLE
5184 && CONST_DOUBLE_LOW (x) == 0xffffffff
5185 && CONST_DOUBLE_HIGH (x) == 0)
5190 output_operand_lossage ("invalid %%U value");
5194 /* Write the constant value divided by 8 for little-endian mode or
5195 (56 - value) / 8 for big-endian mode. */
5197 if (GET_CODE (x) != CONST_INT
5198 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5199 ? 56 : 64)
5201 || (INTVAL (x) & 7) != 0)
5202 output_operand_lossage ("invalid %%s value");
5204 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5205 WORDS_BIG_ENDIAN
5206 ? (56 - INTVAL (x)) / 8
5207 : INTVAL (x) / 8);
5211 /* Same, except compute (64 - c) / 8 */
5213 if (GET_CODE (x) != CONST_INT
5214 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5215 || (INTVAL (x) & 7) != 0)
5216 output_operand_lossage ("invalid %%S value");
5218 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
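/* Worked example, not part of the original source: for a constant
   of 16, %S prints (64 - 16) / 8 == 6.  */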
5223 /* On Unicos/Mk systems: use a DEX expression if the symbol
5224 clashes with a register name. */
5225 int dex = unicosmk_need_dex (x);
5227 fprintf (file, "DEX(%d)", dex);
5229 output_addr_const (file, x);
5233 case 'C': case 'D': case 'c': case 'd':
5234 /* Write out comparison name. */
5236 enum rtx_code c = GET_CODE (x);
5238 if (!COMPARISON_P (x))
5239 output_operand_lossage ("invalid %%C value");
5241 else if (code == 'D')
5242 c = reverse_condition (c);
5243 else if (code == 'c')
5244 c = swap_condition (c);
5245 else if (code == 'd')
5246 c = swap_condition (reverse_condition (c));
5249 fprintf (file, "ule");
5251 fprintf (file, "ult");
5252 else if (c == UNORDERED)
5253 fprintf (file, "un");
5255 fprintf (file, "%s", GET_RTX_NAME (c));
5260 /* Write the divide or modulus operator. */
5261 switch (GET_CODE (x))
5264 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5267 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5270 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5273 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5276 output_operand_lossage ("invalid %%E value");
5282 /* Write "_u" for unaligned access. */
5283 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5284 fprintf (file, "_u");
5288 if (GET_CODE (x) == REG)
5289 fprintf (file, "%s", reg_names[REGNO (x)]);
5290 else if (GET_CODE (x) == MEM)
5291 output_address (XEXP (x, 0));
5292 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5294 switch (XINT (XEXP (x, 0), 1))
5298 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5301 output_operand_lossage ("unknown relocation unspec");
5306 output_addr_const (file, x);
5310 output_operand_lossage ("invalid %%xn code");
5315 print_operand_address (FILE *file, rtx addr)
5318 HOST_WIDE_INT offset = 0;
5320 if (GET_CODE (addr) == AND)
5321 addr = XEXP (addr, 0);
5323 if (GET_CODE (addr) == PLUS
5324 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5326 offset = INTVAL (XEXP (addr, 1));
5327 addr = XEXP (addr, 0);
5330 if (GET_CODE (addr) == LO_SUM)
5332 const char *reloc16, *reloclo;
5333 rtx op1 = XEXP (addr, 1);
5335 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5337 op1 = XEXP (op1, 0);
5338 switch (XINT (op1, 1))
5342 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5346 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5349 output_operand_lossage ("unknown relocation unspec");
5353 output_addr_const (file, XVECEXP (op1, 0, 0));
5358 reloclo = "gprellow";
5359 output_addr_const (file, op1);
5363 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5365 addr = XEXP (addr, 0);
5366 switch (GET_CODE (addr))
5369 basereg = REGNO (addr);
5373 basereg = subreg_regno (addr);
5380 fprintf (file, "($%d)\t\t!%s", basereg,
5381 (basereg == 29 ? reloc16 : reloclo));
5385 switch (GET_CODE (addr))
5388 basereg = REGNO (addr);
5392 basereg = subreg_regno (addr);
5396 offset = INTVAL (addr);
5399 #if TARGET_ABI_OPEN_VMS
5401 fprintf (file, "%s", XSTR (addr, 0));
5405 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5406 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5407 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5408 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5409 INTVAL (XEXP (XEXP (addr, 0), 1)));
5417 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
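/* Illustrative example, not from the original source: for a PLUS of
   the stack pointer and 16, the fallthrough case above prints
   "16($30)"; for a plain REG the offset stays 0 and it prints
   "0($reg)".  */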
5420 /* Emit RTL insns to initialize the variable parts of a trampoline at
5421 TRAMP. FNADDR is an RTX for the address of the function's pure
5422 code. CXT is an RTX for the static chain value for the function.
5424 The three offset parameters are for the individual template's
5425 layout. A JMPOFS < 0 indicates that the trampoline does not
5426 contain instructions at all.
5428 We assume here that a function will be called many more times than
5429 its address is taken (e.g., it might be passed to qsort), so we
5430 take the trouble to initialize the "hint" field in the JMP insn.
5431 Note that the hint field is PC (new) + 4 * bits 13:0. */
5434 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5435 int fnofs, int cxtofs, int jmpofs)
5437 rtx temp, temp1, addr;
5438 /* VMS really uses DImode pointers in memory at this point. */
5439 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5441 #ifdef POINTERS_EXTEND_UNSIGNED
5442 fnaddr = convert_memory_address (mode, fnaddr);
5443 cxt = convert_memory_address (mode, cxt);
5446 /* Store function address and CXT. */
5447 addr = memory_address (mode, plus_constant (tramp, fnofs));
5448 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5449 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5450 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5452 /* This has been disabled since the hint only has a 32k range, and in
5453 no existing OS is the stack within 32k of the text segment. */
5454 if (0 && jmpofs >= 0)
5456 /* Compute hint value. */
5457 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5458 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5460 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5461 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5462 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5463 GEN_INT (0x3fff), 0);
5465 /* Merge in the hint. */
5466 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5467 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5468 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5469 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5471 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5474 #ifdef ENABLE_EXECUTE_STACK
5475 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5476 0, VOIDmode, 1, tramp, Pmode);
5480 emit_insn (gen_imb ());
5483 /* Determine where to put an argument to a function.
5484 Value is zero to push the argument on the stack,
5485 or a hard register in which to store the argument.
5487 MODE is the argument's machine mode.
5488 TYPE is the data type of the argument (as a tree).
5489 This is null for libcalls where that information may not be available.
5491 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5492 the preceding args and about the function being called.
5493 NAMED is nonzero if this argument is a named parameter
5494 (otherwise it is an extra parameter matching an ellipsis).
5496 On Alpha the first 6 words of args are normally in registers
5497 and the rest are pushed. */
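/* Illustrative note, not from the original comment: on OSF the six
   argument registers are $16-$21 for integer values (basereg 16) and,
   presumably, $f16-$f21 for floating-point values (basereg 32 + 16),
   which is why the code below returns
   gen_rtx_REG (mode, num_args + basereg).  */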
5500 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5501 int named ATTRIBUTE_UNUSED)
5506 /* Don't get confused and pass small structures in FP registers. */
5507 if (type && AGGREGATE_TYPE_P (type))
5511 #ifdef ENABLE_CHECKING
5512 /* With alpha_split_complex_arg, we shouldn't see any raw complex args. */
5514 gcc_assert (!COMPLEX_MODE_P (mode));
5517 /* Set up defaults for FP operands passed in FP registers, and
5518 integral operands passed in integer registers. */
5519 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5525 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5526 the three platforms, so we can't avoid conditional compilation. */
5527 #if TARGET_ABI_OPEN_VMS
5529 if (mode == VOIDmode)
5530 return alpha_arg_info_reg_val (cum);
5532 num_args = cum.num_args;
5534 || targetm.calls.must_pass_in_stack (mode, type))
5537 #elif TARGET_ABI_UNICOSMK
5541 /* If this is the last argument, generate the call info word (CIW). */
5542 /* ??? We don't include the caller's line number in the CIW because
5543 I don't know how to determine it if debug info is turned off. */
5544 if (mode == VOIDmode)
5553 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5554 if (cum.reg_args_type[i])
5555 lo |= (1 << (7 - i));
5557 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5560 lo |= cum.num_reg_words;
5562 #if HOST_BITS_PER_WIDE_INT == 32
5563 hi = (cum.num_args << 20) | cum.num_arg_words;
5565 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5566 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5569 ciw = immed_double_const (lo, hi, DImode);
5571 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5572 UNSPEC_UMK_LOAD_CIW);
5575 size = ALPHA_ARG_SIZE (mode, type, named);
5576 num_args = cum.num_reg_words;
5578 || cum.num_reg_words + size > 6
5579 || targetm.calls.must_pass_in_stack (mode, type))
5581 else if (type && TYPE_MODE (type) == BLKmode)
5585 reg1 = gen_rtx_REG (DImode, num_args + 16);
5586 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5588 /* The argument fits in two registers. Note that we still need to
5589 reserve a register for empty structures. */
5593 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5596 reg2 = gen_rtx_REG (DImode, num_args + 17);
5597 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5598 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5602 #elif TARGET_ABI_OSF
5608 /* VOID is passed as a special flag for "last argument". */
5609 if (type == void_type_node)
5611 else if (targetm.calls.must_pass_in_stack (mode, type))
5615 #error Unhandled ABI
5618 return gen_rtx_REG (mode, num_args + basereg);
5622 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5623 enum machine_mode mode ATTRIBUTE_UNUSED,
5624 tree type ATTRIBUTE_UNUSED,
5625 bool named ATTRIBUTE_UNUSED)
5629 #if TARGET_ABI_OPEN_VMS
5630 if (cum->num_args < 6
5631 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5632 words = 6 - cum->num_args;
5633 #elif TARGET_ABI_UNICOSMK
5634 /* Never any split arguments. */
5635 #elif TARGET_ABI_OSF
5636 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5639 #error Unhandled ABI
5642 return words * UNITS_PER_WORD;
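/* Worked example, not part of the original source: on OSF, with five
   argument words already used (*cum == 5), a 16-byte argument
   (ALPHA_ARG_SIZE of 2) satisfies 5 < 6 < 7, so one word is passed in
   $21 and this function returns 8; the remaining 8 bytes go on the
   stack.  */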
5646 /* Return true if TYPE must be returned in memory, instead of in registers. */
5649 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
5651 enum machine_mode mode = VOIDmode;
5656 mode = TYPE_MODE (type);
5658 /* All aggregates are returned in memory. */
5659 if (AGGREGATE_TYPE_P (type))
5663 size = GET_MODE_SIZE (mode);
5664 switch (GET_MODE_CLASS (mode))
5666 case MODE_VECTOR_FLOAT:
5667 /* Pass all float vectors in memory, like an aggregate. */
5670 case MODE_COMPLEX_FLOAT:
5671 /* We judge complex floats on the size of their element,
5672 not the size of the whole type. */
5673 size = GET_MODE_UNIT_SIZE (mode);
5678 case MODE_COMPLEX_INT:
5679 case MODE_VECTOR_INT:
5683 /* ??? We get called on all sorts of random stuff from
5684 aggregate_value_p. We must return something, but it's not
5685 clear what's safe to return. Pretend it's a struct I know about. */
5690 /* Otherwise types must fit in one register. */
5691 return size > UNITS_PER_WORD;
5694 /* Return true if TYPE should be passed by invisible reference. */
5697 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5698 enum machine_mode mode,
5699 tree type ATTRIBUTE_UNUSED,
5700 bool named ATTRIBUTE_UNUSED)
5702 return mode == TFmode || mode == TCmode;
5705 /* Define how to find the value returned by a function. VALTYPE is the
5706 data type of the value (as a tree). If the precise function being
5707 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5708 MODE is set instead of VALTYPE for libcalls.
5710 On Alpha the value is found in $0 for integer functions and
5711 $f0 for floating-point functions. */
5714 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5715 enum machine_mode mode)
5717 unsigned int regnum, dummy;
5718 enum mode_class class;
5720 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5723 mode = TYPE_MODE (valtype);
5725 class = GET_MODE_CLASS (mode);
5729 PROMOTE_MODE (mode, dummy, valtype);
5732 case MODE_COMPLEX_INT:
5733 case MODE_VECTOR_INT:
5741 case MODE_COMPLEX_FLOAT:
5743 enum machine_mode cmode = GET_MODE_INNER (mode);
5745 return gen_rtx_PARALLEL
5748 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5750 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5751 GEN_INT (GET_MODE_SIZE (cmode)))));
5758 return gen_rtx_REG (mode, regnum);
5761 /* TCmode complex values are passed by invisible reference. We
5762 should not split these values. */
5765 alpha_split_complex_arg (tree type)
5767 return TYPE_MODE (type) != TCmode;
5771 alpha_build_builtin_va_list (void)
5773 tree base, ofs, space, record, type_decl;
5775 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5776 return ptr_type_node;
5778 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5779 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5780 TREE_CHAIN (record) = type_decl;
5781 TYPE_NAME (record) = type_decl;
5783 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5785 /* Dummy field to prevent alignment warnings. */
5786 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5787 DECL_FIELD_CONTEXT (space) = record;
5788 DECL_ARTIFICIAL (space) = 1;
5789 DECL_IGNORED_P (space) = 1;
5791 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5793 DECL_FIELD_CONTEXT (ofs) = record;
5794 TREE_CHAIN (ofs) = space;
5796 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5798 DECL_FIELD_CONTEXT (base) = record;
5799 TREE_CHAIN (base) = ofs;
5801 TYPE_FIELDS (record) = base;
5802 layout_type (record);
5804 va_list_gpr_counter_field = ofs;
5809 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5810 and constant additions. */
5813 va_list_skip_additions (tree lhs)
5817 if (TREE_CODE (lhs) != SSA_NAME)
5822 stmt = SSA_NAME_DEF_STMT (lhs);
5824 if (TREE_CODE (stmt) == PHI_NODE)
5827 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT
5828 || GIMPLE_STMT_OPERAND (stmt, 0) != lhs)
5831 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
5832 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5833 rhs = TREE_OPERAND (rhs, 0);
5835 if ((TREE_CODE (rhs) != NOP_EXPR
5836 && TREE_CODE (rhs) != CONVERT_EXPR
5837 && (TREE_CODE (rhs) != PLUS_EXPR
5838 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5839 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5840 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5843 lhs = TREE_OPERAND (rhs, 0);
5847 /* Check if LHS = RHS statement is
5848 LHS = *(ap.__base + ap.__offset + cst)
5849 or
5850 LHS = *(ap.__base
5851 + ((ap.__offset + cst <= 47)
5852 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5853 If the former, indicate that GPR registers are needed,
5854 if the latter, indicate that FPR registers are needed.
5856 Also look for LHS = (*ptr).field, where ptr is one of the forms listed above.
5859 On alpha, cfun->va_list_gpr_size is used as size of the needed
5860 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5861 registers are needed and bit 1 set if FPR registers are needed.
5862 Return true if va_list references should not be scanned for the
5863 current statement. */
5866 alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
5868 tree base, offset, arg1, arg2;
5871 while (handled_component_p (rhs))
5872 rhs = TREE_OPERAND (rhs, 0);
5873 if (TREE_CODE (rhs) != INDIRECT_REF
5874 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5877 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5878 if (lhs == NULL_TREE
5879 || TREE_CODE (lhs) != PLUS_EXPR)
5882 base = TREE_OPERAND (lhs, 0);
5883 if (TREE_CODE (base) == SSA_NAME)
5884 base = va_list_skip_additions (base);
5886 if (TREE_CODE (base) != COMPONENT_REF
5887 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5889 base = TREE_OPERAND (lhs, 1);
5890 if (TREE_CODE (base) == SSA_NAME)
5891 base = va_list_skip_additions (base);
5893 if (TREE_CODE (base) != COMPONENT_REF
5894 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5900 base = get_base_address (base);
5901 if (TREE_CODE (base) != VAR_DECL
5902 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5905 offset = TREE_OPERAND (lhs, offset_arg);
5906 if (TREE_CODE (offset) == SSA_NAME)
5907 offset = va_list_skip_additions (offset);
5909 if (TREE_CODE (offset) == PHI_NODE)
5913 if (PHI_NUM_ARGS (offset) != 2)
5916 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
5917 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
5918 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5924 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5927 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
5930 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
5931 if (TREE_CODE (arg2) == MINUS_EXPR)
5933 if (sub < -48 || sub > -32)
5936 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
5940 if (TREE_CODE (arg1) == SSA_NAME)
5941 arg1 = va_list_skip_additions (arg1);
5943 if (TREE_CODE (arg1) != COMPONENT_REF
5944 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5945 || get_base_address (arg1) != base)
5948 /* Need floating point regs. */
5949 cfun->va_list_fpr_size |= 2;
5951 else if (TREE_CODE (offset) != COMPONENT_REF
5952 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
5953 || get_base_address (offset) != base)
5956 /* Need general regs. */
5957 cfun->va_list_fpr_size |= 1;
5961 si->va_list_escapes = true;
5966 /* Perform any actions needed for a function that is receiving a
5967 variable number of arguments. */
5970 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
5971 tree type, int *pretend_size, int no_rtl)
5973 CUMULATIVE_ARGS cum = *pcum;
5975 /* Skip the current argument. */
5976 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
5978 #if TARGET_ABI_UNICOSMK
5979 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
5980 arguments on the stack. Unfortunately, it doesn't always store the first
5981 one (i.e. the one that arrives in $16 or $f16). This is not a problem
5982 with stdargs as we always have at least one named argument there. */
5983 if (cum.num_reg_words < 6)
5987 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
5988 emit_insn (gen_arg_home_umk ());
5992 #elif TARGET_ABI_OPEN_VMS
5993 /* For VMS, we allocate space for all 6 arg registers plus a count.
5995 However, if NO registers need to be saved, don't allocate any space.
5996 This is not only because we won't need the space, but because AP
5997 includes the current_pretend_args_size and we don't want to mess up
5998 any ap-relative addresses already made. */
5999 if (cum.num_args < 6)
6003 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6004 emit_insn (gen_arg_home ());
6006 *pretend_size = 7 * UNITS_PER_WORD;
6009 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6010 only push those that are remaining. However, if NO registers need to
6011 be saved, don't allocate any space. This is not only because we won't
6012 need the space, but because AP includes the current_pretend_args_size
6013 and we don't want to mess up any ap-relative addresses already made.
6015 If we are not to use the floating-point registers, save the integer
6016 registers where we would put the floating-point registers. This is
6017 not the most efficient way to implement varargs with just one register
6018 class, but it isn't worth doing anything more efficient in this rare case. */
6025 int count, set = get_varargs_alias_set ();
6028 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6029 if (count > 6 - cum)
6032 /* Detect whether integer registers or floating-point registers
6033 are needed by the detected va_arg statements. See above for
6034 how these values are computed. Note that the "escape" value
6035 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of these bits set. */
6037 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6039 if (cfun->va_list_fpr_size & 1)
6041 tmp = gen_rtx_MEM (BLKmode,
6042 plus_constant (virtual_incoming_args_rtx,
6043 (cum + 6) * UNITS_PER_WORD));
6044 MEM_NOTRAP_P (tmp) = 1;
6045 set_mem_alias_set (tmp, set);
6046 move_block_from_reg (16 + cum, tmp, count);
6049 if (cfun->va_list_fpr_size & 2)
6051 tmp = gen_rtx_MEM (BLKmode,
6052 plus_constant (virtual_incoming_args_rtx,
6053 cum * UNITS_PER_WORD));
6054 MEM_NOTRAP_P (tmp) = 1;
6055 set_mem_alias_set (tmp, set);
6056 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6059 *pretend_size = 12 * UNITS_PER_WORD;
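/* Illustrative layout sketch, not from the original source: the
   96-byte (12 * UNITS_PER_WORD) save area holds the floating-point
   registers ($f16-$f21) in its first 48 bytes and the integer
   registers ($16-$21) in the next 48, matching the offsets used by
   alpha_va_start below.  */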
6064 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6066 HOST_WIDE_INT offset;
6067 tree t, offset_field, base_field;
6069 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6072 if (TARGET_ABI_UNICOSMK)
6073 std_expand_builtin_va_start (valist, nextarg);
6075 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6076 up by 48, storing fp arg registers in the first 48 bytes, and the
6077 integer arg registers in the next 48 bytes. This is only done,
6078 however, if any integer registers need to be stored.
6080 If no integer registers need be stored, then we must subtract 48
6081 in order to account for the integer arg registers which are counted
6082 in argsize above, but which are not actually stored on the stack.
6083 Must further be careful here about structures straddling the last
6084 integer argument register; that futzes with pretend_args_size,
6085 which changes the meaning of AP. */
6088 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6090 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6092 if (TARGET_ABI_OPEN_VMS)
6094 nextarg = plus_constant (nextarg, offset);
6095 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6096 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist,
6097 make_tree (ptr_type_node, nextarg));
6098 TREE_SIDE_EFFECTS (t) = 1;
6100 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6104 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6105 offset_field = TREE_CHAIN (base_field);
6107 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6108 valist, base_field, NULL_TREE);
6109 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6110 valist, offset_field, NULL_TREE);
6112 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6113 t = build2 (PLUS_EXPR, ptr_type_node, t,
6114 build_int_cst (NULL_TREE, offset));
6115 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (base_field), base_field, t);
6116 TREE_SIDE_EFFECTS (t) = 1;
6117 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6119 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6120 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset_field),
6122 TREE_SIDE_EFFECTS (t) = 1;
6123 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6128 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6130 tree type_size, ptr_type, addend, t, addr, internal_post;
6132 /* If the type could not be passed in registers, skip the block
6133 reserved for the registers. */
6134 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6136 t = build_int_cst (TREE_TYPE (offset), 6*8);
6137 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset), offset,
6138 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
6139 gimplify_and_add (t, pre_p);
6143 ptr_type = build_pointer_type (type);
6145 if (TREE_CODE (type) == COMPLEX_TYPE)
6147 tree real_part, imag_part, real_temp;
6149 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6152 /* Copy the value into a new temporary, lest the formal temporary
6153 be reused out from under us. */
6154 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6156 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6159 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6161 else if (TREE_CODE (type) == REAL_TYPE)
6163 tree fpaddend, cond, fourtyeight;
6165 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6166 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6167 addend, fourtyeight);
6168 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6169 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6173 /* Build the final address and force that value into a temporary. */
6174 addr = build2 (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6175 fold_convert (ptr_type, addend));
6176 internal_post = NULL;
6177 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6178 append_to_statement_list (internal_post, pre_p);
6180 /* Update the offset field. */
6181 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6182 if (type_size == NULL || TREE_OVERFLOW (type_size))
6186 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6187 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6188 t = size_binop (MULT_EXPR, t, size_int (8));
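/* Worked example, not part of the original source: the three steps
   above round the size up to a multiple of 8, so a 12-byte type
   advances the offset by (12 + 7) / 8 * 8 == 16.  */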
6190 t = fold_convert (TREE_TYPE (offset), t);
6191 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset,
6192 build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6193 gimplify_and_add (t, pre_p);
6195 return build_va_arg_indirect_ref (addr);
6199 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6201 tree offset_field, base_field, offset, base, t, r;
6204 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6205 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6207 base_field = TYPE_FIELDS (va_list_type_node);
6208 offset_field = TREE_CHAIN (base_field);
6209 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6210 valist, base_field, NULL_TREE);
6211 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6212 valist, offset_field, NULL_TREE);
6214 /* Pull the fields of the structure out into temporaries. Since we never
6215 modify the base field, we can use a formal temporary. Sign-extend the
6216 offset field so that it's the proper width for pointer arithmetic. */
6217 base = get_formal_tmp_var (base_field, pre_p);
6219 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6220 offset = get_initialized_tmp_var (t, pre_p, NULL);
6222 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6224 type = build_pointer_type (type);
6226 /* Find the value. Note that this will be a stable indirection, or
6227 a composite of stable indirections in the case of complex. */
6228 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6230 /* Stuff the offset temporary back into its field. */
6231 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset_field,
6232 fold_convert (TREE_TYPE (offset_field), offset));
6233 gimplify_and_add (t, pre_p);
6236 r = build_va_arg_indirect_ref (r);
6245 ALPHA_BUILTIN_CMPBGE,
6246 ALPHA_BUILTIN_EXTBL,
6247 ALPHA_BUILTIN_EXTWL,
6248 ALPHA_BUILTIN_EXTLL,
6249 ALPHA_BUILTIN_EXTQL,
6250 ALPHA_BUILTIN_EXTWH,
6251 ALPHA_BUILTIN_EXTLH,
6252 ALPHA_BUILTIN_EXTQH,
6253 ALPHA_BUILTIN_INSBL,
6254 ALPHA_BUILTIN_INSWL,
6255 ALPHA_BUILTIN_INSLL,
6256 ALPHA_BUILTIN_INSQL,
6257 ALPHA_BUILTIN_INSWH,
6258 ALPHA_BUILTIN_INSLH,
6259 ALPHA_BUILTIN_INSQH,
6260 ALPHA_BUILTIN_MSKBL,
6261 ALPHA_BUILTIN_MSKWL,
6262 ALPHA_BUILTIN_MSKLL,
6263 ALPHA_BUILTIN_MSKQL,
6264 ALPHA_BUILTIN_MSKWH,
6265 ALPHA_BUILTIN_MSKLH,
6266 ALPHA_BUILTIN_MSKQH,
6267 ALPHA_BUILTIN_UMULH,
6269 ALPHA_BUILTIN_ZAPNOT,
6270 ALPHA_BUILTIN_AMASK,
6271 ALPHA_BUILTIN_IMPLVER,
6273 ALPHA_BUILTIN_THREAD_POINTER,
6274 ALPHA_BUILTIN_SET_THREAD_POINTER,
6277 ALPHA_BUILTIN_MINUB8,
6278 ALPHA_BUILTIN_MINSB8,
6279 ALPHA_BUILTIN_MINUW4,
6280 ALPHA_BUILTIN_MINSW4,
6281 ALPHA_BUILTIN_MAXUB8,
6282 ALPHA_BUILTIN_MAXSB8,
6283 ALPHA_BUILTIN_MAXUW4,
6284 ALPHA_BUILTIN_MAXSW4,
6288 ALPHA_BUILTIN_UNPKBL,
6289 ALPHA_BUILTIN_UNPKBW,
6294 ALPHA_BUILTIN_CTPOP,
6299 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6300 CODE_FOR_builtin_cmpbge,
6301 CODE_FOR_builtin_extbl,
6302 CODE_FOR_builtin_extwl,
6303 CODE_FOR_builtin_extll,
6304 CODE_FOR_builtin_extql,
6305 CODE_FOR_builtin_extwh,
6306 CODE_FOR_builtin_extlh,
6307 CODE_FOR_builtin_extqh,
6308 CODE_FOR_builtin_insbl,
6309 CODE_FOR_builtin_inswl,
6310 CODE_FOR_builtin_insll,
6311 CODE_FOR_builtin_insql,
6312 CODE_FOR_builtin_inswh,
6313 CODE_FOR_builtin_inslh,
6314 CODE_FOR_builtin_insqh,
6315 CODE_FOR_builtin_mskbl,
6316 CODE_FOR_builtin_mskwl,
6317 CODE_FOR_builtin_mskll,
6318 CODE_FOR_builtin_mskql,
6319 CODE_FOR_builtin_mskwh,
6320 CODE_FOR_builtin_msklh,
6321 CODE_FOR_builtin_mskqh,
6322 CODE_FOR_umuldi3_highpart,
6323 CODE_FOR_builtin_zap,
6324 CODE_FOR_builtin_zapnot,
6325 CODE_FOR_builtin_amask,
6326 CODE_FOR_builtin_implver,
6327 CODE_FOR_builtin_rpcc,
6332 CODE_FOR_builtin_minub8,
6333 CODE_FOR_builtin_minsb8,
6334 CODE_FOR_builtin_minuw4,
6335 CODE_FOR_builtin_minsw4,
6336 CODE_FOR_builtin_maxub8,
6337 CODE_FOR_builtin_maxsb8,
6338 CODE_FOR_builtin_maxuw4,
6339 CODE_FOR_builtin_maxsw4,
6340 CODE_FOR_builtin_perr,
6341 CODE_FOR_builtin_pklb,
6342 CODE_FOR_builtin_pkwb,
6343 CODE_FOR_builtin_unpkbl,
6344 CODE_FOR_builtin_unpkbw,
6349 CODE_FOR_popcountdi2
6352 struct alpha_builtin_def
6354 const char *name;
6355 enum alpha_builtin code;
6356 unsigned int target_mask;
6357 bool is_const;
6360 static struct alpha_builtin_def const zero_arg_builtins[] = {
6361 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6362 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6365 static struct alpha_builtin_def const one_arg_builtins[] = {
6366 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6367 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6368 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6369 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6370 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6371 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6372 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6373 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6376 static struct alpha_builtin_def const two_arg_builtins[] = {
6377 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6378 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6379 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6380 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6381 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6382 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6383 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6384 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6385 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6386 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6387 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6388 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6389 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6390 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6391 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6392 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6393 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6394 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6395 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6396 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6397 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6398 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6399 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6400 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6401 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6402 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6403 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6404 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6405 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6406 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6407 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6408 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6409 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6410 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6413 static GTY(()) tree alpha_v8qi_u;
6414 static GTY(()) tree alpha_v8qi_s;
6415 static GTY(()) tree alpha_v4hi_u;
6416 static GTY(()) tree alpha_v4hi_s;
6418 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6419 functions pointed to by P, with function type FTYPE. */
6422 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6428 for (i = 0; i < count; ++i, ++p)
6429 if ((target_flags & p->target_mask) == p->target_mask)
6431 decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6434 TREE_READONLY (decl) = 1;
6435 TREE_NOTHROW (decl) = 1;
6441 alpha_init_builtins (void)
6443 tree dimode_integer_type_node;
6446 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6448 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6449 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6452 ftype = build_function_type_list (dimode_integer_type_node,
6453 dimode_integer_type_node, NULL_TREE);
6454 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6457 ftype = build_function_type_list (dimode_integer_type_node,
6458 dimode_integer_type_node,
6459 dimode_integer_type_node, NULL_TREE);
6460 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6463 ftype = build_function_type (ptr_type_node, void_list_node);
6464 decl = add_builtin_function ("__builtin_thread_pointer", ftype,
6465 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6467 TREE_NOTHROW (decl) = 1;
6469 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6470 decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
6471 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6473 TREE_NOTHROW (decl) = 1;
6475 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6476 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6477 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6478 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6481 /* Expand an expression EXP that calls a built-in function,
6482 with result going to TARGET if that's convenient
6483 (and in mode MODE if that's convenient).
6484 SUBTARGET may be used as the target for computing one of EXP's operands.
6485 IGNORE is nonzero if the value is to be ignored. */
6488 alpha_expand_builtin (tree exp, rtx target,
6489 rtx subtarget ATTRIBUTE_UNUSED,
6490 enum machine_mode mode ATTRIBUTE_UNUSED,
6491 int ignore ATTRIBUTE_UNUSED)
6495 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6496 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6498 call_expr_arg_iterator iter;
6499 enum insn_code icode;
6500 rtx op[MAX_ARGS], pat;
6504 if (fcode >= ALPHA_BUILTIN_max)
6505 internal_error ("bad builtin fcode");
6506 icode = code_for_builtin[fcode];
6507 if (icode == 0)
6508 internal_error ("bad builtin fcode");
6510 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6513 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6515 const struct insn_operand_data *insn_op;
6517 if (arg == error_mark_node)
6519 if (arity > MAX_ARGS)
6522 insn_op = &insn_data[icode].operand[arity + nonvoid];
6524 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6526 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6527 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6533 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6535 || GET_MODE (target) != tmode
6536 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6537 target = gen_reg_rtx (tmode);
6543 pat = GEN_FCN (icode) (target);
6547 pat = GEN_FCN (icode) (target, op[0]);
6549 pat = GEN_FCN (icode) (op[0]);
6552 pat = GEN_FCN (icode) (target, op[0], op[1]);
6568 /* Several bits below assume HWI >= 64 bits. This should be enforced by config.gcc. */
6570 #if HOST_BITS_PER_WIDE_INT < 64
6571 # error "HOST_WIDE_INT too small"
6574 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6575 with an 8-bit output vector. OPINT contains the integer operands; bit N
6576 of OP_CONST is set if OPINT[N] is valid. */
6579 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6584 for (i = 0, val = 0; i < 8; ++i)
6586 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6587 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6591 return build_int_cst (long_integer_type_node, val);
6593 else if (op_const == 2 && opint[1] == 0)
6594 return build_int_cst (long_integer_type_node, 0xff);
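/* Worked example, not part of the original source:
   __builtin_alpha_cmpbge (0x0102, 0x0201) compares byte 0
   (0x02 >= 0x01, true), byte 1 (0x01 >= 0x02, false) and bytes 2-7
   (0 >= 0, true), giving 0xfd.  */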
6598 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6599 specialized form of an AND operation. Other byte manipulation instructions
6600 are defined in terms of this instruction, so this is also used as a
6601 subroutine for other builtins.
6603 OP contains the tree operands; OPINT contains the extracted integer values.
6604 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6605 OPINT need be considered. */
6608 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6613 unsigned HOST_WIDE_INT mask = 0;
6616 for (i = 0; i < 8; ++i)
6617 if ((opint[1] >> i) & 1)
6618 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6621 return build_int_cst (long_integer_type_node, opint[0] & mask);
6624 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6625 build_int_cst (long_integer_type_node, mask));
6627 else if ((op_const & 1) && opint[0] == 0)
6628 return build_int_cst (long_integer_type_node, 0);
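/* Worked example, not part of the original source:
   __builtin_alpha_zapnot (0x123456789abcdef0, 0x0f) builds the mask
   0x00000000ffffffff from the low four mask bits and folds to
   0x000000009abcdef0.  */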
6632 /* Fold the builtins for the EXT family of instructions. */
6635 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6636 long op_const, unsigned HOST_WIDE_INT bytemask,
6640 tree *zap_op = NULL;
6644 unsigned HOST_WIDE_INT loc;
6647 if (BYTES_BIG_ENDIAN)
6655 unsigned HOST_WIDE_INT temp = opint[0];
6668 opint[1] = bytemask;
6669 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6672 /* Fold the builtins for the INS family of instructions. */
6675 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6676 long op_const, unsigned HOST_WIDE_INT bytemask,
6679 if ((op_const & 1) && opint[0] == 0)
6680 return build_int_cst (long_integer_type_node, 0);
6684 unsigned HOST_WIDE_INT temp, loc, byteloc;
6685 tree *zap_op = NULL;
6688 if (BYTES_BIG_ENDIAN)
6695 byteloc = (64 - (loc * 8)) & 0x3f;
6712 opint[1] = bytemask;
6713 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6720 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6721 long op_const, unsigned HOST_WIDE_INT bytemask,
6726 unsigned HOST_WIDE_INT loc;
6729 if (BYTES_BIG_ENDIAN)
6736 opint[1] = bytemask ^ 0xff;
6739 return alpha_fold_builtin_zapnot (op, opint, op_const);
6743 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6749 unsigned HOST_WIDE_INT l;
6752 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6754 #if HOST_BITS_PER_WIDE_INT > 64
6758 return build_int_cst (long_integer_type_node, h);
6762 opint[1] = opint[0];
6765 /* Note that (X*1) >> 64 == 0. */
6766 if (opint[1] == 0 || opint[1] == 1)
6767 return build_int_cst (long_integer_type_node, 0);
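/* Worked example, not part of the original source:
   __builtin_alpha_umulh (1UL << 32, 1UL << 32) folds to 1, since the
   128-bit product is 2^64 and only the high 64 bits are kept.  */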
6774 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6776 tree op0 = fold_convert (vtype, op[0]);
6777 tree op1 = fold_convert (vtype, op[1]);
6778 tree val = fold_build2 (code, vtype, op0, op1);
6779 return fold_convert (long_integer_type_node, val);
6783 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6785 unsigned HOST_WIDE_INT temp = 0;
6791 for (i = 0; i < 8; ++i)
6793 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6794 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6801 return build_int_cst (long_integer_type_node, temp);
6805 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6807 unsigned HOST_WIDE_INT temp;
6812 temp = opint[0] & 0xff;
6813 temp |= (opint[0] >> 24) & 0xff00;
6815 return build_int_cst (long_integer_type_node, temp);
6819 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6821 unsigned HOST_WIDE_INT temp;
6826 temp = opint[0] & 0xff;
6827 temp |= (opint[0] >> 8) & 0xff00;
6828 temp |= (opint[0] >> 16) & 0xff0000;
6829 temp |= (opint[0] >> 24) & 0xff000000;
6831 return build_int_cst (long_integer_type_node, temp);
6835 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6837 unsigned HOST_WIDE_INT temp;
6842 temp = opint[0] & 0xff;
6843 temp |= (opint[0] & 0xff00) << 24;
6845 return build_int_cst (long_integer_type_node, temp);
6849 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6851 unsigned HOST_WIDE_INT temp;
6856 temp = opint[0] & 0xff;
6857 temp |= (opint[0] & 0x0000ff00) << 8;
6858 temp |= (opint[0] & 0x00ff0000) << 16;
6859 temp |= (opint[0] & 0xff000000) << 24;
6861 return build_int_cst (long_integer_type_node, temp);
6865 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6867 unsigned HOST_WIDE_INT temp;
6875 temp = exact_log2 (opint[0] & -opint[0]);
6877 return build_int_cst (long_integer_type_node, temp);
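/* Worked example, not part of the original source: for
   opint[0] == 0x14 (binary 10100), opint[0] & -opint[0] isolates the
   lowest set bit (4) and exact_log2 gives 2, the count of trailing
   zeros.  */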
6881 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6883 unsigned HOST_WIDE_INT temp;
6891 temp = 64 - floor_log2 (opint[0]) - 1;
6893 return build_int_cst (long_integer_type_node, temp);
6897 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6899 unsigned HOST_WIDE_INT temp, op;
6907 temp++, op &= op - 1;
6909 return build_int_cst (long_integer_type_node, temp);
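/* Worked example, not part of the original source: the loop above
   clears the lowest set bit on each iteration (Kernighan's method),
   so for op == 0xb (binary 1011) it runs three times and the fold
   returns 3.  */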
6912 /* Fold one of our builtin functions. */
6915 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6917 tree op[MAX_ARGS], t;
6918 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6919 long op_const = 0, arity = 0;
6921 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6923 tree arg = TREE_VALUE (t);
6924 if (arg == error_mark_node)
6926 if (arity >= MAX_ARGS)
6931 if (TREE_CODE (arg) == INTEGER_CST)
6933 op_const |= 1L << arity;
6934 opint[arity] = int_cst_value (arg);
6938 switch (DECL_FUNCTION_CODE (fndecl))
6940 case ALPHA_BUILTIN_CMPBGE:
6941 return alpha_fold_builtin_cmpbge (opint, op_const);
6943 case ALPHA_BUILTIN_EXTBL:
6944 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6945 case ALPHA_BUILTIN_EXTWL:
6946 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6947 case ALPHA_BUILTIN_EXTLL:
6948 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6949 case ALPHA_BUILTIN_EXTQL:
6950 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6951 case ALPHA_BUILTIN_EXTWH:
6952 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6953 case ALPHA_BUILTIN_EXTLH:
6954 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6955 case ALPHA_BUILTIN_EXTQH:
6956 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6958 case ALPHA_BUILTIN_INSBL:
6959 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
6960 case ALPHA_BUILTIN_INSWL:
6961 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
6962 case ALPHA_BUILTIN_INSLL:
6963 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
6964 case ALPHA_BUILTIN_INSQL:
6965 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
6966 case ALPHA_BUILTIN_INSWH:
6967 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
6968 case ALPHA_BUILTIN_INSLH:
6969 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
6970 case ALPHA_BUILTIN_INSQH:
6971 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
6973 case ALPHA_BUILTIN_MSKBL:
6974 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
6975 case ALPHA_BUILTIN_MSKWL:
6976 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
6977 case ALPHA_BUILTIN_MSKLL:
6978 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
6979 case ALPHA_BUILTIN_MSKQL:
6980 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
6981 case ALPHA_BUILTIN_MSKWH:
6982 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
6983 case ALPHA_BUILTIN_MSKLH:
6984 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
6985 case ALPHA_BUILTIN_MSKQH:
6986 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
6988 case ALPHA_BUILTIN_UMULH:
6989 return alpha_fold_builtin_umulh (opint, op_const);
6991 case ALPHA_BUILTIN_ZAP:
6994 case ALPHA_BUILTIN_ZAPNOT:
6995 return alpha_fold_builtin_zapnot (op, opint, op_const);
6997 case ALPHA_BUILTIN_MINUB8:
6998 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
6999 case ALPHA_BUILTIN_MINSB8:
7000 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7001 case ALPHA_BUILTIN_MINUW4:
7002 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7003 case ALPHA_BUILTIN_MINSW4:
7004 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7005 case ALPHA_BUILTIN_MAXUB8:
7006 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7007 case ALPHA_BUILTIN_MAXSB8:
7008 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7009 case ALPHA_BUILTIN_MAXUW4:
7010 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7011 case ALPHA_BUILTIN_MAXSW4:
7012 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7014 case ALPHA_BUILTIN_PERR:
7015 return alpha_fold_builtin_perr (opint, op_const);
7016 case ALPHA_BUILTIN_PKLB:
7017 return alpha_fold_builtin_pklb (opint, op_const);
7018 case ALPHA_BUILTIN_PKWB:
7019 return alpha_fold_builtin_pkwb (opint, op_const);
7020 case ALPHA_BUILTIN_UNPKBL:
7021 return alpha_fold_builtin_unpkbl (opint, op_const);
7022 case ALPHA_BUILTIN_UNPKBW:
7023 return alpha_fold_builtin_unpkbw (opint, op_const);
7025 case ALPHA_BUILTIN_CTTZ:
7026 return alpha_fold_builtin_cttz (opint, op_const);
7027 case ALPHA_BUILTIN_CTLZ:
7028 return alpha_fold_builtin_ctlz (opint, op_const);
7029 case ALPHA_BUILTIN_CTPOP:
7030 return alpha_fold_builtin_ctpop (opint, op_const);
7032 case ALPHA_BUILTIN_AMASK:
7033 case ALPHA_BUILTIN_IMPLVER:
7034 case ALPHA_BUILTIN_RPCC:
7035 case ALPHA_BUILTIN_THREAD_POINTER:
7036 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7037 /* None of these are foldable at compile-time. */
7043 /* This page contains routines that are used to determine what the function
7044 prologue and epilogue code will do and write them out. */
7046 /* Compute the size of the save area in the stack. */
7048 /* These variables are used for communication between the following functions.
7049 They indicate various things about the current function being compiled
7050 that are used to tell what kind of prologue, epilogue and procedure
7051 descriptor to generate. */
7053 /* Nonzero if we need a stack procedure. */
7054 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7055 static enum alpha_procedure_types alpha_procedure_type;
7057 /* Register number (either FP or SP) that is used to unwind the frame. */
7058 static int vms_unwind_regno;
7060 /* Register number used to save FP. We need not have one for RA since
7061 we don't modify it for register procedures. This is only defined
7062 for register frame procedures. */
7063 static int vms_save_fp_regno;
7065 /* Register number used to reference objects off our PV. */
7066 static int vms_base_regno;
7068 /* Compute register masks for saved registers. */
7071 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7073 unsigned long imask = 0;
7074 unsigned long fmask = 0;
7077 /* When outputting a thunk, we don't have valid register life info,
7078 but assemble_start_function wants to output .frame and .mask directives. */
7080 if (current_function_is_thunk)
7087 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7088 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7090 /* One for every register we have to save. */
7091 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7092 if (! fixed_regs[i] && ! call_used_regs[i]
7093 && df_regs_ever_live_p (i) && i != REG_RA
7094 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7097 imask |= (1UL << i);
7099 fmask |= (1UL << (i - 32));
7102 /* We need to restore these for the handler. */
7103 if (current_function_calls_eh_return)
7107 unsigned regno = EH_RETURN_DATA_REGNO (i);
7108 if (regno == INVALID_REGNUM)
7110 imask |= 1UL << regno;
7114 /* If any register spilled, then spill the return address also. */
7115 /* ??? This is required by the Digital stack unwind specification
7116 and isn't needed if we're doing Dwarf2 unwinding. */
7117 if (imask || fmask || alpha_ra_ever_killed ())
7118 imask |= (1UL << REG_RA);
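/* Illustrative example, not from the original source: a function
   that must save $9, $10 and the return address ends up with
   imask == (1UL << 9) | (1UL << 10) | (1UL << 26), REG_RA being
   register 26.  */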
7125 alpha_sa_size (void)
7127 unsigned long mask[2];
7131 alpha_sa_mask (&mask[0], &mask[1]);
7133 if (TARGET_ABI_UNICOSMK)
7135 if (mask[0] || mask[1])
7140 for (j = 0; j < 2; ++j)
7141 for (i = 0; i < 32; ++i)
7142 if ((mask[j] >> i) & 1)
7146 if (TARGET_ABI_UNICOSMK)
7148 /* We might not need to generate a frame if we don't make any calls
7149 (including calls to __T3E_MISMATCH if this is a vararg function),
7150 don't have any local variables which require stack slots, don't
7151 use alloca and have not determined that we need a frame for other reasons. */
7154 alpha_procedure_type
7155 = (sa_size || get_frame_size() != 0
7156 || current_function_outgoing_args_size
7157 || current_function_stdarg || current_function_calls_alloca
7158 || frame_pointer_needed)
7159 ? PT_STACK : PT_REGISTER;
7161 /* Always reserve space for saving callee-saved registers if we
7162 need a frame as required by the calling convention. */
7163 if (alpha_procedure_type == PT_STACK)
7166 else if (TARGET_ABI_OPEN_VMS)
7168 /* Start by assuming we can use a register procedure if we don't
7169 make any calls (REG_RA not used) or need to save any
7170 registers and a stack procedure if we do. */
7171 if ((mask[0] >> REG_RA) & 1)
7172 alpha_procedure_type = PT_STACK;
7173 else if (get_frame_size() != 0)
7174 alpha_procedure_type = PT_REGISTER;
7176 alpha_procedure_type = PT_NULL;
7178 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7179 made the final decision on stack procedure vs register procedure. */
7180 if (alpha_procedure_type == PT_STACK)
7183 /* Decide whether to refer to objects off our PV via FP or PV.
7184 If we need FP for something else or if we receive a nonlocal
7185 goto (which expects PV to contain the value), we must use PV.
7186 Otherwise, start by assuming we can use FP. */
7189 = (frame_pointer_needed
7190 || current_function_has_nonlocal_label
7191 || alpha_procedure_type == PT_STACK
7192 || current_function_outgoing_args_size)
7193 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7195 /* If we want to copy PV into FP, we need to find some register
7196 in which to save FP. */
7198 vms_save_fp_regno = -1;
7199 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7200 for (i = 0; i < 32; i++)
7201 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7202 vms_save_fp_regno = i;
7204 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7205 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7206 else if (alpha_procedure_type == PT_NULL)
7207 vms_base_regno = REG_PV;
7209 /* Stack unwinding should be done via FP unless we use it for PV. */
7210 vms_unwind_regno = (vms_base_regno == REG_PV
7211 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7213 /* If this is a stack procedure, allow space for saving FP and RA. */
7214 if (alpha_procedure_type == PT_STACK)
7219 /* Our size must be even (multiple of 16 bytes). */
7227 /* Define the offset between two registers, one to be eliminated,
7228 and the other its replacement, at the start of a routine. */
HOST_WIDE_INT
alpha_initial_elimination_offset (unsigned int from,
				  unsigned int to ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT ret;

  ret = alpha_sa_size ();
  ret += ALPHA_ROUND (current_function_outgoing_args_size);

  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      break;

    case ARG_POINTER_REGNUM:
      ret += (ALPHA_ROUND (get_frame_size ()
			   + current_function_pretend_args_size)
	      - current_function_pretend_args_size);
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
int
alpha_pv_save_size (void)
{
  alpha_sa_size ();
  return alpha_procedure_type == PT_STACK ? 8 : 0;
}

int
alpha_using_fp (void)
{
  alpha_sa_size ();
  return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
}
7271 #if TARGET_ABI_OPEN_VMS
const struct attribute_spec vms_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "overlaid",   0, 0, true,  false, false, NULL },
  { "global",     0, 0, true,  false, false, NULL },
  { "initialize", 0, 0, true,  false, false, NULL },
  { NULL,         0, 0, false, false, false, NULL }
};
#endif
static int
find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
}

int
alpha_find_lo_sum_using_gp (rtx insn)
{
  return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
}
static bool
alpha_does_function_need_gp (void)
{
  rtx insn;

  /* The GP being variable is an OSF abi thing.  */
  if (! TARGET_ABI_OSF)
    return false;

  /* We need the gp to load the address of __mcount.  */
  if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
    return true;

  /* The code emitted by alpha_output_mi_thunk_osf uses the gp.  */
  if (current_function_is_thunk)
    return true;

  /* The nonlocal receiver pattern assumes that the gp is valid for
     the nested function.  Reasonable because it's almost always set
     correctly already.  For the cases where that's wrong, make sure
     the nested function loads its gp on entry.  */
  if (current_function_has_nonlocal_goto)
    return true;

  /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
     Even if we are a static function, we still need to do this in case
     our address is taken and passed to something like qsort.  */

  push_topmost_sequence ();
  insn = get_insns ();
  pop_topmost_sequence ();

  for (; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& ! JUMP_TABLE_DATA_P (insn)
	&& GET_CODE (PATTERN (insn)) != USE
	&& GET_CODE (PATTERN (insn)) != CLOBBER
	&& get_attr_usegp (insn))
      return true;

  return false;
}
/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
   sequences.  */

static rtx
set_frame_related_p (void)
{
  rtx seq = get_insns ();
  rtx insn;

  end_sequence ();

  if (!seq)
    return NULL_RTX;

  if (INSN_P (seq))
    {
      insn = seq;
      while (insn != NULL_RTX)
	{
	  RTX_FRAME_RELATED_P (insn) = 1;
	  insn = NEXT_INSN (insn);
	}
      seq = emit_insn (seq);
    }
  else
    {
      seq = emit_insn (seq);
      RTX_FRAME_RELATED_P (seq) = 1;
    }
  return seq;
}

#define FRP(exp)  (start_sequence (), exp, set_frame_related_p ())
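/* For illustration (not part of the original source): FRP evaluates EXP
   inside a fresh sequence and then marks everything it emitted as
   frame-related, so a prologue move such as

     FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));

   both emits the move and sets RTX_FRAME_RELATED_P on it for the unwind
   info, exactly as in the uses later in this file.  */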
7374 /* Generates a store with the proper unwind info attached. VALUE is
7375 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7376 contains SP+FRAME_BIAS, and that is the unwind info that should be
7377 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7378 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
static void
emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
		    HOST_WIDE_INT base_ofs, rtx frame_reg)
{
  rtx addr, mem, insn;

  addr = plus_constant (base_reg, base_ofs);
  mem = gen_rtx_MEM (DImode, addr);
  set_mem_alias_set (mem, alpha_sr_alias_set);

  insn = emit_move_insn (mem, value);
  RTX_FRAME_RELATED_P (insn) = 1;

  if (frame_bias || value != frame_reg)
    {
      if (frame_bias)
	{
	  addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
	  mem = gen_rtx_MEM (DImode, addr);
	}

      REG_NOTES (insn)
	= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			     gen_rtx_SET (VOIDmode, mem, frame_reg),
			     REG_NOTES (insn));
    }
}
static void
emit_frame_store (unsigned int regno, rtx base_reg,
		  HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
{
  rtx reg = gen_rtx_REG (DImode, regno);
  emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
}
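/* For illustration (not part of the original source): a typical prologue
   call such as emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset)
   assembles to a single "stq $26,reg_offset($24)" (or off $30 when no
   bias is in use), while the attached REG_FRAME_RELATED_EXPR note always
   describes the store relative to the stack pointer.  */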
7416 /* Write function prologue. */
7418 /* On vms we have two kinds of functions:
7420 - stack frame (PROC_STACK)
7421 these are 'normal' functions with local vars and which are
7422 calling other functions
7423 - register frame (PROC_REGISTER)
7424 keeps all data in registers, needs no stack
   We must pass this to the assembler so it can generate the
   proper pdsc (procedure descriptor).
   This is done with the '.pdesc' command.

   On non-VMS targets, we don't really differentiate between the two, as we
   can simply allocate stack without saving registers.  */
void
alpha_expand_prologue (void)
{
  /* Registers to save.  */
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  rtx sa_reg;
  int i;

  sa_size = alpha_sa_size ();

  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 8 : 0)
			      + frame_size
			      + current_function_pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    /* We have to allocate space for the DSIB if we generate a frame.  */
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 48 : 0))
		 + ALPHA_ROUND (frame_size
				+ current_function_outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
		  + sa_size
		  + ALPHA_ROUND (frame_size
				 + current_function_pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8;
  else
    reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
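  /* For illustration (not part of the original source): on OSF/1 with
     sa_size == 32, frame_size == 40, 16 bytes of outgoing args and no
     pretend args, frame_size becomes ALPHA_ROUND (16) + 32
     + ALPHA_ROUND (40) == 16 + 32 + 48 == 96, and reg_offset == 16, so
     the register save area sits just above the outgoing argument area.  */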
7473 alpha_sa_mask (&imask, &fmask);
  /* Emit an insn to reload GP, if needed.  */
  if (TARGET_ABI_OSF)
    {
      alpha_function_needs_gp = alpha_does_function_need_gp ();
      if (alpha_function_needs_gp)
	emit_insn (gen_prologue_ldgp ());
    }
7483 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7484 the call to mcount ourselves, rather than having the linker do it
7485 magically in response to -pg. Since _mcount has special linkage,
7486 don't represent the call as a call. */
7487 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7488 emit_insn (gen_prologue_mcount ());
7490 if (TARGET_ABI_UNICOSMK)
7491 unicosmk_gen_dsib (&imask);
7493 /* Adjust the stack by the frame size. If the frame size is > 4096
7494 bytes, we need to be sure we probe somewhere in the first and last
7495 4096 bytes (we can probably get away without the latter test) and
7496 every 8192 bytes in between. If the frame size is > 32768, we
     do this in a loop.  Otherwise, we generate the explicit probe
     instructions.

     Note that we are only allowed to adjust sp once in the prologue.  */
  if (frame_size <= 32768)
    {
      if (frame_size > 4096)
	{
	  int probed;

	  for (probed = 4096; probed < frame_size; probed += 8192)
	    emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
						 ? -probed + 64
						 : -probed)));

	  /* We only have to do this probe if we aren't saving registers.  */
	  if (sa_size == 0 && frame_size > probed - 4096)
	    emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
	}

      if (frame_size != 0)
	FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
				    GEN_INT (TARGET_ABI_UNICOSMK
					     ? -frame_size + 64
					     : -frame_size))));
    }
  else
    {
7526 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7527 number of 8192 byte blocks to probe. We then probe each block
7528 in the loop and then set SP to the proper location. If the
7529 amount remaining is > 4096, we have to do one more probe if we
7530 are not saving any registers. */
7532 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7533 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7534 rtx ptr = gen_rtx_REG (DImode, 22);
      rtx count = gen_rtx_REG (DImode, 23);
      rtx seq;
7538 emit_move_insn (count, GEN_INT (blocks));
7539 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7540 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7542 /* Because of the difficulty in emitting a new basic block this
7543 late in the compilation, generate the loop as a single insn. */
7544 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
      if (leftover > 4096 && sa_size == 0)
	{
	  rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
	  MEM_VOLATILE_P (last) = 1;
	  emit_move_insn (last, const0_rtx);
	}
      if (TARGET_ABI_WINDOWS_NT)
	{
	  /* For NT stack unwind (done by 'reverse execution'), it's
	     not OK to take the result of a loop, even though the value
	     is already in ptr, so we reload it via a single operation
	     and subtract it from sp.

	     Yes, that's correct -- we have to reload the whole constant
	     into a temporary via ldah+lda then subtract from sp.  */
7563 HOST_WIDE_INT lo, hi;
7564 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7565 hi = frame_size - lo;
7567 emit_move_insn (ptr, GEN_INT (hi));
7568 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
	  seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
				       ptr));
	}
      else
	seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
				     GEN_INT (-leftover)));
      /* This alternative is special, because the DWARF code cannot
	 possibly intuit through the loop above.  So we invent this
	 note for it to look at instead.  */
      RTX_FRAME_RELATED_P (seq) = 1;
      REG_NOTES (seq)
	= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			     gen_rtx_SET (VOIDmode, stack_pointer_rtx,
					  gen_rtx_PLUS (Pmode, stack_pointer_rtx,
							GEN_INT (TARGET_ABI_UNICOSMK
								 ? -frame_size + 64
								 : -frame_size))),
			     REG_NOTES (seq));
    }
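  /* For illustration (not part of the original source): the lo/hi split
     used above sign-extends the low 16 bits so a 32-bit constant can be
     built with ldah+lda.  E.g. for 0x12345:
       lo == ((0x12345 & 0xffff) ^ 0x8000) - 0x8000 == 0x2345
       hi == 0x12345 - 0x2345 == 0x10000	(ldah 1, lda 0x2345)
     while for 0x18000, where bit 15 is set:
       lo == (0x8000 ^ 0x8000) - 0x8000 == -0x8000
       hi == 0x18000 + 0x8000 == 0x20000	(ldah 2, lda -0x8000).  */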
  if (!TARGET_ABI_UNICOSMK)
    {
      HOST_WIDE_INT sa_bias = 0;

      /* Cope with very large offsets to the register save area.  */
      sa_reg = stack_pointer_rtx;
      if (reg_offset + sa_size > 0x8000)
	{
	  int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
	  rtx sa_bias_rtx;

	  if (low + sa_size <= 0x8000)
	    sa_bias = reg_offset - low, reg_offset = low;
	  else
	    sa_bias = reg_offset, reg_offset = 0;

	  sa_reg = gen_rtx_REG (DImode, 24);
	  sa_bias_rtx = GEN_INT (sa_bias);

	  if (add_operand (sa_bias_rtx, DImode))
	    emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
	  else
	    {
	      emit_move_insn (sa_reg, sa_bias_rtx);
	      emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
	    }
	}
7620 /* Save regs in stack order. Beginning with VMS PV. */
7621 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7622 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
      /* Save register RA next.  */
      if (imask & (1UL << REG_RA))
	{
	  emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
	  imask &= ~(1UL << REG_RA);
	  reg_offset += 8;
	}

      /* Now save any other registers required to be saved.  */
      for (i = 0; i < 31; i++)
	if (imask & (1UL << i))
	  {
	    emit_frame_store (i, sa_reg, sa_bias, reg_offset);
	    reg_offset += 8;
	  }

      for (i = 0; i < 31; i++)
	if (fmask & (1UL << i))
	  {
	    emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
	    reg_offset += 8;
	  }
    }
  else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
    {
      /* The standard frame on the T3E includes space for saving registers.
	 We just have to use it. We don't have to save the return address and
	 the old frame pointer here - they are saved in the DSIB.  */

      reg_offset = -56;
      for (i = 9; i < 15; i++)
	if (imask & (1UL << i))
	  {
	    emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
	    reg_offset -= 8;
	  }
      for (i = 2; i < 10; i++)
	if (fmask & (1UL << i))
	  {
	    emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
	    reg_offset -= 8;
	  }
    }
  if (TARGET_ABI_OPEN_VMS)
    {
      if (alpha_procedure_type == PT_REGISTER)
	/* Register frame procedures save the fp.
	   ??? Ought to have a dwarf2 save for this.  */
	emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
			hard_frame_pointer_rtx);

      if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
	emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
				    gen_rtx_REG (DImode, REG_PV)));

      if (alpha_procedure_type != PT_NULL
	  && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
	FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));

      /* If we have to allocate space for outgoing args, do it now.  */
      if (current_function_outgoing_args_size != 0)
	{
	  rtx seq
	    = emit_move_insn (stack_pointer_rtx,
			      plus_constant
			      (hard_frame_pointer_rtx,
			       - (ALPHA_ROUND
				  (current_function_outgoing_args_size))));

	  /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
	     if ! frame_pointer_needed. Setting the bit will change the CFA
	     computation rule to use sp again, which would be wrong if we had
	     frame_pointer_needed, as this means sp might move unpredictably
	     later on.

	     Also, note that
	       frame_pointer_needed
	       => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
	     and
	       current_function_outgoing_args_size != 0
	       => alpha_procedure_type != PT_NULL,

	     so when we are not setting the bit here, we are guaranteed to
	     have emitted an FRP frame pointer update just before.  */
	  RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
	}
    }
  else if (!TARGET_ABI_UNICOSMK)
    {
      /* If we need a frame pointer, set it from the stack pointer.  */
      if (frame_pointer_needed)
	{
	  if (TARGET_CAN_FAULT_IN_PROLOGUE)
	    FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
	  else
	    /* This must always be the last instruction in the
	       prologue, thus we emit a special move + clobber.  */
	    FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
					 stack_pointer_rtx, sa_reg)));
	}
    }
7727 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7728 the prologue, for exception handling reasons, we cannot do this for
7729 any insn that might fault. We could prevent this for mems with a
7730 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7731 have to prevent all such scheduling with a blockage.
7733 Linux, on the other hand, never bothered to implement OSF/1's
7734 exception handling, and so doesn't care about such things. Anyone
7735 planning to use dwarf2 frame-unwind info can also omit the blockage. */
  if (! TARGET_CAN_FAULT_IN_PROLOGUE)
    emit_insn (gen_blockage ());
}
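/* For illustration (not part of the original source): with
   frame_size == 20000 (<= 32768) the code above emits probes at sp-4096
   and sp-12288 and then, only if no registers are being saved, one last
   probe at sp-20000, before the single adjustment of sp itself.  */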
7741 /* Count the number of .file directives, so that .loc is up to date. */
7742 int num_source_filenames = 0;
7744 /* Output the textual info surrounding the prologue. */
void
alpha_start_function (FILE *file, const char *fnname,
		      tree decl ATTRIBUTE_UNUSED)
{
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  unsigned HOST_WIDE_INT frame_size;
  /* The maximum debuggable frame size (512 Kbytes using Tru64 as).  */
  unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
					  ? 524288
					  : 1UL << 31;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  char *entry_label = (char *) alloca (strlen (fnname) + 6);
  int i;

  /* Don't emit an extern directive for functions defined in the same file.  */
  if (TARGET_ABI_UNICOSMK)
    {
      tree name_tree;
      name_tree = get_identifier (fnname);
      TREE_ASM_WRITTEN (name_tree) = 1;
    }

  alpha_fnname = fnname;
7774 sa_size = alpha_sa_size ();
  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 8 : 0)
			      + frame_size
			      + current_function_pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 48 : 0))
		 + ALPHA_ROUND (frame_size
				+ current_function_outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
		  + sa_size
		  + ALPHA_ROUND (frame_size
				 + current_function_pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8;
  else
    reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7798 alpha_sa_mask (&imask, &fmask);
7800 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7801 We have to do that before the .ent directive as we cannot switch
7802 files within procedures with native ecoff because line numbers are
7803 linked to procedure descriptors.
7804 Outputting the lineno helps debugging of one line functions as they
7805 would otherwise get no line number at all. Please note that we would
7806 like to put out last_linenum from final.c, but it is not accessible. */
  if (write_symbols == SDB_DEBUG)
    {
#ifdef ASM_OUTPUT_SOURCE_FILENAME
      ASM_OUTPUT_SOURCE_FILENAME (file,
				  DECL_SOURCE_FILE (current_function_decl));
#endif
#ifdef SDB_OUTPUT_SOURCE_LINE
      if (debug_info_level != DINFO_LEVEL_TERSE)
	SDB_OUTPUT_SOURCE_LINE (file,
				DECL_SOURCE_LINE (current_function_decl));
#endif
    }
  /* Issue function start and label.  */
  if (TARGET_ABI_OPEN_VMS
      || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
    {
      fputs ("\t.ent ", file);
      assemble_name (file, fnname);
      putc ('\n', file);

      /* If the function needs GP, we'll write the "..ng" label there.
	 Otherwise, do it here.  */
      if (TARGET_ABI_OSF
	  && ! alpha_function_needs_gp
	  && ! current_function_is_thunk)
	{
	  putc ('$', file);
	  assemble_name (file, fnname);
	  fputs ("..ng:\n", file);
	}
    }
7841 strcpy (entry_label, fnname);
7842 if (TARGET_ABI_OPEN_VMS)
7843 strcat (entry_label, "..en");
7845 /* For public functions, the label must be globalized by appending an
7846 additional colon. */
7847 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7848 strcat (entry_label, ":");
7850 ASM_OUTPUT_LABEL (file, entry_label);
7851 inside_function = TRUE;
7853 if (TARGET_ABI_OPEN_VMS)
7854 fprintf (file, "\t.base $%d\n", vms_base_regno);
  if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
      && !flag_inhibit_size_directive)
    {
      /* Set flags in procedure descriptor to request IEEE-conformant
	 math-library routines.  The value we set it to is PDSC_EXC_IEEE
	 (/usr/include/pdsc.h).  */
      fputs ("\t.eflag 48\n", file);
    }
7865 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7866 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7867 alpha_arg_offset = -frame_size + 48;
7869 /* Describe our frame. If the frame size is larger than an integer,
7870 print it as zero to avoid an assembler error. We won't be
7871 properly describing such a frame, but that's the best we can do. */
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
	     HOST_WIDE_INT_PRINT_DEC "\n",
	     vms_unwind_regno,
	     frame_size >= (1UL << 31) ? 0 : frame_size,
	     reg_offset);
7880 else if (!flag_inhibit_size_directive)
7881 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7882 (frame_pointer_needed
7883 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7884 frame_size >= max_frame_size ? 0 : frame_size,
7885 current_function_pretend_args_size);
  /* Describe which registers were spilled.  */
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    {
      if (imask)
	/* ??? Does VMS care if mask contains ra?  The old code didn't
	   set it, so I don't here.  */
	fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
      if (fmask)
	fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
      if (alpha_procedure_type == PT_REGISTER)
	fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
    }
  else if (!flag_inhibit_size_directive)
    {
      if (imask)
	{
	  fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
		   frame_size >= max_frame_size ? 0 : reg_offset - frame_size);

	  for (i = 0; i < 32; ++i)
	    if (imask & (1UL << i))
	      reg_offset += 8;
	}

      if (fmask)
	fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
		 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
    }
7918 #if TARGET_ABI_OPEN_VMS
  /* Ifdef'ed because link_section is only available then.  */
7920 switch_to_section (readonly_data_section);
7921 fprintf (file, "\t.align 3\n");
7922 assemble_name (file, fnname); fputs ("..na:\n", file);
7923 fputs ("\t.ascii \"", file);
7924 assemble_name (file, fnname);
7925 fputs ("\\0\"\n", file);
7926 alpha_need_linkage (fnname, 1);
  switch_to_section (text_section);
#endif
}
/* Emit the .prologue note at the scheduled end of the prologue.  */

static void
alpha_output_function_end_prologue (FILE *file)
{
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    fputs ("\t.prologue\n", file);
  else if (TARGET_ABI_WINDOWS_NT)
    fputs ("\t.prologue 0\n", file);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.prologue %d\n",
	     alpha_function_needs_gp || current_function_is_thunk);
}
7947 /* Write function epilogue. */
/* ??? At some point we will want to support full unwind, and so will
   need to mark the epilogue as well.  At the moment, we just confuse
   dwarf2out.  */
#undef FRP
#define FRP(exp) exp
void
alpha_expand_epilogue (void)
{
  /* Registers to save.  */
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  int fp_is_frame_pointer, fp_offset;
  rtx sa_reg, sa_reg_exp = NULL;
  rtx sp_adj1, sp_adj2, mem;
  rtx eh_ofs;
  int i;
7973 sa_size = alpha_sa_size ();
  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 8 : 0)
			      + frame_size
			      + current_function_pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 48 : 0))
		 + ALPHA_ROUND (frame_size
				+ current_function_outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
		  + sa_size
		  + ALPHA_ROUND (frame_size
				 + current_function_pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    {
      if (alpha_procedure_type == PT_STACK)
	reg_offset = 8;
      else
	reg_offset = 0;
    }
  else
    reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
  alpha_sa_mask (&imask, &fmask);

  fp_is_frame_pointer
    = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
       || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
  fp_offset = 0;
  sa_reg = stack_pointer_rtx;

  if (current_function_calls_eh_return)
    eh_ofs = EH_RETURN_STACKADJ_RTX;
  else
    eh_ofs = NULL_RTX;
  if (!TARGET_ABI_UNICOSMK && sa_size)
    {
      /* If we have a frame pointer, restore SP from it.  */
      if ((TARGET_ABI_OPEN_VMS
	   && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
	  || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
	FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));

      /* Cope with very large offsets to the register save area.  */
      if (reg_offset + sa_size > 0x8000)
	{
	  int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
	  HOST_WIDE_INT bias;

	  if (low + sa_size <= 0x8000)
	    bias = reg_offset - low, reg_offset = low;
	  else
	    bias = reg_offset, reg_offset = 0;

	  sa_reg = gen_rtx_REG (DImode, 22);
	  sa_reg_exp = plus_constant (stack_pointer_rtx, bias);

	  FRP (emit_move_insn (sa_reg, sa_reg_exp));
	}

      /* Restore registers in order, excepting a true frame pointer.  */

      mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
      if (! eh_ofs)
	set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));

      reg_offset += 8;
      imask &= ~(1UL << REG_RA);

      for (i = 0; i < 31; ++i)
	if (imask & (1UL << i))
	  {
	    if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
	      fp_offset = reg_offset;
	    else
	      {
		mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
		set_mem_alias_set (mem, alpha_sr_alias_set);
		FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
	      }
	    reg_offset += 8;
	  }

      for (i = 0; i < 31; ++i)
	if (fmask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
	    reg_offset += 8;
	  }
    }
  else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
    {
      /* Restore callee-saved general-purpose registers.  */

      reg_offset = -56;

      for (i = 9; i < 15; i++)
	if (imask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
						     reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
	    reg_offset -= 8;
	  }

      for (i = 2; i < 10; i++)
	if (fmask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
						     reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
	    reg_offset -= 8;
	  }

      /* Restore the return address from the DSIB.  */

      mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
    }
  if (frame_size || eh_ofs)
    {
      sp_adj1 = stack_pointer_rtx;

      if (eh_ofs)
	{
	  sp_adj1 = gen_rtx_REG (DImode, 23);
	  emit_move_insn (sp_adj1,
			  gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
	}

      /* If the stack size is large, begin computation into a temporary
	 register so as not to interfere with a potential fp restore,
	 which must be consecutive with an SP restore.  */
      if (frame_size < 32768
	  && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
	sp_adj2 = GEN_INT (frame_size);
      else if (TARGET_ABI_UNICOSMK)
	{
	  sp_adj1 = gen_rtx_REG (DImode, 23);
	  FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
	  sp_adj2 = const0_rtx;
	}
      else if (frame_size < 0x40007fffL)
	{
	  int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;

	  sp_adj2 = plus_constant (sp_adj1, frame_size - low);
	  if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
	    sp_adj1 = sa_reg;
	  else
	    {
	      sp_adj1 = gen_rtx_REG (DImode, 23);
	      FRP (emit_move_insn (sp_adj1, sp_adj2));
	    }
	  sp_adj2 = GEN_INT (low);
	}
      else
	{
	  rtx tmp = gen_rtx_REG (DImode, 23);
	  FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
					       3, false));
	  if (!sp_adj2)
	    {
	      /* We can't drop new things to memory this late, afaik,
		 so build it up by pieces.  */
	      FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
							-(frame_size < 0)));
	      gcc_assert (sp_adj2);
	    }
	}
      /* From now on, things must be in order.  So emit blockages.  */

      /* Restore the frame pointer.  */
      if (TARGET_ABI_UNICOSMK)
	{
	  emit_insn (gen_blockage ());
	  mem = gen_rtx_MEM (DImode,
			     plus_constant (hard_frame_pointer_rtx, -16));
	  set_mem_alias_set (mem, alpha_sr_alias_set);
	  FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
	}
      else if (fp_is_frame_pointer)
	{
	  emit_insn (gen_blockage ());
	  mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
	  set_mem_alias_set (mem, alpha_sr_alias_set);
	  FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
	}
      else if (TARGET_ABI_OPEN_VMS)
	{
	  emit_insn (gen_blockage ());
	  FRP (emit_move_insn (hard_frame_pointer_rtx,
			       gen_rtx_REG (DImode, vms_save_fp_regno)));
	}

      /* Restore the stack pointer.  */
      emit_insn (gen_blockage ());
      if (sp_adj2 == const0_rtx)
	FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
      else
	FRP (emit_move_insn (stack_pointer_rtx,
			     gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
    }
  else
    {
      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
	{
	  emit_insn (gen_blockage ());
	  FRP (emit_move_insn (hard_frame_pointer_rtx,
			       gen_rtx_REG (DImode, vms_save_fp_regno)));
	}
      else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
	{
	  /* Decrement the frame pointer if the function does not have a
	     frame.  */

	  emit_insn (gen_blockage ());
	  FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
				      hard_frame_pointer_rtx, constm1_rtx)));
	}
    }
}
8211 /* Output the rest of the textual info surrounding the epilogue. */
void
alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
{
  rtx insn;

  /* We output a nop after noreturn calls at the very end of the function to
     ensure that the return address always remains in the caller's code range,
     as not doing so might confuse unwinding engines.  */
  insn = get_last_insn ();
  if (!INSN_P (insn))
    insn = prev_active_insn (insn);
  if (GET_CODE (insn) == CALL_INSN)
    output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
#if TARGET_ABI_OPEN_VMS
  alpha_write_linkage (file, fnname, decl);
#endif

  /* End the function.  */
  if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
    {
      fputs ("\t.end ", file);
      assemble_name (file, fnname);
      putc ('\n', file);
    }
  inside_function = FALSE;

  /* Output jump tables and the static subroutine information block.  */
  if (TARGET_ABI_UNICOSMK)
    {
      unicosmk_output_ssib (file, fnname);
      unicosmk_output_deferred_case_vectors (file);
    }
}
#if TARGET_ABI_OSF
/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8251 In order to avoid the hordes of differences between generated code
8252 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8253 lots of code loading up large constants, generate rtl and emit it
8254 instead of going straight to text.
8256 Not sure why this idea hasn't been explored before... */
static void
alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			   HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			   tree function)
{
  HOST_WIDE_INT hi, lo;
  rtx this, insn, funexp;
8266 /* We always require a valid GP. */
8267 emit_insn (gen_prologue_ldgp ());
8268 emit_note (NOTE_INSN_PROLOGUE_END);
8270 /* Find the "this" pointer. If the function returns a structure,
8271 the structure return pointer is in $16. */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this = gen_rtx_REG (Pmode, 17);
  else
    this = gen_rtx_REG (Pmode, 16);

  /* Add DELTA.  When possible we use ldah+lda.  Otherwise load the
     entire constant for the add.  */
  lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
  hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  if (hi + lo == delta)
    {
      if (hi)
	emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
      if (lo)
	emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
    }
  else
    {
      rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
					   delta, -(delta < 0));
      emit_insn (gen_adddi3 (this, this, tmp));
    }
  /* Add a delta stored in the vtable at VCALL_OFFSET.  */
  if (vcall_offset)
    {
      rtx tmp, tmp2;

      tmp = gen_rtx_REG (Pmode, 0);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));

      lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
      hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
      if (hi + lo == vcall_offset)
	{
	  if (hi)
	    emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
	}
      else
	{
	  tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
					    vcall_offset, -(vcall_offset < 0));
	  emit_insn (gen_adddi3 (tmp, tmp, tmp2));
	  lo = 0;
	}
      if (lo)
	tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
      else
	tmp2 = tmp;
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));

      emit_insn (gen_adddi3 (this, this, tmp));
    }
  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }

  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
  SIBLING_CALL_P (insn) = 1;
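  /* For illustration (not part of the original source): for a small
     positive DELTA and no VCALL_OFFSET, the emitted thunk reduces to
     roughly

	ldgp $29,0($27)
	lda $16,delta($16)
	br $31,function

     i.e. the incoming this pointer is adjusted in place and control
     transfers to the real target as a sibcall.  */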
8337 /* Run just enough of rest_of_compilation to get the insns emitted.
8338 There's not really enough bulk here to make other passes such as
8339 instruction scheduling worth while. Note that use_thunk calls
8340 assemble_start_function and assemble_end_function. */
8341 insn = get_insns ();
8342 insn_locators_alloc ();
8343 shorten_branches (insn);
8344 final_start_function (insn, file, 1);
8345 final (insn, file, 1);
  final_end_function ();
}
#endif /* TARGET_ABI_OSF */
8350 /* Debugging support. */
/* Count the number of sdb related labels that are generated (to find block
   start and end boundaries).  */
8357 int sdb_label_count = 0;
8359 /* Name of the file containing the current function. */
8361 static const char *current_function_file = "";
8363 /* Offsets to alpha virtual arg/local debugging pointers. */
8365 long alpha_arg_offset;
8366 long alpha_auto_offset;
8368 /* Emit a new filename to a stream. */
void
alpha_output_filename (FILE *stream, const char *name)
{
  static int first_time = TRUE;

  if (first_time)
    {
      first_time = FALSE;
      ++num_source_filenames;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      fprintf (stream, "\n");
      if (!TARGET_GAS && write_symbols == DBX_DEBUG)
	fprintf (stream, "\t#@stabs\n");
    }

  else if (write_symbols == DBX_DEBUG)
    /* dbxout.c will emit an appropriate .stabs directive.  */
    ;

  else if (name != current_function_file
	   && strcmp (name, current_function_file) != 0)
    {
      if (inside_function && ! TARGET_GAS)
	fprintf (stream, "\t#.file\t%d ", num_source_filenames);
      else
	{
	  ++num_source_filenames;
	  current_function_file = name;
	  fprintf (stream, "\t.file\t%d ", num_source_filenames);
	}

      output_quoted_string (stream, name);
      fprintf (stream, "\n");
    }
}
8408 /* Structure to show the current status of registers and memory. */
struct shadow_summary
{
  struct {
    unsigned int i     : 31;	/* Mask of int regs */
    unsigned int fp    : 31;	/* Mask of fp regs */
    unsigned int mem   : 1;	/* mem == imem | fpmem */
  } used, defd;
};
/* Summarize the effects of expression X on the machine.  Update SUM, a
   pointer to the summary structure.  SET is nonzero if the insn is setting
   the object, otherwise zero.  */

static void
summarize_insn (rtx x, struct shadow_summary *sum, int set)
{
  const char *format_ptr;
  int i, j;

  if (x == 0)
    return;

  switch (GET_CODE (x))
    {
      /* ??? Note that this case would be incorrect if the Alpha had a
	 ZERO_EXTRACT in SET_DEST.  */
    case SET:
      summarize_insn (SET_SRC (x), sum, 0);
      summarize_insn (SET_DEST (x), sum, 1);
      break;

    case CLOBBER:
      summarize_insn (XEXP (x, 0), sum, 1);
      break;

    case USE:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
	summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
      break;

    case PARALLEL:
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	summarize_insn (XVECEXP (x, 0, i), sum, 0);
      break;

    case SUBREG:
      summarize_insn (SUBREG_REG (x), sum, 0);
      break;

    case REG:
      {
	int regno = REGNO (x);
	unsigned long mask = ((unsigned long) 1) << (regno % 32);

	if (regno == 31 || regno == 63)
	  break;

	if (set)
	  {
	    if (regno < 32)
	      sum->defd.i |= mask;
	    else
	      sum->defd.fp |= mask;
	  }
	else
	  {
	    if (regno < 32)
	      sum->used.i |= mask;
	    else
	      sum->used.fp |= mask;
	  }
      }
      break;

    case MEM:
      if (set)
	sum->defd.mem = 1;
      else
	sum->used.mem = 1;

      /* Find the regs used in memory address computation:  */
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case CONST_INT:   case CONST_DOUBLE:
    case SYMBOL_REF:  case LABEL_REF:     case CONST:
    case SCRATCH:     case ASM_INPUT:
      break;

      /* Handle common unary and binary ops for efficiency.  */
    case COMPARE:  case PLUS:    case MINUS:   case MULT:      case DIV:
    case MOD:      case UDIV:    case UMOD:    case AND:       case IOR:
    case XOR:      case ASHIFT:  case ROTATE:  case ASHIFTRT:  case LSHIFTRT:
    case ROTATERT: case SMIN:    case SMAX:    case UMIN:      case UMAX:
    case NE:       case EQ:      case GE:      case GT:        case LE:
    case LT:       case GEU:     case GTU:     case LEU:       case LTU:
      summarize_insn (XEXP (x, 0), sum, 0);
      summarize_insn (XEXP (x, 1), sum, 0);
      break;

    case NEG:  case NOT:  case SIGN_EXTEND:  case ZERO_EXTEND:
    case TRUNCATE:  case FLOAT_EXTEND:  case FLOAT_TRUNCATE:  case FLOAT:
    case FIX:  case UNSIGNED_FLOAT:  case UNSIGNED_FIX:  case ABS:
    case SQRT:  case FFS:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    default:
      format_ptr = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
	switch (format_ptr[i])
	  {
	  case 'e':
	    summarize_insn (XEXP (x, i), sum, 0);
	    break;

	  case 'E':
	    for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	      summarize_insn (XVECEXP (x, i, j), sum, 0);
	    break;

	  case 'i':
	    break;

	  default:
	    gcc_unreachable ();
	  }
    }
}
/* Ensure a sufficient number of `trapb' insns are in the code when
   the user requests code with a trap precision of functions or
   instructions.

   In naive mode, when the user requests a trap-precision of
   "instruction", a trapb is needed after every instruction that may
   generate a trap.  This ensures that the code is resumption safe but
   it is also slow.

   When optimizations are turned on, we delay issuing a trapb as long
   as possible.  In this context, a trap shadow is the sequence of
   instructions that starts with a (potentially) trap generating
   instruction and extends to the next trapb or call_pal instruction
   (but GCC never generates call_pal by itself).  We can delay (and
   therefore sometimes omit) a trapb subject to the following
   conditions:

   (a) On entry to the trap shadow, if any Alpha register or memory
   location contains a value that is used as an operand value by some
   instruction in the trap shadow (live on entry), then no instruction
   in the trap shadow may modify the register or memory location.

   (b) Within the trap shadow, the computation of the base register
   for a memory load or store instruction may not involve using the
   result of an instruction that might generate an UNPREDICTABLE
   result.

   (c) Within the trap shadow, no register may be used more than once
   as a destination register.  (This is to make life easier for the
   trap-handler.)

   (d) The trap shadow may not include any branch instructions.  */
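/* For illustration (not part of the original source): under
   -mtrap-precision=i, a sequence such as

	addt/sud $f1,$f2,$f3	# may trap
	mult/sud $f3,$f4,$f3	# $f3 reused as a destination

   violates condition (c), so a trapb must be emitted between the two
   instructions rather than delayed past the second one.  */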
static void
alpha_handle_trap_shadows (void)
{
  struct shadow_summary shadow;
  int trap_pending, exception_nesting;
  rtx i, n;

  trap_pending = 0;
  exception_nesting = 0;

  shadow.used.i = 0;
  shadow.used.fp = 0;
  shadow.used.mem = 0;
  shadow.defd = shadow.used;

  for (i = get_insns (); i ; i = NEXT_INSN (i))
    {
      if (GET_CODE (i) == NOTE)
	{
	  switch (NOTE_KIND (i))
	    {
	    case NOTE_INSN_EH_REGION_BEG:
	      exception_nesting++;
	      if (trap_pending)
		goto close_shadow;
	      break;

	    case NOTE_INSN_EH_REGION_END:
	      exception_nesting--;
	      if (trap_pending)
		goto close_shadow;
	      break;

	    case NOTE_INSN_EPILOGUE_BEG:
	      if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
		goto close_shadow;
	      break;
	    }
	}
      else if (trap_pending)
	{
	  if (alpha_tp == ALPHA_TP_FUNC)
	    {
	      if (GET_CODE (i) == JUMP_INSN
		  && GET_CODE (PATTERN (i)) == RETURN)
		goto close_shadow;
	    }
	  else if (alpha_tp == ALPHA_TP_INSN)
	    {
	      if (optimize > 0)
		{
		  struct shadow_summary sum;

		  sum.used.i = 0;
		  sum.used.fp = 0;
		  sum.used.mem = 0;
		  sum.defd = sum.used;

		  switch (GET_CODE (i))
		    {
		    case INSN:
		      /* Annoyingly, get_attr_trap will die on these.  */
		      if (GET_CODE (PATTERN (i)) == USE
			  || GET_CODE (PATTERN (i)) == CLOBBER)
			break;

		      summarize_insn (PATTERN (i), &sum, 0);

		      if ((sum.defd.i & shadow.defd.i)
			  || (sum.defd.fp & shadow.defd.fp))
			{
			  /* (c) would be violated.  */
			  goto close_shadow;
			}

		      /* Combine shadow with summary of current insn:  */
		      shadow.used.i |= sum.used.i;
		      shadow.used.fp |= sum.used.fp;
		      shadow.used.mem |= sum.used.mem;
		      shadow.defd.i |= sum.defd.i;
		      shadow.defd.fp |= sum.defd.fp;
		      shadow.defd.mem |= sum.defd.mem;

		      if ((sum.defd.i & shadow.used.i)
			  || (sum.defd.fp & shadow.used.fp)
			  || (sum.defd.mem & shadow.used.mem))
			{
			  /* (a) would be violated (also takes care of (b)).  */
			  gcc_assert (get_attr_trap (i) != TRAP_YES
				      || (!(sum.defd.i & sum.used.i)
					  && !(sum.defd.fp & sum.used.fp)));

			  goto close_shadow;
			}
		      break;

		    case JUMP_INSN:
		    case CALL_INSN:
		    case CODE_LABEL:
		      goto close_shadow;

		    default:
		      gcc_unreachable ();
		    }
		}
	      else
		{
		close_shadow:
		  n = emit_insn_before (gen_trapb (), i);
		  PUT_MODE (n, TImode);
		  PUT_MODE (i, TImode);
		  trap_pending = 0;
		  shadow.used.i = 0;
		  shadow.used.fp = 0;
		  shadow.used.mem = 0;
		  shadow.defd = shadow.used;
		}
	    }
	}

      if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
	  && GET_CODE (i) == INSN
	  && GET_CODE (PATTERN (i)) != USE
	  && GET_CODE (PATTERN (i)) != CLOBBER
	  && get_attr_trap (i) == TRAP_YES)
	{
	  if (optimize && !trap_pending)
	    summarize_insn (PATTERN (i), &shadow, 0);
	  trap_pending = 1;
	}
    }
}
8709 /* Alpha can only issue instruction groups simultaneously if they are
8710 suitably aligned. This is very processor-specific. */
8711 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8712 that are marked "fake". These instructions do not exist on that target,
8713 but it is possible to see these insns with deranged combinations of
8714 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8715 choose a result at random. */
enum alphaev4_pipe {
  EV4_STOP = 0,
  EV4_IB0 = 1,
  EV4_IB1 = 2,
  EV4_IBX = 4
};

enum alphaev5_pipe {
  EV5_STOP = 0,
  EV5_NONE = 1,
  EV5_E01 = 2,
  EV5_E0 = 4,
  EV5_E1 = 8,
  EV5_FAM = 16,
  EV5_FA = 32,
  EV5_FM = 64
};
static enum alphaev4_pipe
alphaev4_insn_pipe (rtx insn)
{
  if (recog_memoized (insn) < 0)
    return EV4_STOP;
  if (get_attr_length (insn) != 4)
    return EV4_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_LDSYM:
    case TYPE_FLD:
    case TYPE_LD_L:
      return EV4_IBX;

    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMOV:
    case TYPE_ICMP:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_FBR:
    case TYPE_MVI:		/* fake */
      return EV4_IB0;

    case TYPE_IST:
    case TYPE_MISC:
    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
    case TYPE_FCPYS:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FMUL:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FSQRT:		/* fake */
    case TYPE_FTOI:		/* fake */
    case TYPE_ITOF:		/* fake */
      return EV4_IB1;

    default:
      gcc_unreachable ();
    }
}
static enum alphaev5_pipe
alphaev5_insn_pipe (rtx insn)
{
  if (recog_memoized (insn) < 0)
    return EV5_STOP;
  if (get_attr_length (insn) != 4)
    return EV5_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_FLD:
    case TYPE_LDSYM:
    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMOV:
    case TYPE_ICMP:
      return EV5_E01;

    case TYPE_IST:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_MISC:
    case TYPE_MVI:
    case TYPE_LD_L:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FTOI:		/* fake */
    case TYPE_ITOF:		/* fake */
      return EV5_E0;

    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
      return EV5_E1;

    case TYPE_FCPYS:
      return EV5_FAM;

    case TYPE_FBR:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FSQRT:		/* fake */
      return EV5_FA;

    case TYPE_FMUL:
      return EV5_FM;

    default:
      gcc_unreachable ();
    }
}
8839 /* IN_USE is a mask of the slots currently filled within the insn group.
8840 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8841 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8843 LEN is, of course, the length of the group in bytes. */
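/* For illustration (not part of the original source): an EV4_IBX insn
   (e.g. a load) that has filled IB0 can still make room for a later
   EV4_IB0 insn, because the hardware may swap the load into IB1; a
   second EV4_IB1 insn, by contrast, ends the group.  */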
static rtx
alphaev4_next_group (rtx insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev4_pipe pipe;

      pipe = alphaev4_insn_pipe (insn);
      switch (pipe)
	{
	case EV4_STOP:
	  /* Force complex instructions to start new groups.  */
	  if (in_use)
	    goto done;

	  /* If this is a completely unrecognized insn, it's an asm.
	     We don't know how long it is, so record length as -1 to
	     signal a needed realignment.  */
	  if (recog_memoized (insn) < 0)
	    len = -1;
	  else
	    len = get_attr_length (insn);
	  goto next_and_done;

	case EV4_IBX:
	  if (in_use & EV4_IB0)
	    {
	      if (in_use & EV4_IB1)
		goto done;
	      in_use |= EV4_IB1;
	    }
	  else
	    in_use |= EV4_IB0 | EV4_IBX;
	  break;

	case EV4_IB0:
	  if (in_use & EV4_IB0)
	    {
	      if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
		goto done;
	      in_use |= EV4_IB1;
	    }
	  in_use |= EV4_IB0;
	  break;

	case EV4_IB1:
	  if (in_use & EV4_IB1)
	    goto done;
	  in_use |= EV4_IB1;
	  break;

	default:
	  gcc_unreachable ();
	}
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      if (GET_CODE (insn) == JUMP_INSN)
	goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
	goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
	goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
	goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}
8937 /* IN_USE is a mask of the slots currently filled within the insn group.
8938 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8939 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8941 LEN is, of course, the length of the group in bytes. */
8944 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8951 || GET_CODE (PATTERN (insn)) == CLOBBER
8952 || GET_CODE (PATTERN (insn)) == USE)
8957 enum alphaev5_pipe pipe;
8959 pipe = alphaev5_insn_pipe (insn);
8963 /* Force complex instructions to start new groups. */
8967 /* If this is a completely unrecognized insn, it's an asm.
8968 We don't know how long it is, so record length as -1 to
8969 signal a needed realignment. */
8970 if (recog_memoized (insn) < 0)
8973 len = get_attr_length (insn);
8976 /* ??? Most of the places below, we would like to assert never
8977 happen, as it would indicate an error either in Haifa, or
8978 in the scheduling description. Unfortunately, Haifa never
8979 schedules the last instruction of the BB, so we don't have
8980 an accurate TI bit to go off. */
8982 if (in_use & EV5_E0)
8984 if (in_use & EV5_E1)
8989 in_use |= EV5_E0 | EV5_E01;
8993 if (in_use & EV5_E0)
8995 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9003 if (in_use & EV5_E1)
9009 if (in_use & EV5_FA)
9011 if (in_use & EV5_FM)
9016 in_use |= EV5_FA | EV5_FAM;
9020 if (in_use & EV5_FA)
9026 if (in_use & EV5_FM)
9039 /* Haifa doesn't do well scheduling branches. */
9040 /* ??? If this is predicted not-taken, slotting continues, except
9041 that no more IBR, FBR, or JSR insns may be slotted. */
9042 if (GET_CODE (insn) == JUMP_INSN)
9046 insn = next_nonnote_insn (insn);
9048 if (!insn || ! INSN_P (insn))
9051 /* Let Haifa tell us where it thinks insn group boundaries are. */
9052 if (GET_MODE (insn) == TImode)
9055 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9060 insn = next_nonnote_insn (insn);
static rtx
alphaev4_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV4_IB0))
    {
      in_use |= EV4_IB0;
      nop = gen_nop ();
    }
  else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
    {
      in_use |= EV4_IB1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV4_IB1))
    {
      in_use |= EV4_IB1;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}

static rtx
alphaev5_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV5_E1))
    {
      in_use |= EV5_E1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FA))
    {
      in_use |= EV5_FA;
      nop = gen_fnop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FM))
    {
      in_use |= EV5_FM;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}
9124 /* The instruction group alignment main loop. */
static void
alpha_align_insns (unsigned int max_align,
		   rtx (*next_group) (rtx, int *, int *),
		   rtx (*next_nop) (int *))
{
  /* ALIGN is the known alignment for the insn group.  */
  unsigned int align;
  /* OFS is the offset of the current insn in the insn group.  */
  int ofs;
  int prev_in_use, in_use, len, ldgp;
  rtx i, next;

  /* Let shorten branches care for assigning alignments to code labels.  */
  shorten_branches (get_insns ());

  if (align_functions < 4)
    align = 4;
  else if ((unsigned int) align_functions < max_align)
    align = align_functions;
  else
    align = max_align;

  ofs = prev_in_use = 0;
  i = get_insns ();
  if (GET_CODE (i) == NOTE)
    i = next_nonnote_insn (i);

  ldgp = alpha_function_needs_gp ? 8 : 0;

  while (i)
    {
      next = (*next_group) (i, &in_use, &len);
      /* When we see a label, resync alignment etc.  */
      if (GET_CODE (i) == CODE_LABEL)
	{
	  unsigned int new_align = 1 << label_to_alignment (i);

	  if (new_align >= align)
	    {
	      align = new_align < max_align ? new_align : max_align;
	      ofs = 0;
	    }

	  else if (ofs & (new_align-1))
	    ofs = (ofs | (new_align-1)) + 1;
	  gcc_assert (!len);
	}

      /* Handle complex instructions special.  */
      else if (in_use == 0)
	{
	  /* Asms will have length < 0.  This is a signal that we have
	     lost alignment knowledge.  Assume, however, that the asm
	     will not mis-align instructions.  */
	  if (len < 0)
	    {
	      ofs = 0;
	      align = 4;
	      len = 0;
	    }
	}

      /* If the known alignment is smaller than the recognized insn group,
	 realign the output.  */
      else if ((int) align < len)
	{
	  unsigned int new_log_align = len > 8 ? 4 : 3;
	  rtx prev, where;

	  where = prev = prev_nonnote_insn (i);
	  if (!where || GET_CODE (where) != CODE_LABEL)
	    where = i;

	  /* Can't realign between a call and its gp reload.  */
	  if (! (TARGET_EXPLICIT_RELOCS
		 && prev && GET_CODE (prev) == CALL_INSN))
	    {
	      emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
	      align = 1 << new_log_align;
	      ofs = 0;
	    }
	}

      /* We may not insert padding inside the initial ldgp sequence.  */
      else if (ldgp > 0)
	ldgp -= len;

      /* If the group won't fit in the same INT16 as the previous,
	 we need to add padding to keep the group together.  Rather
	 than simply leaving the insn filling to the assembler, we
	 can make use of the knowledge of what sorts of instructions
	 were issued in the previous group to make sure that all of
	 the added nops are really free.  */
      else if (ofs + len > (int) align)
	{
	  int nop_count = (align - ofs) / 4;
	  rtx where;

	  /* Insert nops before labels, branches, and calls to truly merge
	     the execution of the nops with the previous instruction group.  */
	  where = prev_nonnote_insn (i);
	  if (where)
	    {
	      if (GET_CODE (where) == CODE_LABEL)
		{
		  rtx where2 = prev_nonnote_insn (where);
		  if (where2 && GET_CODE (where2) == JUMP_INSN)
		    where = where2;
		}
	      else if (GET_CODE (where) == INSN)
		where = i;
	    }
	  else
	    where = i;

	  do
	    emit_insn_before ((*next_nop)(&prev_in_use), where);
	  while (--nop_count);
	  ofs = 0;
	}

      ofs = (ofs + len) & (align - 1);
      prev_in_use = in_use;
      i = next;
    }
}
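/* For illustration (not part of the original source): with align == 16,
   ofs == 12 and an incoming group of len == 8, ofs + len == 20 overflows
   the 16-byte window, so nop_count == (16 - 12) / 4 == 1 free nop is
   inserted and the group starts a fresh window; afterwards
   ofs == (0 + 8) & 15 == 8.  */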
/* Machine dependent reorg pass.  */

static void
alpha_reorg (void)
{
  if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
    alpha_handle_trap_shadows ();

  /* Due to the number of extra trapb insns, don't bother fixing up
     alignment when trap precision is instruction.  Moreover, we can
     only do our job when sched2 is run.  */
  if (optimize && !optimize_size
      && alpha_tp != ALPHA_TP_INSN
      && flag_schedule_insns_after_reload)
    {
      if (alpha_tune == PROCESSOR_EV4)
	alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
      else if (alpha_tune == PROCESSOR_EV5)
	alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
    }
}
9276 #if !TARGET_ABI_UNICOSMK
static void
alpha_file_start (void)
{
#ifdef OBJECT_FORMAT_ELF
  /* If emitting dwarf2 debug information, we cannot generate a .file
     directive to start the file, as it will conflict with dwarf2out
     file numbers.  So it's only useful when emitting mdebug output.  */
  targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
#endif

  default_file_start ();
#ifdef MS_STAMP
  fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
#endif

  fputs ("\t.set noreorder\n", asm_out_file);
  fputs ("\t.set volatile\n", asm_out_file);
  if (!TARGET_ABI_OPEN_VMS)
    fputs ("\t.set noat\n", asm_out_file);
  if (TARGET_EXPLICIT_RELOCS)
    fputs ("\t.set nomacro\n", asm_out_file);
  if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
    {
      const char *arch;

      if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
	arch = "ev6";
      else if (TARGET_MAX)
	arch = "pca56";
      else if (TARGET_BWX)
	arch = "ev56";
      else if (alpha_cpu == PROCESSOR_EV5)
	arch = "ev5";
      else
	arch = "ev4";

      fprintf (asm_out_file, "\t.arch %s\n", arch);
    }
}
#endif
#ifdef OBJECT_FORMAT_ELF
/* Since we don't have a .dynbss section, we should not allow global
   relocations in the .rodata section.  */

static int
alpha_elf_reloc_rw_mask (void)
{
  return flag_pic ? 3 : 2;
}

/* Return a section for X.  The only special thing we do here is to
   honor small data.  */

static section *
alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
			      unsigned HOST_WIDE_INT align)
{
  if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
    /* ??? Consider using mergeable sdata sections.  */
    return sdata_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}

static unsigned int
alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = 0;

  if (strcmp (name, ".sdata") == 0
      || strncmp (name, ".sdata.", 7) == 0
      || strncmp (name, ".gnu.linkonce.s.", 16) == 0
      || strcmp (name, ".sbss") == 0
      || strncmp (name, ".sbss.", 6) == 0
      || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
    flags = SECTION_SMALL;

  flags |= default_section_type_flags (decl, name, reloc);
  return flags;
}
#endif /* OBJECT_FORMAT_ELF */
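/* For illustration (not part of the original source): with -msmall-data
   and g_switch_value == 8, an 8-byte constant pool entry is placed in
   .sdata and can be reached with a single 16-bit $gp-relative access,
   e.g. "ldq $1,sym($29)", instead of a two-insn ldah/ldq sequence.  */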
9365 /* Structure to collect function names for final output in link section. */
9366 /* Note that items marked with GTY can't be ifdef'ed out. */
9368 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9369 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
struct alpha_links GTY(())
{
  int num;
  rtx linkage;
  enum links_kind lkind;
  enum reloc_kind rkind;
};

struct alpha_funcs GTY(())
{
  int num;
  splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
    links;
};
9386 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9387 splay_tree alpha_links_tree;
9388 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9389 splay_tree alpha_funcs_tree;
9391 static GTY(()) int alpha_funcs_num;
9393 #if TARGET_ABI_OPEN_VMS
/* Return the VMS argument type corresponding to MODE.  */

enum avms_arg_type
alpha_arg_type (enum machine_mode mode)
{
  switch (mode)
    {
    case SFmode:
      return TARGET_FLOAT_VAX ? FF : FS;
    case DFmode:
      return TARGET_FLOAT_VAX ? FD : FT;
    default:
      return I64;
    }
}

/* Return an rtx for an integer representing the VMS Argument Information
   register value.  */

rtx
alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
{
  unsigned HOST_WIDE_INT regval = cum.num_args;
  int i;

  for (i = 0; i < 6; i++)
    regval |= ((int) cum.atypes[i]) << (i * 3 + 8);

  return GEN_INT (regval);
}
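/* For illustration (not part of the original source): the AI register
   value packs the argument count in the low bits and one 3-bit type
   code per argument starting at bit 8, so a call f(double) with IEEE
   floats yields regval == 1 | (FT << 8); the VMS callee inspects these
   codes to tell which arguments arrived in FP registers.  */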
/* Make (or fake) .linkage entry for function call.

   IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.

   Return a SYMBOL_REF rtx for the linkage.  */

rtx
alpha_need_linkage (const char *name, int is_local)
{
  splay_tree_node node;
  struct alpha_links *al;

  if (name[0] == '*')
    name++;

  if (is_local)
    {
      struct alpha_funcs *cfaf;

      if (!alpha_funcs_tree)
	alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
					       splay_tree_compare_pointers);

      cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));

      cfaf->links = 0;
      cfaf->num = ++alpha_funcs_num;

      splay_tree_insert (alpha_funcs_tree,
			 (splay_tree_key) current_function_decl,
			 (splay_tree_value) cfaf);
    }

  if (alpha_links_tree)
    {
      /* Is this name already defined?  */

      node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
      if (node)
	{
	  al = (struct alpha_links *) node->value;
	  if (is_local)
	    {
	      /* Defined here but external assumed.  */
	      if (al->lkind == KIND_EXTERN)
		al->lkind = KIND_LOCAL;
	    }
	  else
	    {
	      /* Used here but unused assumed.  */
	      if (al->lkind == KIND_UNUSED)
		al->lkind = KIND_LOCAL;
	    }
	  return al->linkage;
	}
    }
  else
    alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);

  al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
  name = ggc_strdup (name);

  /* Assume external if no definition.  */
  al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);

  /* Ensure we have an IDENTIFIER so assemble_name can mark it used.  */
  get_identifier (name);

  /* Construct a SYMBOL_REF for us to call.  */
  {
    size_t name_len = strlen (name);
    char *linksym = alloca (name_len + 6);
    linksym[0] = '$';
    memcpy (linksym + 1, name, name_len);
    memcpy (linksym + 1 + name_len, "..lk", 5);
    al->linkage = gen_rtx_SYMBOL_REF (Pmode,
				      ggc_alloc_string (linksym, name_len + 5));
  }

  splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
		     (splay_tree_value) al);

  return al->linkage;
}
rtx
alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
{
  splay_tree_node cfunnode;
  struct alpha_funcs *cfaf;
  struct alpha_links *al;
  const char *name = XSTR (linkage, 0);

  cfaf = (struct alpha_funcs *) 0;
  al = (struct alpha_links *) 0;

  cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
  cfaf = (struct alpha_funcs *) cfunnode->value;

  if (cfaf->links)
    {
      splay_tree_node lnode;

      /* Is this name already defined?  */

      lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
      if (lnode)
	al = (struct alpha_links *) lnode->value;
    }
  else
    cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);

  if (!al)
    {
      size_t name_len;
      size_t buflen;
      char buf[512];
      char *linksym;
      splay_tree_node node = 0;
      struct alpha_links *anl;

      if (name[0] == '*')
	name++;

      name_len = strlen (name);

      al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
      al->num = cfaf->num;

      node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
      if (node)
	{
	  anl = (struct alpha_links *) node->value;
	  al->lkind = anl->lkind;
	}

      sprintf (buf, "$%d..%s..lk", cfaf->num, name);
      buflen = strlen (buf);
      linksym = alloca (buflen + 1);
      memcpy (linksym, buf, buflen + 1);

      al->linkage = gen_rtx_SYMBOL_REF
	(Pmode, ggc_alloc_string (linksym, buflen + 1));

      splay_tree_insert (cfaf->links, (splay_tree_key) name,
			 (splay_tree_value) al);
    }

  if (rflag)
    al->rkind = KIND_CODEADDR;
  else
    al->rkind = KIND_LINKAGE;

  if (lflag)
    return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
  else
    return al->linkage;
}
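/* For illustration (not part of the original source): when LFLAG is set
   the caller receives the second quadword of the linkage pair -- the
   code address -- as a MEM at linkage+8, matching the two .quad entries
   emitted per local symbol by alpha_write_one_linkage below.  */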
static int
alpha_write_one_linkage (splay_tree_node node, void *data)
{
  const char *const name = (const char *) node->key;
  struct alpha_links *link = (struct alpha_links *) node->value;
  FILE *stream = (FILE *) data;

  fprintf (stream, "$%d..%s..lk:\n", link->num, name);
  if (link->rkind == KIND_CODEADDR)
    {
      if (link->lkind == KIND_LOCAL)
	{
	  /* Local and used */
	  fprintf (stream, "\t.quad %s..en\n", name);
	}
      else
	{
	  /* External and used, request code address.  */
	  fprintf (stream, "\t.code_address %s\n", name);
	}
    }
  else
    {
      if (link->lkind == KIND_LOCAL)
	{
	  /* Local and used, build linkage pair.  */
	  fprintf (stream, "\t.quad %s..en\n", name);
	  fprintf (stream, "\t.quad %s\n", name);
	}
      else
	{
	  /* External and used, request linkage pair.  */
	  fprintf (stream, "\t.linkage %s\n", name);
	}
    }

  return 0;
}

void
alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
{
  splay_tree_node node;
  struct alpha_funcs *func;

  fprintf (stream, "\t.link\n");
  fprintf (stream, "\t.align 3\n");
  in_section = NULL;

  node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
  func = (struct alpha_funcs *) node->value;

  fputs ("\t.name ", stream);
  assemble_name (stream, funname);
  fputs ("..na\n", stream);
  ASM_OUTPUT_LABEL (stream, funname);
  fprintf (stream, "\t.pdesc ");
  assemble_name (stream, funname);
  fprintf (stream, "..en,%s\n",
	   alpha_procedure_type == PT_STACK ? "stack"
	   : alpha_procedure_type == PT_REGISTER ? "reg" : "null");

  if (func->links)
    {
      splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
      /* splay_tree_delete (func->links);  */
    }
}
9654 /* Given a decl, a section name, and whether the decl initializer
9655 has relocs, choose attributes for the section. */
9657 #define SECTION_VMS_OVERLAY SECTION_FORGET
9658 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9659 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
static unsigned int
vms_section_type_flags (tree decl, const char *name, int reloc)
{
9664 unsigned int flags = default_section_type_flags (decl, name, reloc);
9666 if (decl && DECL_ATTRIBUTES (decl)
9667 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9668 flags |= SECTION_VMS_OVERLAY;
9669 if (decl && DECL_ATTRIBUTES (decl)
9670 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9671 flags |= SECTION_VMS_GLOBAL;
9672 if (decl && DECL_ATTRIBUTES (decl)
9673 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
    flags |= SECTION_VMS_INITIALIZE;

  return flags;
}
9679 /* Switch to an arbitrary section NAME with attributes as specified
9680 by FLAGS. ALIGN specifies any known alignment requirements for
9681 the section; 0 if the default should be used. */
static void
vms_asm_named_section (const char *name, unsigned int flags,
		       tree decl ATTRIBUTE_UNUSED)
{
9687 fputc ('\n', asm_out_file);
9688 fprintf (asm_out_file, ".section\t%s", name);
9690 if (flags & SECTION_VMS_OVERLAY)
9691 fprintf (asm_out_file, ",OVR");
9692 if (flags & SECTION_VMS_GLOBAL)
9693 fprintf (asm_out_file, ",GBL");
9694 if (flags & SECTION_VMS_INITIALIZE)
9695 fprintf (asm_out_file, ",NOMOD");
9696 if (flags & SECTION_DEBUG)
9697 fprintf (asm_out_file, ",NOWRT");
  fputc ('\n', asm_out_file);
}
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   Differs from default_ctors_section_asm_out_constructor in that the
   width of the .ctors entry is always 64 bits, rather than the 32 bits
   used by a normal pointer.  */

static void
vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (ctors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

static void
vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (dtors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}
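#if 0
/* Illustrative only (hypothetical function): a constructor such as
   this one is recorded through vms_asm_out_constructor above, which
   emits a full 64-bit .quad of its symbol into .ctors instead of the
   32-bit entry the generic callback would produce.  */
static void __attribute__ ((constructor))
init_me (void)
{
}
#endif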
#else

rtx
alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
		    int is_local ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

rtx
alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
		   tree cfundecl ATTRIBUTE_UNUSED,
		   int lflag ATTRIBUTE_UNUSED,
		   int rflag ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

#endif /* TARGET_ABI_OPEN_VMS */
#if TARGET_ABI_UNICOSMK

/* This evaluates to true if we do not know how to pass TYPE solely in
   registers.  This is the case for all arguments that do not fit in two
   registers.  */

static bool
unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
{
  if (type == NULL)
    return false;

  if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    return true;
  if (TREE_ADDRESSABLE (type))
    return true;

  return ALPHA_ARG_SIZE (mode, type, 0) > 2;
}
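#if 0
/* Illustrative only: this 24-byte aggregate spans three 8-byte
   argument words, so ALPHA_ARG_SIZE (...) > 2 holds and the hook
   above forces it onto the stack.  */
struct three_words { long a, b, c; };
#endif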
/* Define the offset between two registers, one to be eliminated, and the
   other its replacement, at the start of a routine.  */

int
unicosmk_initial_elimination_offset (int from, int to)
{
  int fixed_size;

  fixed_size = alpha_sa_size ();
  if (fixed_size != 0)
    fixed_size += 48;

  if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return -fixed_size;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return 0;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return (ALPHA_ROUND (current_function_outgoing_args_size)
	    + ALPHA_ROUND (get_frame_size ()));
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return (ALPHA_ROUND (fixed_size)
	    + ALPHA_ROUND (get_frame_size ()
			   + current_function_outgoing_args_size));
  else
    gcc_unreachable ();
}
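#if 0
/* Worked example (illustrative, not part of the build; the sizes and
   the ROUND16 stand-in for ALPHA_ROUND are hypothetical).  Eliminating
   the arg pointer to the stack pointer, as in the last case above,
   rounds each component up to its alignment boundary separately.  */
#include <stdio.h>

#define ROUND16(x) (((x) + 15) & ~15)   /* stand-in for ALPHA_ROUND */

int
main (void)
{
  int fixed_size = 48 + 16;   /* save area plus the 48-byte adjustment */
  int frame = 36;             /* get_frame_size () */
  int outgoing = 16;          /* outgoing argument area */

  /* ROUND16 (64) + ROUND16 (52) == 64 + 64; prints 128.  */
  printf ("%d\n", ROUND16 (fixed_size) + ROUND16 (frame + outgoing));
  return 0;
}
#endif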
/* Output the module name for .ident and .end directives.  We have to strip
   the directory part and make sure that the module name starts with a
   letter or '$'.  */

static void
unicosmk_output_module_name (FILE *file)
{
  const char *name = lbasename (main_input_filename);
  unsigned len = strlen (name);
  char *clean_name = alloca (len + 2);
  char *ptr = clean_name;

  /* CAM only accepts module names that start with a letter or '$'.  We
     prefix the module name with a '$' if necessary.  */

  if (!ISALPHA (*name))
    *ptr++ = '$';
  memcpy (ptr, name, len + 1);
  clean_symbol_name (clean_name);
  fputs (clean_name, file);
}
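#if 0
/* Illustrative sketch (plain libc, not part of the build) of the
   renaming above: the hypothetical basename "99foo.c" does not start
   with a letter, so the module name becomes "$99foo.c" before
   clean_symbol_name replaces the characters CAM cannot digest.  */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  const char *name = "99foo.c";
  char buf[64];
  char *ptr = buf;

  if (!isalpha ((unsigned char) *name))
    *ptr++ = '$';
  strcpy (ptr, name);
  printf ("%s\n", buf);   /* prints "$99foo.c" */
  return 0;
}
#endif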
/* Output the definition of a common variable.  */

void
unicosmk_output_common (FILE *file, const char *name, int size, int align)
{
  tree name_tree;
  printf ("T3E__: common %s\n", name);

  in_section = NULL;
  fputs ("\t.endp\n\n\t.psect ", file);
  assemble_name (file, name);
  fprintf (file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
  fprintf (file, "\t.byte\t0:%d\n", size);

  /* Mark the symbol as defined in this module.  */
  name_tree = get_identifier (name);
  TREE_ASM_WRITTEN (name_tree) = 1;
}
#define SECTION_PUBLIC SECTION_MACH_DEP
#define SECTION_MAIN (SECTION_PUBLIC << 1)
static int current_section_align;

/* A get_unnamed_section callback for switching to the text section.  */

static void
unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  static int count = 0;
  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
}

/* A get_unnamed_section callback for switching to the data section.  */

static void
unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  static int count = 1;
  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
}
/* Implement TARGET_ASM_INIT_SECTIONS.

   The Cray assembler is really weird with respect to sections.  It has only
   named sections and you can't reopen a section once it has been closed.
   This means that we have to generate unique names whenever we want to
   reenter the text or the data section.  */

static void
unicosmk_init_sections (void)
{
  text_section = get_unnamed_section (SECTION_CODE,
				      unicosmk_output_text_section_asm_op,
				      NULL);
  data_section = get_unnamed_section (SECTION_WRITE,
				      unicosmk_output_data_section_asm_op,
				      NULL);
  readonly_data_section = data_section;
}
static unsigned int
unicosmk_section_type_flags (tree decl, const char *name,
			     int reloc ATTRIBUTE_UNUSED)
{
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  if (!decl)
    return flags;

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      if (align_functions_log > current_section_align)
	current_section_align = align_functions_log;

      if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
	flags |= SECTION_MAIN;
    }
  else
    current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);

  if (TREE_PUBLIC (decl))
    flags |= SECTION_PUBLIC;

  return flags;
}
/* Generate a section name for decl and associate it with the
   declaration.  */

static void
unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;
  int len;

  gcc_assert (decl);

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = default_strip_name_encoding (name);
  len = strlen (name);

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      char *string;

      /* It is essential that we prefix the section name here because
	 otherwise the section names generated for constructors and
	 destructors confuse collect2.  */

      string = alloca (len + 6);
      sprintf (string, "code@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
  else if (TREE_PUBLIC (decl))
    DECL_SECTION_NAME (decl) = build_string (len, name);
  else
    {
      char *string;

      string = alloca (len + 6);
      sprintf (string, "data@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
}
/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
unicosmk_asm_named_section (const char *name, unsigned int flags,
			    tree decl ATTRIBUTE_UNUSED)
{
  const char *kind;

  /* Close the previous section.  */

  fputs ("\t.endp\n\n", asm_out_file);

  /* Find out what kind of section we are opening.  */

  if (flags & SECTION_MAIN)
    fputs ("\t.start\tmain\n", asm_out_file);

  if (flags & SECTION_CODE)
    kind = "code";
  else if (flags & SECTION_PUBLIC)
    kind = "common";
  else
    kind = "data";

  if (current_section_align != 0)
    fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
	     current_section_align, kind);
  else
    fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
}
static void
unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
{
  if (DECL_P (decl)
      && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
    unicosmk_unique_section (decl, 0);
}
/* Output an alignment directive.  We have to use the macro 'gcc@code@align'
   in code sections because .align fills unused space with zeroes.  */

void
unicosmk_output_align (FILE *file, int align)
{
  if (inside_function)
    fprintf (file, "\tgcc@code@align\t%d\n", align);
  else
    fprintf (file, "\t.align\t%d\n", align);
}
/* Add a case vector to the current function's list of deferred case
   vectors.  Case vectors have to be put into a separate section because CAM
   does not allow data definitions in code sections.  */

void
unicosmk_defer_case_vector (rtx lab, rtx vec)
{
  struct machine_function *machine = cfun->machine;

  vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
  machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
					  machine->addr_list);
}
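#if 0
/* Illustrative only (hypothetical function): a dense switch like this
   is normally compiled to a jump table.  Because CAM forbids data in
   code psects, the table is queued by the function above and emitted
   into the data section later.  */
int
classify (int i)
{
  switch (i)
    {
    case 0: return 10;
    case 1: return 11;
    case 2: return 12;
    case 3: return 13;
    default: return -1;
    }
}
#endif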
/* Output a case vector.  */

static void
unicosmk_output_addr_vec (FILE *file, rtx vec)
{
  rtx lab = XEXP (vec, 0);
  rtx body = XEXP (vec, 1);
  int vlen = XVECLEN (body, 0);
  int idx;

  (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));

  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_VEC_ELT
        (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
    }
}
/* Output current function's deferred case vectors.  */

static void
unicosmk_output_deferred_case_vectors (FILE *file)
{
  struct machine_function *machine = cfun->machine;
  rtx t;

  if (machine->addr_list == NULL_RTX)
    return;

  switch_to_section (data_section);
  for (t = machine->addr_list; t; t = XEXP (t, 1))
    unicosmk_output_addr_vec (file, XEXP (t, 0));
}
/* Generate the name of the SSIB section for the current function.  */

#define SSIB_PREFIX "__SSIB_"
#define SSIB_PREFIX_LEN 7

static const char *
unicosmk_ssib_name (void)
{
  /* This is ok since CAM won't be able to deal with names longer than that
     anyway.  */

  static char name[256];

  rtx x;
  const char *fnname;
  int len;

  x = DECL_RTL (cfun->decl);
  gcc_assert (GET_CODE (x) == MEM);
  x = XEXP (x, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF);
  fnname = XSTR (x, 0);

  len = strlen (fnname);
  if (len + SSIB_PREFIX_LEN > 255)
    len = 255 - SSIB_PREFIX_LEN;

  strcpy (name, SSIB_PREFIX);
  strncpy (name + SSIB_PREFIX_LEN, fnname, len);
  name[len + SSIB_PREFIX_LEN] = 0;

  return name;
}
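#if 0
/* Illustrative sketch (plain libc, not part of the build; the function
   name is hypothetical): the SSIB section name is the function name
   behind a "__SSIB_" prefix, truncated so the whole string stays below
   CAM's 256-character limit.  */
#include <stdio.h>
#include <string.h>

int
main (void)
{
  static char name[256];
  const char *fnname = "compute_checksum";
  size_t len = strlen (fnname);

  if (len + 7 > 255)
    len = 255 - 7;
  snprintf (name, sizeof name, "__SSIB_%.*s", (int) len, fnname);
  printf ("%s\n", name);   /* prints "__SSIB_compute_checksum" */
  return 0;
}
#endif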
/* Set up the dynamic subprogram information block (DSIB) and update the
   frame pointer register ($15) for subroutines which have a frame.  If the
   subroutine doesn't have a frame, simply increment $15.  */

static void
unicosmk_gen_dsib (unsigned long *imaskP)
{
  if (alpha_procedure_type == PT_STACK)
    {
      const char *ssib_name;
      rtx mem;

      /* Allocate 64 bytes for the DSIB.  */

      FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
				  GEN_INT (-64))));
      emit_insn (gen_blockage ());

      /* Save the return address.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
      (*imaskP) &= ~(1UL << REG_RA);

      /* Save the old frame pointer.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
      (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);

      emit_insn (gen_blockage ());

      /* Store the SSIB pointer.  */

      ssib_name = ggc_strdup (unicosmk_ssib_name ());
      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
      set_mem_alias_set (mem, alpha_sr_alias_set);

      FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
			   gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));

      /* Save the CIW index.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));

      emit_insn (gen_blockage ());

      /* Set the new frame pointer.  */
      FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
				  stack_pointer_rtx, GEN_INT (64))));
    }
  else
    {
      /* Increment the frame pointer register to indicate that we do not
	 have a frame.  */
      FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
				  hard_frame_pointer_rtx, const1_rtx)));
    }
}
/* Output the static subroutine information block for the current
   function.  */

static void
unicosmk_output_ssib (FILE *file, const char *fnname)
{
  int len;
  int i;
  rtx x;
  rtx ciw;
  struct machine_function *machine = cfun->machine;

  in_section = NULL;
  fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
	   unicosmk_ssib_name ());

  /* Some required stuff and the function name length.  */

  len = strlen (fnname);
  fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);

  /* Saved registers
     ??? We don't do that yet.  */

  fputs ("\t.quad\t0\n", file);

  /* Function address.  */

  fputs ("\t.quad\t", file);
  assemble_name (file, fnname);
  putc ('\n', file);

  fputs ("\t.quad\t0\n", file);
  fputs ("\t.quad\t0\n", file);

  /* Function name.
     ??? We do it the same way Cray CC does it but this could be
     simplified.  */

  for (i = 0; i < len; i++)
    fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
  if ((len % 8) == 0)
    fputs ("\t.quad\t0\n", file);
  else
    fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);

  /* All call information words used in the function.  */

  for (x = machine->first_ciw; x; x = XEXP (x, 1))
    {
      ciw = XEXP (x, 0);
#if HOST_BITS_PER_WIDE_INT == 32
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
	       CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
#else
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
#endif
    }
}
/* Add a call information word (CIW) to the list of the current function's
   CIWs and return its index.

   X is a CONST_INT or CONST_DOUBLE representing the CIW.  */

rtx
unicosmk_add_call_info_word (rtx x)
{
  rtx node;
  struct machine_function *machine = cfun->machine;

  node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
  if (machine->first_ciw == NULL_RTX)
    machine->first_ciw = node;
  else
    XEXP (machine->last_ciw, 1) = node;

  machine->last_ciw = node;
  ++machine->ciw_count;

  return GEN_INT (machine->ciw_count
		  + strlen (current_function_name ())/8 + 5);
}
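#if 0
/* Worked example (illustrative, not part of the build; the function
   name is hypothetical): for a function named "example7" (8 chars)
   receiving its first CIW, the returned index is 1 + 8/8 + 5 == 7,
   i.e. the CIW occupies the seventh quadword slot of the SSIB layout
   emitted above.  */
#include <stdio.h>
#include <string.h>

int
main (void)
{
  int ciw_count = 1;
  const char *fnname = "example7";
  printf ("%d\n", ciw_count + (int) strlen (fnname) / 8 + 5);  /* prints 7 */
  return 0;
}
#endif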
/* The Cray assembler doesn't accept extern declarations for symbols which
   are defined in the same file.  We have to keep track of all global
   symbols which are referenced and/or defined in a source file and output
   extern declarations for those which are referenced but not defined at
   the end of file.  */

/* List of identifiers for which an extern declaration might have to be
   emitted.  */
/* FIXME: needs to use GC, so it can be saved and restored for PCH.  */

struct unicosmk_extern_list
{
  struct unicosmk_extern_list *next;
  const char *name;
};

static struct unicosmk_extern_list *unicosmk_extern_head = 0;
/* Output extern declarations which are required for every asm file.  */

static void
unicosmk_output_default_externs (FILE *file)
{
  static const char *const externs[] =
    { "__T3E_MISMATCH" };
  int i;
  int n;

  n = ARRAY_SIZE (externs);

  for (i = 0; i < n; i++)
    fprintf (file, "\t.extern\t%s\n", externs[i]);
}
/* Output extern declarations for global symbols which have been
   referenced but not defined.  */

static void
unicosmk_output_externs (FILE *file)
{
  struct unicosmk_extern_list *p;
  const char *real_name;
  int len;
  tree name_tree;

  len = strlen (user_label_prefix);
  for (p = unicosmk_extern_head; p != 0; p = p->next)
    {
      /* We have to strip the encoding and possibly remove user_label_prefix
	 from the identifier in order to handle -fleading-underscore and
	 explicit asm names correctly (cf. gcc.dg/asm-names-1.c).  */
      real_name = default_strip_name_encoding (p->name);
      if (len && p->name[0] == '*'
	  && !memcmp (real_name, user_label_prefix, len))
	real_name += len;

      name_tree = get_identifier (real_name);
      if (! TREE_ASM_WRITTEN (name_tree))
	{
	  TREE_ASM_WRITTEN (name_tree) = 1;
	  fputs ("\t.extern\t", file);
	  assemble_name (file, p->name);
	  putc ('\n', file);
	}
    }
}
/* Record an extern.  */

void
unicosmk_add_extern (const char *name)
{
  struct unicosmk_extern_list *p;

  p = (struct unicosmk_extern_list *)
       xmalloc (sizeof (struct unicosmk_extern_list));
  p->next = unicosmk_extern_head;
  p->name = name;
  unicosmk_extern_head = p;
}
/* The Cray assembler generates incorrect code if identifiers which
   conflict with register names are used as instruction operands.  We have
   to replace such identifiers with DEX expressions.  */

/* Structure to collect identifiers which have been replaced by DEX
   expressions.  */
/* FIXME: needs to use GC, so it can be saved and restored for PCH.  */

struct unicosmk_dex {
  struct unicosmk_dex *next;
  const char *name;
};

/* List of identifiers which have been replaced by DEX expressions.  The DEX
   number is determined by the position in the list.  */

static struct unicosmk_dex *unicosmk_dex_list = NULL;

/* The number of elements in the DEX list.  */

static int unicosmk_dex_count = 0;
/* Check if NAME must be replaced by a DEX expression.  */

static int
unicosmk_special_name (const char *name)
{
  if (name[0] == '*')
    ++name;

  if (name[0] == '$')
    ++name;

  if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
    return 0;

  switch (name[1])
    {
    case '1':  case '2':
      return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));

    case '3':
      return (name[2] == '\0'
	      || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));

    default:
      return (ISDIGIT (name[1]) && name[2] == '\0');
    }
}
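#if 0
/* Illustrative harness (hypothetical, not part of the build)
   exercising the predicate above: identifiers shaped like register
   names need DEX treatment, ordinary identifiers do not.  */
#include <assert.h>

int
main (void)
{
  assert (unicosmk_special_name ("r5"));     /* integer register name */
  assert (unicosmk_special_name ("f31"));    /* FP register name */
  assert (unicosmk_special_name ("R10"));    /* upper case also matches */
  assert (! unicosmk_special_name ("r32"));  /* out of register range */
  assert (! unicosmk_special_name ("foo"));  /* ordinary identifier */
  return 0;
}
#endif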
/* Return the DEX number if X must be replaced by a DEX expression and 0
   otherwise.  */

static int
unicosmk_need_dex (rtx x)
{
  struct unicosmk_dex *dex;
  const char *name;
  int i;

  if (GET_CODE (x) != SYMBOL_REF)
    return 0;

  name = XSTR (x, 0);
  if (! unicosmk_special_name (name))
    return 0;

  i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      if (! strcmp (name, dex->name))
	return i;
      --i;
    }

  dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
  dex->name = name;
  dex->next = unicosmk_dex_list;
  unicosmk_dex_list = dex;

  ++unicosmk_dex_count;
  return unicosmk_dex_count;
}
/* Output the DEX definitions for this file.  */

static void
unicosmk_output_dex (FILE *file)
{
  struct unicosmk_dex *dex;
  int i;

  if (unicosmk_dex_list == NULL)
    return;

  fprintf (file, "\t.dexstart\n");

  i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      fprintf (file, "\tDEX (%d) = ", i);
      assemble_name (file, dex->name);
      putc ('\n', file);
      --i;
    }

  fprintf (file, "\t.dexend\n");
}
/* Output text to appear at the beginning of an assembler file.  */

static void
unicosmk_file_start (void)
{
  int i;

  fputs ("\t.ident\t", asm_out_file);
  unicosmk_output_module_name (asm_out_file);
  fputs ("\n\n", asm_out_file);

  /* CAM uses different register names: rN for the integer register N and
     fN for the floating-point register N.  Instead of trying to use these
     in alpha.md, we emit micro definitions that make $N and $fN refer to
     the appropriate register.  */

  for (i = 0; i < 32; ++i)
    fprintf (asm_out_file, "$%d <- r%d\n", i, i);

  for (i = 0; i < 32; ++i)
    fprintf (asm_out_file, "$f%d <- f%d\n", i, i);

  putc ('\n', asm_out_file);

  /* The .align directive fills unused space with zeroes, which does not work
     in code sections.  We define the macro 'gcc@code@align' which uses nops
     instead.  Note that it assumes that code sections always have the
     biggest possible alignment since . refers to the current offset from
     the beginning of the section.  */

  fputs ("\t.macro gcc@code@align n\n", asm_out_file);
  fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
  fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
  fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
  fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
  fputs ("\tbis r31,r31,r31\n", asm_out_file);
  fputs ("\t.endr\n", asm_out_file);
  fputs ("\t.endif\n", asm_out_file);
  fputs ("\t.endm gcc@code@align\n\n", asm_out_file);

  /* Output extern declarations which should always be visible.  */
  unicosmk_output_default_externs (asm_out_file);

  /* Open a dummy section.  We always need to be inside a section for the
     section-switching code to work correctly.
     ??? This should be a module id or something like that.  I still have to
     figure out what the rules for those are.  */
  fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
}
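#if 0
/* Illustrative sketch (plain libc, not part of the build; offset and
   alignment are hypothetical) of the arithmetic the gcc@code@align
   macro performs: pad the current offset up to 1 << n bytes using
   4-byte nops (bis r31,r31,r31).  */
#include <stdio.h>

int
main (void)
{
  unsigned n = 4;          /* align to 1 << 4 == 16 bytes */
  unsigned offset = 36;    /* current offset within the psect */
  unsigned bytes = 1u << n;
  unsigned here = offset % bytes;

  if (here != 0)
    printf ("%u nops\n", (bytes - here) / 4);   /* prints "3 nops" */
  return 0;
}
#endif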
/* Output text to appear at the end of an assembler file.  This includes all
   pending extern declarations and DEX expressions.  */

static void
unicosmk_file_end (void)
{
  fputs ("\t.endp\n\n", asm_out_file);

  /* Output all pending externs.  */

  unicosmk_output_externs (asm_out_file);

  /* Output dex definitions used for functions whose names conflict with
     register names.  */

  unicosmk_output_dex (asm_out_file);

  fputs ("\t.end\t", asm_out_file);
  unicosmk_output_module_name (asm_out_file);
  putc ('\n', asm_out_file);
}
#else

static void
unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
{}

static void
unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
{}

static void
unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
		      const char * fnname ATTRIBUTE_UNUSED)
{}

rtx
unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

static int
unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
{
  return 0;
}

#endif /* TARGET_ABI_UNICOSMK */
static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_UNICOSMK)
    {
      /* Prevent gcc from generating calls to __divsi3.  */
      set_optab_libfunc (sdiv_optab, SImode, 0);
      set_optab_libfunc (udiv_optab, SImode, 0);

      /* Use the functions provided by the system library
	 for DImode integer division.  */
      set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
      set_optab_libfunc (udiv_optab, DImode, "$uldiv");
    }
  else if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
	 remainder.  */
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
    }
}
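#if 0
/* Illustrative only (hypothetical function): with the libfuncs above
   installed, a 64-bit division like this compiles to a call to the
   system routine "$sldiv" on Unicos/Mk (or "OTS$DIV_L" on VMS)
   instead of the default libgcc routine __divdi3.  */
long
quotient (long a, long b)
{
  return a / b;
}
#endif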
/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
#endif

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p

#if TARGET_ABI_UNICOSMK
# undef TARGET_INSERT_ATTRIBUTES
# define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
# undef TARGET_ASM_UNIQUE_SECTION
# define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
# undef TARGET_ASM_FUNCTION_RODATA_SECTION
# define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
# undef TARGET_ASM_GLOBALIZE_LABEL
# define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
# undef TARGET_MUST_PASS_IN_STACK
# define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
#endif
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif

#ifdef OBJECT_FORMAT_ELF
#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
#endif
#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#if TARGET_ABI_UNICOSMK
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START unicosmk_file_start
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END unicosmk_file_end
#else
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
#endif
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD alpha_secondary_reload

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

/* The Alpha architecture does not require sequential consistency.  See
   http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
   for an example of how it can be violated in practice.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION alpha_handle_option

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE alpha_mangle_type
#endif

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-alpha.h"