/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "obstack.h"
#include "except.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"
#include <splay-tree.h>
#include "cfglayout.h"
#include "tree-gimple.h"
#include "tree-flow.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"
/* Specify which cpu to schedule for.  */
enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */
enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};

/* Specify how accurate floating-point traps need to be.  */

enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */

enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */

enum alpha_fp_trap_mode alpha_fptm;
/* Save information from a "cmpxx" operation until the branch or scc is
   emitted.  */

struct alpha_compare alpha_compare;
/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */

static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */

int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */

static int alpha_function_needs_gp;

/* The alias set for prologue/epilogue register save/restore.  */

static GTY(()) int alpha_sr_alias_set;

/* The assembler name of the current function.  */

static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};
static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),		/* fp_add */
    COSTS_N_INSNS (6),		/* fp_mult */
    COSTS_N_INSNS (34),		/* fp_div_sf */
    COSTS_N_INSNS (63),		/* fp_div_df */
    COSTS_N_INSNS (23),		/* int_mult_si */
    COSTS_N_INSNS (23),		/* int_mult_di */
    COSTS_N_INSNS (2),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (97),		/* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (15),		/* fp_div_sf */
    COSTS_N_INSNS (22),		/* fp_div_df */
    COSTS_N_INSNS (8),		/* int_mult_si */
    COSTS_N_INSNS (12),		/* int_mult_di */
    COSTS_N_INSNS (1) + 1,	/* int_shift */
    COSTS_N_INSNS (1),		/* int_cmov */
    COSTS_N_INSNS (83),		/* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (12),		/* fp_div_sf */
    COSTS_N_INSNS (15),		/* fp_div_df */
    COSTS_N_INSNS (7),		/* int_mult_si */
    COSTS_N_INSNS (7),		/* int_mult_di */
    COSTS_N_INSNS (1),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (86),		/* int_div */
  },
};
/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one of them.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),		/* fp_add */
  COSTS_N_INSNS (1),		/* fp_mult */
  COSTS_N_INSNS (1),		/* fp_div_sf */
  COSTS_N_INSNS (1) + 1,	/* fp_div_df */
  COSTS_N_INSNS (1) + 1,	/* int_mult_si */
  COSTS_N_INSNS (1) + 2,	/* int_mult_di */
  COSTS_N_INSNS (1),		/* int_shift */
  COSTS_N_INSNS (1),		/* int_cmov */
  COSTS_N_INSNS (6),		/* int_div */
};
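/* COSTS_N_INSNS (N) expands to N * 4, so the "+ 1" and "+ 2"
   adjustments in the tables above bias a cost by a quarter or half
   of an instruction without rounding up to a full one; e.g.
   COSTS_N_INSNS (1) + 2 reads as "one and a half insns".  */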
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
#define NUM_ARGS current_function_args_info.num_args
#else
#define NUM_ARGS current_function_args_info
#endif

#define REG_PV 27
#define REG_RA 26
/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *, tree);
#endif

static void unicosmk_output_deferred_case_vectors (FILE *);
static void unicosmk_gen_dsib (unsigned long *);
static void unicosmk_output_ssib (FILE *, const char *);
static int unicosmk_need_dex (rtx);
/* Implement TARGET_HANDLE_OPTION.  */

static bool
alpha_handle_option (size_t code, const char *arg, int value)
{
  switch (code)
    {
    case OPT_mfp_regs:
      if (value == 0)
	target_flags |= MASK_SOFT_FP;
      break;

    case OPT_mieee:
    case OPT_mieee_with_inexact:
      target_flags |= MASK_IEEE_CONFORMANT;
      break;

    case OPT_mtls_size_:
      if (value != 16 && value != 32 && value != 64)
	error ("bad value %qs for -mtls-size switch", arg);
      break;
    }

  return true;
}
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
alpha_mangle_type (tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* Parse target option strings.  */

void
override_options (void)
{
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
  } cpu_table[] = {
    { "ev4",	PROCESSOR_EV4, 0 },
    { "ev45",	PROCESSOR_EV4, 0 },
    { "21064",	PROCESSOR_EV4, 0 },
    { "ev5",	PROCESSOR_EV5, 0 },
    { "21164",	PROCESSOR_EV5, 0 },
    { "ev56",	PROCESSOR_EV5, MASK_BWX },
    { "21164a",	PROCESSOR_EV5, MASK_BWX },
    { "pca56",	PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "ev6",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "21264",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "ev67",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
    { "21264a",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
    { 0, 0, 0 }
  };

  int i;
  /* Unicos/Mk doesn't have shared libraries.  */
  if (TARGET_ABI_UNICOSMK && flag_pic)
    {
      warning (0, "-f%s ignored for Unicos/Mk (not supported)",
	       (flag_pic > 1) ? "PIC" : "pic");
      flag_pic = 0;
    }

  /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
     floating-point instructions.  Make that the default for this target.  */
  if (TARGET_ABI_UNICOSMK)
    alpha_fprm = ALPHA_FPRM_DYN;
  else
    alpha_fprm = ALPHA_FPRM_NORM;

  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  /* We cannot use su and sui qualifiers for conversion instructions on
     Unicos/Mk.  I'm not sure if this is due to assembler or hardware
     limitations.  Right now, we issue a warning if -mieee is specified
     and then ignore it; eventually, we should either get it right or
     disable the option altogether.  */

  if (TARGET_IEEE)
    {
      if (TARGET_ABI_UNICOSMK)
	warning (0, "-mieee not supported on Unicos/Mk");
      else
	{
	  alpha_tp = ALPHA_TP_INSN;
	  alpha_fptm = ALPHA_FPTM_SU;
	}
    }

  if (TARGET_IEEE_WITH_INEXACT)
    {
      if (TARGET_ABI_UNICOSMK)
	warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
      else
	{
	  alpha_tp = ALPHA_TP_INSN;
	  alpha_fptm = ALPHA_FPTM_SUI;
	}
    }

  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
	alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
	alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
	alpha_tp = ALPHA_TP_INSN;
      else
	error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
    }
  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
	alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
	alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
	alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
	alpha_fprm = ALPHA_FPRM_DYN;
      else
	error ("bad value %qs for -mfp-rounding-mode switch",
	       alpha_fprm_string);
    }
  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
	alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
	alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
	alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
	alpha_fptm = ALPHA_FPTM_SUI;
      else
	error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
    }
  if (alpha_cpu_string)
    {
      for (i = 0; cpu_table [i].name; i++)
	if (! strcmp (alpha_cpu_string, cpu_table [i].name))
	  {
	    alpha_tune = alpha_cpu = cpu_table [i].processor;
	    target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
	    target_flags |= cpu_table [i].flags;
	    break;
	  }
      if (! cpu_table [i].name)
	error ("bad value %qs for -mcpu switch", alpha_cpu_string);
    }

  if (alpha_tune_string)
    {
      for (i = 0; cpu_table [i].name; i++)
	if (! strcmp (alpha_tune_string, cpu_table [i].name))
	  {
	    alpha_tune = cpu_table [i].processor;
	    break;
	  }
      if (! cpu_table [i].name)
	error ("bad value %qs for -mcpu switch", alpha_tune_string);
    }
  /* Do some sanity checks on the above options.  */

  if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
    {
      warning (0, "trap mode not supported on Unicos/Mk");
      alpha_fptm = ALPHA_FPTM_N;
    }

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
    {
      warning (0, "fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (alpha_cpu == PROCESSOR_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
	 arithmetic traps.  Which means we can do software completion
	 without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }

  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
	{
	  warning (0, "rounding mode not supported for VAX floats");
	  alpha_fprm = ALPHA_FPRM_NORM;
	}
      if (alpha_fptm == ALPHA_FPTM_SUI)
	{
	  warning (0, "trap mode not supported for VAX floats");
	  alpha_fptm = ALPHA_FPTM_SU;
	}
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
	warning (0, "128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }
  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
	&& (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
	     && ISDIGIT ((unsigned char)alpha_mlat_string[1])
	     && alpha_mlat_string[2] == '\0')
      {
	static int const cache_latency[][4] =
	{
	  { 3, 30, -1 },	/* ev4 -- Bcache is a guess */
	  { 2, 12, 38 },	/* ev5 -- Bcache from PC164 LMbench numbers */
	  { 3, 12, 30 },	/* ev6 -- Bcache from DS20 LMbench.  */
	};

	lat = alpha_mlat_string[1] - '0';
	if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
	  {
	    warning (0, "L%d cache latency unknown for %s",
		     lat, alpha_cpu_name[alpha_tune]);
	    lat = 3;
	  }
	else
	  lat = cache_latency[alpha_tune][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
	/* Most current memories have about 370ns latency.  This is
	   a reasonable guess for a fast cpu.  */
	lat = 150;
      }
    else
      {
	warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
	lat = 3;
      }

    alpha_memory_latency = lat;
  }
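  /* For example, given the table above, -mmemory-latency=L2 while
     tuning for ev5 sets alpha_memory_latency to 12 cycles, while the
     unknown L3 on ev4 (-1 in the table) warns and falls back to the
     default of 3.  */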
  /* Default the definition of "small data" to 8 bytes.  */
  if (!g_switch_set)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize and also if
     we are writing ECOFF symbols to work around a bug in DEC's assembler.  */
  if (optimize > 0 && write_symbols != SDB_DEBUG)
    {
      if (align_loops <= 0)
	align_loops = 16;
      if (align_jumps <= 0)
	align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;
  /* Acquire a unique set number for our register saves and restores.  */
  alpha_sr_alias_set = new_alias_set ();

  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
}
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
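/* For example, zap_mask accepts 0xffff0000ffff0000 and 0xff (every
   byte is 0x00 or 0xff) but rejects 0xff01, whose low byte is
   neither all zeros nor all ones.  */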
/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
    default:
      gcc_unreachable ();
    }
}
/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG
	  && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
	{
	  op = reg_equiv_memory_loc[REGNO (tmp)];
	  if (op == 0)
	    return 0;
	}
    }
  return op;
}
/* The set of scalar modes supported differs from the default
   check-what-c-supports version in that sometimes TFmode is available
   even when long double indicates only DFmode.  On unicosmk, we have
   the situation that HImode doesn't map to any C type, but of course
   we still support that.  */

static bool
alpha_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode: /* via optabs.c */
      return true;

    case SFmode:
    case DFmode:
      return true;

    case TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}
/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (enum machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}
/* Return 1 if this function can directly return via $26.  */

int
direct_return (void)
{
  return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
	  && reload_completed
	  && alpha_sa_size () == 0
	  && get_frame_size () == 0
	  && current_function_outgoing_args_size == 0
	  && current_function_pretend_args_size == 0);
}
/* Return the ADDR_VEC associated with a tablejump insn.  */

rtx
alpha_tablejump_addr_vec (rtx insn)
{
  rtx tmp;

  tmp = JUMP_LABEL (insn);
  if (!tmp)
    return NULL_RTX;
  tmp = NEXT_INSN (tmp);
  if (!tmp)
    return NULL_RTX;
  if (GET_CODE (tmp) == JUMP_INSN
      && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
    return PATTERN (tmp);
  return NULL_RTX;
}
/* Return the label of the predicted edge, or CONST0_RTX if we don't know.  */

rtx
alpha_tablejump_best_label (rtx insn)
{
  rtx jump_table = alpha_tablejump_addr_vec (insn);
  rtx best_label = NULL_RTX;

  /* ??? Once the CFG doesn't keep getting completely rebuilt, look
     there for edge frequency counts from profile data.  */

  if (jump_table)
    {
      int n_labels = XVECLEN (jump_table, 1);
      int best_count = -1;
      int i, j;

      for (i = 0; i < n_labels; i++)
	{
	  int count = 1;

	  for (j = i + 1; j < n_labels; j++)
	    if (XEXP (XVECEXP (jump_table, 1, i), 0)
		== XEXP (XVECEXP (jump_table, 1, j), 0))
	      count++;

	  if (count > best_count)
	    best_count = count, best_label = XVECEXP (jump_table, 1, i);
	}
    }

  return best_label ? best_label : const0_rtx;
}
/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return 0;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}
/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}
/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".sdata") == 0
	  || strcmp (section, ".sbss") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
	 in sdata because it might be too big when completed.  */
      if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
	return true;
    }

  return false;
}
#if TARGET_ABI_OPEN_VMS
static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname [symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF		\
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST		\
       && GET_CODE (XEXP (X, 0)) == PLUS \
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif
/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */

bool
alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x)
      && (strict
	  ? STRICT_REG_OK_FOR_BASE_P (x)
	  : NONSTRICT_REG_OK_FOR_BASE_P (x)))
    return true;

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (x))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	x = SUBREG_REG (x);

      if (REG_P (x))
	{
	  if (! strict
	      && NONSTRICT_REG_OK_FP_BASE_P (x)
	      && GET_CODE (ofs) == CONST_INT)
	    return true;
	  if ((strict
	       ? STRICT_REG_OK_FOR_BASE_P (x)
	       : NONSTRICT_REG_OK_FOR_BASE_P (x))
	      && CONSTANT_ADDRESS_P (ofs))
	    return true;
	}
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as
     are small data symbols.  */
  else if (TARGET_EXPLICIT_RELOCS)
    {
      if (small_symbolic_operand (x, Pmode))
	return true;

      if (GET_CODE (x) == LO_SUM)
	{
	  rtx ofs = XEXP (x, 1);
	  x = XEXP (x, 0);

	  /* Discard non-paradoxical subregs.  */
	  if (GET_CODE (x) == SUBREG
	      && (GET_MODE_SIZE (GET_MODE (x))
		  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	    x = SUBREG_REG (x);

	  /* Must have a valid base register.  */
	  if (! (REG_P (x)
		 && (strict
		     ? STRICT_REG_OK_FOR_BASE_P (x)
		     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
	    return false;

	  /* The symbol must be local.  */
	  if (local_symbolic_operand (ofs, Pmode)
	      || dtp32_symbolic_operand (ofs, Pmode)
	      || tp32_symbolic_operand (ofs, Pmode))
	    return true;
	}
    }

  return false;
}
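/* Examples of addresses accepted above (in the nonstrict case):
   (reg $16), (plus (reg $16) (const_int 64)), a bare small-data
   SYMBOL_REF under -msmall-data, (lo_sum (reg) (symbol_ref)) for a
   local symbol with explicit relocs, and for DImode the ldq_u form
   (and (plus (reg) (const_int 7)) (const_int -8)).  */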
/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

rtx
alpha_legitimize_address (rtx x, rtx scratch,
			  enum machine_mode mode ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
			       XEXP (XEXP (XEXP (x, 1), 0), 0),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }
  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.  */
  if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, insn, dest, seq;

      switch (tls_symbolic_operand_type (x))
	{
	case TLS_MODEL_NONE:
	  break;

	case TLS_MODEL_GLOBAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  dest = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
	  insn = gen_call_value_osf_tlsgd (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  CONST_OR_PURE_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  emit_libcall_block (insn, dest, r0, x);
	  return dest;

	case TLS_MODEL_LOCAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  scratch = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
	  insn = gen_call_value_osf_tlsldm (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  CONST_OR_PURE_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				UNSPEC_TLSLDM_CALL);
	  emit_libcall_block (insn, scratch, r0, eqv);

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);

	  if (alpha_tls_size == 64)
	    {
	      dest = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
	      emit_insn (gen_adddi3 (dest, dest, scratch));
	      return dest;
	    }
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, scratch, insn);
	      scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, scratch, eqv);

	case TLS_MODEL_INITIAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);
	  scratch = gen_reg_rtx (Pmode);
	  dest = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
	  emit_insn (gen_adddi3 (dest, tp, scratch));
	  return dest;

	case TLS_MODEL_LOCAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, tp, insn);
	      tp = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, tp, eqv);
	}

      if (local_symbolic_operand (x, Pmode))
	{
	  if (small_symbolic_operand (x, Pmode))
	    return x;
	  else
	    {
	      if (can_create_pseudo_p ())
		scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch,
				      gen_rtx_HIGH (Pmode, x)));
	      return gen_rtx_LO_SUM (Pmode, scratch, x);
	    }
	}
    }
  return NULL;

 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
			       (!can_create_pseudo_p () ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
			       (!can_create_pseudo_p () ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);

    return plus_constant (x, low);
  }
}
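/* In split_addend above, an addend of 0x12348000 yields low = -0x8000
   and high = 0x12350000: the high part is added first (a single ldah
   of 0x1235), and plus_constant folds the -0x8000 back into the final
   address, reconstructing 0x12348000 exactly.  */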
/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}
/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}
static int
some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  return small_symbolic_operand (x, Pmode) != 0;
}

static int
split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  if (small_symbolic_operand (x, Pmode))
    {
      x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
      *px = x;
      return -1;
    }

  return 0;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
  return x;
}
/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}
/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
				 enum machine_mode mode ATTRIBUTE_UNUSED,
				 int opnum, int type,
				 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
	return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
	 in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
				      GEN_INT (high)),
			GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      return x;
    }

  return NULL_RTX;
}
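/* For example, reloading (plus (reg $30) (const_int 0x123456)) above
   splits into high = 0x120000 and low = 0x3456: the high part is
   reloaded into a base register with a single ldah while the mem
   keeps the 16-bit displacement 0x3456.  */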
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (optimize_size)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_tune];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
	 nearly anywhere with no cost.  If it is a valid operand for an
	 ADD or AND, likewise return 0 if we know it will be used in that
	 context.  Otherwise, return 2 since it might be used there later.
	 All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
	{
	  *total = 0;
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	*total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
	       || (outer_code == AND && and_operand (x, VOIDmode)))
	*total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
	*total = 2;
      else
	*total = COSTS_N_INSNS (2);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
	/* Estimate of cost for call_pal rduniq.  */
	/* ??? How many insns do we emit here?  More than one...  */
	*total = COSTS_N_INSNS (15);
      else
	/* Otherwise we do a load from the GOT.  */
	*total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
      return true;

    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
	       && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
	{
	  *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
		    + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
	  return true;
	}
      return false;

    case MULT:
      if (float_mode_p)
	*total = cost_data->fp_mult;
      else if (mode == DImode)
	*total = cost_data->int_mult_di;
      else
	*total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) <= 3)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else
	*total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
	*total = cost_data->int_div;
      else if (mode == SFmode)
	*total = cost_data->fp_div_sf;
      else
	*total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    case FLOAT_EXTEND:
      if (GET_CODE (XEXP (x, 0)) == MEM)
	*total = 0;
      else
	*total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT disp, offset;

  gcc_assert (GET_CODE (ref) == MEM);

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  else
    disp = 0;

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
  if (MEM_ALIGN (ref) >= 32)
    offset = 0;
  else
    offset = disp & 3;

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  if (WORDS_BIG_ENDIAN)
    offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
  else
    offset *= 8;
  *pbitnum = GEN_INT (offset);
}
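/* For example, a QImode reference at byte offset 5 from an aligned
   base becomes an SImode reference to the word at offset 4 in
   *PALIGNED_MEM, with *PBITNUM = 8 on a little-endian target.  */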
/* Similar, but just get the address.  Handle the two reload cases.  */

rtx
get_unaligned_address (rtx ref)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  gcc_assert (GET_CODE (ref) == MEM);

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset);
}
/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
   X is always returned in a register.  */

rtx
get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
{
  if (GET_CODE (addr) == PLUS)
    {
      ofs += INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
}
/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of CLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class (rtx x, enum reg_class class)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return class;

  /* These sorts of constants we can easily drop to memory.  */
  if (GET_CODE (x) == CONST_INT
      || GET_CODE (x) == CONST_DOUBLE
      || GET_CODE (x) == CONST_VECTOR)
    {
      if (class == FLOAT_REGS)
	return NO_REGS;
      if (class == ALL_REGS)
	return GENERAL_REGS;
      return class;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (class == ALL_REGS ? GENERAL_REGS : class);

  return class;
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   CLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.  */

static enum reg_class
alpha_secondary_reload (bool in_p, rtx x, enum reg_class class,
			enum machine_mode mode, secondary_reload_info *sri)
{
  /* Loading and storing HImode or QImode values to and from memory
     usually requires a scratch register.  */
  if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
    {
      if (any_memory_operand (x, mode))
	{
	  if (in_p)
	    {
	      if (!aligned_memory_operand (x, mode))
		sri->icode = reload_in_optab[mode];
	    }
	  else
	    sri->icode = reload_out_optab[mode];
	  return NO_REGS;
	}
    }

  /* We also cannot do integral arithmetic into FP regs, as might result
     from register elimination into a DImode fp register.  */
  if (class == FLOAT_REGS)
    {
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
	return GENERAL_REGS;
      if (in_p && INTEGRAL_MODE_P (mode)
	  && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
	return GENERAL_REGS;
    }

  return NO_REGS;
}
/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static int
alpha_set_memflags_1 (rtx *xp, void *data)
{
  rtx x = *xp, orig = (rtx) data;

  if (GET_CODE (x) != MEM)
    return 0;

  MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
  MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  MEM_READONLY_P (x) = MEM_READONLY_P (orig);

  /* Sadly, we cannot use alias sets because the extra aliasing
     produced by the AND interferes.  Given that two-byte quantities
     are the only thing we would be able to differentiate anyway,
     there does not seem to be any point in convoluting the early
     out of the alias check.  */

  return -1;
}
/* Given INSN, which is an INSN list or the PATTERN of a single insn
   generated to perform a memory operation, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx insn, rtx ref)
{
  rtx *base_ptr;

  if (GET_CODE (ref) != MEM)
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_IN_STRUCT_P (ref)
      && !MEM_SCALAR_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  if (INSN_P (insn))
    base_ptr = &PATTERN (insn);
  else
    base_ptr = &insn;
  for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
}
static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
				 int, bool);

/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */
static rtx
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
			HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
  rtx temp, insn;

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */

  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
	 positive, we must adjust it to do two ldah insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
	{
	  extra = 0x4000;
	  tmp1 -= 0x40000000;
	  high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
	}

      if (c == low || (low == 0 && extra == 0))
	{
	  /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
	     but that meant that we can't handle INT_MIN on 32-bit machines
	     (like NT/Alpha), because we recurse indefinitely through
	     emit_move_insn to gen_movdi.  So instead, since we know exactly
	     what we want, create it explicitly.  */

	  if (no_output)
	    return pc_rtx;
	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
	  return target;
	}
      else if (n >= 2 + (extra != 0))
	{
	  if (no_output)
	    return pc_rtx;
	  if (!can_create_pseudo_p ())
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
	      temp = target;
	    }
	  else
	    temp = copy_to_suggested_reg (GEN_INT (high << 16),
					  subtarget, mode);

	  /* As of 2002-02-23, addsi3 is only available when not optimizing.
	     This means that if we go through expand_binop, we'll try to
	     generate extensions, etc, which will require new pseudos, which
	     will fail during some split phases.  The SImode add patterns
	     still exist, but are not named.  So build the insns by hand.  */

	  if (extra != 0)
	    {
	      if (! subtarget)
		subtarget = gen_reg_rtx (mode);
	      insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
	      insn = gen_rtx_SET (VOIDmode, subtarget, insn);
	      emit_insn (insn);
	      temp = subtarget;
	    }

	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	  insn = gen_rtx_SET (VOIDmode, target, insn);
	  emit_insn (insn);
	  return target;
	}
    }
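  /* For example, c = 0x12348000 is emitted as
       ldah  target,0x1235	# loads 0x12350000
       lda   target,-0x8000(target)
     while c = 0x7fff8000 takes the EXTRA path, since its high part
     0x8000 would sign-extend negative; it becomes two ldah's of
     0x4000 followed by the same lda.  */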
  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
    return 0;

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if minus some low bits, we've an easy load of
	 high bits.  */

      new = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new != 0)
	{
	  temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
	  if (temp)
	    {
	      if (no_output)
		return temp;
	      return expand_binop (mode, add_optab, temp, GEN_INT (new),
				   target, 0, OPTAB_WIDEN);
	    }
	}

      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_unop (mode, one_cmpl_optab, temp, target, 0);
	}

      /* Next try to form a constant and do a left shift.  We can do this
	 if some low-order bits are zero; the exact_log2 call below tells
	 us that information.  The bits we are shifting out could be any
	 value, but here we'll just try the 0- and sign-extended forms of
	 the constant.  To try to increase the chance of having the same
	 constant in more than one insn, start at the highest number of
	 bits to shift, but try all possibilities in case a ZAPNOT will
	 be useful.  */

      bits = exact_log2 (c & -c);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c >> bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp)
	      {
		new = (unsigned HOST_WIDE_INT)c >> bits;
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }

      /* Now try high-order zero bits.  Here we try the shifted-in bits as
	 all zero and all ones.  Be careful to avoid shifting outside the
	 mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
	 confuse the recursive call and set all of the high 32 bits.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp)
	      {
		new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
				     target, 1, OPTAB_WIDEN);
	      }
	  }

      /* Now try high-order 1 bits.  We get that with a sign-extension.
	 But one bit isn't enough here.  Be careful to avoid shifting outside
	 the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (~ c) - 2);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp)
	      {
		new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }
    }
#if HOST_BITS_PER_WIDE_INT == 64
  /* Finally, see if we can load a value into the target that is the same as
     the constant except that all bytes that are 0 are changed to be 0xff.
     If we can, then we can do a ZAPNOT to obtain the desired constant.  */

  new = c;
  for (i = 0; i < 64; i += 8)
    if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new != c)
    {
      temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
			       target, 0, OPTAB_WIDEN);
	}
    }
#endif

  return 0;
}
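/* In the ZAPNOT case above, c = 0x12000000000000ff first considers
   new = 0x12ffffffffffffff (zero bytes forced to 0xff); if that is
   cheaper to build, an AND with c | ~new = 0xff000000000000ff
   (a zap mask) clears the middle bytes to recover c.  */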
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */

static rtx
alpha_emit_set_const (rtx target, enum machine_mode mode,
		      HOST_WIDE_INT c, int n, bool no_output)
{
  enum machine_mode orig_mode = mode;
  rtx orig_target = target;
  rtx result = 0;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, and we
     can't load this constant in one insn, do this in DImode.  */
  if (!can_create_pseudo_p () && mode == SImode
      && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
    {
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
      if (result)
	return result;

      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
    {
      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
      if (result)
	{
	  rtx insn, set;

	  if (no_output)
	    return result;

	  insn = get_last_insn ();
	  set = single_set (insn);
	  if (! CONSTANT_P (SET_SRC (set)))
	    set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
	  break;
	}
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result)
    {
      if (result == target)
	result = orig_target;
      else if (mode != orig_mode)
	result = gen_lowpart (orig_mode, result);
    }

  return result;
}
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

static rtx
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word */
#if HOST_BITS_PER_WIDE_INT >= 64
  gcc_assert (c2 == -(c1 < 0));
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d4);
#else
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d2);
  d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
  c2 -= d3;
  d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c2 == d4);
#endif

  /* Construct the high word */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
	emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}
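/* For example, c = 0x1234567887654321 decomposes into d4 = 0x12340000,
   d3 = 0x5679, d2 = -0x789b0000 and d1 = 0x4321, and is rebuilt as
   (((d4 + d3) << 32) + d2) + d1: an ldah/lda pair, a shift by 32, and
   two more additions.  */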
/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
   its low and high words in *P0 and *P1.  */

static void
alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
{
  HOST_WIDE_INT i0, i1;

  if (GET_CODE (x) == CONST_VECTOR)
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);

  if (GET_CODE (x) == CONST_INT)
    {
      i0 = INTVAL (x);
      i1 = -(i0 < 0);
    }
  else if (HOST_BITS_PER_WIDE_INT >= 64)
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = -(i0 < 0);
    }
  else
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = CONST_DOUBLE_HIGH (x);
    }

  *p0 = i0;
  *p1 = i1;
}
/* Implement LEGITIMATE_CONSTANT_P.  This is all constants for which we
   are willing to load the value into a register via a move pattern.
   Normally this is all symbolic constants, integral constants that
   take three or fewer instructions, and floating-point zero.  */

bool
alpha_legitimate_constant_p (rtx x)
{
  enum machine_mode mode = GET_MODE (x);
  HOST_WIDE_INT i0, i1;

  switch (GET_CODE (x))
    {
    case CONST:
    case LABEL_REF:
    case HIGH:
      return true;

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      return SYMBOL_REF_TLS_MODEL (x) == 0;

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	return true;
      if (FLOAT_MODE_P (mode))
	return false;
      goto do_integer;

    case CONST_VECTOR:
      if (x == CONST0_RTX (mode))
	return true;
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
	return false;
      if (GET_MODE_SIZE (mode) != 8)
	return false;
      /* FALLTHRU */

    case CONST_INT:
    do_integer:
      if (TARGET_BUILD_CONSTANTS)
	return true;
      alpha_extract_integer (x, &i0, &i1);
      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
	return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
      return false;

    default:
      return false;
    }
}
/* Operand 1 is known to be a constant, and should require more than one
   instruction to load.  Emit that multi-part load.  */

bool
alpha_split_const_mov (enum machine_mode mode, rtx *operands)
{
  HOST_WIDE_INT i0, i1;
  rtx temp = NULL_RTX;

  alpha_extract_integer (operands[1], &i0, &i1);

  if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
    temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);

  if (!temp && TARGET_BUILD_CONSTANTS)
    temp = alpha_emit_set_long_const (operands[0], i0, i1);

  if (temp)
    {
      if (!rtx_equal_p (operands[0], temp))
	emit_move_insn (operands[0], temp);
      return true;
    }

  return false;
}
/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

bool
alpha_expand_mov (enum machine_mode mode, rtx *operands)
{
  /* If the output is not a register, the input must be.  */
  if (GET_CODE (operands[0]) == MEM
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Allow legitimize_address to perform some simplifications.  */
  if (mode == Pmode && symbolic_operand (operands[1], mode))
    {
      rtx tmp;

      tmp = alpha_legitimize_address (operands[1], operands[0], mode);
      if (tmp)
	{
	  if (tmp == operands[0])
	    return true;
	  operands[1] = tmp;
	  return false;
	}
    }

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
    return false;

  /* Split large integers.  */
  if (GET_CODE (operands[1]) == CONST_INT
      || GET_CODE (operands[1]) == CONST_DOUBLE
      || GET_CODE (operands[1]) == CONST_VECTOR)
    {
      if (alpha_split_const_mov (mode, operands))
	return true;
    }

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  operands[1] = force_const_mem (mode, operands[1]);
  if (reload_in_progress)
    {
      emit_move_insn (operands[0], XEXP (operands[1], 0));
      operands[1] = replace_equiv_address (operands[1], operands[0]);
    }
  else
    operands[1] = validize_mem (operands[1]);
  return false;
}
/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

bool
alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
{
  rtx seq;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0]))
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (any_memory_operand (operands[1], mode))
    {
      if (aligned_memory_operand (operands[1], mode))
	{
	  if (reload_in_progress)
	    {
	      if (mode == QImode)
		seq = gen_reload_inqi_aligned (operands[0], operands[1]);
	      else
		seq = gen_reload_inhi_aligned (operands[0], operands[1]);
	      emit_insn (seq);
	    }
	  else
	    {
	      rtx aligned_mem, bitnum;
	      rtx scratch = gen_reg_rtx (SImode);
	      rtx subtarget;
	      bool copyout;

	      get_aligned_mem (operands[1], &aligned_mem, &bitnum);

	      subtarget = operands[0];
	      if (GET_CODE (subtarget) == REG)
		subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	      else
		subtarget = gen_reg_rtx (DImode), copyout = true;

	      if (mode == QImode)
		seq = gen_aligned_loadqi (subtarget, aligned_mem,
					  bitnum, scratch);
	      else
		seq = gen_aligned_loadhi (subtarget, aligned_mem,
					  bitnum, scratch);
	      emit_insn (seq);

	      if (copyout)
		emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	    }
	}
      else
	{
	  /* Don't pass these as parameters since that makes the generated
	     code depend on parameter evaluation order which will cause
	     bootstrap failures.  */

	  rtx temp1, temp2, subtarget, ua;
	  bool copyout;

	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);

	  subtarget = operands[0];
	  if (GET_CODE (subtarget) == REG)
	    subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	  else
	    subtarget = gen_reg_rtx (DImode), copyout = true;

	  ua = get_unaligned_address (operands[1]);
	  if (mode == QImode)
	    seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
	  else
	    seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);

	  alpha_set_memflags (seq, operands[1]);
	  emit_insn (seq);

	  if (copyout)
	    emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	}
      return true;
    }

  if (any_memory_operand (operands[0], mode))
    {
      if (aligned_memory_operand (operands[0], mode))
	{
	  rtx aligned_mem, bitnum;
	  rtx temp1 = gen_reg_rtx (SImode);
	  rtx temp2 = gen_reg_rtx (SImode);

	  get_aligned_mem (operands[0], &aligned_mem, &bitnum);

	  emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
					temp1, temp2));
	}
      else
	{
	  rtx temp1 = gen_reg_rtx (DImode);
	  rtx temp2 = gen_reg_rtx (DImode);
	  rtx temp3 = gen_reg_rtx (DImode);
	  rtx ua = get_unaligned_address (operands[0]);

	  if (mode == QImode)
	    seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
	  else
	    seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);

	  alpha_set_memflags (seq, operands[0]);
	  emit_insn (seq);
	}
      return true;
    }

  return false;
}
/* Implement the movmisalign patterns.  One of the operands is a memory
   that is not naturally aligned.  Emit instructions to load it.  */

void
alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
{
  /* Honor misaligned loads, for those we promised to do so.  */
  if (MEM_P (operands[1]))
    {
      rtx tmp;

      if (register_operand (operands[0], mode))
	tmp = operands[0];
      else
	tmp = gen_reg_rtx (mode);

      alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
      if (tmp != operands[0])
	emit_move_insn (operands[0], tmp);
    }
  else if (MEM_P (operands[0]))
    {
      if (!reg_or_0_operand (operands[1], mode))
	operands[1] = force_reg (mode, operands[1]);
      alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
    }
  else
    gcc_unreachable ();
}
/* Generate an unsigned DImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.

   For SFmode, this is the only construction I've found that can pass
   gcc.c-torture/execute/ieee/rbug.c.  No scenario that uses DFmode
   intermediates will work, because you'll get intermediate rounding
   that ruins the end result.  Some of this could be fixed by turning
   on round-to-positive-infinity, but that requires diddling the fpsr,
   which kills performance.  I tried turning this around and converting
   to a negative number, so that I could turn on /m, but either I did
   it wrong or there's something else, because I wound up with the exact
   same single-bit error.  There is a branch-less form of this same code:

	srl     $16,1,$1
	and     $16,1,$2
	cmplt   $16,0,$3
	or      $1,$2,$2
	cmovge  $16,$16,$2
	itoft	$3,$f10
	itoft	$2,$f11
	cvtqs   $f11,$f11
	adds    $f11,$f11,$f0
	fcmoveq $f10,$f11,$f0

   I'm not using it because it's the same number of instructions as
   this branch-full form, and it has more serialized long latency
   instructions on the critical path.

   For DFmode, we can avoid rounding errors by breaking up the word
   into two pieces, converting them separately, and adding them back:

   LC0: .long 0,0x5f800000

	itoft	$16,$f11
	lda	$2,LC0
	cmplt	$16,0,$1
	cpyse	$f11,$f31,$f10
	cpyse	$f31,$f11,$f11
	s4addq	$1,$2,$1
	lds	$f12,0($1)
	cvtqt	$f10,$f10
	cvtqt	$f11,$f11
	addt	$f12,$f10,$f0
	addt	$f0,$f11,$f0

   This doesn't seem to be a clear-cut win over the optabs form.
   It probably all depends on the distribution of numbers being
   converted -- in the optabs form, all but high-bit-set has a
   much lower minimum execution time.  */
2365 alpha_emit_floatuns (rtx operands[2])
2367 rtx neglab, donelab, i0, i1, f0, in, out;
2368 enum machine_mode mode;
2371 in = force_reg (DImode, operands[1]);
2372 mode = GET_MODE (out);
2373 neglab = gen_label_rtx ();
2374 donelab = gen_label_rtx ();
2375 i0 = gen_reg_rtx (DImode);
2376 i1 = gen_reg_rtx (DImode);
2377 f0 = gen_reg_rtx (mode);
2379 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2381 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2382 emit_jump_insn (gen_jump (donelab));
2385 emit_label (neglab);
2387 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2388 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2389 emit_insn (gen_iordi3 (i0, i0, i1));
2390 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2391 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2393 emit_label (donelab);
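/* Illustrative sketch only, not part of GCC: the same halving trick in
   plain C, assuming a 64-bit "unsigned long" (as on Alpha).  The name
   example_floatuns and the signed casts standing in for the hardware's
   signed conversion are assumptions for the example; it is kept under
   #if 0 so it cannot affect the build.  */
#if 0
static double
example_floatuns (unsigned long in)
{
  if ((long) in >= 0)
    /* High bit clear: the signed conversion is already exact.  */
    return (double) (long) in;

  /* High bit set: halve the value, folding the discarded low bit back
     in as a sticky bit so the final rounding is still correct, convert
     the now-positive value, then double the result.  */
  return (double) (long) ((in >> 1) | (in & 1)) * 2.0;
}
#endif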
2396 /* Generate the comparison for a conditional branch. */
2399 alpha_emit_conditional_branch (enum rtx_code code)
2401 enum rtx_code cmp_code, branch_code;
2402 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2403 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2406 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2408 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2410 alpha_compare.fp_p = 0;
2413 /* The general case: fold the comparison code to the types of compares
2414 that we have, choosing the branch as necessary. */
2417 case EQ: case LE: case LT: case LEU: case LTU:
2419 /* We have these compares: */
2420 cmp_code = code, branch_code = NE;
2425 /* These must be reversed. */
2426 cmp_code = reverse_condition (code), branch_code = EQ;
2429 case GE: case GT: case GEU: case GTU:
2430 /* For FP, we swap them, for INT, we reverse them. */
2431 if (alpha_compare.fp_p)
2433 cmp_code = swap_condition (code);
2435 tem = op0, op0 = op1, op1 = tem;
2439 cmp_code = reverse_condition (code);
2448 if (alpha_compare.fp_p)
2451 if (flag_unsafe_math_optimizations)
2453 /* When we are not as concerned about non-finite values, and we
2454 are comparing against zero, we can branch directly. */
2455 if (op1 == CONST0_RTX (DFmode))
2456 cmp_code = UNKNOWN, branch_code = code;
2457 else if (op0 == CONST0_RTX (DFmode))
2459 /* Undo the swap we probably did just above. */
2460 tem = op0, op0 = op1, op1 = tem;
2461 branch_code = swap_condition (cmp_code);
2467 /* ??? We mark the branch mode to be CCmode to prevent the
2468 compare and branch from being combined, since the compare
2469 insn follows IEEE rules that the branch does not. */
2470 branch_mode = CCmode;
2477 /* The following optimizations are only for signed compares. */
2478 if (code != LEU && code != LTU && code != GEU && code != GTU)
2480 /* Whee. Compare and branch against 0 directly. */
2481 if (op1 == const0_rtx)
2482 cmp_code = UNKNOWN, branch_code = code;
2484       /* If the constant doesn't fit into an immediate, but can
2485 be generated by lda/ldah, we adjust the argument and
2486 compare against zero, so we can use beq/bne directly. */
2487 /* ??? Don't do this when comparing against symbols, otherwise
2488 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2489 be declared false out of hand (at least for non-weak). */
2490 else if (GET_CODE (op1) == CONST_INT
2491 && (code == EQ || code == NE)
2492 && !(symbolic_operand (op0, VOIDmode)
2493 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2495 rtx n_op1 = GEN_INT (-INTVAL (op1));
2497 if (! satisfies_constraint_I (op1)
2498 && (satisfies_constraint_K (n_op1)
2499 || satisfies_constraint_L (n_op1)))
2500 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2504 if (!reg_or_0_operand (op0, DImode))
2505 op0 = force_reg (DImode, op0);
2506 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2507 op1 = force_reg (DImode, op1);
2510 /* Emit an initial compare instruction, if necessary. */
2512 if (cmp_code != UNKNOWN)
2514 tem = gen_reg_rtx (cmp_mode);
2515 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2518 /* Zero the operands. */
2519 memset (&alpha_compare, 0, sizeof (alpha_compare));
2521 /* Return the branch comparison. */
2522 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
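/* For example (an illustrative reading of the folding above, not
   literal emitted code): an integer "a > b" is reversed into
   "t = (a <= b); beq t", while a floating-point "a > b" is swapped
   into "t = (b < a); bne t", since the hardware only provides
   eq/lt/le style compare instructions.  */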
2525 /* Certain simplifications can be done to make invalid setcc operations
2526    valid.  Return the final comparison, or NULL if we can't make it work.  */
2529 alpha_emit_setcc (enum rtx_code code)
2531 enum rtx_code cmp_code;
2532 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2533 int fp_p = alpha_compare.fp_p;
2536 /* Zero the operands. */
2537 memset (&alpha_compare, 0, sizeof (alpha_compare));
2539 if (fp_p && GET_MODE (op0) == TFmode)
2541 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2546 if (fp_p && !TARGET_FIX)
2549 /* The general case: fold the comparison code to the types of compares
2550 that we have, choosing the branch as necessary. */
2555 case EQ: case LE: case LT: case LEU: case LTU:
2557 /* We have these compares. */
2559 cmp_code = code, code = NE;
2563 if (!fp_p && op1 == const0_rtx)
2568 cmp_code = reverse_condition (code);
2572 case GE: case GT: case GEU: case GTU:
2573 /* These normally need swapping, but for integer zero we have
2574 special patterns that recognize swapped operands. */
2575 if (!fp_p && op1 == const0_rtx)
2577 code = swap_condition (code);
2579 cmp_code = code, code = NE;
2580 tmp = op0, op0 = op1, op1 = tmp;
2589 if (!register_operand (op0, DImode))
2590 op0 = force_reg (DImode, op0);
2591 if (!reg_or_8bit_operand (op1, DImode))
2592 op1 = force_reg (DImode, op1);
2595 /* Emit an initial compare instruction, if necessary. */
2596 if (cmp_code != UNKNOWN)
2598 enum machine_mode mode = fp_p ? DFmode : DImode;
2600 tmp = gen_reg_rtx (mode);
2601 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2602 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2604 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2608 /* Return the setcc comparison. */
2609 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2613 /* Rewrite a comparison against zero CMP of the form
2614 (CODE (cc0) (const_int 0)) so it can be written validly in
2615 a conditional move (if_then_else CMP ...).
2616 If both of the operands that set cc0 are nonzero we must emit
2617 an insn to perform the compare (it can't be done within
2618 the conditional move). */
2621 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2623 enum rtx_code code = GET_CODE (cmp);
2624 enum rtx_code cmov_code = NE;
2625 rtx op0 = alpha_compare.op0;
2626 rtx op1 = alpha_compare.op1;
2627 int fp_p = alpha_compare.fp_p;
2628 enum machine_mode cmp_mode
2629 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2630 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2631 enum machine_mode cmov_mode = VOIDmode;
2632 int local_fast_math = flag_unsafe_math_optimizations;
2635 /* Zero the operands. */
2636 memset (&alpha_compare, 0, sizeof (alpha_compare));
2638 if (fp_p != FLOAT_MODE_P (mode))
2640 enum rtx_code cmp_code;
2645 /* If we have fp<->int register move instructions, do a cmov by
2646 performing the comparison in fp registers, and move the
2647 zero/nonzero value to integer registers, where we can then
2648 use a normal cmov, or vice-versa. */
2652 case EQ: case LE: case LT: case LEU: case LTU:
2653 /* We have these compares. */
2654 cmp_code = code, code = NE;
2658 /* This must be reversed. */
2659 cmp_code = EQ, code = EQ;
2662 case GE: case GT: case GEU: case GTU:
2663 /* These normally need swapping, but for integer zero we have
2664 special patterns that recognize swapped operands. */
2665 if (!fp_p && op1 == const0_rtx)
2666 cmp_code = code, code = NE;
2669 cmp_code = swap_condition (code);
2671 tem = op0, op0 = op1, op1 = tem;
2679 tem = gen_reg_rtx (cmp_op_mode);
2680 emit_insn (gen_rtx_SET (VOIDmode, tem,
2681 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2684 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2685 op0 = gen_lowpart (cmp_op_mode, tem);
2686 op1 = CONST0_RTX (cmp_op_mode);
2688 local_fast_math = 1;
2691 /* We may be able to use a conditional move directly.
2692 This avoids emitting spurious compares. */
2693 if (signed_comparison_operator (cmp, VOIDmode)
2694 && (!fp_p || local_fast_math)
2695 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2696 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2698 /* We can't put the comparison inside the conditional move;
2699 emit a compare instruction and put that inside the
2700 conditional move. Make sure we emit only comparisons we have;
2701 swap or reverse as necessary. */
2703 if (!can_create_pseudo_p ())
2708 case EQ: case LE: case LT: case LEU: case LTU:
2709 /* We have these compares: */
2713 /* This must be reversed. */
2714 code = reverse_condition (code);
2718 case GE: case GT: case GEU: case GTU:
2719 /* These must be swapped. */
2720 if (op1 != CONST0_RTX (cmp_mode))
2722 code = swap_condition (code);
2723 tem = op0, op0 = op1, op1 = tem;
2733 if (!reg_or_0_operand (op0, DImode))
2734 op0 = force_reg (DImode, op0);
2735 if (!reg_or_8bit_operand (op1, DImode))
2736 op1 = force_reg (DImode, op1);
2739 /* ??? We mark the branch mode to be CCmode to prevent the compare
2740 and cmov from being combined, since the compare insn follows IEEE
2741 rules that the cmov does not. */
2742 if (fp_p && !local_fast_math)
2745 tem = gen_reg_rtx (cmp_op_mode);
2746 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2747 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2750 /* Simplify a conditional move of two constants into a setcc with
2751 arithmetic. This is done with a splitter since combine would
2752 just undo the work if done during code generation. It also catches
2753 cases we wouldn't have before cse. */
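/* Illustrative examples of the arithmetic forms produced below, where
   S is the 0/1 result of the setcc (not literal emitted code):

	cond ? 8 : 0     =>  S << 3
	cond ? -1 : 0    =>  -S
	cond ? 5 : 1     =>  S * 4 + 1	(a diff of 1, 4 or 8 maps onto
					 addq, s4addq or s8addq)  */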
2756 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2757 rtx t_rtx, rtx f_rtx)
2759 HOST_WIDE_INT t, f, diff;
2760 enum machine_mode mode;
2761 rtx target, subtarget, tmp;
2763 mode = GET_MODE (dest);
2768 if (((code == NE || code == EQ) && diff < 0)
2769 || (code == GE || code == GT))
2771 code = reverse_condition (code);
2772 diff = t, t = f, f = diff;
2776 subtarget = target = dest;
2779 target = gen_lowpart (DImode, dest);
2780 if (can_create_pseudo_p ())
2781 subtarget = gen_reg_rtx (DImode);
2785 /* Below, we must be careful to use copy_rtx on target and subtarget
2786      in intermediate insns, as they may be a subreg rtx, which may not be shared.  */
2789 if (f == 0 && exact_log2 (diff) > 0
2790 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2791 viable over a longer latency cmove. On EV5, the E0 slot is a
2792 scarce resource, and on EV4 shift has the same latency as a cmove. */
2793 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2795 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2796 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2798 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2799 GEN_INT (exact_log2 (t)));
2800 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2802 else if (f == 0 && t == -1)
2804 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2805 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2807 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2809 else if (diff == 1 || diff == 4 || diff == 8)
2813 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2814 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2817 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2820 add_op = GEN_INT (f);
2821 if (sext_add_operand (add_op, mode))
2823 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2825 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2826 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2838 /* Look up the X_floating library function name for the given operation.  */
2841 struct xfloating_op GTY(())
2843 const enum rtx_code code;
2844 const char *const GTY((skip)) osf_func;
2845 const char *const GTY((skip)) vms_func;
2849 static GTY(()) struct xfloating_op xfloating_ops[] =
2851 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2852 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2853 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2854 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2855 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2856 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2857 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2858 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2859 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2860 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2861 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2862 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2863 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2864 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2865 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2868 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2870 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2871 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2875 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2877 struct xfloating_op *ops = xfloating_ops;
2878 long n = ARRAY_SIZE (xfloating_ops);
2881 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2883 /* How irritating. Nothing to key off for the main table. */
2884 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2887 n = ARRAY_SIZE (vax_cvt_ops);
2890 for (i = 0; i < n; ++i, ++ops)
2891 if (ops->code == code)
2893 rtx func = ops->libcall;
2896 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2897 ? ops->vms_func : ops->osf_func);
2898 ops->libcall = func;
2906 /* Most X_floating operations take the rounding mode as an argument.
2907 Compute that here. */
2910 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2911 enum alpha_fp_rounding_mode round)
2917 case ALPHA_FPRM_NORM:
2920 case ALPHA_FPRM_MINF:
2923 case ALPHA_FPRM_CHOP:
2926 case ALPHA_FPRM_DYN:
2932 /* XXX For reference, round to +inf is mode = 3. */
2935 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2941 /* Emit an X_floating library function call.
2943 Note that these functions do not follow normal calling conventions:
2944 TFmode arguments are passed in two integer registers (as opposed to
2945 indirect); TFmode return values appear in R16+R17.
2947 FUNC is the function to call.
2948 TARGET is where the output belongs.
2949 OPERANDS are the inputs.
2950 NOPERANDS is the count of inputs.
2951 EQUIV is the expression equivalent for the function.
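/* For example (an assumed reading of the code below, not a documented
   ABI statement): a call to _OtsAddX receives its two TFmode operands
   in $16:$17 and $18:$19, the rounding-mode argument in $20, and
   returns the TFmode result in $16:$17.  */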
2955 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2956 int noperands, rtx equiv)
2958 rtx usage = NULL_RTX, tmp, reg;
2963 for (i = 0; i < noperands; ++i)
2965 switch (GET_MODE (operands[i]))
2968 reg = gen_rtx_REG (TFmode, regno);
2973 reg = gen_rtx_REG (DFmode, regno + 32);
2978 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
2981 reg = gen_rtx_REG (DImode, regno);
2989 emit_move_insn (reg, operands[i]);
2990 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
2993 switch (GET_MODE (target))
2996 reg = gen_rtx_REG (TFmode, 16);
2999 reg = gen_rtx_REG (DFmode, 32);
3002 reg = gen_rtx_REG (DImode, 0);
3008 tmp = gen_rtx_MEM (QImode, func);
3009 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3010 const0_rtx, const0_rtx));
3011 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3012 CONST_OR_PURE_CALL_P (tmp) = 1;
3017 emit_libcall_block (tmp, target, reg, equiv);
3020 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3023 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3027 rtx out_operands[3];
3029 func = alpha_lookup_xfloating_lib_func (code);
3030 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3032 out_operands[0] = operands[1];
3033 out_operands[1] = operands[2];
3034 out_operands[2] = GEN_INT (mode);
3035 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3036 gen_rtx_fmt_ee (code, TFmode, operands[1],
3040 /* Emit an X_floating library function call for a comparison. */
3043 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3045 enum rtx_code cmp_code, res_code;
3046 rtx func, out, operands[2];
3048   /* X_floating library comparison functions return -1, 0 or 1.
3052 Convert the compare against the raw return value. */
3080 func = alpha_lookup_xfloating_lib_func (cmp_code);
3084 out = gen_reg_rtx (DImode);
3086 /* ??? Strange mode for equiv because what's actually returned
3087 is -1,0,1, not a proper boolean value. */
3088 alpha_emit_xfloating_libcall (func, out, operands, 2,
3089 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3094 /* Emit an X_floating library function call for a conversion. */
3097 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3099 int noperands = 1, mode;
3100 rtx out_operands[2];
3102 enum rtx_code code = orig_code;
3104 if (code == UNSIGNED_FIX)
3107 func = alpha_lookup_xfloating_lib_func (code);
3109 out_operands[0] = operands[1];
3114 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3115 out_operands[1] = GEN_INT (mode);
3118 case FLOAT_TRUNCATE:
3119 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3120 out_operands[1] = GEN_INT (mode);
3127 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3128 gen_rtx_fmt_e (orig_code,
3129 GET_MODE (operands[0]),
3133 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3134 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3135 guarantee that the sequence
3138 is valid. Naturally, output operand ordering is little-endian.
3139 This is used by *movtf_internal and *movti_internal. */
3142 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3145 switch (GET_CODE (operands[1]))
3148 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3149 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3153 operands[3] = adjust_address (operands[1], DImode, 8);
3154 operands[2] = adjust_address (operands[1], DImode, 0);
3159 gcc_assert (operands[1] == CONST0_RTX (mode));
3160 operands[2] = operands[3] = const0_rtx;
3167 switch (GET_CODE (operands[0]))
3170 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3171 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3175 operands[1] = adjust_address (operands[0], DImode, 8);
3176 operands[0] = adjust_address (operands[0], DImode, 0);
3183 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3186 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3187 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3191 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3192 op2 is a register containing the sign bit, operation is the
3193 logical operation to be performed. */
3196 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3198 rtx high_bit = operands[2];
3202 alpha_split_tmode_pair (operands, TFmode, false);
3204 /* Detect three flavors of operand overlap. */
3206 if (rtx_equal_p (operands[0], operands[2]))
3208 else if (rtx_equal_p (operands[1], operands[2]))
3210 if (rtx_equal_p (operands[0], high_bit))
3217 emit_move_insn (operands[0], operands[2]);
3219 /* ??? If the destination overlaps both source tf and high_bit, then
3220 assume source tf is dead in its entirety and use the other half
3221 for a scratch register. Otherwise "scratch" is just the proper
3222 destination register. */
3223 scratch = operands[move < 2 ? 1 : 3];
3225 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3229 emit_move_insn (operands[0], operands[2]);
3231 emit_move_insn (operands[1], scratch);
3235 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting unaligned data:
3239 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3240 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3241 lda r3,X(r11) lda r3,X+2(r11)
3242 extwl r1,r3,r1 extql r1,r3,r1
3243 extwh r2,r3,r2 extqh r2,r3,r2
3244 or r1,r2,r1 or r1,r2,r1
3247 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3248 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3249 lda r3,X(r11) lda r3,X(r11)
3250 extll r1,r3,r1 extll r1,r3,r1
3251 extlh r2,r3,r2 extlh r2,r3,r2
3252 or r1,r2,r1 addl r1,r2,r1
3254 quad: ldq_u r1,X(r11)
3263 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3264 HOST_WIDE_INT ofs, int sign)
3266 rtx meml, memh, addr, extl, exth, tmp, mema;
3267 enum machine_mode mode;
3269 if (TARGET_BWX && size == 2)
3271 meml = adjust_address (mem, QImode, ofs);
3272 memh = adjust_address (mem, QImode, ofs+1);
3273 if (BYTES_BIG_ENDIAN)
3274 tmp = meml, meml = memh, memh = tmp;
3275 extl = gen_reg_rtx (DImode);
3276 exth = gen_reg_rtx (DImode);
3277 emit_insn (gen_zero_extendqidi2 (extl, meml));
3278 emit_insn (gen_zero_extendqidi2 (exth, memh));
3279 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3280 NULL, 1, OPTAB_LIB_WIDEN);
3281 addr = expand_simple_binop (DImode, IOR, extl, exth,
3282 NULL, 1, OPTAB_LIB_WIDEN);
3284 if (sign && GET_MODE (tgt) != HImode)
3286 addr = gen_lowpart (HImode, addr);
3287 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3291 if (GET_MODE (tgt) != DImode)
3292 addr = gen_lowpart (GET_MODE (tgt), addr);
3293 emit_move_insn (tgt, addr);
3298 meml = gen_reg_rtx (DImode);
3299 memh = gen_reg_rtx (DImode);
3300 addr = gen_reg_rtx (DImode);
3301 extl = gen_reg_rtx (DImode);
3302 exth = gen_reg_rtx (DImode);
3304 mema = XEXP (mem, 0);
3305 if (GET_CODE (mema) == LO_SUM)
3306 mema = force_reg (Pmode, mema);
3308 /* AND addresses cannot be in any alias set, since they may implicitly
3309 alias surrounding code. Ideally we'd have some alias set that
3310 covered all types except those with alignment 8 or higher. */
3312 tmp = change_address (mem, DImode,
3313 gen_rtx_AND (DImode,
3314 plus_constant (mema, ofs),
3316 set_mem_alias_set (tmp, 0);
3317 emit_move_insn (meml, tmp);
3319 tmp = change_address (mem, DImode,
3320 gen_rtx_AND (DImode,
3321 plus_constant (mema, ofs + size - 1),
3323 set_mem_alias_set (tmp, 0);
3324 emit_move_insn (memh, tmp);
3326 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3328 emit_move_insn (addr, plus_constant (mema, -1));
3330 emit_insn (gen_extqh_be (extl, meml, addr));
3331 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3333 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3334 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3335 addr, 1, OPTAB_WIDEN);
3337 else if (sign && size == 2)
3339 emit_move_insn (addr, plus_constant (mema, ofs+2));
3341 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3342 emit_insn (gen_extqh_le (exth, memh, addr));
3344       /* We must use tgt here for the target.  The alpha-vms port fails if we use
3345 addr for the target, because addr is marked as a pointer and combine
3346 knows that pointers are always sign-extended 32-bit values. */
3347 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3348 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3349 addr, 1, OPTAB_WIDEN);
3353 if (WORDS_BIG_ENDIAN)
3355 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3359 emit_insn (gen_extwh_be (extl, meml, addr));
3364 emit_insn (gen_extlh_be (extl, meml, addr));
3369 emit_insn (gen_extqh_be (extl, meml, addr));
3376 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3380 emit_move_insn (addr, plus_constant (mema, ofs));
3381 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3385 emit_insn (gen_extwh_le (exth, memh, addr));
3390 emit_insn (gen_extlh_le (exth, memh, addr));
3395 emit_insn (gen_extqh_le (exth, memh, addr));
3404 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3405 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3410 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3413 /* Similarly, use ins and msk instructions to perform unaligned stores. */
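/* For reference, the Architecture Handbook's unaligned longword store
   has roughly this shape (illustrative only, not the literal output of
   this function):

	ldq_u	r1,X+3(r11)	; load both enclosing quadwords
	ldq_u	r2,X(r11)
	lda	r3,X(r11)
	inslh	r4,r3,r5	; position the new data
	insll	r4,r3,r6
	msklh	r1,r3,r1	; clear the bytes being replaced
	mskll	r2,r3,r2
	or	r1,r5,r1	; merge, then store high half first
	or	r2,r6,r2
	stq_u	r1,X+3(r11)
	stq_u	r2,X(r11)  */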
3416 alpha_expand_unaligned_store (rtx dst, rtx src,
3417 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3419 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3421 if (TARGET_BWX && size == 2)
3423 if (src != const0_rtx)
3425 dstl = gen_lowpart (QImode, src);
3426 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3427 NULL, 1, OPTAB_LIB_WIDEN);
3428 dsth = gen_lowpart (QImode, dsth);
3431 dstl = dsth = const0_rtx;
3433 meml = adjust_address (dst, QImode, ofs);
3434 memh = adjust_address (dst, QImode, ofs+1);
3435 if (BYTES_BIG_ENDIAN)
3436 addr = meml, meml = memh, memh = addr;
3438 emit_move_insn (meml, dstl);
3439 emit_move_insn (memh, dsth);
3443 dstl = gen_reg_rtx (DImode);
3444 dsth = gen_reg_rtx (DImode);
3445 insl = gen_reg_rtx (DImode);
3446 insh = gen_reg_rtx (DImode);
3448 dsta = XEXP (dst, 0);
3449 if (GET_CODE (dsta) == LO_SUM)
3450 dsta = force_reg (Pmode, dsta);
3452 /* AND addresses cannot be in any alias set, since they may implicitly
3453 alias surrounding code. Ideally we'd have some alias set that
3454 covered all types except those with alignment 8 or higher. */
3456 meml = change_address (dst, DImode,
3457 gen_rtx_AND (DImode,
3458 plus_constant (dsta, ofs),
3460 set_mem_alias_set (meml, 0);
3462 memh = change_address (dst, DImode,
3463 gen_rtx_AND (DImode,
3464 plus_constant (dsta, ofs + size - 1),
3466 set_mem_alias_set (memh, 0);
3468 emit_move_insn (dsth, memh);
3469 emit_move_insn (dstl, meml);
3470 if (WORDS_BIG_ENDIAN)
3472 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3474 if (src != const0_rtx)
3479 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3482 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3485 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3488 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3489 GEN_INT (size*8), addr));
3495 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3499 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3500 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3504 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3508 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3512 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3514 if (src != CONST0_RTX (GET_MODE (src)))
3516 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3517 GEN_INT (size*8), addr));
3522 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3525 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3528 emit_insn (gen_insql_le (insl, src, addr));
3533 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3538 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3542 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3543 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3547 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3552 if (src != CONST0_RTX (GET_MODE (src)))
3554 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3555 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3558 if (WORDS_BIG_ENDIAN)
3560 emit_move_insn (meml, dstl);
3561 emit_move_insn (memh, dsth);
3565       /* Must store high before low for the degenerate case of an aligned address.  */
3566 emit_move_insn (memh, dsth);
3567 emit_move_insn (meml, dstl);
3571 /* The block move code tries to maximize speed by separating loads and
3572 stores at the expense of register pressure: we load all of the data
3573 before we store it back out. There are two secondary effects worth
3574 mentioning, that this speeds copying to/from aligned and unaligned
3575 buffers, and that it makes the code significantly easier to write. */
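/* Minimal illustration only, not part of GCC: the load-all-then-store-all
   strategy in plain C for an aligned copy of up to MAX_MOVE_WORDS
   quadwords.  The name example_block_move and the fixed-size buffer
   standing in for the data_regs[] pseudos are assumptions for the
   sketch; it is kept under #if 0 so it cannot affect the build.  */
#if 0
static void
example_block_move (long *dst, const long *src, int words)
{
  long regs[8];			/* stands in for the data_regs[] pseudos */
  int i;

  for (i = 0; i < words; ++i)	/* first, all of the loads... */
    regs[i] = src[i];
  for (i = 0; i < words; ++i)	/* ...then all of the stores */
    dst[i] = regs[i];
}
#endif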
3577 #define MAX_MOVE_WORDS 8
3579 /* Load an integral number of consecutive unaligned quadwords. */
3582 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3583 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3585 rtx const im8 = GEN_INT (-8);
3586 rtx const i64 = GEN_INT (64);
3587 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3588 rtx sreg, areg, tmp, smema;
3591 smema = XEXP (smem, 0);
3592 if (GET_CODE (smema) == LO_SUM)
3593 smema = force_reg (Pmode, smema);
3595 /* Generate all the tmp registers we need. */
3596 for (i = 0; i < words; ++i)
3598 data_regs[i] = out_regs[i];
3599 ext_tmps[i] = gen_reg_rtx (DImode);
3601 data_regs[words] = gen_reg_rtx (DImode);
3604 smem = adjust_address (smem, GET_MODE (smem), ofs);
3606 /* Load up all of the source data. */
3607 for (i = 0; i < words; ++i)
3609 tmp = change_address (smem, DImode,
3610 gen_rtx_AND (DImode,
3611 plus_constant (smema, 8*i),
3613 set_mem_alias_set (tmp, 0);
3614 emit_move_insn (data_regs[i], tmp);
3617 tmp = change_address (smem, DImode,
3618 gen_rtx_AND (DImode,
3619 plus_constant (smema, 8*words - 1),
3621 set_mem_alias_set (tmp, 0);
3622 emit_move_insn (data_regs[words], tmp);
3624 /* Extract the half-word fragments. Unfortunately DEC decided to make
3625 extxh with offset zero a noop instead of zeroing the register, so
3626 we must take care of that edge condition ourselves with cmov. */
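  /* Concretely (illustrative): when the source turns out to be aligned
     (addr & 7 == 0), extqh leaves the following quadword's data intact
     instead of producing zero, so without the cmov that data would be
     OR'd into the result.  */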
3628 sreg = copy_addr_to_reg (smema);
3629 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3631 if (WORDS_BIG_ENDIAN)
3632 emit_move_insn (sreg, plus_constant (sreg, 7));
3633 for (i = 0; i < words; ++i)
3635 if (WORDS_BIG_ENDIAN)
3637 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3638 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3642 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3643 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3645 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3646 gen_rtx_IF_THEN_ELSE (DImode,
3647 gen_rtx_EQ (DImode, areg,
3649 const0_rtx, ext_tmps[i])));
3652 /* Merge the half-words into whole words. */
3653 for (i = 0; i < words; ++i)
3655 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3656 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3660 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3661 may be NULL to store zeros. */
3664 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3665 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3667 rtx const im8 = GEN_INT (-8);
3668 rtx const i64 = GEN_INT (64);
3669 rtx ins_tmps[MAX_MOVE_WORDS];
3670 rtx st_tmp_1, st_tmp_2, dreg;
3671 rtx st_addr_1, st_addr_2, dmema;
3674 dmema = XEXP (dmem, 0);
3675 if (GET_CODE (dmema) == LO_SUM)
3676 dmema = force_reg (Pmode, dmema);
3678 /* Generate all the tmp registers we need. */
3679 if (data_regs != NULL)
3680 for (i = 0; i < words; ++i)
3681 ins_tmps[i] = gen_reg_rtx(DImode);
3682 st_tmp_1 = gen_reg_rtx(DImode);
3683 st_tmp_2 = gen_reg_rtx(DImode);
3686 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3688 st_addr_2 = change_address (dmem, DImode,
3689 gen_rtx_AND (DImode,
3690 plus_constant (dmema, words*8 - 1),
3692 set_mem_alias_set (st_addr_2, 0);
3694 st_addr_1 = change_address (dmem, DImode,
3695 gen_rtx_AND (DImode, dmema, im8));
3696 set_mem_alias_set (st_addr_1, 0);
3698 /* Load up the destination end bits. */
3699 emit_move_insn (st_tmp_2, st_addr_2);
3700 emit_move_insn (st_tmp_1, st_addr_1);
3702 /* Shift the input data into place. */
3703 dreg = copy_addr_to_reg (dmema);
3704 if (WORDS_BIG_ENDIAN)
3705 emit_move_insn (dreg, plus_constant (dreg, 7));
3706 if (data_regs != NULL)
3708 for (i = words-1; i >= 0; --i)
3710 if (WORDS_BIG_ENDIAN)
3712 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3713 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3717 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3718 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3721 for (i = words-1; i > 0; --i)
3723 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3724 ins_tmps[i-1], ins_tmps[i-1], 1,
3729 /* Split and merge the ends with the destination data. */
3730 if (WORDS_BIG_ENDIAN)
3732 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3733 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3737 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3738 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3741 if (data_regs != NULL)
3743 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3744 st_tmp_2, 1, OPTAB_WIDEN);
3745 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3746 st_tmp_1, 1, OPTAB_WIDEN);
3750 if (WORDS_BIG_ENDIAN)
3751 emit_move_insn (st_addr_1, st_tmp_1);
3753 emit_move_insn (st_addr_2, st_tmp_2);
3754 for (i = words-1; i > 0; --i)
3756 rtx tmp = change_address (dmem, DImode,
3757 gen_rtx_AND (DImode,
3758 plus_constant(dmema,
3759 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3761 set_mem_alias_set (tmp, 0);
3762 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3764 if (WORDS_BIG_ENDIAN)
3765 emit_move_insn (st_addr_2, st_tmp_2);
3767 emit_move_insn (st_addr_1, st_tmp_1);
3771 /* Expand string/block move operations.
3773 operands[0] is the pointer to the destination.
3774 operands[1] is the pointer to the source.
3775 operands[2] is the number of bytes to move.
3776 operands[3] is the alignment. */
3779 alpha_expand_block_move (rtx operands[])
3781 rtx bytes_rtx = operands[2];
3782 rtx align_rtx = operands[3];
3783 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3784 HOST_WIDE_INT bytes = orig_bytes;
3785 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3786 HOST_WIDE_INT dst_align = src_align;
3787 rtx orig_src = operands[1];
3788 rtx orig_dst = operands[0];
3789 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3791 unsigned int i, words, ofs, nregs = 0;
3793 if (orig_bytes <= 0)
3795 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3798 /* Look for additional alignment information from recorded register info. */
3800 tmp = XEXP (orig_src, 0);
3801 if (GET_CODE (tmp) == REG)
3802 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3803 else if (GET_CODE (tmp) == PLUS
3804 && GET_CODE (XEXP (tmp, 0)) == REG
3805 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3807 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3808 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3812 if (a >= 64 && c % 8 == 0)
3814 else if (a >= 32 && c % 4 == 0)
3816 else if (a >= 16 && c % 2 == 0)
3821 tmp = XEXP (orig_dst, 0);
3822 if (GET_CODE (tmp) == REG)
3823 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3824 else if (GET_CODE (tmp) == PLUS
3825 && GET_CODE (XEXP (tmp, 0)) == REG
3826 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3828 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3829 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3833 if (a >= 64 && c % 8 == 0)
3835 else if (a >= 32 && c % 4 == 0)
3837 else if (a >= 16 && c % 2 == 0)
3843 if (src_align >= 64 && bytes >= 8)
3847 for (i = 0; i < words; ++i)
3848 data_regs[nregs + i] = gen_reg_rtx (DImode);
3850 for (i = 0; i < words; ++i)
3851 emit_move_insn (data_regs[nregs + i],
3852 adjust_address (orig_src, DImode, ofs + i * 8));
3859 if (src_align >= 32 && bytes >= 4)
3863 for (i = 0; i < words; ++i)
3864 data_regs[nregs + i] = gen_reg_rtx (SImode);
3866 for (i = 0; i < words; ++i)
3867 emit_move_insn (data_regs[nregs + i],
3868 adjust_address (orig_src, SImode, ofs + i * 4));
3879 for (i = 0; i < words+1; ++i)
3880 data_regs[nregs + i] = gen_reg_rtx (DImode);
3882 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3890 if (! TARGET_BWX && bytes >= 4)
3892 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3893 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3900 if (src_align >= 16)
3903 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3904 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3907 } while (bytes >= 2);
3909 else if (! TARGET_BWX)
3911 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3912 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3920 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3921 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3926 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3928 /* Now save it back out again. */
3932 /* Write out the data in whatever chunks reading the source allowed. */
3933 if (dst_align >= 64)
3935 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3937 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3944 if (dst_align >= 32)
3946       /* If the source has remaining DImode regs, write them out in two pieces.  */
3948 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3950 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3951 NULL_RTX, 1, OPTAB_WIDEN);
3953 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3954 gen_lowpart (SImode, data_regs[i]));
3955 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3956 gen_lowpart (SImode, tmp));
3961 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3963 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3970 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3972 /* Write out a remaining block of words using unaligned methods. */
3974 for (words = 1; i + words < nregs; words++)
3975 if (GET_MODE (data_regs[i + words]) != DImode)
3979 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3981 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3988 /* Due to the above, this won't be aligned. */
3989 /* ??? If we have more than one of these, consider constructing full
3990 words in registers and using alpha_expand_unaligned_store_words. */
3991 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3993 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3998 if (dst_align >= 16)
3999 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4001 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4006 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4008 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4013 /* The remainder must be byte copies. */
4016 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4017 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4026 alpha_expand_block_clear (rtx operands[])
4028 rtx bytes_rtx = operands[1];
4029 rtx align_rtx = operands[3];
4030 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4031 HOST_WIDE_INT bytes = orig_bytes;
4032 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4033 HOST_WIDE_INT alignofs = 0;
4034 rtx orig_dst = operands[0];
4036 int i, words, ofs = 0;
4038 if (orig_bytes <= 0)
4040 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4043 /* Look for stricter alignment. */
4044 tmp = XEXP (orig_dst, 0);
4045 if (GET_CODE (tmp) == REG)
4046 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4047 else if (GET_CODE (tmp) == PLUS
4048 && GET_CODE (XEXP (tmp, 0)) == REG
4049 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4051 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4052 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4057 align = a, alignofs = 8 - c % 8;
4059 align = a, alignofs = 4 - c % 4;
4061 align = a, alignofs = 2 - c % 2;
4065 /* Handle an unaligned prefix first. */
4069 #if HOST_BITS_PER_WIDE_INT >= 64
4070 /* Given that alignofs is bounded by align, the only time BWX could
4071 generate three stores is for a 7 byte fill. Prefer two individual
4072 stores over a load/mask/store sequence. */
4073 if ((!TARGET_BWX || alignofs == 7)
4075 && !(alignofs == 4 && bytes >= 4))
4077 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4078 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4082 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4083 set_mem_alias_set (mem, 0);
4085 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4086 if (bytes < alignofs)
4088 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4099 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4100 NULL_RTX, 1, OPTAB_WIDEN);
4102 emit_move_insn (mem, tmp);
4106 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4108 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4113 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4115 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4120 if (alignofs == 4 && bytes >= 4)
4122 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4128 /* If we've not used the extra lead alignment information by now,
4129 we won't be able to. Downgrade align to match what's left over. */
4132 alignofs = alignofs & -alignofs;
4133 align = MIN (align, alignofs * BITS_PER_UNIT);
4137 /* Handle a block of contiguous long-words. */
4139 if (align >= 64 && bytes >= 8)
4143 for (i = 0; i < words; ++i)
4144 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4151 /* If the block is large and appropriately aligned, emit a single
4152 store followed by a sequence of stq_u insns. */
4154 if (align >= 32 && bytes > 16)
4158 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4162 orig_dsta = XEXP (orig_dst, 0);
4163 if (GET_CODE (orig_dsta) == LO_SUM)
4164 orig_dsta = force_reg (Pmode, orig_dsta);
4167 for (i = 0; i < words; ++i)
4170 = change_address (orig_dst, DImode,
4171 gen_rtx_AND (DImode,
4172 plus_constant (orig_dsta, ofs + i*8),
4174 set_mem_alias_set (mem, 0);
4175 emit_move_insn (mem, const0_rtx);
4178 /* Depending on the alignment, the first stq_u may have overlapped
4179 with the initial stl, which means that the last stq_u didn't
4180 write as much as it would appear. Leave those questionable bytes
4182 bytes -= words * 8 - 4;
4183 ofs += words * 8 - 4;
4186 /* Handle a smaller block of aligned words. */
4188 if ((align >= 64 && bytes == 4)
4189 || (align == 32 && bytes >= 4))
4193 for (i = 0; i < words; ++i)
4194 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4201 /* An unaligned block uses stq_u stores for as many as possible. */
4207 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4213 /* Next clean up any trailing pieces. */
4215 #if HOST_BITS_PER_WIDE_INT >= 64
4216   /* Count the number of bits in BYTES for which aligned stores could be emitted.  */
4219 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4223 /* If we have appropriate alignment (and it wouldn't take too many
4224 instructions otherwise), mask out the bytes we need. */
4225 if (TARGET_BWX ? words > 2 : bytes > 0)
4232 mem = adjust_address (orig_dst, DImode, ofs);
4233 set_mem_alias_set (mem, 0);
4235 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4237 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4238 NULL_RTX, 1, OPTAB_WIDEN);
4240 emit_move_insn (mem, tmp);
4243 else if (align >= 32 && bytes < 4)
4248 mem = adjust_address (orig_dst, SImode, ofs);
4249 set_mem_alias_set (mem, 0);
4251 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4253 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4254 NULL_RTX, 1, OPTAB_WIDEN);
4256 emit_move_insn (mem, tmp);
4262 if (!TARGET_BWX && bytes >= 4)
4264 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4274 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4278 } while (bytes >= 2);
4280 else if (! TARGET_BWX)
4282 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4290 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4298 /* Returns a mask so that zap(x, value) == x & mask. */
4301 alpha_expand_zap_mask (HOST_WIDE_INT value)
4306 if (HOST_BITS_PER_WIDE_INT >= 64)
4308 HOST_WIDE_INT mask = 0;
4310 for (i = 7; i >= 0; --i)
4313 if (!((value >> i) & 1))
4317 result = gen_int_mode (mask, DImode);
4321 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4323 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4325 for (i = 7; i >= 4; --i)
4328 if (!((value >> i) & 1))
4332 for (i = 3; i >= 0; --i)
4335 if (!((value >> i) & 1))
4339 result = immed_double_const (mask_lo, mask_hi, DImode);
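/* Illustrative sketch only, not part of GCC: the mask computed above,
   written in plain C for a 64-bit host.  Bit I of VALUE selects byte I
   for zapping; the mask keeps exactly the bytes whose select bit is
   clear, so e.g. example_zap_mask (0x0f) == 0xffffffff00000000 and
   zap (x, 0x0f) == x & that mask.  The name example_zap_mask is an
   assumption; the sketch is kept under #if 0.  */
#if 0
static unsigned long
example_zap_mask (unsigned long value)
{
  unsigned long mask = 0;
  int i;

  for (i = 0; i < 8; ++i)
    if (!((value >> i) & 1))
      mask |= (unsigned long) 0xff << (i * 8);	/* keep byte I */
  return mask;
}
#endif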
4346 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4347 enum machine_mode mode,
4348 rtx op0, rtx op1, rtx op2)
4350 op0 = gen_lowpart (mode, op0);
4352 if (op1 == const0_rtx)
4353 op1 = CONST0_RTX (mode);
4355 op1 = gen_lowpart (mode, op1);
4357 if (op2 == const0_rtx)
4358 op2 = CONST0_RTX (mode);
4360 op2 = gen_lowpart (mode, op2);
4362 emit_insn ((*gen) (op0, op1, op2));
4365 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4366 COND is true. Mark the jump as unlikely to be taken. */
4369 emit_unlikely_jump (rtx cond, rtx label)
4371 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4374 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4375 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4376 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4379 /* A subroutine of the atomic operation splitters. Emit a load-locked
4380 instruction in MODE. */
4383 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4385 rtx (*fn) (rtx, rtx) = NULL;
4387 fn = gen_load_locked_si;
4388 else if (mode == DImode)
4389 fn = gen_load_locked_di;
4390 emit_insn (fn (reg, mem));
4393 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4394 instruction in MODE. */
4397 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4399 rtx (*fn) (rtx, rtx, rtx) = NULL;
4401 fn = gen_store_conditional_si;
4402 else if (mode == DImode)
4403 fn = gen_store_conditional_di;
4404 emit_insn (fn (res, mem, val));
4407 /* A subroutine of the atomic operation splitters. Emit an insxl
4408 instruction in MODE. */
4411 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4413 rtx ret = gen_reg_rtx (DImode);
4414 rtx (*fn) (rtx, rtx, rtx);
4416 if (WORDS_BIG_ENDIAN)
4430 /* The insbl and inswl patterns require a register operand. */
4431 op1 = force_reg (mode, op1);
4432 emit_insn (fn (ret, op1, op2));
4437 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4438 to perform. MEM is the memory on which to operate. VAL is the second
4439 operand of the binary operator. BEFORE and AFTER are optional locations to
4440    return the value of MEM either before or after the operation.  SCRATCH is
4441 a scratch register. */
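/* The emitted sequence has roughly this shape (illustrative, shown for
   the DImode insns; <op> stands for the CODE operation):

	mb
   1:	ldq_l	before,mem
	<op>	scratch,before,val
	stq_c	scratch,mem
	beq	scratch,1b
	mb  */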
4444 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4445 rtx before, rtx after, rtx scratch)
4447 enum machine_mode mode = GET_MODE (mem);
4448 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4450 emit_insn (gen_memory_barrier ());
4452 label = gen_label_rtx ();
4454 label = gen_rtx_LABEL_REF (DImode, label);
4458 emit_load_locked (mode, before, mem);
4461 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4463 x = gen_rtx_fmt_ee (code, mode, before, val);
4465 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4466 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4468 emit_store_conditional (mode, cond, mem, scratch);
4470 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4471 emit_unlikely_jump (x, label);
4473 emit_insn (gen_memory_barrier ());
4476 /* Expand a compare and swap operation. */
4479 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4482 enum machine_mode mode = GET_MODE (mem);
4483 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4485 emit_insn (gen_memory_barrier ());
4487 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4488 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4489 emit_label (XEXP (label1, 0));
4491 emit_load_locked (mode, retval, mem);
4493 x = gen_lowpart (DImode, retval);
4494 if (oldval == const0_rtx)
4495 x = gen_rtx_NE (DImode, x, const0_rtx);
4498 x = gen_rtx_EQ (DImode, x, oldval);
4499 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4500 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4502 emit_unlikely_jump (x, label2);
4504 emit_move_insn (scratch, newval);
4505 emit_store_conditional (mode, cond, mem, scratch);
4507 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4508 emit_unlikely_jump (x, label1);
4510 emit_insn (gen_memory_barrier ());
4511 emit_label (XEXP (label2, 0));
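/* For reference, the generated compare-and-swap loop looks roughly
   like this (illustrative, DImode case; COND is the low part of
   SCRATCH):

	mb
   1:	ldq_l	retval,mem
	cmpeq	retval,oldval,cond
	beq	cond,2f
	mov	newval,scratch
	stq_c	scratch,mem
	beq	scratch,1b
	mb
   2:  */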
4515 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4517 enum machine_mode mode = GET_MODE (mem);
4518 rtx addr, align, wdst;
4519 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4521 addr = force_reg (DImode, XEXP (mem, 0));
4522 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4523 NULL_RTX, 1, OPTAB_DIRECT);
4525 oldval = convert_modes (DImode, mode, oldval, 1);
4526 newval = emit_insxl (mode, newval, addr);
4528 wdst = gen_reg_rtx (DImode);
4530 fn5 = gen_sync_compare_and_swapqi_1;
4532 fn5 = gen_sync_compare_and_swaphi_1;
4533 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4535 emit_move_insn (dst, gen_lowpart (mode, wdst));
4539 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4540 rtx oldval, rtx newval, rtx align,
4541 rtx scratch, rtx cond)
4543 rtx label1, label2, mem, width, mask, x;
4545 mem = gen_rtx_MEM (DImode, align);
4546 MEM_VOLATILE_P (mem) = 1;
4548 emit_insn (gen_memory_barrier ());
4549 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4550 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4551 emit_label (XEXP (label1, 0));
4553 emit_load_locked (DImode, scratch, mem);
4555 width = GEN_INT (GET_MODE_BITSIZE (mode));
4556 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4557 if (WORDS_BIG_ENDIAN)
4558 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4560 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4562 if (oldval == const0_rtx)
4563 x = gen_rtx_NE (DImode, dest, const0_rtx);
4566 x = gen_rtx_EQ (DImode, dest, oldval);
4567 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4568 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4570 emit_unlikely_jump (x, label2);
4572 if (WORDS_BIG_ENDIAN)
4573 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4575 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4576 emit_insn (gen_iordi3 (scratch, scratch, newval));
4578 emit_store_conditional (DImode, scratch, mem, scratch);
4580 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4581 emit_unlikely_jump (x, label1);
4583 emit_insn (gen_memory_barrier ());
4584 emit_label (XEXP (label2, 0));
4587 /* Expand an atomic exchange operation. */
4590 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4592 enum machine_mode mode = GET_MODE (mem);
4593 rtx label, x, cond = gen_lowpart (DImode, scratch);
4595 emit_insn (gen_memory_barrier ());
4597 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4598 emit_label (XEXP (label, 0));
4600 emit_load_locked (mode, retval, mem);
4601 emit_move_insn (scratch, val);
4602 emit_store_conditional (mode, cond, mem, scratch);
4604 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4605 emit_unlikely_jump (x, label);
4609 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4611 enum machine_mode mode = GET_MODE (mem);
4612 rtx addr, align, wdst;
4613 rtx (*fn4) (rtx, rtx, rtx, rtx);
4615 /* Force the address into a register. */
4616 addr = force_reg (DImode, XEXP (mem, 0));
4618 /* Align it to a multiple of 8. */
4619 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4620 NULL_RTX, 1, OPTAB_DIRECT);
4622 /* Insert val into the correct byte location within the word. */
4623 val = emit_insxl (mode, val, addr);
4625 wdst = gen_reg_rtx (DImode);
4627 fn4 = gen_sync_lock_test_and_setqi_1;
4629 fn4 = gen_sync_lock_test_and_sethi_1;
4630 emit_insn (fn4 (wdst, addr, val, align));
4632 emit_move_insn (dst, gen_lowpart (mode, wdst));
4636 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4637 rtx val, rtx align, rtx scratch)
4639 rtx label, mem, width, mask, x;
4641 mem = gen_rtx_MEM (DImode, align);
4642 MEM_VOLATILE_P (mem) = 1;
4644 emit_insn (gen_memory_barrier ());
4645 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4646 emit_label (XEXP (label, 0));
4648 emit_load_locked (DImode, scratch, mem);
4650 width = GEN_INT (GET_MODE_BITSIZE (mode));
4651 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4652 if (WORDS_BIG_ENDIAN)
4654 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4655 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4659 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4660 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4662 emit_insn (gen_iordi3 (scratch, scratch, val));
4664 emit_store_conditional (DImode, scratch, mem, scratch);
4666 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4667 emit_unlikely_jump (x, label);
4670 /* Adjust the cost of a scheduling dependency.  Return the new cost of
4671    the dependency LINK of INSN on DEP_INSN.  COST is the current cost.  */
4674 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4676 enum attr_type insn_type, dep_insn_type;
4678 /* If the dependence is an anti-dependence, there is no cost. For an
4679 output dependence, there is sometimes a cost, but it doesn't seem
4680 worth handling those few cases. */
4681 if (REG_NOTE_KIND (link) != 0)
4684 /* If we can't recognize the insns, we can't really do anything. */
4685 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4688 insn_type = get_attr_type (insn);
4689 dep_insn_type = get_attr_type (dep_insn);
4691 /* Bring in the user-defined memory latency. */
4692 if (dep_insn_type == TYPE_ILD
4693 || dep_insn_type == TYPE_FLD
4694 || dep_insn_type == TYPE_LDSYM)
4695 cost += alpha_memory_latency-1;
4697 /* Everything else handled in DFA bypasses now. */
4702 /* The number of instructions that can be issued per cycle. */
4705 alpha_issue_rate (void)
4707 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4710 /* How many alternative schedules to try. This should be as wide as the
4711 scheduling freedom in the DFA, but no wider. Making this value too
4712    large results in extra work for the scheduler.
4714 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4715 alternative schedules. For EV5, we can choose between E0/E1 and
4716 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4719 alpha_multipass_dfa_lookahead (void)
4721 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4724 /* Machine-specific function data. */
4726 struct machine_function GTY(())
4729 /* List of call information words for calls from this function. */
4730 struct rtx_def *first_ciw;
4731 struct rtx_def *last_ciw;
4734 /* List of deferred case vectors. */
4735 struct rtx_def *addr_list;
4738 const char *some_ld_name;
4740 /* For TARGET_LD_BUGGY_LDGP. */
4741 struct rtx_def *gp_save_rtx;
4744 /* How to allocate a 'struct machine_function'. */
4746 static struct machine_function *
4747 alpha_init_machine_status (void)
4749 return ((struct machine_function *)
4750 ggc_alloc_cleared (sizeof (struct machine_function)));
4753 /* Functions to save and restore alpha_return_addr_rtx. */
4755 /* Start the ball rolling with RETURN_ADDR_RTX. */
4758 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4763 return get_hard_reg_initial_val (Pmode, REG_RA);
4766 /* Return or create a memory slot containing the gp value for the current
4767 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4770 alpha_gp_save_rtx (void)
4772 rtx seq, m = cfun->machine->gp_save_rtx;
4778 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4779 m = validize_mem (m);
4780 emit_move_insn (m, pic_offset_table_rtx);
4784 emit_insn_at_entry (seq);
4786 cfun->machine->gp_save_rtx = m;
4793 alpha_ra_ever_killed (void)
4797 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4798 return (int)df_regs_ever_live_p (REG_RA);
4800 push_topmost_sequence ();
4802 pop_topmost_sequence ();
4804 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4808 /* Return the trap mode suffix applicable to the current
4809 instruction, or NULL. */
4812 get_trap_mode_suffix (void)
4814 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4818 case TRAP_SUFFIX_NONE:
4821 case TRAP_SUFFIX_SU:
4822 if (alpha_fptm >= ALPHA_FPTM_SU)
4826 case TRAP_SUFFIX_SUI:
4827 if (alpha_fptm >= ALPHA_FPTM_SUI)
4831 case TRAP_SUFFIX_V_SV:
4839 case ALPHA_FPTM_SUI:
4845 case TRAP_SUFFIX_V_SV_SVI:
4854 case ALPHA_FPTM_SUI:
4861 case TRAP_SUFFIX_U_SU_SUI:
4870 case ALPHA_FPTM_SUI:
4883 /* Return the rounding mode suffix applicable to the current
4884 instruction, or NULL. */
4887 get_round_mode_suffix (void)
4889 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4893 case ROUND_SUFFIX_NONE:
4895 case ROUND_SUFFIX_NORMAL:
4898 case ALPHA_FPRM_NORM:
4900 case ALPHA_FPRM_MINF:
4902 case ALPHA_FPRM_CHOP:
4904 case ALPHA_FPRM_DYN:
4911 case ROUND_SUFFIX_C:
4920 /* Locate some local-dynamic symbol still in use by this function
4921 so that we can print its name in some movdi_er_tlsldm pattern. */
4924 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4928 if (GET_CODE (x) == SYMBOL_REF
4929 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4931 cfun->machine->some_ld_name = XSTR (x, 0);
4939 get_some_local_dynamic_name (void)
4943 if (cfun->machine->some_ld_name)
4944 return cfun->machine->some_ld_name;
4946 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4948 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4949 return cfun->machine->some_ld_name;
4954 /* Print an operand. Recognize special options, documented below. */
4957 print_operand (FILE *file, rtx x, int code)
4964 /* Print the assembler name of the current function. */
4965 assemble_name (file, alpha_fnname);
4969 assemble_name (file, get_some_local_dynamic_name ());
4974 const char *trap = get_trap_mode_suffix ();
4975 const char *round = get_round_mode_suffix ();
4978 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
4979 (trap ? trap : ""), (round ? round : ""));
/* Generate the single-precision instruction suffix.  */
4985 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
/* Generate the double-precision instruction suffix.  */
4990 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
4994 if (alpha_this_literal_sequence_number == 0)
4995 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
4996 fprintf (file, "%d", alpha_this_literal_sequence_number);
5000 if (alpha_this_gpdisp_sequence_number == 0)
5001 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5002 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5006 if (GET_CODE (x) == HIGH)
5007 output_addr_const (file, XEXP (x, 0));
5009 output_operand_lossage ("invalid %%H value");
5016 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5018 x = XVECEXP (x, 0, 0);
5019 lituse = "lituse_tlsgd";
5021 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5023 x = XVECEXP (x, 0, 0);
5024 lituse = "lituse_tlsldm";
5026 else if (GET_CODE (x) == CONST_INT)
5027 lituse = "lituse_jsr";
5030 output_operand_lossage ("invalid %%J value");
5034 if (x != const0_rtx)
5035 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
#ifdef HAVE_AS_JSRDIRECT_RELOCS
lituse = "lituse_jsrdirect";
#else
lituse = "lituse_jsr";
#endif
5049 gcc_assert (INTVAL (x) != 0);
5050 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5054 /* If this operand is the constant zero, write it as "$31". */
5055 if (GET_CODE (x) == REG)
5056 fprintf (file, "%s", reg_names[REGNO (x)]);
5057 else if (x == CONST0_RTX (GET_MODE (x)))
5058 fprintf (file, "$31");
5060 output_operand_lossage ("invalid %%r value");
5064 /* Similar, but for floating-point. */
5065 if (GET_CODE (x) == REG)
5066 fprintf (file, "%s", reg_names[REGNO (x)]);
5067 else if (x == CONST0_RTX (GET_MODE (x)))
5068 fprintf (file, "$f31");
5070 output_operand_lossage ("invalid %%R value");
5074 /* Write the 1's complement of a constant. */
5075 if (GET_CODE (x) != CONST_INT)
5076 output_operand_lossage ("invalid %%N value");
5078 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5082 /* Write 1 << C, for a constant C. */
5083 if (GET_CODE (x) != CONST_INT)
5084 output_operand_lossage ("invalid %%P value");
5086 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5090 /* Write the high-order 16 bits of a constant, sign-extended. */
5091 if (GET_CODE (x) != CONST_INT)
5092 output_operand_lossage ("invalid %%h value");
5094 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5098 /* Write the low-order 16 bits of a constant, sign-extended. */
5099 if (GET_CODE (x) != CONST_INT)
5100 output_operand_lossage ("invalid %%L value");
5102 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5103 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
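/* Illustrative example (not from the original source): for INTVAL (x)
   == 0x12348765, the low 16 bits are 0x8765; bit 15 is set, so
   subtracting 2 * 0x8000 sign-extends the field to -30875, the value
   expected for a low 16-bit displacement.  */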
5107 /* Write mask for ZAP insn. */
5108 if (GET_CODE (x) == CONST_DOUBLE)
5110 HOST_WIDE_INT mask = 0;
5111 HOST_WIDE_INT value;
5113 value = CONST_DOUBLE_LOW (x);
5114 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5119 value = CONST_DOUBLE_HIGH (x);
5120 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5123 mask |= (1 << (i + sizeof (int)));
5125 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5128 else if (GET_CODE (x) == CONST_INT)
5130 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5132 for (i = 0; i < 8; i++, value >>= 8)
5136 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5139 output_operand_lossage ("invalid %%m value");
5143 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5144 if (GET_CODE (x) != CONST_INT
5145 || (INTVAL (x) != 8 && INTVAL (x) != 16
5146 && INTVAL (x) != 32 && INTVAL (x) != 64))
5147 output_operand_lossage ("invalid %%M value");
5149 fprintf (file, "%s",
5150 (INTVAL (x) == 8 ? "b"
5151 : INTVAL (x) == 16 ? "w"
5152 : INTVAL (x) == 32 ? "l"
5157 /* Similar, except do it from the mask. */
5158 if (GET_CODE (x) == CONST_INT)
5160 HOST_WIDE_INT value = INTVAL (x);
5167 if (value == 0xffff)
5172 if (value == 0xffffffff)
5183 else if (HOST_BITS_PER_WIDE_INT == 32
5184 && GET_CODE (x) == CONST_DOUBLE
5185 && CONST_DOUBLE_LOW (x) == 0xffffffff
5186 && CONST_DOUBLE_HIGH (x) == 0)
5191 output_operand_lossage ("invalid %%U value");
5195 /* Write the constant value divided by 8 for little-endian mode or
5196 (56 - value) / 8 for big-endian mode. */
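/* Worked example (illustrative): on a little-endian target,
   INTVAL (x) == 16 prints 16 / 8 = 2; on a big-endian target the
   same value prints (56 - 16) / 8 = 5.  */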
if (GET_CODE (x) != CONST_INT
    || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
					       ? 56 : 64)
    || (INTVAL (x) & 7) != 0)
  output_operand_lossage ("invalid %%s value");

fprintf (file, HOST_WIDE_INT_PRINT_DEC,
	 WORDS_BIG_ENDIAN
	 ? (56 - INTVAL (x)) / 8
	 : INTVAL (x) / 8);
/* Same, except compute (64 - c) / 8.  */
if (GET_CODE (x) != CONST_INT
    || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
    || (INTVAL (x) & 7) != 0)
  output_operand_lossage ("invalid %%s value");
5219 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5224 /* On Unicos/Mk systems: use a DEX expression if the symbol
5225 clashes with a register name. */
5226 int dex = unicosmk_need_dex (x);
5228 fprintf (file, "DEX(%d)", dex);
5230 output_addr_const (file, x);
5234 case 'C': case 'D': case 'c': case 'd':
5235 /* Write out comparison name. */
5237 enum rtx_code c = GET_CODE (x);
5239 if (!COMPARISON_P (x))
5240 output_operand_lossage ("invalid %%C value");
5242 else if (code == 'D')
5243 c = reverse_condition (c);
5244 else if (code == 'c')
5245 c = swap_condition (c);
5246 else if (code == 'd')
5247 c = swap_condition (reverse_condition (c));
5250 fprintf (file, "ule");
5252 fprintf (file, "ult");
5253 else if (c == UNORDERED)
5254 fprintf (file, "un");
5256 fprintf (file, "%s", GET_RTX_NAME (c));
5261 /* Write the divide or modulus operator. */
5262 switch (GET_CODE (x))
5265 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5268 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5271 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5274 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5277 output_operand_lossage ("invalid %%E value");
5283 /* Write "_u" for unaligned access. */
5284 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5285 fprintf (file, "_u");
5289 if (GET_CODE (x) == REG)
5290 fprintf (file, "%s", reg_names[REGNO (x)]);
5291 else if (GET_CODE (x) == MEM)
5292 output_address (XEXP (x, 0));
5293 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5295 switch (XINT (XEXP (x, 0), 1))
5299 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5302 output_operand_lossage ("unknown relocation unspec");
5307 output_addr_const (file, x);
5311 output_operand_lossage ("invalid %%xn code");
5316 print_operand_address (FILE *file, rtx addr)
5319 HOST_WIDE_INT offset = 0;
5321 if (GET_CODE (addr) == AND)
5322 addr = XEXP (addr, 0);
5324 if (GET_CODE (addr) == PLUS
5325 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5327 offset = INTVAL (XEXP (addr, 1));
5328 addr = XEXP (addr, 0);
5331 if (GET_CODE (addr) == LO_SUM)
5333 const char *reloc16, *reloclo;
5334 rtx op1 = XEXP (addr, 1);
5336 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5338 op1 = XEXP (op1, 0);
5339 switch (XINT (op1, 1))
5343 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5347 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5350 output_operand_lossage ("unknown relocation unspec");
5354 output_addr_const (file, XVECEXP (op1, 0, 0));
5359 reloclo = "gprellow";
5360 output_addr_const (file, op1);
5364 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5366 addr = XEXP (addr, 0);
5367 switch (GET_CODE (addr))
5370 basereg = REGNO (addr);
5374 basereg = subreg_regno (addr);
5381 fprintf (file, "($%d)\t\t!%s", basereg,
5382 (basereg == 29 ? reloc16 : reloclo));
5386 switch (GET_CODE (addr))
5389 basereg = REGNO (addr);
5393 basereg = subreg_regno (addr);
5397 offset = INTVAL (addr);
5400 #if TARGET_ABI_OPEN_VMS
5402 fprintf (file, "%s", XSTR (addr, 0));
5406 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5407 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5408 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5409 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5410 INTVAL (XEXP (XEXP (addr, 0), 1)));
5418 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5421 /* Emit RTL insns to initialize the variable parts of a trampoline at
5422 TRAMP. FNADDR is an RTX for the address of the function's pure
5423 code. CXT is an RTX for the static chain value for the function.
5425 The three offset parameters are for the individual template's
5426 layout. A JMPOFS < 0 indicates that the trampoline does not
5427 contain instructions at all.
5429 We assume here that a function will be called many more times than
5430 its address is taken (e.g., it might be passed to qsort), so we
5431 take the trouble to initialize the "hint" field in the JMP insn.
5432 Note that the hint field is PC (new) + 4 * bits 13:0. */
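/* Sketch (illustrative, not part of the original code): the disabled
   hint computation below is equivalent to

     hint = ((fnaddr - (tramp + jmpofs + 4)) >> 2) & 0x3fff;

   i.e. the displacement from the insn following the JMP, counted in
   instructions and truncated to the 14-bit hint field.  */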
5435 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5436 int fnofs, int cxtofs, int jmpofs)
5438 rtx temp, temp1, addr;
5439 /* VMS really uses DImode pointers in memory at this point. */
5440 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5442 #ifdef POINTERS_EXTEND_UNSIGNED
5443 fnaddr = convert_memory_address (mode, fnaddr);
cxt = convert_memory_address (mode, cxt);
#endif
5447 /* Store function address and CXT. */
5448 addr = memory_address (mode, plus_constant (tramp, fnofs));
5449 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5450 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5451 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5453 /* This has been disabled since the hint only has a 32k range, and in
5454 no existing OS is the stack within 32k of the text segment. */
5455 if (0 && jmpofs >= 0)
5457 /* Compute hint value. */
5458 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5459 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5461 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5462 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5463 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5464 GEN_INT (0x3fff), 0);
5466 /* Merge in the hint. */
5467 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5468 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5469 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5470 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5472 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5475 #ifdef ENABLE_EXECUTE_STACK
5476 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
0, VOIDmode, 1, tramp, Pmode);
#endif
5481 emit_insn (gen_imb ());
5484 /* Determine where to put an argument to a function.
5485 Value is zero to push the argument on the stack,
5486 or a hard register in which to store the argument.
5488 MODE is the argument's machine mode.
5489 TYPE is the data type of the argument (as a tree).
This is null for libcalls where that information may not be available.
5492 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5493 the preceding args and about the function being called.
5494 NAMED is nonzero if this argument is a named parameter
5495 (otherwise it is an extra parameter matching an ellipsis).
5497 On Alpha the first 6 words of args are normally in registers
5498 and the rest are pushed. */
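/* Illustrative example of the usual OSF convention assumed here: for
   f (int a, double b, int c), A arrives in $16, B in $f17 and C in
   $18; each named argument consumes one slot in both register files,
   and a seventh word would go on the stack.  */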
5501 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5502 int named ATTRIBUTE_UNUSED)
5507 /* Don't get confused and pass small structures in FP registers. */
5508 if (type && AGGREGATE_TYPE_P (type))
5512 #ifdef ENABLE_CHECKING
/* With alpha_split_complex_arg, we shouldn't see any raw complex
   args here.  */
gcc_assert (!COMPLEX_MODE_P (mode));
#endif
5518 /* Set up defaults for FP operands passed in FP registers, and
5519 integral operands passed in integer registers. */
5520 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5526 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5527 the three platforms, so we can't avoid conditional compilation. */
5528 #if TARGET_ABI_OPEN_VMS
5530 if (mode == VOIDmode)
5531 return alpha_arg_info_reg_val (cum);
5533 num_args = cum.num_args;
5535 || targetm.calls.must_pass_in_stack (mode, type))
5538 #elif TARGET_ABI_UNICOSMK
5542 /* If this is the last argument, generate the call info word (CIW). */
5543 /* ??? We don't include the caller's line number in the CIW because
I don't know how to determine it if debug info is turned off.  */
5545 if (mode == VOIDmode)
5554 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5555 if (cum.reg_args_type[i])
5556 lo |= (1 << (7 - i));
5558 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5561 lo |= cum.num_reg_words;
5563 #if HOST_BITS_PER_WIDE_INT == 32
5564 hi = (cum.num_args << 20) | cum.num_arg_words;
#else
lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
  | ((HOST_WIDE_INT) cum.num_arg_words << 32);
hi = 0;
#endif
5570 ciw = immed_double_const (lo, hi, DImode);
5572 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5573 UNSPEC_UMK_LOAD_CIW);
5576 size = ALPHA_ARG_SIZE (mode, type, named);
5577 num_args = cum.num_reg_words;
5579 || cum.num_reg_words + size > 6
5580 || targetm.calls.must_pass_in_stack (mode, type))
5582 else if (type && TYPE_MODE (type) == BLKmode)
5586 reg1 = gen_rtx_REG (DImode, num_args + 16);
5587 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5589 /* The argument fits in two registers. Note that we still need to
5590 reserve a register for empty structures. */
5594 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5597 reg2 = gen_rtx_REG (DImode, num_args + 17);
5598 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5599 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5603 #elif TARGET_ABI_OSF
5609 /* VOID is passed as a special flag for "last argument". */
5610 if (type == void_type_node)
5612 else if (targetm.calls.must_pass_in_stack (mode, type))
#else
#error Unhandled ABI
#endif
5619 return gen_rtx_REG (mode, num_args + basereg);
5623 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5624 enum machine_mode mode ATTRIBUTE_UNUSED,
5625 tree type ATTRIBUTE_UNUSED,
5626 bool named ATTRIBUTE_UNUSED)
5630 #if TARGET_ABI_OPEN_VMS
5631 if (cum->num_args < 6
5632 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5633 words = 6 - cum->num_args;
5634 #elif TARGET_ABI_UNICOSMK
5635 /* Never any split arguments. */
5636 #elif TARGET_ABI_OSF
if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
  words = 6 - *cum;
#else
#error Unhandled ABI
#endif
5643 return words * UNITS_PER_WORD;
5647 /* Return true if TYPE must be returned in memory, instead of in registers. */
5650 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
5652 enum machine_mode mode = VOIDmode;
5657 mode = TYPE_MODE (type);
5659 /* All aggregates are returned in memory. */
5660 if (AGGREGATE_TYPE_P (type))
5664 size = GET_MODE_SIZE (mode);
5665 switch (GET_MODE_CLASS (mode))
5667 case MODE_VECTOR_FLOAT:
5668 /* Pass all float vectors in memory, like an aggregate. */
5671 case MODE_COMPLEX_FLOAT:
5672 /* We judge complex floats on the size of their element,
5673 not the size of the whole type. */
5674 size = GET_MODE_UNIT_SIZE (mode);
5679 case MODE_COMPLEX_INT:
5680 case MODE_VECTOR_INT:
5684 /* ??? We get called on all sorts of random stuff from
5685 aggregate_value_p. We must return something, but it's not
clear what's safe to return.  Pretend it's a struct I guess.  */
5691 /* Otherwise types must fit in one register. */
5692 return size > UNITS_PER_WORD;
5695 /* Return true if TYPE should be passed by invisible reference. */
5698 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5699 enum machine_mode mode,
5700 tree type ATTRIBUTE_UNUSED,
5701 bool named ATTRIBUTE_UNUSED)
5703 return mode == TFmode || mode == TCmode;
5706 /* Define how to find the value returned by a function. VALTYPE is the
5707 data type of the value (as a tree). If the precise function being
5708 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5709 MODE is set instead of VALTYPE for libcalls.
5711 On Alpha the value is found in $0 for integer functions and
5712 $f0 for floating-point functions. */
5715 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5716 enum machine_mode mode)
5718 unsigned int regnum, dummy;
5719 enum mode_class class;
5721 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5724 mode = TYPE_MODE (valtype);
5726 class = GET_MODE_CLASS (mode);
5730 PROMOTE_MODE (mode, dummy, valtype);
5733 case MODE_COMPLEX_INT:
5734 case MODE_VECTOR_INT:
5742 case MODE_COMPLEX_FLOAT:
5744 enum machine_mode cmode = GET_MODE_INNER (mode);
5746 return gen_rtx_PARALLEL
5749 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5751 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5752 GEN_INT (GET_MODE_SIZE (cmode)))));
5759 return gen_rtx_REG (mode, regnum);
5762 /* TCmode complex values are passed by invisible reference. We
5763 should not split these values. */
5766 alpha_split_complex_arg (tree type)
5768 return TYPE_MODE (type) != TCmode;
5772 alpha_build_builtin_va_list (void)
5774 tree base, ofs, space, record, type_decl;
5776 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5777 return ptr_type_node;
5779 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5780 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5781 TREE_CHAIN (record) = type_decl;
5782 TYPE_NAME (record) = type_decl;
5784 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5786 /* Dummy field to prevent alignment warnings. */
5787 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5788 DECL_FIELD_CONTEXT (space) = record;
5789 DECL_ARTIFICIAL (space) = 1;
5790 DECL_IGNORED_P (space) = 1;
5792 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5794 DECL_FIELD_CONTEXT (ofs) = record;
5795 TREE_CHAIN (ofs) = space;
5797 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5799 DECL_FIELD_CONTEXT (base) = record;
5800 TREE_CHAIN (base) = ofs;
5802 TYPE_FIELDS (record) = base;
5803 layout_type (record);
5805 va_list_gpr_counter_field = ofs;
5810 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5811 and constant additions. */
5814 va_list_skip_additions (tree lhs)
5818 if (TREE_CODE (lhs) != SSA_NAME)
5823 stmt = SSA_NAME_DEF_STMT (lhs);
5825 if (TREE_CODE (stmt) == PHI_NODE)
5828 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT
5829 || GIMPLE_STMT_OPERAND (stmt, 0) != lhs)
5832 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
5833 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5834 rhs = TREE_OPERAND (rhs, 0);
5836 if ((TREE_CODE (rhs) != NOP_EXPR
5837 && TREE_CODE (rhs) != CONVERT_EXPR
5838 && (TREE_CODE (rhs) != PLUS_EXPR
5839 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5840 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5841 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5844 lhs = TREE_OPERAND (rhs, 0);
5848 /* Check if LHS = RHS statement is
LHS = *(ap.__base + ap.__offset + cst), or
LHS = *(ap.__base
	+ ((ap.__offset + cst <= 47)
	   ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5854 If the former, indicate that GPR registers are needed,
5855 if the latter, indicate that FPR registers are needed.
Also look for LHS = (*ptr).field, where ptr is one of the forms
given above.
5860 On alpha, cfun->va_list_gpr_size is used as size of the needed
5861 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5862 registers are needed and bit 1 set if FPR registers are needed.
5863 Return true if va_list references should not be scanned for the
5864 current statement. */
5867 alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
5869 tree base, offset, arg1, arg2;
5872 while (handled_component_p (rhs))
5873 rhs = TREE_OPERAND (rhs, 0);
5874 if (TREE_CODE (rhs) != INDIRECT_REF
5875 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5878 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5879 if (lhs == NULL_TREE
5880 || TREE_CODE (lhs) != PLUS_EXPR)
5883 base = TREE_OPERAND (lhs, 0);
5884 if (TREE_CODE (base) == SSA_NAME)
5885 base = va_list_skip_additions (base);
5887 if (TREE_CODE (base) != COMPONENT_REF
5888 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5890 base = TREE_OPERAND (lhs, 0);
5891 if (TREE_CODE (base) == SSA_NAME)
5892 base = va_list_skip_additions (base);
5894 if (TREE_CODE (base) != COMPONENT_REF
5895 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5901 base = get_base_address (base);
5902 if (TREE_CODE (base) != VAR_DECL
5903 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5906 offset = TREE_OPERAND (lhs, offset_arg);
5907 if (TREE_CODE (offset) == SSA_NAME)
5908 offset = va_list_skip_additions (offset);
5910 if (TREE_CODE (offset) == PHI_NODE)
5914 if (PHI_NUM_ARGS (offset) != 2)
5917 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
5918 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
5919 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5925 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5928 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
5931 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
5932 if (TREE_CODE (arg2) == MINUS_EXPR)
5934 if (sub < -48 || sub > -32)
5937 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
5941 if (TREE_CODE (arg1) == SSA_NAME)
5942 arg1 = va_list_skip_additions (arg1);
5944 if (TREE_CODE (arg1) != COMPONENT_REF
5945 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5946 || get_base_address (arg1) != base)
5949 /* Need floating point regs. */
5950 cfun->va_list_fpr_size |= 2;
5952 else if (TREE_CODE (offset) != COMPONENT_REF
5953 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
5954 || get_base_address (offset) != base)
5957 /* Need general regs. */
5958 cfun->va_list_fpr_size |= 1;
5962 si->va_list_escapes = true;
/* Perform any actions needed for a function that is receiving a
5968 variable number of arguments. */
5971 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
5972 tree type, int *pretend_size, int no_rtl)
5974 CUMULATIVE_ARGS cum = *pcum;
5976 /* Skip the current argument. */
5977 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
5979 #if TARGET_ABI_UNICOSMK
5980 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
5981 arguments on the stack. Unfortunately, it doesn't always store the first
5982 one (i.e. the one that arrives in $16 or $f16). This is not a problem
5983 with stdargs as we always have at least one named argument there. */
5984 if (cum.num_reg_words < 6)
5988 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
5989 emit_insn (gen_arg_home_umk ());
5993 #elif TARGET_ABI_OPEN_VMS
5994 /* For VMS, we allocate space for all 6 arg registers plus a count.
5996 However, if NO registers need to be saved, don't allocate any space.
5997 This is not only because we won't need the space, but because AP
5998 includes the current_pretend_args_size and we don't want to mess up
5999 any ap-relative addresses already made. */
6000 if (cum.num_args < 6)
6004 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6005 emit_insn (gen_arg_home ());
6007 *pretend_size = 7 * UNITS_PER_WORD;
6010 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6011 only push those that are remaining. However, if NO registers need to
6012 be saved, don't allocate any space. This is not only because we won't
6013 need the space, but because AP includes the current_pretend_args_size
6014 and we don't want to mess up any ap-relative addresses already made.
6016 If we are not to use the floating-point registers, save the integer
6017 registers where we would put the floating-point registers. This is
6018 not the most efficient way to implement varargs with just one register
6019 class, but it isn't worth doing anything more efficient in this rare
6026 int count, set = get_varargs_alias_set ();
6029 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6030 if (count > 6 - cum)
6033 /* Detect whether integer registers or floating-point registers
6034 are needed by the detected va_arg statements. See above for
how these values are computed.  Note that the "escape" value
is VA_LIST_MAX_FPR_SIZE, i.e. 255, which has both of these
bits set.  */
6038 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6040 if (cfun->va_list_fpr_size & 1)
6042 tmp = gen_rtx_MEM (BLKmode,
6043 plus_constant (virtual_incoming_args_rtx,
6044 (cum + 6) * UNITS_PER_WORD));
6045 MEM_NOTRAP_P (tmp) = 1;
6046 set_mem_alias_set (tmp, set);
6047 move_block_from_reg (16 + cum, tmp, count);
6050 if (cfun->va_list_fpr_size & 2)
6052 tmp = gen_rtx_MEM (BLKmode,
6053 plus_constant (virtual_incoming_args_rtx,
6054 cum * UNITS_PER_WORD));
6055 MEM_NOTRAP_P (tmp) = 1;
6056 set_mem_alias_set (tmp, set);
move_block_from_reg (16 + cum + TARGET_FPREGS * 32, tmp, count);
6060 *pretend_size = 12 * UNITS_PER_WORD;
6065 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6067 HOST_WIDE_INT offset;
6068 tree t, offset_field, base_field;
6070 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6073 if (TARGET_ABI_UNICOSMK)
6074 std_expand_builtin_va_start (valist, nextarg);
6076 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6077 up by 48, storing fp arg registers in the first 48 bytes, and the
6078 integer arg registers in the next 48 bytes. This is only done,
6079 however, if any integer registers need to be stored.
6081 If no integer registers need be stored, then we must subtract 48
6082 in order to account for the integer arg registers which are counted
6083 in argsize above, but which are not actually stored on the stack.
6084 Must further be careful here about structures straddling the last
6085 integer argument register; that futzes with pretend_args_size,
6086 which changes the meaning of AP. */
6089 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6091 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6093 if (TARGET_ABI_OPEN_VMS)
6095 nextarg = plus_constant (nextarg, offset);
6096 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6097 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist,
6098 make_tree (ptr_type_node, nextarg));
6099 TREE_SIDE_EFFECTS (t) = 1;
6101 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6105 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6106 offset_field = TREE_CHAIN (base_field);
6108 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6109 valist, base_field, NULL_TREE);
6110 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6111 valist, offset_field, NULL_TREE);
6113 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6114 t = build2 (PLUS_EXPR, ptr_type_node, t,
6115 build_int_cst (NULL_TREE, offset));
6116 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (base_field), base_field, t);
6117 TREE_SIDE_EFFECTS (t) = 1;
6118 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6120 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6121 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset_field),
6123 TREE_SIDE_EFFECTS (t) = 1;
6124 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6129 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6131 tree type_size, ptr_type, addend, t, addr, internal_post;
6133 /* If the type could not be passed in registers, skip the block
6134 reserved for the registers. */
6135 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6137 t = build_int_cst (TREE_TYPE (offset), 6*8);
6138 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset), offset,
6139 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
6140 gimplify_and_add (t, pre_p);
6144 ptr_type = build_pointer_type (type);
6146 if (TREE_CODE (type) == COMPLEX_TYPE)
6148 tree real_part, imag_part, real_temp;
6150 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6153 /* Copy the value into a new temporary, lest the formal temporary
6154 be reused out from under us. */
6155 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6157 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6160 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6162 else if (TREE_CODE (type) == REAL_TYPE)
6164 tree fpaddend, cond, fourtyeight;
6166 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6167 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6168 addend, fourtyeight);
6169 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6170 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6174 /* Build the final address and force that value into a temporary. */
6175 addr = build2 (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6176 fold_convert (ptr_type, addend));
6177 internal_post = NULL;
6178 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6179 append_to_statement_list (internal_post, pre_p);
6181 /* Update the offset field. */
6182 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6183 if (type_size == NULL || TREE_OVERFLOW (type_size))
6187 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6188 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6189 t = size_binop (MULT_EXPR, t, size_int (8));
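/* E.g. a 12-byte type advances the offset by (12 + 7) / 8 * 8 == 16
   bytes, keeping __offset a multiple of 8.  */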
6191 t = fold_convert (TREE_TYPE (offset), t);
6192 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset,
6193 build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6194 gimplify_and_add (t, pre_p);
6196 return build_va_arg_indirect_ref (addr);
6200 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6202 tree offset_field, base_field, offset, base, t, r;
6205 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6206 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6208 base_field = TYPE_FIELDS (va_list_type_node);
6209 offset_field = TREE_CHAIN (base_field);
6210 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6211 valist, base_field, NULL_TREE);
6212 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6213 valist, offset_field, NULL_TREE);
6215 /* Pull the fields of the structure out into temporaries. Since we never
6216 modify the base field, we can use a formal temporary. Sign-extend the
6217 offset field so that it's the proper width for pointer arithmetic. */
6218 base = get_formal_tmp_var (base_field, pre_p);
6220 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6221 offset = get_initialized_tmp_var (t, pre_p, NULL);
6223 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6225 type = build_pointer_type (type);
6227 /* Find the value. Note that this will be a stable indirection, or
6228 a composite of stable indirections in the case of complex. */
6229 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6231 /* Stuff the offset temporary back into its field. */
6232 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset_field,
6233 fold_convert (TREE_TYPE (offset_field), offset));
6234 gimplify_and_add (t, pre_p);
6237 r = build_va_arg_indirect_ref (r);
6246 ALPHA_BUILTIN_CMPBGE,
6247 ALPHA_BUILTIN_EXTBL,
6248 ALPHA_BUILTIN_EXTWL,
6249 ALPHA_BUILTIN_EXTLL,
6250 ALPHA_BUILTIN_EXTQL,
6251 ALPHA_BUILTIN_EXTWH,
6252 ALPHA_BUILTIN_EXTLH,
6253 ALPHA_BUILTIN_EXTQH,
6254 ALPHA_BUILTIN_INSBL,
6255 ALPHA_BUILTIN_INSWL,
6256 ALPHA_BUILTIN_INSLL,
6257 ALPHA_BUILTIN_INSQL,
6258 ALPHA_BUILTIN_INSWH,
6259 ALPHA_BUILTIN_INSLH,
6260 ALPHA_BUILTIN_INSQH,
6261 ALPHA_BUILTIN_MSKBL,
6262 ALPHA_BUILTIN_MSKWL,
6263 ALPHA_BUILTIN_MSKLL,
6264 ALPHA_BUILTIN_MSKQL,
6265 ALPHA_BUILTIN_MSKWH,
6266 ALPHA_BUILTIN_MSKLH,
6267 ALPHA_BUILTIN_MSKQH,
6268 ALPHA_BUILTIN_UMULH,
6270 ALPHA_BUILTIN_ZAPNOT,
6271 ALPHA_BUILTIN_AMASK,
6272 ALPHA_BUILTIN_IMPLVER,
6274 ALPHA_BUILTIN_THREAD_POINTER,
6275 ALPHA_BUILTIN_SET_THREAD_POINTER,
6278 ALPHA_BUILTIN_MINUB8,
6279 ALPHA_BUILTIN_MINSB8,
6280 ALPHA_BUILTIN_MINUW4,
6281 ALPHA_BUILTIN_MINSW4,
6282 ALPHA_BUILTIN_MAXUB8,
6283 ALPHA_BUILTIN_MAXSB8,
6284 ALPHA_BUILTIN_MAXUW4,
6285 ALPHA_BUILTIN_MAXSW4,
6289 ALPHA_BUILTIN_UNPKBL,
6290 ALPHA_BUILTIN_UNPKBW,
6295 ALPHA_BUILTIN_CTPOP,
6300 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6301 CODE_FOR_builtin_cmpbge,
6302 CODE_FOR_builtin_extbl,
6303 CODE_FOR_builtin_extwl,
6304 CODE_FOR_builtin_extll,
6305 CODE_FOR_builtin_extql,
6306 CODE_FOR_builtin_extwh,
6307 CODE_FOR_builtin_extlh,
6308 CODE_FOR_builtin_extqh,
6309 CODE_FOR_builtin_insbl,
6310 CODE_FOR_builtin_inswl,
6311 CODE_FOR_builtin_insll,
6312 CODE_FOR_builtin_insql,
6313 CODE_FOR_builtin_inswh,
6314 CODE_FOR_builtin_inslh,
6315 CODE_FOR_builtin_insqh,
6316 CODE_FOR_builtin_mskbl,
6317 CODE_FOR_builtin_mskwl,
6318 CODE_FOR_builtin_mskll,
6319 CODE_FOR_builtin_mskql,
6320 CODE_FOR_builtin_mskwh,
6321 CODE_FOR_builtin_msklh,
6322 CODE_FOR_builtin_mskqh,
6323 CODE_FOR_umuldi3_highpart,
6324 CODE_FOR_builtin_zap,
6325 CODE_FOR_builtin_zapnot,
6326 CODE_FOR_builtin_amask,
6327 CODE_FOR_builtin_implver,
6328 CODE_FOR_builtin_rpcc,
6333 CODE_FOR_builtin_minub8,
6334 CODE_FOR_builtin_minsb8,
6335 CODE_FOR_builtin_minuw4,
6336 CODE_FOR_builtin_minsw4,
6337 CODE_FOR_builtin_maxub8,
6338 CODE_FOR_builtin_maxsb8,
6339 CODE_FOR_builtin_maxuw4,
6340 CODE_FOR_builtin_maxsw4,
6341 CODE_FOR_builtin_perr,
6342 CODE_FOR_builtin_pklb,
6343 CODE_FOR_builtin_pkwb,
6344 CODE_FOR_builtin_unpkbl,
6345 CODE_FOR_builtin_unpkbw,
6350 CODE_FOR_popcountdi2
struct alpha_builtin_def
{
  const char *name;
  enum alpha_builtin code;
  unsigned int target_mask;
  bool is_const;
};
6361 static struct alpha_builtin_def const zero_arg_builtins[] = {
6362 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6363 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6366 static struct alpha_builtin_def const one_arg_builtins[] = {
6367 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6368 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6369 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6370 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6371 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6372 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6373 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6374 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6377 static struct alpha_builtin_def const two_arg_builtins[] = {
6378 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6379 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6380 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6381 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6382 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6383 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6384 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6385 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6386 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6387 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6388 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6389 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6390 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6391 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6392 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6393 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6394 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6395 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6396 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6397 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6398 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6399 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6400 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6401 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6402 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6403 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6404 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6405 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6406 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6407 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6408 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6409 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6410 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6411 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6414 static GTY(()) tree alpha_v8qi_u;
6415 static GTY(()) tree alpha_v8qi_s;
6416 static GTY(()) tree alpha_v4hi_u;
6417 static GTY(()) tree alpha_v4hi_s;
6419 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6420 functions pointed to by P, with function type FTYPE. */
6423 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6429 for (i = 0; i < count; ++i, ++p)
6430 if ((target_flags & p->target_mask) == p->target_mask)
6432 decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6435 TREE_READONLY (decl) = 1;
6436 TREE_NOTHROW (decl) = 1;
6442 alpha_init_builtins (void)
6444 tree dimode_integer_type_node;
6447 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6449 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6450 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6453 ftype = build_function_type_list (dimode_integer_type_node,
6454 dimode_integer_type_node, NULL_TREE);
6455 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6458 ftype = build_function_type_list (dimode_integer_type_node,
6459 dimode_integer_type_node,
6460 dimode_integer_type_node, NULL_TREE);
6461 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6464 ftype = build_function_type (ptr_type_node, void_list_node);
6465 decl = add_builtin_function ("__builtin_thread_pointer", ftype,
6466 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6468 TREE_NOTHROW (decl) = 1;
6470 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6471 decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
6472 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6474 TREE_NOTHROW (decl) = 1;
6476 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6477 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6478 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6479 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6482 /* Expand an expression EXP that calls a built-in function,
6483 with result going to TARGET if that's convenient
6484 (and in mode MODE if that's convenient).
6485 SUBTARGET may be used as the target for computing one of EXP's operands.
6486 IGNORE is nonzero if the value is to be ignored. */
6489 alpha_expand_builtin (tree exp, rtx target,
6490 rtx subtarget ATTRIBUTE_UNUSED,
6491 enum machine_mode mode ATTRIBUTE_UNUSED,
6492 int ignore ATTRIBUTE_UNUSED)
6496 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6497 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6499 call_expr_arg_iterator iter;
6500 enum insn_code icode;
6501 rtx op[MAX_ARGS], pat;
6505 if (fcode >= ALPHA_BUILTIN_max)
6506 internal_error ("bad builtin fcode");
6507 icode = code_for_builtin[fcode];
6509 internal_error ("bad builtin fcode");
6511 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6514 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6516 const struct insn_operand_data *insn_op;
6518 if (arg == error_mark_node)
if (arity >= MAX_ARGS)
6523 insn_op = &insn_data[icode].operand[arity + nonvoid];
6525 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6527 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6528 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6534 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6536 || GET_MODE (target) != tmode
6537 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6538 target = gen_reg_rtx (tmode);
6544 pat = GEN_FCN (icode) (target);
6548 pat = GEN_FCN (icode) (target, op[0]);
6550 pat = GEN_FCN (icode) (op[0]);
6553 pat = GEN_FCN (icode) (target, op[0], op[1]);
/* Several bits below assume HWI >= 64 bits.  This should be enforced
   by the #error check below.  */
#if HOST_BITS_PER_WIDE_INT < 64
# error "HOST_WIDE_INT too small"
#endif
6575 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6576 with an 8-bit output vector. OPINT contains the integer operands; bit N
6577 of OP_CONST is set if OPINT[N] is valid. */
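/* Worked example (illustrative): with opint[0] == 0x0000000000000003
   and opint[1] == 0x0000000000000102, byte 0 satisfies 0x03 >= 0x02
   and bytes 2-7 satisfy 0x00 >= 0x00, while byte 1 fails 0x00 >= 0x01,
   so the folded result is 0xfd.  */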
6580 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6585 for (i = 0, val = 0; i < 8; ++i)
6587 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6588 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6592 return build_int_cst (long_integer_type_node, val);
6594 else if (op_const == 2 && opint[1] == 0)
6595 return build_int_cst (long_integer_type_node, 0xff);
6599 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6600 specialized form of an AND operation. Other byte manipulation instructions
6601 are defined in terms of this instruction, so this is also used as a
6602 subroutine for other builtins.
6604 OP contains the tree operands; OPINT contains the extracted integer values.
Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
6606 OPINT may be considered. */
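/* Worked example (illustrative): zapnot with opint[0] ==
   0x1122334455667788 and opint[1] == 0x0f keeps the four low-order
   bytes and folds to 0x0000000055667788.  */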
6609 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6614 unsigned HOST_WIDE_INT mask = 0;
6617 for (i = 0; i < 8; ++i)
6618 if ((opint[1] >> i) & 1)
6619 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6622 return build_int_cst (long_integer_type_node, opint[0] & mask);
6625 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6626 build_int_cst (long_integer_type_node, mask));
6628 else if ((op_const & 1) && opint[0] == 0)
6629 return build_int_cst (long_integer_type_node, 0);
6633 /* Fold the builtins for the EXT family of instructions. */
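/* Illustrative example, assuming little-endian byte numbering: extwl
   with a byte offset of 2 folds to (opint[0] >> 16) & 0xffff, the
   16-bit field that starts at byte 2.  */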
6636 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6637 long op_const, unsigned HOST_WIDE_INT bytemask,
6641 tree *zap_op = NULL;
6645 unsigned HOST_WIDE_INT loc;
6648 if (BYTES_BIG_ENDIAN)
6656 unsigned HOST_WIDE_INT temp = opint[0];
6669 opint[1] = bytemask;
6670 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6673 /* Fold the builtins for the INS family of instructions. */
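/* Illustrative example, assuming little-endian byte numbering: inswl
   with a byte offset of 2 folds to (opint[0] & 0xffff) << 16, placing
   the low word of the first operand at byte 2.  */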
6676 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6677 long op_const, unsigned HOST_WIDE_INT bytemask,
6680 if ((op_const & 1) && opint[0] == 0)
6681 return build_int_cst (long_integer_type_node, 0);
6685 unsigned HOST_WIDE_INT temp, loc, byteloc;
6686 tree *zap_op = NULL;
6689 if (BYTES_BIG_ENDIAN)
6696 byteloc = (64 - (loc * 8)) & 0x3f;
6713 opint[1] = bytemask;
6714 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6721 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6722 long op_const, unsigned HOST_WIDE_INT bytemask,
6727 unsigned HOST_WIDE_INT loc;
6730 if (BYTES_BIG_ENDIAN)
6737 opint[1] = bytemask ^ 0xff;
6740 return alpha_fold_builtin_zapnot (op, opint, op_const);
6744 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6750 unsigned HOST_WIDE_INT l;
6753 mul_double (opint[0], 0, opint[1], 0, &l, &h);
#if HOST_BITS_PER_WIDE_INT > 64
# error "unhandled HOST_WIDE_INT size"
#endif
6759 return build_int_cst (long_integer_type_node, h);
6763 opint[1] = opint[0];
6766 /* Note that (X*1) >> 64 == 0. */
6767 if (opint[1] == 0 || opint[1] == 1)
6768 return build_int_cst (long_integer_type_node, 0);
6775 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6777 tree op0 = fold_convert (vtype, op[0]);
6778 tree op1 = fold_convert (vtype, op[1]);
6779 tree val = fold_build2 (code, vtype, op0, op1);
6780 return fold_convert (long_integer_type_node, val);
6784 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6786 unsigned HOST_WIDE_INT temp = 0;
6792 for (i = 0; i < 8; ++i)
6794 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6795 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6802 return build_int_cst (long_integer_type_node, temp);
6806 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6808 unsigned HOST_WIDE_INT temp;
6813 temp = opint[0] & 0xff;
6814 temp |= (opint[0] >> 24) & 0xff00;
6816 return build_int_cst (long_integer_type_node, temp);
6820 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6822 unsigned HOST_WIDE_INT temp;
6827 temp = opint[0] & 0xff;
6828 temp |= (opint[0] >> 8) & 0xff00;
6829 temp |= (opint[0] >> 16) & 0xff0000;
6830 temp |= (opint[0] >> 24) & 0xff000000;
6832 return build_int_cst (long_integer_type_node, temp);
6836 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6838 unsigned HOST_WIDE_INT temp;
6843 temp = opint[0] & 0xff;
6844 temp |= (opint[0] & 0xff00) << 24;
6846 return build_int_cst (long_integer_type_node, temp);
6850 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6852 unsigned HOST_WIDE_INT temp;
6857 temp = opint[0] & 0xff;
6858 temp |= (opint[0] & 0x0000ff00) << 8;
6859 temp |= (opint[0] & 0x00ff0000) << 16;
6860 temp |= (opint[0] & 0xff000000) << 24;
6862 return build_int_cst (long_integer_type_node, temp);
6866 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6868 unsigned HOST_WIDE_INT temp;
6876 temp = exact_log2 (opint[0] & -opint[0]);
6878 return build_int_cst (long_integer_type_node, temp);
6882 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6884 unsigned HOST_WIDE_INT temp;
6892 temp = 64 - floor_log2 (opint[0]) - 1;
6894 return build_int_cst (long_integer_type_node, temp);
6898 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6900 unsigned HOST_WIDE_INT temp, op;
6908 temp++, op &= op - 1;
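/* Each "op &= op - 1" clears the lowest set bit, so the loop iterates
   once per set bit (Kernighan's population-count trick); e.g.
   op == 0xb takes three iterations.  */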
6910 return build_int_cst (long_integer_type_node, temp);
6913 /* Fold one of our builtin functions. */
6916 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6918 tree op[MAX_ARGS], t;
6919 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6920 long op_const = 0, arity = 0;
6922 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6924 tree arg = TREE_VALUE (t);
6925 if (arg == error_mark_node)
6927 if (arity >= MAX_ARGS)
6932 if (TREE_CODE (arg) == INTEGER_CST)
6934 op_const |= 1L << arity;
6935 opint[arity] = int_cst_value (arg);
6939 switch (DECL_FUNCTION_CODE (fndecl))
6941 case ALPHA_BUILTIN_CMPBGE:
6942 return alpha_fold_builtin_cmpbge (opint, op_const);
6944 case ALPHA_BUILTIN_EXTBL:
6945 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6946 case ALPHA_BUILTIN_EXTWL:
6947 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6948 case ALPHA_BUILTIN_EXTLL:
6949 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6950 case ALPHA_BUILTIN_EXTQL:
6951 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6952 case ALPHA_BUILTIN_EXTWH:
6953 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6954 case ALPHA_BUILTIN_EXTLH:
6955 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6956 case ALPHA_BUILTIN_EXTQH:
6957 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6959 case ALPHA_BUILTIN_INSBL:
6960 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
6961 case ALPHA_BUILTIN_INSWL:
6962 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
6963 case ALPHA_BUILTIN_INSLL:
6964 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
6965 case ALPHA_BUILTIN_INSQL:
6966 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
6967 case ALPHA_BUILTIN_INSWH:
6968 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
6969 case ALPHA_BUILTIN_INSLH:
6970 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
6971 case ALPHA_BUILTIN_INSQH:
6972 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
6974 case ALPHA_BUILTIN_MSKBL:
6975 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
6976 case ALPHA_BUILTIN_MSKWL:
6977 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
6978 case ALPHA_BUILTIN_MSKLL:
6979 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
6980 case ALPHA_BUILTIN_MSKQL:
6981 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
6982 case ALPHA_BUILTIN_MSKWH:
6983 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
6984 case ALPHA_BUILTIN_MSKLH:
6985 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
6986 case ALPHA_BUILTIN_MSKQH:
6987 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
6989 case ALPHA_BUILTIN_UMULH:
6990 return alpha_fold_builtin_umulh (opint, op_const);
6992 case ALPHA_BUILTIN_ZAP:
6995 case ALPHA_BUILTIN_ZAPNOT:
6996 return alpha_fold_builtin_zapnot (op, opint, op_const);
6998 case ALPHA_BUILTIN_MINUB8:
6999 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7000 case ALPHA_BUILTIN_MINSB8:
7001 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7002 case ALPHA_BUILTIN_MINUW4:
7003 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7004 case ALPHA_BUILTIN_MINSW4:
7005 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7006 case ALPHA_BUILTIN_MAXUB8:
7007 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7008 case ALPHA_BUILTIN_MAXSB8:
7009 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7010 case ALPHA_BUILTIN_MAXUW4:
7011 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7012 case ALPHA_BUILTIN_MAXSW4:
7013 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7015 case ALPHA_BUILTIN_PERR:
7016 return alpha_fold_builtin_perr (opint, op_const);
7017 case ALPHA_BUILTIN_PKLB:
7018 return alpha_fold_builtin_pklb (opint, op_const);
7019 case ALPHA_BUILTIN_PKWB:
7020 return alpha_fold_builtin_pkwb (opint, op_const);
7021 case ALPHA_BUILTIN_UNPKBL:
7022 return alpha_fold_builtin_unpkbl (opint, op_const);
7023 case ALPHA_BUILTIN_UNPKBW:
7024 return alpha_fold_builtin_unpkbw (opint, op_const);
7026 case ALPHA_BUILTIN_CTTZ:
7027 return alpha_fold_builtin_cttz (opint, op_const);
7028 case ALPHA_BUILTIN_CTLZ:
7029 return alpha_fold_builtin_ctlz (opint, op_const);
7030 case ALPHA_BUILTIN_CTPOP:
7031 return alpha_fold_builtin_ctpop (opint, op_const);
7033 case ALPHA_BUILTIN_AMASK:
7034 case ALPHA_BUILTIN_IMPLVER:
7035 case ALPHA_BUILTIN_RPCC:
7036 case ALPHA_BUILTIN_THREAD_POINTER:
7037 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7038 /* None of these are foldable at compile-time. */
7044 /* This page contains routines that are used to determine what the function
7045 prologue and epilogue code will do and write them out. */
7047 /* Compute the size of the save area in the stack. */
7049 /* These variables are used for communication between the following functions.
7050 They indicate various things about the current function being compiled
7051 that are used to tell what kind of prologue, epilogue and procedure
7052 descriptor to generate. */
/* The kind of procedure (null, register, or stack) we need.  */
7055 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7056 static enum alpha_procedure_types alpha_procedure_type;
7058 /* Register number (either FP or SP) that is used to unwind the frame. */
7059 static int vms_unwind_regno;
7061 /* Register number used to save FP. We need not have one for RA since
7062 we don't modify it for register procedures. This is only defined
7063 for register frame procedures. */
7064 static int vms_save_fp_regno;
7066 /* Register number used to reference objects off our PV. */
7067 static int vms_base_regno;
7069 /* Compute register masks for saved registers. */
7072 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7074 unsigned long imask = 0;
7075 unsigned long fmask = 0;
7078 /* When outputting a thunk, we don't have valid register life info,
but assemble_start_function wants to output .frame and .mask
directives.  */
7081 if (current_function_is_thunk)
7088 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7089 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7091 /* One for every register we have to save. */
7092 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7093 if (! fixed_regs[i] && ! call_used_regs[i]
7094 && df_regs_ever_live_p (i) && i != REG_RA
7095 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7098 imask |= (1UL << i);
7100 fmask |= (1UL << (i - 32));
7103 /* We need to restore these for the handler. */
7104 if (current_function_calls_eh_return)
7108 unsigned regno = EH_RETURN_DATA_REGNO (i);
7109 if (regno == INVALID_REGNUM)
7111 imask |= 1UL << regno;
7115 /* If any register spilled, then spill the return address also. */
7116 /* ??? This is required by the Digital stack unwind specification
7117 and isn't needed if we're doing Dwarf2 unwinding. */
7118 if (imask || fmask || alpha_ra_ever_killed ())
7119 imask |= (1UL << REG_RA);
7126 alpha_sa_size (void)
7128 unsigned long mask[2];
7132 alpha_sa_mask (&mask[0], &mask[1]);
7134 if (TARGET_ABI_UNICOSMK)
7136 if (mask[0] || mask[1])
7141 for (j = 0; j < 2; ++j)
7142 for (i = 0; i < 32; ++i)
7143 if ((mask[j] >> i) & 1)
7147 if (TARGET_ABI_UNICOSMK)
7149 /* We might not need to generate a frame if we don't make any calls
7150 (including calls to __T3E_MISMATCH if this is a vararg function),
7151 don't have any local variables which require stack slots, don't
use alloca and have not determined that we need a frame for other
reasons.  */
7155 alpha_procedure_type
7156 = (sa_size || get_frame_size() != 0
7157 || current_function_outgoing_args_size
7158 || current_function_stdarg || current_function_calls_alloca
7159 || frame_pointer_needed)
7160 ? PT_STACK : PT_REGISTER;
7162 /* Always reserve space for saving callee-saved registers if we
7163 need a frame as required by the calling convention. */
7164 if (alpha_procedure_type == PT_STACK)
7167 else if (TARGET_ABI_OPEN_VMS)
7169 /* Start by assuming we can use a register procedure if we don't
7170 make any calls (REG_RA not used) or need to save any
7171 registers and a stack procedure if we do. */
7172 if ((mask[0] >> REG_RA) & 1)
7173 alpha_procedure_type = PT_STACK;
7174 else if (get_frame_size() != 0)
7175 alpha_procedure_type = PT_REGISTER;
7177 alpha_procedure_type = PT_NULL;
7179 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7180 made the final decision on stack procedure vs register procedure. */
7181 if (alpha_procedure_type == PT_STACK)
7184 /* Decide whether to refer to objects off our PV via FP or PV.
7185 If we need FP for something else or if we receive a nonlocal
7186 goto (which expects PV to contain the value), we must use PV.
7187 Otherwise, start by assuming we can use FP. */
7190 = (frame_pointer_needed
7191 || current_function_has_nonlocal_label
7192 || alpha_procedure_type == PT_STACK
7193 || current_function_outgoing_args_size)
7194 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7196 /* If we want to copy PV into FP, we need to find some register
7197 in which to save FP. */
7199 vms_save_fp_regno = -1;
7200 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7201 for (i = 0; i < 32; i++)
7202 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7203 vms_save_fp_regno = i;
7205 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7206 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7207 else if (alpha_procedure_type == PT_NULL)
7208 vms_base_regno = REG_PV;
7210 /* Stack unwinding should be done via FP unless we use it for PV. */
7211 vms_unwind_regno = (vms_base_regno == REG_PV
7212 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7214 /* If this is a stack procedure, allow space for saving FP and RA. */
7215 if (alpha_procedure_type == PT_STACK)
7220 /* Our size must be even (multiple of 16 bytes). */
7228 /* Define the offset between two registers, one to be eliminated,
7229 and the other its replacement, at the start of a routine. */
7232 alpha_initial_elimination_offset (unsigned int from,
7233 unsigned int to ATTRIBUTE_UNUSED)
7237 ret = alpha_sa_size ();
7238 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7242 case FRAME_POINTER_REGNUM:
7245 case ARG_POINTER_REGNUM:
7246 ret += (ALPHA_ROUND (get_frame_size ()
7247 + current_function_pretend_args_size)
7248 - current_function_pretend_args_size);
7259 alpha_pv_save_size (void)
7262 return alpha_procedure_type == PT_STACK ? 8 : 0;
7266 alpha_using_fp (void)
7269 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7272 #if TARGET_ABI_OPEN_VMS
7274 const struct attribute_spec vms_attribute_table[] =
7276 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7277 { "overlaid", 0, 0, true, false, false, NULL },
7278 { "global", 0, 0, true, false, false, NULL },
7279 { "initialize", 0, 0, true, false, false, NULL },
7280 { NULL, 0, 0, false, false, false, NULL }
7286 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7288 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7292 alpha_find_lo_sum_using_gp (rtx insn)
7294 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7298 alpha_does_function_need_gp (void)
7302 /* The GP being variable is an OSF ABI thing.  */
7303 if (! TARGET_ABI_OSF)
7306 /* We need the gp to load the address of __mcount. */
7307 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7310 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7311 if (current_function_is_thunk)
7314 /* The nonlocal receiver pattern assumes that the gp is valid for
7315 the nested function. Reasonable because it's almost always set
7316 correctly already. For the cases where that's wrong, make sure
7317 the nested function loads its gp on entry. */
7318 if (current_function_has_nonlocal_goto)
7321 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7322 Even if we are a static function, we still need to do this in case
7323 our address is taken and passed to something like qsort. */
7325 push_topmost_sequence ();
7326 insn = get_insns ();
7327 pop_topmost_sequence ();
7329 for (; insn; insn = NEXT_INSN (insn))
7331 && ! JUMP_TABLE_DATA_P (insn)
7332 && GET_CODE (PATTERN (insn)) != USE
7333 && GET_CODE (PATTERN (insn)) != CLOBBER
7334 && get_attr_usegp (insn))
7341 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7342 sequences.  */
7345 set_frame_related_p (void)
7347 rtx seq = get_insns ();
7358 while (insn != NULL_RTX)
7360 RTX_FRAME_RELATED_P (insn) = 1;
7361 insn = NEXT_INSN (insn);
7363 seq = emit_insn (seq);
7367 seq = emit_insn (seq);
7368 RTX_FRAME_RELATED_P (seq) = 1;
7373 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
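/* Illustrative usage, mirroring calls made later in this file: wrapping
   an emit in FRP collects it into a sequence and marks every insn in it
   frame-related:

     FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
                                 GEN_INT (-frame_size))));

   start_sequence () opens the sequence; set_frame_related_p () closes
   it, sets RTX_FRAME_RELATED_P on each insn, and re-emits them.  */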
7375 /* Generates a store with the proper unwind info attached. VALUE is
7376 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7377 contains SP+FRAME_BIAS, and that is the unwind info that should be
7378 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7379 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7382 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7383 HOST_WIDE_INT base_ofs, rtx frame_reg)
7385 rtx addr, mem, insn;
7387 addr = plus_constant (base_reg, base_ofs);
7388 mem = gen_rtx_MEM (DImode, addr);
7389 set_mem_alias_set (mem, alpha_sr_alias_set);
7391 insn = emit_move_insn (mem, value);
7392 RTX_FRAME_RELATED_P (insn) = 1;
7394 if (frame_bias || value != frame_reg)
7398 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7399 mem = gen_rtx_MEM (DImode, addr);
7403 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7404 gen_rtx_SET (VOIDmode, mem, frame_reg),
7410 emit_frame_store (unsigned int regno, rtx base_reg,
7411 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7413 rtx reg = gen_rtx_REG (DImode, regno);
7414 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7417 /* Write function prologue. */
7419 /* On VMS we have two kinds of functions:
7421 - stack frame (PROC_STACK)
7422 these are 'normal' functions with local vars that
7423 call other functions
7424 - register frame (PROC_REGISTER)
7425 keeps all data in registers, needs no stack
7427 We must pass this to the assembler so it can generate the
7428 proper pdsc (procedure descriptor).
7429 This is done with the '.pdesc' command.
7431 On non-VMS targets, we don't really differentiate between the two, as we can
7432 simply allocate stack without saving registers. */
7435 alpha_expand_prologue (void)
7437 /* Registers to save. */
7438 unsigned long imask = 0;
7439 unsigned long fmask = 0;
7440 /* Stack space needed for pushing registers clobbered by us. */
7441 HOST_WIDE_INT sa_size;
7442 /* Complete stack size needed. */
7443 HOST_WIDE_INT frame_size;
7444 /* Offset from base reg to register save area. */
7445 HOST_WIDE_INT reg_offset;
7449 sa_size = alpha_sa_size ();
7451 frame_size = get_frame_size ();
7452 if (TARGET_ABI_OPEN_VMS)
7453 frame_size = ALPHA_ROUND (sa_size
7454 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7456 + current_function_pretend_args_size);
7457 else if (TARGET_ABI_UNICOSMK)
7458 /* We have to allocate space for the DSIB if we generate a frame. */
7459 frame_size = ALPHA_ROUND (sa_size
7460 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7461 + ALPHA_ROUND (frame_size
7462 + current_function_outgoing_args_size);
7464 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7466 + ALPHA_ROUND (frame_size
7467 + current_function_pretend_args_size));
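/* A worked, hypothetical example, assuming ALPHA_ROUND rounds up to a
   multiple of 16: on OSF, 24 bytes of outgoing args round to 32, and a
   40-byte local frame with no pretend args rounds to 48; with a 48-byte
   register save area (sa_size, itself already a multiple of 16), the
   total frame_size is 32 + 48 + 48 = 128 bytes.  */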
7469 if (TARGET_ABI_OPEN_VMS)
7472 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7474 alpha_sa_mask (&imask, &fmask);
7476 /* Emit an insn to reload GP, if needed. */
7479 alpha_function_needs_gp = alpha_does_function_need_gp ();
7480 if (alpha_function_needs_gp)
7481 emit_insn (gen_prologue_ldgp ());
7484 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7485 the call to mcount ourselves, rather than having the linker do it
7486 magically in response to -pg. Since _mcount has special linkage,
7487 don't represent the call as a call. */
7488 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7489 emit_insn (gen_prologue_mcount ());
7491 if (TARGET_ABI_UNICOSMK)
7492 unicosmk_gen_dsib (&imask);
7494 /* Adjust the stack by the frame size. If the frame size is > 4096
7495 bytes, we need to be sure we probe somewhere in the first and last
7496 4096 bytes (we can probably get away without the latter test) and
7497 every 8192 bytes in between. If the frame size is > 32768, we
7498 do this in a loop. Otherwise, we generate the explicit probe
7501 Note that we are only allowed to adjust sp once in the prologue. */
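/* A worked, hypothetical example for the OSF path: with
   frame_size = 20000 and sa_size = 0, the loop below probes at sp-4096
   and sp-12288, the test after the loop adds a final probe at sp-20000
   (since 20000 > 20480 - 4096), and a single sp adjustment of -20000
   follows.  */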
7503 if (frame_size <= 32768)
7505 if (frame_size > 4096)
7509 for (probed = 4096; probed < frame_size; probed += 8192)
7510 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7514 /* We only have to do this probe if we aren't saving registers. */
7515 if (sa_size == 0 && frame_size > probed - 4096)
7516 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7519 if (frame_size != 0)
7520 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7521 GEN_INT (TARGET_ABI_UNICOSMK
7527 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7528 number of 8192 byte blocks to probe. We then probe each block
7529 in the loop and then set SP to the proper location. If the
7530 amount remaining is > 4096, we have to do one more probe if we
7531 are not saving any registers. */
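/* Worked, hypothetical numbers: for frame_size = 100000,
   blocks = (100000 + 4096) / 8192 = 12 and
   leftover = 104096 - 12 * 8192 = 5792; since the leftover exceeds
   4096, one extra probe is needed when no registers are being saved.  */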
7533 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7534 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7535 rtx ptr = gen_rtx_REG (DImode, 22);
7536 rtx count = gen_rtx_REG (DImode, 23);
7539 emit_move_insn (count, GEN_INT (blocks));
7540 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7541 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7543 /* Because of the difficulty in emitting a new basic block this
7544 late in the compilation, generate the loop as a single insn. */
7545 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7547 if (leftover > 4096 && sa_size == 0)
7549 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7550 MEM_VOLATILE_P (last) = 1;
7551 emit_move_insn (last, const0_rtx);
7554 if (TARGET_ABI_WINDOWS_NT)
7556 /* For NT stack unwind (done by 'reverse execution'), it's
7557 not OK to take the result of a loop, even though the value
7558 is already in ptr, so we reload it via a single operation
7559 and subtract it from sp.
7561 Yes, that's correct -- we have to reload the whole constant
7562 into a temporary via ldah+lda then subtract from sp. */
7564 HOST_WIDE_INT lo, hi;
7565 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7566 hi = frame_size - lo;
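/* A worked, hypothetical example of the split: for
   frame_size = 0x18000, lo = ((0x8000 ^ 0x8000) - 0x8000) = -0x8000
   and hi = 0x20000, so the ldah materializes 0x2 << 16 and the lda
   adds the sign-extended -32768, reconstructing exactly 0x18000.  */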
7568 emit_move_insn (ptr, GEN_INT (hi));
7569 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7570 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7575 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7576 GEN_INT (-leftover)));
7579 /* This alternative is special, because the DWARF code cannot
7580 possibly intuit through the loop above. So we invent this
7581 note for it to look at instead.  */
7582 RTX_FRAME_RELATED_P (seq) = 1;
7584 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7585 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7586 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7587 GEN_INT (TARGET_ABI_UNICOSMK
7593 if (!TARGET_ABI_UNICOSMK)
7595 HOST_WIDE_INT sa_bias = 0;
7597 /* Cope with very large offsets to the register save area. */
7598 sa_reg = stack_pointer_rtx;
7599 if (reg_offset + sa_size > 0x8000)
7601 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7604 if (low + sa_size <= 0x8000)
7605 sa_bias = reg_offset - low, reg_offset = low;
7607 sa_bias = reg_offset, reg_offset = 0;
7609 sa_reg = gen_rtx_REG (DImode, 24);
7610 sa_bias_rtx = GEN_INT (sa_bias);
7612 if (add_operand (sa_bias_rtx, DImode))
7613 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7616 emit_move_insn (sa_reg, sa_bias_rtx);
7617 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7621 /* Save regs in stack order. Beginning with VMS PV. */
7622 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7623 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7625 /* Save register RA next. */
7626 if (imask & (1UL << REG_RA))
7628 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7629 imask &= ~(1UL << REG_RA);
7633 /* Now save any other registers required to be saved. */
7634 for (i = 0; i < 31; i++)
7635 if (imask & (1UL << i))
7637 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7641 for (i = 0; i < 31; i++)
7642 if (fmask & (1UL << i))
7644 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7648 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7650 /* The standard frame on the T3E includes space for saving registers.
7651 We just have to use it. We don't have to save the return address and
7652 the old frame pointer here - they are saved in the DSIB. */
7655 for (i = 9; i < 15; i++)
7656 if (imask & (1UL << i))
7658 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7661 for (i = 2; i < 10; i++)
7662 if (fmask & (1UL << i))
7664 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7669 if (TARGET_ABI_OPEN_VMS)
7671 if (alpha_procedure_type == PT_REGISTER)
7672 /* Register frame procedures save the fp.
7673 ?? Ought to have a dwarf2 save for this. */
7674 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7675 hard_frame_pointer_rtx);
7677 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7678 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7679 gen_rtx_REG (DImode, REG_PV)));
7681 if (alpha_procedure_type != PT_NULL
7682 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7683 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7685 /* If we have to allocate space for outgoing args, do it now. */
7686 if (current_function_outgoing_args_size != 0)
7689 = emit_move_insn (stack_pointer_rtx,
7691 (hard_frame_pointer_rtx,
7693 (current_function_outgoing_args_size))));
7695 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7696 if ! frame_pointer_needed. Setting the bit will change the CFA
7697 computation rule to use sp again, which would be wrong if we had
7698 frame_pointer_needed, as this means sp might move unpredictably
7699 later on.
7701 Also, note that
7702 frame_pointer_needed
7703 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7704 and
7705 current_function_outgoing_args_size != 0
7706 => alpha_procedure_type != PT_NULL,
7708 so when we are not setting the bit here, we are guaranteed to
7709 have emitted an FRP frame pointer update just before. */
7710 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7713 else if (!TARGET_ABI_UNICOSMK)
7715 /* If we need a frame pointer, set it from the stack pointer. */
7716 if (frame_pointer_needed)
7718 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7719 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7721 /* This must always be the last instruction in the
7722 prologue, thus we emit a special move + clobber. */
7723 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7724 stack_pointer_rtx, sa_reg)));
7728 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7729 the prologue, for exception handling reasons, we cannot do this for
7730 any insn that might fault. We could prevent this for mems with a
7731 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7732 have to prevent all such scheduling with a blockage.
7734 Linux, on the other hand, never bothered to implement OSF/1's
7735 exception handling, and so doesn't care about such things. Anyone
7736 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7738 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7739 emit_insn (gen_blockage ());
7742 /* Count the number of .file directives, so that .loc is up to date. */
7743 int num_source_filenames = 0;
7745 /* Output the textual info surrounding the prologue. */
7748 alpha_start_function (FILE *file, const char *fnname,
7749 tree decl ATTRIBUTE_UNUSED)
7751 unsigned long imask = 0;
7752 unsigned long fmask = 0;
7753 /* Stack space needed for pushing registers clobbered by us. */
7754 HOST_WIDE_INT sa_size;
7755 /* Complete stack size needed. */
7756 unsigned HOST_WIDE_INT frame_size;
7757 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7758 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7761 /* Offset from base reg to register save area. */
7762 HOST_WIDE_INT reg_offset;
7763 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7766 /* Don't emit an extern directive for functions defined in the same file. */
7767 if (TARGET_ABI_UNICOSMK)
7770 name_tree = get_identifier (fnname);
7771 TREE_ASM_WRITTEN (name_tree) = 1;
7774 alpha_fnname = fnname;
7775 sa_size = alpha_sa_size ();
7777 frame_size = get_frame_size ();
7778 if (TARGET_ABI_OPEN_VMS)
7779 frame_size = ALPHA_ROUND (sa_size
7780 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7782 + current_function_pretend_args_size);
7783 else if (TARGET_ABI_UNICOSMK)
7784 frame_size = ALPHA_ROUND (sa_size
7785 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7786 + ALPHA_ROUND (frame_size
7787 + current_function_outgoing_args_size);
7789 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7791 + ALPHA_ROUND (frame_size
7792 + current_function_pretend_args_size));
7794 if (TARGET_ABI_OPEN_VMS)
7797 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7799 alpha_sa_mask (&imask, &fmask);
7801 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7802 We have to do that before the .ent directive as we cannot switch
7803 files within procedures with native ecoff because line numbers are
7804 linked to procedure descriptors.
7805 Outputting the lineno helps debugging of one line functions as they
7806 would otherwise get no line number at all. Please note that we would
7807 like to put out last_linenum from final.c, but it is not accessible. */
7809 if (write_symbols == SDB_DEBUG)
7811 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7812 ASM_OUTPUT_SOURCE_FILENAME (file,
7813 DECL_SOURCE_FILE (current_function_decl));
7815 #ifdef SDB_OUTPUT_SOURCE_LINE
7816 if (debug_info_level != DINFO_LEVEL_TERSE)
7817 SDB_OUTPUT_SOURCE_LINE (file,
7818 DECL_SOURCE_LINE (current_function_decl));
7822 /* Issue function start and label. */
7823 if (TARGET_ABI_OPEN_VMS
7824 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7826 fputs ("\t.ent ", file);
7827 assemble_name (file, fnname);
7830 /* If the function needs GP, we'll write the "..ng" label there.
7831 Otherwise, do it here. */
7833 && ! alpha_function_needs_gp
7834 && ! current_function_is_thunk)
7837 assemble_name (file, fnname);
7838 fputs ("..ng:\n", file);
7842 strcpy (entry_label, fnname);
7843 if (TARGET_ABI_OPEN_VMS)
7844 strcat (entry_label, "..en");
7846 /* For public functions, the label must be globalized by appending an
7847 additional colon. */
7848 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7849 strcat (entry_label, ":");
7851 ASM_OUTPUT_LABEL (file, entry_label);
7852 inside_function = TRUE;
7854 if (TARGET_ABI_OPEN_VMS)
7855 fprintf (file, "\t.base $%d\n", vms_base_regno);
7857 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7858 && !flag_inhibit_size_directive)
7860 /* Set flags in procedure descriptor to request IEEE-conformant
7861 math-library routines. The value we set it to is PDSC_EXC_IEEE
7862 (/usr/include/pdsc.h). */
7863 fputs ("\t.eflag 48\n", file);
7866 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7867 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7868 alpha_arg_offset = -frame_size + 48;
7870 /* Describe our frame. If the frame size is larger than an integer,
7871 print it as zero to avoid an assembler error. We won't be
7872 properly describing such a frame, but that's the best we can do. */
7873 if (TARGET_ABI_UNICOSMK)
7875 else if (TARGET_ABI_OPEN_VMS)
7876 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7877 HOST_WIDE_INT_PRINT_DEC "\n",
7879 frame_size >= (1UL << 31) ? 0 : frame_size,
7881 else if (!flag_inhibit_size_directive)
7882 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7883 (frame_pointer_needed
7884 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7885 frame_size >= max_frame_size ? 0 : frame_size,
7886 current_function_pretend_args_size);
7888 /* Describe which registers were spilled. */
7889 if (TARGET_ABI_UNICOSMK)
7891 else if (TARGET_ABI_OPEN_VMS)
7894 /* ??? Does VMS care if mask contains ra? The old code didn't
7895 set it, so I don't here. */
7896 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7898 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7899 if (alpha_procedure_type == PT_REGISTER)
7900 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7902 else if (!flag_inhibit_size_directive)
7906 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7907 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7909 for (i = 0; i < 32; ++i)
7910 if (imask & (1UL << i))
7915 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7916 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
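/* Illustrative output with hypothetical values: a function with a
   96-byte frame that saves $9 and $26 at reg_offset 0 would emit
   roughly

	.frame $30,96,$26,0
	.mask 0x4000200,-96

   where 0x4000200 is (1 << 9) | (1 << 26).  */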
7919 #if TARGET_ABI_OPEN_VMS
7920 /* Ifdef'ed because link sections are only available then.  */
7921 switch_to_section (readonly_data_section);
7922 fprintf (file, "\t.align 3\n");
7923 assemble_name (file, fnname); fputs ("..na:\n", file);
7924 fputs ("\t.ascii \"", file);
7925 assemble_name (file, fnname);
7926 fputs ("\\0\"\n", file);
7927 alpha_need_linkage (fnname, 1);
7928 switch_to_section (text_section);
7932 /* Emit the .prologue note at the scheduled end of the prologue. */
7935 alpha_output_function_end_prologue (FILE *file)
7937 if (TARGET_ABI_UNICOSMK)
7939 else if (TARGET_ABI_OPEN_VMS)
7940 fputs ("\t.prologue\n", file);
7941 else if (TARGET_ABI_WINDOWS_NT)
7942 fputs ("\t.prologue 0\n", file);
7943 else if (!flag_inhibit_size_directive)
7944 fprintf (file, "\t.prologue %d\n",
7945 alpha_function_needs_gp || current_function_is_thunk);
7948 /* Write function epilogue. */
7950 /* ??? At some point we will want to support full unwind, and so will
7951 need to mark the epilogue as well.  At the moment, we just confuse
7952 dwarf2out.  */
7953 #undef FRP
7954 #define FRP(exp) exp
7957 alpha_expand_epilogue (void)
7959 /* Registers to save. */
7960 unsigned long imask = 0;
7961 unsigned long fmask = 0;
7962 /* Stack space needed for pushing registers clobbered by us. */
7963 HOST_WIDE_INT sa_size;
7964 /* Complete stack size needed. */
7965 HOST_WIDE_INT frame_size;
7966 /* Offset from base reg to register save area. */
7967 HOST_WIDE_INT reg_offset;
7968 int fp_is_frame_pointer, fp_offset;
7969 rtx sa_reg, sa_reg_exp = NULL;
7970 rtx sp_adj1, sp_adj2, mem;
7974 sa_size = alpha_sa_size ();
7976 frame_size = get_frame_size ();
7977 if (TARGET_ABI_OPEN_VMS)
7978 frame_size = ALPHA_ROUND (sa_size
7979 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7981 + current_function_pretend_args_size);
7982 else if (TARGET_ABI_UNICOSMK)
7983 frame_size = ALPHA_ROUND (sa_size
7984 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7985 + ALPHA_ROUND (frame_size
7986 + current_function_outgoing_args_size);
7988 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7990 + ALPHA_ROUND (frame_size
7991 + current_function_pretend_args_size));
7993 if (TARGET_ABI_OPEN_VMS)
7995 if (alpha_procedure_type == PT_STACK)
8001 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
8003 alpha_sa_mask (&imask, &fmask);
8006 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8007 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8009 sa_reg = stack_pointer_rtx;
8011 if (current_function_calls_eh_return)
8012 eh_ofs = EH_RETURN_STACKADJ_RTX;
8016 if (!TARGET_ABI_UNICOSMK && sa_size)
8018 /* If we have a frame pointer, restore SP from it. */
8019 if ((TARGET_ABI_OPEN_VMS
8020 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8021 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8022 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8024 /* Cope with very large offsets to the register save area. */
8025 if (reg_offset + sa_size > 0x8000)
8027 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8030 if (low + sa_size <= 0x8000)
8031 bias = reg_offset - low, reg_offset = low;
8033 bias = reg_offset, reg_offset = 0;
8035 sa_reg = gen_rtx_REG (DImode, 22);
8036 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8038 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8041 /* Restore registers in order, excepting a true frame pointer. */
8043 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8045 set_mem_alias_set (mem, alpha_sr_alias_set);
8046 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8049 imask &= ~(1UL << REG_RA);
8051 for (i = 0; i < 31; ++i)
8052 if (imask & (1UL << i))
8054 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8055 fp_offset = reg_offset;
8058 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8059 set_mem_alias_set (mem, alpha_sr_alias_set);
8060 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8065 for (i = 0; i < 31; ++i)
8066 if (fmask & (1UL << i))
8068 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8069 set_mem_alias_set (mem, alpha_sr_alias_set);
8070 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8074 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8076 /* Restore callee-saved general-purpose registers. */
8080 for (i = 9; i < 15; i++)
8081 if (imask & (1UL << i))
8083 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8085 set_mem_alias_set (mem, alpha_sr_alias_set);
8086 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8090 for (i = 2; i < 10; i++)
8091 if (fmask & (1UL << i))
8093 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8095 set_mem_alias_set (mem, alpha_sr_alias_set);
8096 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8100 /* Restore the return address from the DSIB. */
8102 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8103 set_mem_alias_set (mem, alpha_sr_alias_set);
8104 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8107 if (frame_size || eh_ofs)
8109 sp_adj1 = stack_pointer_rtx;
8113 sp_adj1 = gen_rtx_REG (DImode, 23);
8114 emit_move_insn (sp_adj1,
8115 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8118 /* If the stack size is large, begin computation into a temporary
8119 register so as not to interfere with a potential fp restore,
8120 which must be consecutive with an SP restore. */
8121 if (frame_size < 32768
8122 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
8123 sp_adj2 = GEN_INT (frame_size);
8124 else if (TARGET_ABI_UNICOSMK)
8126 sp_adj1 = gen_rtx_REG (DImode, 23);
8127 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8128 sp_adj2 = const0_rtx;
8130 else if (frame_size < 0x40007fffL)
8132 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8134 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8135 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8139 sp_adj1 = gen_rtx_REG (DImode, 23);
8140 FRP (emit_move_insn (sp_adj1, sp_adj2));
8142 sp_adj2 = GEN_INT (low);
8146 rtx tmp = gen_rtx_REG (DImode, 23);
8147 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8151 /* We can't drop new things to memory this late, as far as we know,
8152 so build it up by pieces. */
8153 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8154 -(frame_size < 0)));
8155 gcc_assert (sp_adj2);
8159 /* From now on, things must be in order. So emit blockages. */
8161 /* Restore the frame pointer. */
8162 if (TARGET_ABI_UNICOSMK)
8164 emit_insn (gen_blockage ());
8165 mem = gen_rtx_MEM (DImode,
8166 plus_constant (hard_frame_pointer_rtx, -16));
8167 set_mem_alias_set (mem, alpha_sr_alias_set);
8168 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8170 else if (fp_is_frame_pointer)
8172 emit_insn (gen_blockage ());
8173 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8174 set_mem_alias_set (mem, alpha_sr_alias_set);
8175 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8177 else if (TARGET_ABI_OPEN_VMS)
8179 emit_insn (gen_blockage ());
8180 FRP (emit_move_insn (hard_frame_pointer_rtx,
8181 gen_rtx_REG (DImode, vms_save_fp_regno)));
8184 /* Restore the stack pointer. */
8185 emit_insn (gen_blockage ());
8186 if (sp_adj2 == const0_rtx)
8187 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8189 FRP (emit_move_insn (stack_pointer_rtx,
8190 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8194 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8196 emit_insn (gen_blockage ());
8197 FRP (emit_move_insn (hard_frame_pointer_rtx,
8198 gen_rtx_REG (DImode, vms_save_fp_regno)));
8200 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8202 /* Decrement the frame pointer if the function does not have a
8203 frame of its own.  */
8205 emit_insn (gen_blockage ());
8206 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8207 hard_frame_pointer_rtx, constm1_rtx)));
8212 /* Output the rest of the textual info surrounding the epilogue. */
8215 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8219 /* We output a nop after noreturn calls at the very end of the function to
8220 ensure that the return address always remains in the caller's code range,
8221 as not doing so might confuse unwinding engines. */
8222 insn = get_last_insn ();
8224 insn = prev_active_insn (insn);
8225 if (GET_CODE (insn) == CALL_INSN)
8226 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8228 #if TARGET_ABI_OPEN_VMS
8229 alpha_write_linkage (file, fnname, decl);
8232 /* End the function. */
8233 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8235 fputs ("\t.end ", file);
8236 assemble_name (file, fnname);
8239 inside_function = FALSE;
8241 /* Output jump tables and the static subroutine information block. */
8242 if (TARGET_ABI_UNICOSMK)
8244 unicosmk_output_ssib (file, fnname);
8245 unicosmk_output_deferred_case_vectors (file);
8250 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8252 In order to avoid the hordes of differences between generated code
8253 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8254 lots of code loading up large constants, generate rtl and emit it
8255 instead of going straight to text.
8257 Not sure why this idea hasn't been explored before... */
8260 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8261 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8264 HOST_WIDE_INT hi, lo;
8265 rtx this, insn, funexp;
8267 /* We always require a valid GP. */
8268 emit_insn (gen_prologue_ldgp ());
8269 emit_note (NOTE_INSN_PROLOGUE_END);
8271 /* Find the "this" pointer. If the function returns a structure,
8272 the structure return pointer is in $16. */
8273 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8274 this = gen_rtx_REG (Pmode, 17);
8276 this = gen_rtx_REG (Pmode, 16);
8278 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8279 entire constant for the add. */
8280 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8281 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8282 if (hi + lo == delta)
8285 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8287 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8291 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8292 delta, -(delta < 0));
8293 emit_insn (gen_adddi3 (this, this, tmp));
8296 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8301 tmp = gen_rtx_REG (Pmode, 0);
8302 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8304 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8305 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8306 if (hi + lo == vcall_offset)
8309 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8313 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8314 vcall_offset, -(vcall_offset < 0));
8315 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8319 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8322 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8324 emit_insn (gen_adddi3 (this, this, tmp));
8327 /* Generate a tail call to the target function. */
8328 if (! TREE_USED (function))
8330 assemble_external (function);
8331 TREE_USED (function) = 1;
8333 funexp = XEXP (DECL_RTL (function), 0);
8334 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8335 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8336 SIBLING_CALL_P (insn) = 1;
8338 /* Run just enough of rest_of_compilation to get the insns emitted.
8339 There's not really enough bulk here to make other passes such as
8340 instruction scheduling worthwhile.  Note that use_thunk calls
8341 assemble_start_function and assemble_end_function. */
8342 insn = get_insns ();
8343 insn_locators_alloc ();
8344 shorten_branches (insn);
8345 final_start_function (insn, file, 1);
8346 final (insn, file, 1);
8347 final_end_function ();
8349 #endif /* TARGET_ABI_OSF */
8351 /* Debugging support. */
8355 /* Count the number of sdb-related labels generated (to find block
8356 start and end boundaries). */
8358 int sdb_label_count = 0;
8360 /* Name of the file containing the current function. */
8362 static const char *current_function_file = "";
8364 /* Offsets to alpha virtual arg/local debugging pointers. */
8366 long alpha_arg_offset;
8367 long alpha_auto_offset;
8369 /* Emit a new filename to a stream. */
8372 alpha_output_filename (FILE *stream, const char *name)
8374 static int first_time = TRUE;
8379 ++num_source_filenames;
8380 current_function_file = name;
8381 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8382 output_quoted_string (stream, name);
8383 fprintf (stream, "\n");
8384 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8385 fprintf (stream, "\t#@stabs\n");
8388 else if (write_symbols == DBX_DEBUG)
8389 /* dbxout.c will emit an appropriate .stabs directive. */
8392 else if (name != current_function_file
8393 && strcmp (name, current_function_file) != 0)
8395 if (inside_function && ! TARGET_GAS)
8396 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8399 ++num_source_filenames;
8400 current_function_file = name;
8401 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8404 output_quoted_string (stream, name);
8405 fprintf (stream, "\n");
8409 /* Structure to show the current status of registers and memory. */
8411 struct shadow_summary
8414 unsigned int i : 31; /* Mask of int regs */
8415 unsigned int fp : 31; /* Mask of fp regs */
8416 unsigned int mem : 1; /* mem == imem | fpmem */
8420 /* Summarize the effects of expression X on the machine.  Update SUM, a pointer
8421 to the summary structure. SET is nonzero if the insn is setting the
8422 object, otherwise zero. */
8425 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8427 const char *format_ptr;
8433 switch (GET_CODE (x))
8435 /* ??? Note that this case would be incorrect if the Alpha had a
8436 ZERO_EXTRACT in SET_DEST. */
8438 summarize_insn (SET_SRC (x), sum, 0);
8439 summarize_insn (SET_DEST (x), sum, 1);
8443 summarize_insn (XEXP (x, 0), sum, 1);
8447 summarize_insn (XEXP (x, 0), sum, 0);
8451 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8452 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8456 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8457 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8461 summarize_insn (SUBREG_REG (x), sum, 0);
8466 int regno = REGNO (x);
8467 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8469 if (regno == 31 || regno == 63)
8475 sum->defd.i |= mask;
8477 sum->defd.fp |= mask;
8482 sum->used.i |= mask;
8484 sum->used.fp |= mask;
8495 /* Find the regs used in memory address computation: */
8496 summarize_insn (XEXP (x, 0), sum, 0);
8499 case CONST_INT: case CONST_DOUBLE:
8500 case SYMBOL_REF: case LABEL_REF: case CONST:
8501 case SCRATCH: case ASM_INPUT:
8504 /* Handle common unary and binary ops for efficiency. */
8505 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8506 case MOD: case UDIV: case UMOD: case AND: case IOR:
8507 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8508 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8509 case NE: case EQ: case GE: case GT: case LE:
8510 case LT: case GEU: case GTU: case LEU: case LTU:
8511 summarize_insn (XEXP (x, 0), sum, 0);
8512 summarize_insn (XEXP (x, 1), sum, 0);
8515 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8516 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8517 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8518 case SQRT: case FFS:
8519 summarize_insn (XEXP (x, 0), sum, 0);
8523 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8524 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8525 switch (format_ptr[i])
8528 summarize_insn (XEXP (x, i), sum, 0);
8532 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8533 summarize_insn (XVECEXP (x, i, j), sum, 0);
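/* An illustrative, hypothetical example: for the pattern
   (set (reg:DI 9) (plus:DI (reg:DI 10) (const_int 8))), the SET case
   above recurses into the source with SET == 0 and the destination
   with SET == 1, so bit 10 lands in sum->used.i and bit 9 in
   sum->defd.i; a floating register 32-63 would instead set bit
   (regno % 32) of the corresponding fp mask.  */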
8545 /* Ensure a sufficient number of `trapb' insns are in the code when
8546 the user requests code with a trap precision of functions or
8547 instructions.
8549 In naive mode, when the user requests a trap-precision of
8550 "instruction", a trapb is needed after every instruction that may
8551 generate a trap.  This ensures that the code is resumption safe but
8552 it is also slow.
8554 When optimizations are turned on, we delay issuing a trapb as long
8555 as possible. In this context, a trap shadow is the sequence of
8556 instructions that starts with a (potentially) trap generating
8557 instruction and extends to the next trapb or call_pal instruction
8558 (but GCC never generates call_pal by itself). We can delay (and
8559 therefore sometimes omit) a trapb subject to the following
8562 (a) On entry to the trap shadow, if any Alpha register or memory
8563 location contains a value that is used as an operand value by some
8564 instruction in the trap shadow (live on entry), then no instruction
8565 in the trap shadow may modify the register or memory location.
8567 (b) Within the trap shadow, the computation of the base register
8568 for a memory load or store instruction may not involve using the
8569 result of an instruction that might generate an UNPREDICTABLE
8572 (c) Within the trap shadow, no register may be used more than once
8573 as a destination register.  (This is to make life easier for the
8574 trap-handler.)
8576 (d) The trap shadow may not include any branch instructions. */
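/* An illustrative, hypothetical sequence:

	divt $f1,$f2,$f3	# may trap; opens the trap shadow
	addt $f4,$f5,$f6	# fine: violates none of (a)-(d)
	addt $f7,$f8,$f3	# redefines $f3, violating (c)

   so a trapb would have to be issued before the second addt.  */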
8579 alpha_handle_trap_shadows (void)
8581 struct shadow_summary shadow;
8582 int trap_pending, exception_nesting;
8586 exception_nesting = 0;
8589 shadow.used.mem = 0;
8590 shadow.defd = shadow.used;
8592 for (i = get_insns (); i ; i = NEXT_INSN (i))
8594 if (GET_CODE (i) == NOTE)
8596 switch (NOTE_KIND (i))
8598 case NOTE_INSN_EH_REGION_BEG:
8599 exception_nesting++;
8604 case NOTE_INSN_EH_REGION_END:
8605 exception_nesting--;
8610 case NOTE_INSN_EPILOGUE_BEG:
8611 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8616 else if (trap_pending)
8618 if (alpha_tp == ALPHA_TP_FUNC)
8620 if (GET_CODE (i) == JUMP_INSN
8621 && GET_CODE (PATTERN (i)) == RETURN)
8624 else if (alpha_tp == ALPHA_TP_INSN)
8628 struct shadow_summary sum;
8633 sum.defd = sum.used;
8635 switch (GET_CODE (i))
8638 /* Annoyingly, get_attr_trap will die on these. */
8639 if (GET_CODE (PATTERN (i)) == USE
8640 || GET_CODE (PATTERN (i)) == CLOBBER)
8643 summarize_insn (PATTERN (i), &sum, 0);
8645 if ((sum.defd.i & shadow.defd.i)
8646 || (sum.defd.fp & shadow.defd.fp))
8648 /* (c) would be violated */
8652 /* Combine shadow with summary of current insn: */
8653 shadow.used.i |= sum.used.i;
8654 shadow.used.fp |= sum.used.fp;
8655 shadow.used.mem |= sum.used.mem;
8656 shadow.defd.i |= sum.defd.i;
8657 shadow.defd.fp |= sum.defd.fp;
8658 shadow.defd.mem |= sum.defd.mem;
8660 if ((sum.defd.i & shadow.used.i)
8661 || (sum.defd.fp & shadow.used.fp)
8662 || (sum.defd.mem & shadow.used.mem))
8664 /* (a) would be violated (also takes care of (b)) */
8665 gcc_assert (get_attr_trap (i) != TRAP_YES
8666 || (!(sum.defd.i & sum.used.i)
8667 && !(sum.defd.fp & sum.used.fp)));
8685 n = emit_insn_before (gen_trapb (), i);
8686 PUT_MODE (n, TImode);
8687 PUT_MODE (i, TImode);
8691 shadow.used.mem = 0;
8692 shadow.defd = shadow.used;
8697 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8698 && GET_CODE (i) == INSN
8699 && GET_CODE (PATTERN (i)) != USE
8700 && GET_CODE (PATTERN (i)) != CLOBBER
8701 && get_attr_trap (i) == TRAP_YES)
8703 if (optimize && !trap_pending)
8704 summarize_insn (PATTERN (i), &shadow, 0);
8710 /* Alpha can only issue instruction groups simultaneously if they are
8711 suitably aligned. This is very processor-specific. */
8712 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8713 that are marked "fake". These instructions do not exist on that target,
8714 but it is possible to see these insns with deranged combinations of
8715 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8716 choose a result at random. */
8718 enum alphaev4_pipe {
8725 enum alphaev5_pipe {
8736 static enum alphaev4_pipe
8737 alphaev4_insn_pipe (rtx insn)
8739 if (recog_memoized (insn) < 0)
8741 if (get_attr_length (insn) != 4)
8744 switch (get_attr_type (insn))
8760 case TYPE_MVI: /* fake */
8775 case TYPE_FSQRT: /* fake */
8776 case TYPE_FTOI: /* fake */
8777 case TYPE_ITOF: /* fake */
8785 static enum alphaev5_pipe
8786 alphaev5_insn_pipe (rtx insn)
8788 if (recog_memoized (insn) < 0)
8790 if (get_attr_length (insn) != 4)
8793 switch (get_attr_type (insn))
8813 case TYPE_FTOI: /* fake */
8814 case TYPE_ITOF: /* fake */
8829 case TYPE_FSQRT: /* fake */
8840 /* IN_USE is a mask of the slots currently filled within the insn group.
8841 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8842 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8844 LEN is, of course, the length of the group in bytes. */
8847 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8854 || GET_CODE (PATTERN (insn)) == CLOBBER
8855 || GET_CODE (PATTERN (insn)) == USE)
8860 enum alphaev4_pipe pipe;
8862 pipe = alphaev4_insn_pipe (insn);
8866 /* Force complex instructions to start new groups. */
8870 /* If this is a completely unrecognized insn, it's an asm.
8871 We don't know how long it is, so record length as -1 to
8872 signal a needed realignment. */
8873 if (recog_memoized (insn) < 0)
8876 len = get_attr_length (insn);
8880 if (in_use & EV4_IB0)
8882 if (in_use & EV4_IB1)
8887 in_use |= EV4_IB0 | EV4_IBX;
8891 if (in_use & EV4_IB0)
8893 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8901 if (in_use & EV4_IB1)
8911 /* Haifa doesn't schedule branches well.  */
8912 if (GET_CODE (insn) == JUMP_INSN)
8916 insn = next_nonnote_insn (insn);
8918 if (!insn || ! INSN_P (insn))
8921 /* Let Haifa tell us where it thinks insn group boundaries are. */
8922 if (GET_MODE (insn) == TImode)
8925 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8930 insn = next_nonnote_insn (insn);
8938 /* IN_USE is a mask of the slots currently filled within the insn group.
8939 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8940 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8942 LEN is, of course, the length of the group in bytes. */
8945 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8952 || GET_CODE (PATTERN (insn)) == CLOBBER
8953 || GET_CODE (PATTERN (insn)) == USE)
8958 enum alphaev5_pipe pipe;
8960 pipe = alphaev5_insn_pipe (insn);
8964 /* Force complex instructions to start new groups. */
8968 /* If this is a completely unrecognized insn, it's an asm.
8969 We don't know how long it is, so record length as -1 to
8970 signal a needed realignment. */
8971 if (recog_memoized (insn) < 0)
8974 len = get_attr_length (insn);
8977 /* ??? In most of the places below, we would like to assert that this
8978 can never happen, as it would indicate an error either in Haifa, or
8979 in the scheduling description. Unfortunately, Haifa never
8980 schedules the last instruction of the BB, so we don't have
8981 an accurate TI bit to go off. */
8983 if (in_use & EV5_E0)
8985 if (in_use & EV5_E1)
8990 in_use |= EV5_E0 | EV5_E01;
8994 if (in_use & EV5_E0)
8996 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9004 if (in_use & EV5_E1)
9010 if (in_use & EV5_FA)
9012 if (in_use & EV5_FM)
9017 in_use |= EV5_FA | EV5_FAM;
9021 if (in_use & EV5_FA)
9027 if (in_use & EV5_FM)
9040 /* Haifa doesn't schedule branches well.  */
9041 /* ??? If this is predicted not-taken, slotting continues, except
9042 that no more IBR, FBR, or JSR insns may be slotted. */
9043 if (GET_CODE (insn) == JUMP_INSN)
9047 insn = next_nonnote_insn (insn);
9049 if (!insn || ! INSN_P (insn))
9052 /* Let Haifa tell us where it thinks insn group boundaries are. */
9053 if (GET_MODE (insn) == TImode)
9056 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9061 insn = next_nonnote_insn (insn);
9070 alphaev4_next_nop (int *pin_use)
9072 int in_use = *pin_use;
9075 if (!(in_use & EV4_IB0))
9080 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9085 else if (TARGET_FP && !(in_use & EV4_IB1))
9098 alphaev5_next_nop (int *pin_use)
9100 int in_use = *pin_use;
9103 if (!(in_use & EV5_E1))
9108 else if (TARGET_FP && !(in_use & EV5_FA))
9113 else if (TARGET_FP && !(in_use & EV5_FM))
9125 /* The instruction group alignment main loop. */
9128 alpha_align_insns (unsigned int max_align,
9129 rtx (*next_group) (rtx, int *, int *),
9130 rtx (*next_nop) (int *))
9132 /* ALIGN is the known alignment for the insn group. */
9134 /* OFS is the offset of the current insn in the insn group. */
9136 int prev_in_use, in_use, len, ldgp;
9139 /* Let shorten_branches take care of assigning alignments to code labels.  */
9140 shorten_branches (get_insns ());
9142 if (align_functions < 4)
9144 else if ((unsigned int) align_functions < max_align)
9145 align = align_functions;
9149 ofs = prev_in_use = 0;
9151 if (GET_CODE (i) == NOTE)
9152 i = next_nonnote_insn (i);
9154 ldgp = alpha_function_needs_gp ? 8 : 0;
9158 next = (*next_group) (i, &in_use, &len);
9160 /* When we see a label, resync alignment etc. */
9161 if (GET_CODE (i) == CODE_LABEL)
9163 unsigned int new_align = 1 << label_to_alignment (i);
9165 if (new_align >= align)
9167 align = new_align < max_align ? new_align : max_align;
9171 else if (ofs & (new_align-1))
9172 ofs = (ofs | (new_align-1)) + 1;
9176 /* Handle complex instructions specially.  */
9177 else if (in_use == 0)
9179 /* Asms will have length < 0. This is a signal that we have
9180 lost alignment knowledge. Assume, however, that the asm
9181 will not mis-align instructions. */
9190 /* If the known alignment is smaller than the recognized insn group,
9191 realign the output. */
9192 else if ((int) align < len)
9194 unsigned int new_log_align = len > 8 ? 4 : 3;
9197 where = prev = prev_nonnote_insn (i);
9198 if (!where || GET_CODE (where) != CODE_LABEL)
9201 /* Can't realign between a call and its gp reload. */
9202 if (! (TARGET_EXPLICIT_RELOCS
9203 && prev && GET_CODE (prev) == CALL_INSN))
9205 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9206 align = 1 << new_log_align;
9211 /* We may not insert padding inside the initial ldgp sequence. */
9215 /* If the group won't fit in the same INT16 as the previous,
9216 we need to add padding to keep the group together. Rather
9217 than simply leaving the insn filling to the assembler, we
9218 can make use of the knowledge of what sorts of instructions
9219 were issued in the previous group to make sure that all of
9220 the added nops are really free. */
9221 else if (ofs + len > (int) align)
9223 int nop_count = (align - ofs) / 4;
9226 /* Insert nops before labels, branches, and calls to truly merge
9227 the execution of the nops with the previous instruction group. */
9228 where = prev_nonnote_insn (i);
9231 if (GET_CODE (where) == CODE_LABEL)
9233 rtx where2 = prev_nonnote_insn (where);
9234 if (where2 && GET_CODE (where2) == JUMP_INSN)
9237 else if (GET_CODE (where) == INSN)
9244 emit_insn_before ((*next_nop)(&prev_in_use), where);
9245 while (--nop_count);
9249 ofs = (ofs + len) & (align - 1);
9250 prev_in_use = in_use;
9255 /* Machine dependent reorg pass. */
9260 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9261 alpha_handle_trap_shadows ();
9263 /* Due to the number of extra trapb insns, don't bother fixing up
9264 alignment when trap precision is instruction. Moreover, we can
9265 only do our job when sched2 is run. */
9266 if (optimize && !optimize_size
9267 && alpha_tp != ALPHA_TP_INSN
9268 && flag_schedule_insns_after_reload)
9270 if (alpha_tune == PROCESSOR_EV4)
9271 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9272 else if (alpha_tune == PROCESSOR_EV5)
9273 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9277 #if !TARGET_ABI_UNICOSMK
9284 alpha_file_start (void)
9286 #ifdef OBJECT_FORMAT_ELF
9287 /* If emitting dwarf2 debug information, we cannot generate a .file
9288 directive to start the file, as it will conflict with dwarf2out
9289 file numbers. So it's only useful when emitting mdebug output. */
9290 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9293 default_file_start ();
9295 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9298 fputs ("\t.set noreorder\n", asm_out_file);
9299 fputs ("\t.set volatile\n", asm_out_file);
9300 if (!TARGET_ABI_OPEN_VMS)
9301 fputs ("\t.set noat\n", asm_out_file);
9302 if (TARGET_EXPLICIT_RELOCS)
9303 fputs ("\t.set nomacro\n", asm_out_file);
9304 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9308 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9310 else if (TARGET_MAX)
9312 else if (TARGET_BWX)
9314 else if (alpha_cpu == PROCESSOR_EV5)
9319 fprintf (asm_out_file, "\t.arch %s\n", arch);
9324 #ifdef OBJECT_FORMAT_ELF
9325 /* Since we don't have a .dynbss section, we should not allow global
9326 relocations in the .rodata section. */
9329 alpha_elf_reloc_rw_mask (void)
9331 return flag_pic ? 3 : 2;
9334 /* Return a section for X. The only special thing we do here is to
9335 honor small data. */
9338 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9339 unsigned HOST_WIDE_INT align)
9341 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9342 /* ??? Consider using mergeable sdata sections. */
9343 return sdata_section;
9345 return default_elf_select_rtx_section (mode, x, align);
9349 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9351 unsigned int flags = 0;
9353 if (strcmp (name, ".sdata") == 0
9354 || strncmp (name, ".sdata.", 7) == 0
9355 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9356 || strcmp (name, ".sbss") == 0
9357 || strncmp (name, ".sbss.", 6) == 0
9358 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9359 flags = SECTION_SMALL;
9361 flags |= default_section_type_flags (decl, name, reloc);
9364 #endif /* OBJECT_FORMAT_ELF */
9366 /* Structure to collect function names for final output in link section. */
9367 /* Note that items marked with GTY can't be ifdef'ed out. */
9369 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9370 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9372 struct alpha_links GTY(())
9376 enum links_kind lkind;
9377 enum reloc_kind rkind;
9380 struct alpha_funcs GTY(())
9383 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9387 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9388 splay_tree alpha_links_tree;
9389 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9390 splay_tree alpha_funcs_tree;
9392 static GTY(()) int alpha_funcs_num;
9394 #if TARGET_ABI_OPEN_VMS
9396 /* Return the VMS argument type corresponding to MODE. */
9399 alpha_arg_type (enum machine_mode mode)
9404 return TARGET_FLOAT_VAX ? FF : FS;
9406 return TARGET_FLOAT_VAX ? FD : FT;
9412 /* Return an rtx for an integer representing the VMS Argument Information
9413 register value.  */
9416 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9418 unsigned HOST_WIDE_INT regval = cum.num_args;
9421 for (i = 0; i < 6; i++)
9422 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9424 return GEN_INT (regval);
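/* A worked, hypothetical example: a call with two arguments whose VMS
   type codes are a0 and a1 yields regval = 2 | (a0 << 8) | (a1 << 11);
   the low bits hold the argument count and each successive type code
   occupies the next 3 bits starting at bit 8.  */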
9427 /* Make (or fake) .linkage entry for function call.
9429 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9431 Return a SYMBOL_REF rtx for the linkage.  */
9434 alpha_need_linkage (const char *name, int is_local)
9436 splay_tree_node node;
9437 struct alpha_links *al;
9444 struct alpha_funcs *cfaf;
9446 if (!alpha_funcs_tree)
9447 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9448 splay_tree_compare_pointers);
9450 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9453 cfaf->num = ++alpha_funcs_num;
9455 splay_tree_insert (alpha_funcs_tree,
9456 (splay_tree_key) current_function_decl,
9457 (splay_tree_value) cfaf);
9460 if (alpha_links_tree)
9462 /* Is this name already defined? */
9464 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9467 al = (struct alpha_links *) node->value;
9470 /* Defined here but external assumed. */
9471 if (al->lkind == KIND_EXTERN)
9472 al->lkind = KIND_LOCAL;
9476 /* Used here but unused assumed. */
9477 if (al->lkind == KIND_UNUSED)
9478 al->lkind = KIND_LOCAL;
9484 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9486 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9487 name = ggc_strdup (name);
9489 /* Assume external if no definition. */
9490 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9492 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9493 get_identifier (name);
9495 /* Construct a SYMBOL_REF for us to call. */
9497 size_t name_len = strlen (name);
9498 char *linksym = alloca (name_len + 6);
9499 linksym[0] = '$';
9500 memcpy (linksym + 1, name, name_len);
9501 memcpy (linksym + 1 + name_len, "..lk", 5);
9502 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9503 ggc_alloc_string (linksym, name_len + 5));
9506 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9507 (splay_tree_value) al);
9513 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9515 splay_tree_node cfunnode;
9516 struct alpha_funcs *cfaf;
9517 struct alpha_links *al;
9518 const char *name = XSTR (linkage, 0);
9520 cfaf = (struct alpha_funcs *) 0;
9521 al = (struct alpha_links *) 0;
9523 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9524 cfaf = (struct alpha_funcs *) cfunnode->value;
9528 splay_tree_node lnode;
9530 /* Is this name already defined? */
9532 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9534 al = (struct alpha_links *) lnode->value;
9537 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9545 splay_tree_node node = 0;
9546 struct alpha_links *anl;
9551 name_len = strlen (name);
9553 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9554 al->num = cfaf->num;
9556 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9559 anl = (struct alpha_links *) node->value;
9560 al->lkind = anl->lkind;
9563 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9564 buflen = strlen (buf);
9565 linksym = alloca (buflen + 1);
9566 memcpy (linksym, buf, buflen + 1);
9568 al->linkage = gen_rtx_SYMBOL_REF
9569 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9571 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9572 (splay_tree_value) al);
9576 al->rkind = KIND_CODEADDR;
9578 al->rkind = KIND_LINKAGE;
9581 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9587 alpha_write_one_linkage (splay_tree_node node, void *data)
9589 const char *const name = (const char *) node->key;
9590 struct alpha_links *link = (struct alpha_links *) node->value;
9591 FILE *stream = (FILE *) data;
9593 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9594 if (link->rkind == KIND_CODEADDR)
9596 if (link->lkind == KIND_LOCAL)
9598 /* Local and used */
9599 fprintf (stream, "\t.quad %s..en\n", name);
9603 /* External and used, request code address. */
9604 fprintf (stream, "\t.code_address %s\n", name);
9609 if (link->lkind == KIND_LOCAL)
9611 /* Local and used, build linkage pair. */
9612 fprintf (stream, "\t.quad %s..en\n", name);
9613 fprintf (stream, "\t.quad %s\n", name);
9617 /* External and used, request linkage pair. */
9618 fprintf (stream, "\t.linkage %s\n", name);
9626 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9628 splay_tree_node node;
9629 struct alpha_funcs *func;
9631 fprintf (stream, "\t.link\n");
9632 fprintf (stream, "\t.align 3\n");
9635 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9636 func = (struct alpha_funcs *) node->value;
9638 fputs ("\t.name ", stream);
9639 assemble_name (stream, funname);
9640 fputs ("..na\n", stream);
9641 ASM_OUTPUT_LABEL (stream, funname);
9642 fprintf (stream, "\t.pdesc ");
9643 assemble_name (stream, funname);
9644 fprintf (stream, "..en,%s\n",
9645 alpha_procedure_type == PT_STACK ? "stack"
9646 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9650 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9651 /* splay_tree_delete (func->links); */
9655 /* Given a decl, a section name, and whether the decl initializer
9656 has relocs, choose attributes for the section. */
9658 #define SECTION_VMS_OVERLAY SECTION_FORGET
9659 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9660 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9663 vms_section_type_flags (tree decl, const char *name, int reloc)
9665 unsigned int flags = default_section_type_flags (decl, name, reloc);
9667 if (decl && DECL_ATTRIBUTES (decl)
9668 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9669 flags |= SECTION_VMS_OVERLAY;
9670 if (decl && DECL_ATTRIBUTES (decl)
9671 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9672 flags |= SECTION_VMS_GLOBAL;
9673 if (decl && DECL_ATTRIBUTES (decl)
9674 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9675 flags |= SECTION_VMS_INITIALIZE;
9680 /* Switch to an arbitrary section NAME with attributes as specified
9681 by FLAGS. ALIGN specifies any known alignment requirements for
9682 the section; 0 if the default should be used. */
9685 vms_asm_named_section (const char *name, unsigned int flags,
9686 tree decl ATTRIBUTE_UNUSED)
9688 fputc ('\n', asm_out_file);
9689 fprintf (asm_out_file, ".section\t%s", name);
9691 if (flags & SECTION_VMS_OVERLAY)
9692 fprintf (asm_out_file, ",OVR");
9693 if (flags & SECTION_VMS_GLOBAL)
9694 fprintf (asm_out_file, ",GBL");
9695 if (flags & SECTION_VMS_INITIALIZE)
9696 fprintf (asm_out_file, ",NOMOD");
9697 if (flags & SECTION_DEBUG)
9698 fprintf (asm_out_file, ",NOWRT");
9700 fputc ('\n', asm_out_file);
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   Differs from default_ctors_section_asm_out_constructor in that the
   width of the .ctors entry is always 64 bits, rather than the 32 bits
   used by a normal pointer.  */

static void
vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (ctors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

static void
vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (dtors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}
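/* Illustrative output: for a constructor symbol ctor_fn (a hypothetical
   name), the entry emitted above is a full 64-bit

       .quad ctor_fn

   instead of the 32-bit entry a default pointer-width table would get.  */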
#else

rtx
alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
		    int is_local ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

rtx
alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
		   tree cfundecl ATTRIBUTE_UNUSED,
		   int lflag ATTRIBUTE_UNUSED,
		   int rflag ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

#endif /* TARGET_ABI_OPEN_VMS */
#if TARGET_ABI_UNICOSMK

/* This evaluates to true if we do not know how to pass TYPE solely in
   registers.  This is the case for all arguments that do not fit in two
   registers.  */

static bool
unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
{
  if (type == NULL)
    return false;

  if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    return true;
  if (TREE_ADDRESSABLE (type))
    return true;

  return ALPHA_ARG_SIZE (mode, type, 0) > 2;
}
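/* For example (illustrative): a struct containing three 'long' fields
   occupies three 64-bit argument words, so ALPHA_ARG_SIZE returns 3 and
   the struct goes on the stack, while a two-word struct may still be
   passed in registers.  */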
/* Define the offset between two registers, one to be eliminated, and the
   other its replacement, at the start of a routine.  */

int
unicosmk_initial_elimination_offset (int from, int to)
{
  int fixed_size;

  fixed_size = alpha_sa_size ();
  if (fixed_size != 0)
    fixed_size += 48;

  if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return -fixed_size;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return 0;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return (ALPHA_ROUND (current_function_outgoing_args_size)
	    + ALPHA_ROUND (get_frame_size ()));
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return (ALPHA_ROUND (fixed_size)
	    + ALPHA_ROUND (get_frame_size ()
			   + current_function_outgoing_args_size));
  else
    gcc_unreachable ();
}
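/* Worked example with illustrative numbers: if alpha_sa_size () is 16,
   fixed_size becomes 16 + 48 = 64.  With a 24-byte frame and 8 bytes of
   outgoing arguments, eliminating the arg pointer to the stack pointer
   yields ALPHA_ROUND (64) + ALPHA_ROUND (24 + 8) = 64 + 32 = 96, since
   ALPHA_ROUND rounds up to a 16-byte boundary.  */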
/* Output the module name for .ident and .end directives.  We have to strip
   directories and make sure that the module name starts with a letter
   or '$'.  */

static void
unicosmk_output_module_name (FILE *file)
{
  const char *name = lbasename (main_input_filename);
  unsigned len = strlen (name);
  char *clean_name = alloca (len + 2);
  char *ptr = clean_name;

  /* CAM only accepts module names that start with a letter or '$'.  We
     prefix the module name with a '$' if necessary.  */

  if (!ISALPHA (*name))
    *ptr++ = '$';
  memcpy (ptr, name, len + 1);
  clean_symbol_name (clean_name);
  fputs (clean_name, file);
}
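/* For example (illustrative, and assuming clean_symbol_name maps the
   invalid '.' to an acceptable character): compiling "src/0_start.c"
   strips the directory, prefixes '$' because '0' is not a letter, and
   emits a module name along the lines of "$0_start_c".  */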
/* Output the definition of a common variable.  */

void
unicosmk_output_common (FILE *file, const char *name, int size, int align)
{
  tree name_tree;
  printf ("T3E__: common %s\n", name);

  in_section = NULL;
  fputs ("\t.endp\n\n\t.psect ", file);
  assemble_name (file, name);
  fprintf (file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
  fprintf (file, "\t.byte\t0:%d\n", size);

  /* Mark the symbol as defined in this module.  */
  name_tree = get_identifier (name);
  TREE_ASM_WRITTEN (name_tree) = 1;
}
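/* Illustrative output for a 16-byte common variable "buf" with 8-byte
   (log2 = 3) alignment:

       .endp

       .psect buf,3,common
       .byte	0:16  */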
#define SECTION_PUBLIC SECTION_MACH_DEP
#define SECTION_MAIN (SECTION_PUBLIC << 1)
static int current_section_align;

/* A get_unnamed_section callback for switching to the text section.  */

static void
unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  static int count = 0;

  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
}

/* A get_unnamed_section callback for switching to the data section.  */

static void
unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  static int count = 1;

  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
}
/* Implement TARGET_ASM_INIT_SECTIONS.

   The Cray assembler is really weird with respect to sections.  It has only
   named sections and you can't reopen a section once it has been closed.
   This means that we have to generate unique names whenever we want to
   reenter the text or the data section.  */

static void
unicosmk_init_sections (void)
{
  text_section = get_unnamed_section (SECTION_CODE,
				      unicosmk_output_text_section_asm_op,
				      NULL);
  data_section = get_unnamed_section (SECTION_WRITE,
				      unicosmk_output_data_section_asm_op,
				      NULL);
  readonly_data_section = data_section;
}
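/* Each switch back to the text (or data) section therefore opens a fresh
   psect -- gcc@text___0, gcc@text___1, ... (illustrative numbering) --
   rather than reopening a closed one, which CAM would reject.  */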
static unsigned int
unicosmk_section_type_flags (tree decl, const char *name,
			     int reloc ATTRIBUTE_UNUSED)
{
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  if (!decl)
    return flags;

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      if (align_functions_log > current_section_align)
	current_section_align = align_functions_log;

      if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
	flags |= SECTION_MAIN;
    }
  else
    current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);

  if (TREE_PUBLIC (decl))
    flags |= SECTION_PUBLIC;

  return flags;
}
/* Generate a section name for decl and associate it with the
   declaration.  */

static void
unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;
  int len;

  gcc_assert (decl);

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = default_strip_name_encoding (name);
  len = strlen (name);

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      char *string;

      /* It is essential that we prefix the section name here because
	 otherwise the section names generated for constructors and
	 destructors confuse collect2.  */

      string = alloca (len + 6);
      sprintf (string, "code@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
  else if (TREE_PUBLIC (decl))
    DECL_SECTION_NAME (decl) = build_string (len, name);
  else
    {
      char *string;

      string = alloca (len + 6);
      sprintf (string, "data@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
}
/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
unicosmk_asm_named_section (const char *name, unsigned int flags,
			    tree decl ATTRIBUTE_UNUSED)
{
  const char *kind;

  /* Close the previous section.  */

  fputs ("\t.endp\n\n", asm_out_file);

  /* Find out what kind of section we are opening.  */

  if (flags & SECTION_MAIN)
    fputs ("\t.start\tmain\n", asm_out_file);

  if (flags & SECTION_CODE)
    kind = "code";
  else if (flags & SECTION_PUBLIC)
    kind = "common";
  else
    kind = "data";

  if (current_section_align != 0)
    fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
	     current_section_align, kind);
  else
    fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
}
static void
unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
{
  if (DECL_P (decl)
      && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
    unicosmk_unique_section (decl, 0);
}
/* Output an alignment directive.  We have to use the macro 'gcc@code@align'
   in code sections because .align fills unused space with zeroes.  */

void
unicosmk_output_align (FILE *file, int align)
{
  if (inside_function)
    fprintf (file, "\tgcc@code@align\t%d\n", align);
  else
    fprintf (file, "\t.align\t%d\n", align);
}
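/* Illustratively, a request to align to 16 bytes (log2 = 4) inside a
   function emits

       gcc@code@align	4

   which the macro defined in unicosmk_file_start expands into nops,
   whereas outside a function a plain ".align 4" suffices.  */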
/* Add a case vector to the current function's list of deferred case
   vectors.  Case vectors have to be put into a separate section because CAM
   does not allow data definitions in code sections.  */

void
unicosmk_defer_case_vector (rtx lab, rtx vec)
{
  struct machine_function *machine = cfun->machine;

  vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
  machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
					  machine->addr_list);
}
/* Output a case vector.  */

static void
unicosmk_output_addr_vec (FILE *file, rtx vec)
{
  rtx lab = XEXP (vec, 0);
  rtx body = XEXP (vec, 1);
  int vlen = XVECLEN (body, 0);
  int idx;

  (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));

  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_VEC_ELT
	(file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
    }
}
/* Output current function's deferred case vectors.  */

static void
unicosmk_output_deferred_case_vectors (FILE *file)
{
  struct machine_function *machine = cfun->machine;
  rtx t;

  if (machine->addr_list == NULL_RTX)
    return;

  switch_to_section (data_section);
  for (t = machine->addr_list; t; t = XEXP (t, 1))
    unicosmk_output_addr_vec (file, XEXP (t, 0));
}
/* Generate the name of the SSIB section for the current function.  */

#define SSIB_PREFIX "__SSIB_"
#define SSIB_PREFIX_LEN 7

static const char *
unicosmk_ssib_name (void)
{
  /* This is ok since CAM won't be able to deal with names longer than that
     anyway.  */

  static char name[256];

  rtx x;
  const char *fnname;
  int len;

  x = DECL_RTL (cfun->decl);
  gcc_assert (GET_CODE (x) == MEM);
  x = XEXP (x, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF);
  fnname = XSTR (x, 0);

  len = strlen (fnname);
  if (len + SSIB_PREFIX_LEN > 255)
    len = 255 - SSIB_PREFIX_LEN;

  strcpy (name, SSIB_PREFIX);
  strncpy (name + SSIB_PREFIX_LEN, fnname, len);
  name[len + SSIB_PREFIX_LEN] = 0;

  return name;
}
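/* For a function "fact", the name produced is "__SSIB_fact" (illustrative);
   truncation guarantees the result stays within the 255-character limit
   that CAM can handle.  */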
/* Set up the dynamic subprogram information block (DSIB) and update the
   frame pointer register ($15) for subroutines which have a frame.  If the
   subroutine doesn't have a frame, simply increment $15.  */

static void
unicosmk_gen_dsib (unsigned long *imaskP)
{
  if (alpha_procedure_type == PT_STACK)
    {
      const char *ssib_name;
      rtx mem;

      /* Allocate 64 bytes for the DSIB.  */

      FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
				  GEN_INT (-64))));
      emit_insn (gen_blockage ());

      /* Save the return address.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
      (*imaskP) &= ~(1UL << REG_RA);

      /* Save the old frame pointer.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
      (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);

      emit_insn (gen_blockage ());

      /* Store the SSIB pointer.  */

      ssib_name = ggc_strdup (unicosmk_ssib_name ());
      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
      set_mem_alias_set (mem, alpha_sr_alias_set);

      FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
			   gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));

      /* Save the CIW index.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));

      emit_insn (gen_blockage ());

      /* Set the new frame pointer.  */

      FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
				  stack_pointer_rtx, GEN_INT (64))));
    }
  else
    {
      /* Increment the frame pointer register to indicate that we do not
	 have a frame.  */

      FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
				  hard_frame_pointer_rtx, const1_rtx)));
    }
}
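/* Sketch of the DSIB layout implied by the stores above (offsets from the
   decremented $sp; the remaining words are left for the environment):

       56: return address ($26)
       48: previous frame pointer ($15)
       32: pointer to this function's SSIB
       24: CIW index ($25)

   Afterwards $15 points just past the block, at $sp + 64.  */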
/* Output the static subroutine information block for the current
   function.  */

static void
unicosmk_output_ssib (FILE *file, const char *fnname)
{
  int len;
  int i;
  rtx x;
  rtx ciw;
  struct machine_function *machine = cfun->machine;

  in_section = NULL;
  fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
	   unicosmk_ssib_name ());

  /* Some required stuff and the function name length.  */

  len = strlen (fnname);
  fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);

  /* Saved registers
     ??? We don't do that yet.  */

  fputs ("\t.quad\t0\n", file);

  /* Function address.  */

  fputs ("\t.quad\t", file);
  assemble_name (file, fnname);
  putc ('\n', file);

  fputs ("\t.quad\t0\n", file);
  fputs ("\t.quad\t0\n", file);

  /* Function name.
     ??? We do it the same way Cray CC does it but this could be
     simplified.  */

  for (i = 0; i < len; i++)
    fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
  if ((len % 8) == 0)
    fputs ("\t.quad\t0\n", file);
  else
    fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);

  /* All call information words used in the function.  */

  for (x = machine->first_ciw; x; x = XEXP (x, 1))
    {
      ciw = XEXP (x, 0);
#if HOST_BITS_PER_WIDE_INT == 32
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
	       CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
#else
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
#endif
    }
}
/* Add a call information word (CIW) to the list of the current function's
   CIWs and return its index.

   X is a CONST_INT or CONST_DOUBLE representing the CIW.  */

rtx
unicosmk_add_call_info_word (rtx x)
{
  rtx node;
  struct machine_function *machine = cfun->machine;

  node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
  if (machine->first_ciw == NULL_RTX)
    machine->first_ciw = node;
  else
    XEXP (machine->last_ciw, 1) = node;

  machine->last_ciw = node;
  ++machine->ciw_count;

  return GEN_INT (machine->ciw_count
		  + strlen (current_function_name ())/8 + 5);
}
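/* Worked example (illustrative): the third CIW of a function named "foo"
   gets index 3 + strlen ("foo") / 8 + 5 = 3 + 0 + 5 = 8; the added terms
   appear to skip the five header quadwords and the packed name emitted
   ahead of the CIWs by unicosmk_output_ssib.  */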
/* The Cray assembler doesn't accept extern declarations for symbols which
   are defined in the same file.  We have to keep track of all global
   symbols which are referenced and/or defined in a source file and output
   extern declarations for those which are referenced but not defined at
   the end of file.  */

/* List of identifiers for which an extern declaration might have to be
   emitted.  */
/* FIXME: needs to use GC, so it can be saved and restored for PCH.  */

struct unicosmk_extern_list
{
  struct unicosmk_extern_list *next;
  const char *name;
};

static struct unicosmk_extern_list *unicosmk_extern_head = 0;

/* Output extern declarations which are required for every asm file.  */

static void
unicosmk_output_default_externs (FILE *file)
{
  static const char *const externs[] =
    { "__T3E_MISMATCH" };

  int i;
  int n;

  n = ARRAY_SIZE (externs);

  for (i = 0; i < n; i++)
    fprintf (file, "\t.extern\t%s\n", externs[i]);
}

/* Output extern declarations for global symbols which have been
   referenced but not defined.  */

static void
unicosmk_output_externs (FILE *file)
{
  struct unicosmk_extern_list *p;
  const char *real_name;
  int len;
  tree name_tree;

  len = strlen (user_label_prefix);
  for (p = unicosmk_extern_head; p != 0; p = p->next)
    {
      /* We have to strip the encoding and possibly remove user_label_prefix
	 from the identifier in order to handle -fleading-underscore and
	 explicit asm names correctly (cf. gcc.dg/asm-names-1.c).  */
      real_name = default_strip_name_encoding (p->name);
      if (len && p->name[0] == '*'
	  && !memcmp (real_name, user_label_prefix, len))
	real_name += len;

      name_tree = get_identifier (real_name);
      if (! TREE_ASM_WRITTEN (name_tree))
	{
	  TREE_ASM_WRITTEN (name_tree) = 1;
	  fputs ("\t.extern\t", file);
	  assemble_name (file, p->name);
	  putc ('\n', file);
	}
    }
}

/* Record an extern.  */

void
unicosmk_add_extern (const char *name)
{
  struct unicosmk_extern_list *p;

  p = (struct unicosmk_extern_list *)
       xmalloc (sizeof (struct unicosmk_extern_list));
  p->next = unicosmk_extern_head;
  p->name = name;
  unicosmk_extern_head = p;
}
/* The Cray assembler generates incorrect code if identifiers which
   conflict with register names are used as instruction operands.  We have
   to replace such identifiers with DEX expressions.  */

/* Structure to collect identifiers which have been replaced by DEX
   expressions.  */
/* FIXME: needs to use GC, so it can be saved and restored for PCH.  */

struct unicosmk_dex {
  struct unicosmk_dex *next;
  const char *name;
};

/* List of identifiers which have been replaced by DEX expressions.  The DEX
   number is determined by the position in the list.  */

static struct unicosmk_dex *unicosmk_dex_list = NULL;

/* The number of elements in the DEX list.  */

static int unicosmk_dex_count = 0;

/* Check if NAME must be replaced by a DEX expression.  */

static int
unicosmk_special_name (const char *name)
{
  if (name[0] == '*')
    ++name;

  if (name[0] == '$')
    ++name;

  if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
    return 0;

  switch (name[1])
    {
    case '1':  case '2':
      return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));

    case '3':
      return (name[2] == '\0'
	      || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));

    default:
      return (ISDIGIT (name[1]) && name[2] == '\0');
    }
}
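/* Examples (illustrative): "r5", "f31", "R17" and "$f9" are special
   because they collide with CAM register names; "r32", "f311" and
   "ratio" are not.  */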
/* Return the DEX number if X must be replaced by a DEX expression and 0
   otherwise.  */

static int
unicosmk_need_dex (rtx x)
{
  struct unicosmk_dex *dex;
  const char *name;
  int i;

  if (GET_CODE (x) != SYMBOL_REF)
    return 0;

  name = XSTR (x, 0);
  if (! unicosmk_special_name (name))
    return 0;

  i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      if (! strcmp (name, dex->name))
	return i;
      --i;
    }

  dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
  dex->name = name;
  dex->next = unicosmk_dex_list;
  unicosmk_dex_list = dex;

  ++unicosmk_dex_count;
  return unicosmk_dex_count;
}
/* Output the DEX definitions for this file.  */

static void
unicosmk_output_dex (FILE *file)
{
  struct unicosmk_dex *dex;
  int i;

  if (unicosmk_dex_list == NULL)
    return;

  fprintf (file, "\t.dexstart\n");

  i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      fprintf (file, "\tDEX (%d) = ", i);
      assemble_name (file, dex->name);
      putc ('\n', file);
      --i;
    }

  fprintf (file, "\t.dexend\n");
}
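/* Illustrative output, assuming "r5" was recorded first and "f31" second
   (the list is kept newest-first, so the numbers count down):

       .dexstart
       DEX (2) = f31
       DEX (1) = r5
       .dexend  */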
/* Output text to appear at the beginning of an assembler file.  */

static void
unicosmk_file_start (void)
{
  int i;

  fputs ("\t.ident\t", asm_out_file);
  unicosmk_output_module_name (asm_out_file);
  fputs ("\n\n", asm_out_file);

  /* The Unicos/Mk assembler uses different register names.  Instead of trying
     to support them, we simply use micro definitions.  */

  /* CAM has different register names: rN for the integer register N and fN
     for the floating-point register N.  Instead of trying to use these in
     alpha.md, we define the symbols $N and $fN to refer to the appropriate
     register.  */

  for (i = 0; i < 32; ++i)
    fprintf (asm_out_file, "$%d <- r%d\n", i, i);

  for (i = 0; i < 32; ++i)
    fprintf (asm_out_file, "$f%d <- f%d\n", i, i);

  putc ('\n', asm_out_file);

  /* The .align directive fills unused space with zeroes, which does not work
     in code sections.  We define the macro 'gcc@code@align' which uses nops
     instead.  Note that it assumes that code sections always have the
     biggest possible alignment since . refers to the current offset from
     the beginning of the section.  */

  fputs ("\t.macro gcc@code@align n\n", asm_out_file);
  fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
  fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
  fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
  fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
  fputs ("\tbis r31,r31,r31\n", asm_out_file);
  fputs ("\t.endr\n", asm_out_file);
  fputs ("\t.endif\n", asm_out_file);
  fputs ("\t.endm gcc@code@align\n\n", asm_out_file);

  /* Output extern declarations which should always be visible.  */
  unicosmk_output_default_externs (asm_out_file);

  /* Open a dummy section.  We always need to be inside a section for the
     section-switching code to work correctly.
     ??? This should be a module id or something like that.  I still have to
     figure out what the rules for those are.  */
  fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
}
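/* The emitted file prologue thus looks roughly like this (illustrative
   excerpt for a module named "$prog_c"):

       .ident	$prog_c

       $0 <- r0
       ...
       $f31 <- f31

       .macro gcc@code@align n
       ...
       .endm gcc@code@align
       .extern	__T3E_MISMATCH

       .psect	$SG00000,data  */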
/* Output text to appear at the end of an assembler file.  This includes all
   pending extern declarations and DEX expressions.  */

static void
unicosmk_file_end (void)
{
  fputs ("\t.endp\n\n", asm_out_file);

  /* Output all pending externs.  */

  unicosmk_output_externs (asm_out_file);

  /* Output dex definitions used for functions whose names conflict with
     register names.  */

  unicosmk_output_dex (asm_out_file);

  fputs ("\t.end\t", asm_out_file);
  unicosmk_output_module_name (asm_out_file);
  putc ('\n', asm_out_file);
}
#else

static void
unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
{}

static void
unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
{}

static void
unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
		      const char * fnname ATTRIBUTE_UNUSED)
{}

rtx
unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

static int
unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
{
  return 0;
}

#endif /* TARGET_ABI_UNICOSMK */
static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_UNICOSMK)
    {
      /* Prevent gcc from generating calls to __divsi3.  */
      set_optab_libfunc (sdiv_optab, SImode, 0);
      set_optab_libfunc (udiv_optab, SImode, 0);

      /* Use the functions provided by the system library
	 for DImode integer division.  */
      set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
      set_optab_libfunc (udiv_optab, DImode, "$uldiv");
    }
  else if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
	 remainder.  */
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
    }
}
/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
#endif

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p

#if TARGET_ABI_UNICOSMK
# undef TARGET_INSERT_ATTRIBUTES
# define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
# undef TARGET_ASM_UNIQUE_SECTION
# define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
# undef TARGET_ASM_FUNCTION_RODATA_SECTION
# define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
# undef TARGET_ASM_GLOBALIZE_LABEL
# define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
# undef TARGET_MUST_PASS_IN_STACK
# define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
#endif

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif

#ifdef OBJECT_FORMAT_ELF
#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
#endif

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#if TARGET_ABI_UNICOSMK
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START unicosmk_file_start
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END unicosmk_file_end
#else
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
#endif

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem

#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD alpha_secondary_reload

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

/* The Alpha architecture does not require sequential consistency.  See
   http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
   for an example of how it can be violated in practice.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION alpha_handle_option

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE alpha_mangle_type
#endif

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-alpha.h"